aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-mm-swap10
-rw-r--r--Documentation/core-api/kernel-api.rst14
-rw-r--r--Documentation/process/index.rst1
-rw-r--r--Documentation/process/kernel-enforcement-statement.rst147
-rw-r--r--MAINTAINERS6
-rw-r--r--Makefile8
-rw-r--r--arch/arm/Makefile2
-rw-r--r--arch/arm/boot/compressed/debug.S4
-rw-r--r--arch/arm/boot/dts/armada-38x.dtsi4
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_som1.dtsi4
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_xplained.dts16
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-zero-w.dts9
-rw-r--r--arch/arm/boot/dts/bcm2837-rpi-3-b.dts5
-rw-r--r--arch/arm/boot/dts/bcm283x.dtsi7
-rw-r--r--arch/arm/boot/dts/gemini.dtsi3
-rw-r--r--arch/arm/boot/dts/imx7d.dtsi8
-rw-r--r--arch/arm/boot/dts/moxart.dtsi3
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi1
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi16
-rw-r--r--arch/arm/kernel/debug.S8
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c4
-rw-r--r--arch/arm/mach-ux500/pm.c4
-rw-r--r--arch/arm/mm/nommu.c5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts9
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi6
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi6
-rw-r--r--arch/arm64/boot/dts/renesas/salvator-common.dtsi10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-firefly.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi4
-rw-r--r--arch/mips/include/asm/cmpxchg.h6
-rw-r--r--arch/mips/loongson32/common/platform.c38
-rw-r--r--arch/mips/math-emu/cp1emu.c2
-rw-r--r--arch/mips/net/ebpf_jit.c2
-rwxr-xr-xarch/mips/tools/generic-board-config.sh6
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c2
-rw-r--r--arch/parisc/kernel/syscall.S6
-rw-r--r--arch/parisc/kernel/time.c5
-rw-r--r--arch/powerpc/kernel/trace/ftrace_64_mprofile.S45
-rw-r--r--arch/powerpc/lib/sstep.c6
-rw-r--r--arch/powerpc/mm/numa.c1
-rw-r--r--arch/powerpc/perf/imc-pmu.c39
-rw-r--r--arch/s390/configs/zfcpdump_defconfig2
-rw-r--r--arch/s390/kernel/smp.c3
-rw-r--r--arch/x86/entry/entry_32.S4
-rw-r--r--arch/x86/events/intel/uncore.c12
-rw-r--r--arch/x86/hyperv/hv_init.c5
-rw-r--r--arch/x86/hyperv/mmu.c57
-rw-r--r--arch/x86/include/asm/alternative-asm.h4
-rw-r--r--arch/x86/include/asm/alternative.h6
-rw-r--r--arch/x86/include/asm/mce.h1
-rw-r--r--arch/x86/include/asm/mmu_context.h8
-rw-r--r--arch/x86/include/asm/mshyperv.h1
-rw-r--r--arch/x86/include/asm/tlbflush.h24
-rw-r--r--arch/x86/kernel/apic/apic.c15
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-internal.h7
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c2
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c27
-rw-r--r--arch/x86/kernel/kprobes/common.h13
-rw-r--r--arch/x86/kernel/kprobes/core.c2
-rw-r--r--arch/x86/kernel/reboot.c4
-rw-r--r--arch/x86/kernel/unwind_frame.c38
-rw-r--r--arch/x86/kvm/mmu.c15
-rw-r--r--arch/x86/kvm/paging_tmpl.h3
-rw-r--r--arch/x86/kvm/vmx.c2
-rw-r--r--arch/x86/mm/Makefile11
-rw-r--r--arch/x86/mm/tlb.c153
-rw-r--r--arch/x86/xen/enlighten.c4
-rw-r--r--block/bio.c26
-rw-r--r--crypto/asymmetric_keys/asymmetric_type.c4
-rw-r--r--crypto/asymmetric_keys/pkcs7_parser.c3
-rw-r--r--crypto/shash.c10
-rw-r--r--crypto/skcipher.c17
-rw-r--r--crypto/xts.c6
-rw-r--r--drivers/acpi/property.c29
-rw-r--r--drivers/android/binder.c93
-rw-r--r--drivers/base/node.c12
-rw-r--r--drivers/base/property.c19
-rw-r--r--drivers/block/nbd.c2
-rw-r--r--drivers/block/skd_main.c2
-rw-r--r--drivers/bus/mvebu-mbus.c2
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c4
-rw-r--r--drivers/crypto/stm32/stm32-hash.c15
-rw-r--r--drivers/dma-buf/sync_file.c17
-rw-r--r--drivers/dma/altera-msgdma.c41
-rw-r--r--drivers/dma/edma.c19
-rw-r--r--drivers/dma/ti-dma-crossbar.c3
-rw-r--r--drivers/gpio/Kconfig3
-rw-r--r--drivers/gpio/gpio-omap.c24
-rw-r--r--drivers/gpio/gpiolib-acpi.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c23
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c22
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c63
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c2
-rw-r--r--drivers/gpu/drm/i915/intel_color.c16
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c7
-rw-r--r--drivers/gpu/drm/i915/intel_display.c14
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c4
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c2
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c9
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c9
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c15
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c24
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c3
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c2
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c8
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c29
-rw-r--r--drivers/gpu/ipu-v3/ipu-prg.c7
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-elecom.c13
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hid/usbhid/hid-core.c12
-rw-r--r--drivers/hv/channel.c6
-rw-r--r--drivers/hv/channel_mgmt.c37
-rw-r--r--drivers/hv/vmbus_drv.c3
-rw-r--r--drivers/i2c/busses/i2c-imx.c4
-rw-r--r--drivers/i2c/busses/i2c-ismt.c5
-rw-r--r--drivers/i2c/busses/i2c-omap.c14
-rw-r--r--drivers/i2c/busses/i2c-piix4.c162
-rw-r--r--drivers/input/input.c84
-rw-r--r--drivers/input/joydev.c70
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c29
-rw-r--r--drivers/input/misc/axp20x-pek.c2
-rw-r--r--drivers/input/misc/ims-pcu.c16
-rw-r--r--drivers/input/mouse/synaptics.c3
-rw-r--r--drivers/input/touchscreen/goodix.c67
-rw-r--r--drivers/input/touchscreen/stmfts.c6
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c2
-rw-r--r--drivers/iommu/amd_iommu.c11
-rw-r--r--drivers/iommu/exynos-iommu.c2
-rw-r--r--drivers/media/cec/cec-adap.c13
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c25
-rw-r--r--drivers/media/dvb-frontends/dib3000mc.c50
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c22
-rw-r--r--drivers/media/platform/Kconfig2
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-vfe.c2
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c1
-rw-r--r--drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c3
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c11
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.h2
-rw-r--r--drivers/media/tuners/mt2060.c59
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/pci-me.c23
-rw-r--r--drivers/misc/mei/pci-txe.c30
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c2
-rw-r--r--drivers/net/can/flexcan.c91
-rw-r--r--drivers/net/can/usb/esd_usb2.c2
-rw-r--r--drivers/net/can/usb/gs_usb.c10
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c157
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c39
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c21
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c89
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c69
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h18
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c99
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c11
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h31
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c20
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c8
-rw-r--r--drivers/net/ethernet/realtek/r8169.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c17
-rw-r--r--drivers/net/geneve.c6
-rw-r--r--drivers/net/macsec.c2
-rw-r--r--drivers/net/tun.c3
-rw-r--r--drivers/net/wimax/i2400m/fw.c2
-rw-r--r--drivers/net/xen-netback/interface.c2
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/of/base.c8
-rw-r--r--drivers/of/of_mdio.c39
-rw-r--r--drivers/of/of_reserved_mem.c2
-rw-r--r--drivers/of/property.c2
-rw-r--r--drivers/pci/host/pci-aardvark.c2
-rw-r--r--drivers/pci/host/pci-tegra.c22
-rw-r--r--drivers/pinctrl/Kconfig1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c14
-rw-r--r--drivers/ras/cec.c2
-rw-r--r--drivers/remoteproc/Kconfig2
-rw-r--r--drivers/remoteproc/imx_rproc.c9
-rw-r--r--drivers/reset/reset-socfpga.c17
-rw-r--r--drivers/rpmsg/qcom_glink_native.c14
-rw-r--r--drivers/scsi/libfc/fc_rport.c2
-rw-r--r--drivers/scsi/libiscsi.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c3
-rw-r--r--drivers/scsi/scsi_sysfs.c10
-rw-r--r--drivers/scsi/scsi_transport_fc.c3
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c4
-rw-r--r--drivers/tty/tty_ldisc.c11
-rw-r--r--drivers/usb/gadget/composite.c5
-rw-r--r--drivers/usb/gadget/configfs.c15
-rw-r--r--drivers/usb/gadget/configfs.h11
-rw-r--r--drivers/usb/gadget/function/f_rndis.c12
-rw-r--r--drivers/usb/gadget/function/u_rndis.h1
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c9
-rw-r--r--drivers/usb/misc/usbtest.c10
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c17
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c2
-rw-r--r--drivers/usb/serial/console.c3
-rw-r--r--drivers/usb/serial/cp210x.c13
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/serial/qcserial.c4
-rw-r--r--fs/9p/vfs_addr.c10
-rw-r--r--fs/binfmt_misc.c2
-rw-r--r--fs/block_dev.c6
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/crypto/keyinfo.c5
-rw-r--r--fs/direct-io.c42
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h24
-rw-r--r--fs/ecryptfs/keystore.c9
-rw-r--r--fs/exec.c1
-rw-r--r--fs/ext4/super.c4
-rw-r--r--fs/f2fs/f2fs.h2
-rw-r--r--fs/f2fs/segment.c6
-rw-r--r--fs/f2fs/super.c2
-rw-r--r--fs/fscache/object-list.c7
-rw-r--r--fs/fuse/inode.c2
-rw-r--r--fs/iomap.c41
-rw-r--r--fs/mpage.c14
-rw-r--r--fs/namespace.c3
-rw-r--r--fs/nfsd/nfs4proc.c9
-rw-r--r--fs/quota/dquot.c27
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c8
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c15
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h1
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c4
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h27
-rw-r--r--fs/xfs/xfs_acl.c22
-rw-r--r--fs/xfs/xfs_aops.c47
-rw-r--r--fs/xfs/xfs_attr_inactive.c2
-rw-r--r--fs/xfs/xfs_bmap_util.c2
-rw-r--r--fs/xfs/xfs_bmap_util.h13
-rw-r--r--fs/xfs/xfs_file.c4
-rw-r--r--fs/xfs/xfs_fsmap.c58
-rw-r--r--fs/xfs/xfs_inode_item.c79
-rw-r--r--fs/xfs/xfs_log.c2
-rw-r--r--fs/xfs/xfs_mount.c2
-rw-r--r--fs/xfs/xfs_ondisk.h2
-rw-r--r--fs/xfs/xfs_super.c2
-rw-r--r--include/linux/buffer_head.h1
-rw-r--r--include/linux/filter.h2
-rw-r--r--include/linux/hyperv.h2
-rw-r--r--include/linux/input.h7
-rw-r--r--include/linux/kernel.h90
-rw-r--r--include/linux/key.h47
-rw-r--r--include/linux/mbus.h4
-rw-r--r--include/linux/mm_types.h3
-rw-r--r--include/linux/mod_devicetable.h3
-rw-r--r--include/linux/netdevice.h3
-rw-r--r--include/linux/of.h10
-rw-r--r--include/linux/rculist.h2
-rw-r--r--include/linux/rcupdate.h22
-rw-r--r--include/linux/sched/mm.h16
-rw-r--r--include/linux/sched/topology.h8
-rw-r--r--include/linux/srcu.h1
-rw-r--r--include/linux/thread_info.h2
-rw-r--r--include/net/inet_sock.h2
-rw-r--r--include/net/tcp.h5
-rw-r--r--include/sound/control.h3
-rw-r--r--include/sound/seq_virmidi.h1
-rw-r--r--include/uapi/linux/membarrier.h23
-rw-r--r--kernel/bpf/arraymap.c2
-rw-r--r--kernel/bpf/devmap.c10
-rw-r--r--kernel/bpf/hashtab.c4
-rw-r--r--kernel/bpf/sockmap.c28
-rw-r--r--kernel/bpf/verifier.c82
-rw-r--r--kernel/events/core.c10
-rw-r--r--kernel/exit.c6
-rw-r--r--kernel/fork.c4
-rw-r--r--kernel/irq/chip.c2
-rw-r--r--kernel/irq/cpuhotplug.c28
-rw-r--r--kernel/irq/manage.c17
-rw-r--r--kernel/livepatch/core.c60
-rw-r--r--kernel/locking/lockdep.c48
-rw-r--r--kernel/rcu/srcutree.c2
-rw-r--r--kernel/rcu/sync.c9
-rw-r--r--kernel/rcu/tree.c18
-rw-r--r--kernel/sched/fair.c140
-rw-r--r--kernel/sched/features.h3
-rw-r--r--kernel/sched/membarrier.c34
-rw-r--r--kernel/seccomp.c2
-rw-r--r--lib/Kconfig.debug147
-rw-r--r--lib/digsig.c6
-rw-r--r--lib/locking-selftest.c2
-rw-r--r--lib/ts_fsm.c2
-rw-r--r--lib/ts_kmp.c2
-rw-r--r--mm/cma.c2
-rw-r--r--mm/madvise.c7
-rw-r--r--mm/memcontrol.c15
-rw-r--r--mm/mempolicy.c7
-rw-r--r--mm/migrate.c3
-rw-r--r--mm/page_vma_mapped.c28
-rw-r--r--mm/percpu.c15
-rw-r--r--mm/swap_state.c41
-rw-r--r--mm/vmalloc.c6
-rw-r--r--net/bridge/br_netlink.c2
-rw-r--r--net/can/af_can.c20
-rw-r--r--net/can/bcm.c5
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/dev_ioctl.c13
-rw-r--r--net/core/ethtool.c5
-rw-r--r--net/core/filter.c32
-rw-r--r--net/core/rtnetlink.c13
-rw-r--r--net/core/skbuff.c6
-rw-r--r--net/core/sock.c8
-rw-r--r--net/core/sock_reuseport.c12
-rw-r--r--net/dccp/ipv4.c13
-rw-r--r--net/dns_resolver/dns_key.c2
-rw-r--r--net/ipv4/Kconfig8
-rw-r--r--net/ipv4/cipso_ipv4.c24
-rw-r--r--net/ipv4/inet_connection_sock.c9
-rw-r--r--net/ipv4/inet_hashtables.c5
-rw-r--r--net/ipv4/syncookies.c2
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/tcp_ipv4.c22
-rw-r--r--net/ipv4/udp.c9
-rw-r--r--net/ipv6/ip6_flowlabel.c1
-rw-r--r--net/ipv6/ip6_output.c4
-rw-r--r--net/l2tp/l2tp_ppp.c3
-rw-r--r--net/mac80211/key.c21
-rw-r--r--net/ncsi/internal.h1
-rw-r--r--net/ncsi/ncsi-aen.c2
-rw-r--r--net/ncsi/ncsi-manage.c52
-rw-r--r--net/ncsi/ncsi-rsp.c2
-rw-r--r--net/netlink/af_netlink.c8
-rw-r--r--net/packet/af_packet.c24
-rw-r--r--net/rxrpc/af_rxrpc.c5
-rw-r--r--net/sctp/input.c2
-rw-r--r--net/sctp/socket.c4
-rw-r--r--net/vmw_vsock/hyperv_transport.c21
-rw-r--r--samples/sockmap/sockmap_kern.c2
-rw-r--r--samples/trace_events/trace-events-sample.c14
-rwxr-xr-xscripts/faddr2line5
-rw-r--r--scripts/kallsyms.c2
-rw-r--r--scripts/mod/devicetable-offsets.c1
-rw-r--r--scripts/mod/file2alias.c6
-rw-r--r--security/commoncap.c3
-rw-r--r--security/keys/Kconfig1
-rw-r--r--security/keys/big_key.c4
-rw-r--r--security/keys/encrypted-keys/encrypted.c9
-rw-r--r--security/keys/gc.c8
-rw-r--r--security/keys/key.c41
-rw-r--r--security/keys/keyctl.c9
-rw-r--r--security/keys/keyring.c14
-rw-r--r--security/keys/permission.c7
-rw-r--r--security/keys/proc.c31
-rw-r--r--security/keys/process_keys.c2
-rw-r--r--security/keys/request_key.c7
-rw-r--r--security/keys/request_key_auth.c2
-rw-r--r--security/keys/trusted.c2
-rw-r--r--security/keys/user_defined.c4
-rw-r--r--sound/core/seq/seq_clientmgr.c6
-rw-r--r--sound/core/seq/seq_lock.c4
-rw-r--r--sound/core/seq/seq_lock.h12
-rw-r--r--sound/core/seq/seq_ports.c7
-rw-r--r--sound/core/seq/seq_virmidi.c27
-rw-r--r--sound/core/vmaster.c31
-rw-r--r--sound/hda/hdac_controller.c5
-rw-r--r--sound/pci/hda/hda_codec.c97
-rw-r--r--sound/usb/caiaq/device.c12
-rw-r--r--sound/usb/line6/driver.c7
-rw-r--r--sound/usb/line6/podhd.c8
-rw-r--r--sound/usb/mixer.c12
-rw-r--r--sound/usb/mixer.h2
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--tools/include/uapi/linux/bpf.h5
-rw-r--r--tools/perf/builtin-script.c4
-rw-r--r--tools/perf/util/callchain.c6
-rw-r--r--tools/perf/util/parse-events.c9
-rw-r--r--tools/perf/util/pmu.c56
-rw-r--r--tools/perf/util/pmu.h1
-rw-r--r--tools/power/x86/turbostat/turbostat.c10
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h2
-rw-r--r--tools/testing/selftests/bpf/sockmap_verdict_prog.c4
-rw-r--r--tools/testing/selftests/bpf/test_maps.c12
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c510
-rw-r--r--tools/testing/selftests/mqueue/Makefile4
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c25
-rw-r--r--tools/testing/selftests/x86/Makefile2
415 files changed, 4545 insertions, 2001 deletions
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-swap b/Documentation/ABI/testing/sysfs-kernel-mm-swap
index 587db52084c7..94672016c268 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-swap
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-swap
@@ -14,13 +14,3 @@ Description: Enable/disable VMA based swap readahead.
14 still used for tmpfs etc. other users. If set to 14 still used for tmpfs etc. other users. If set to
15 false, the global swap readahead algorithm will be 15 false, the global swap readahead algorithm will be
16 used for all swappable pages. 16 used for all swappable pages.
17
18What: /sys/kernel/mm/swap/vma_ra_max_order
19Date: August 2017
20Contact: Linux memory management mailing list <linux-mm@kvack.org>
21Description: The max readahead size in order for VMA based swap readahead
22
23 VMA based swap readahead algorithm will readahead at
24 most 1 << max_order pages for each readahead. The
25 real readahead size for each readahead will be scaled
26 according to the estimation algorithm.
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 8282099e0cbf..5da10184d908 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -352,44 +352,30 @@ Read-Copy Update (RCU)
352---------------------- 352----------------------
353 353
354.. kernel-doc:: include/linux/rcupdate.h 354.. kernel-doc:: include/linux/rcupdate.h
355 :external:
356 355
357.. kernel-doc:: include/linux/rcupdate_wait.h 356.. kernel-doc:: include/linux/rcupdate_wait.h
358 :external:
359 357
360.. kernel-doc:: include/linux/rcutree.h 358.. kernel-doc:: include/linux/rcutree.h
361 :external:
362 359
363.. kernel-doc:: kernel/rcu/tree.c 360.. kernel-doc:: kernel/rcu/tree.c
364 :external:
365 361
366.. kernel-doc:: kernel/rcu/tree_plugin.h 362.. kernel-doc:: kernel/rcu/tree_plugin.h
367 :external:
368 363
369.. kernel-doc:: kernel/rcu/tree_exp.h 364.. kernel-doc:: kernel/rcu/tree_exp.h
370 :external:
371 365
372.. kernel-doc:: kernel/rcu/update.c 366.. kernel-doc:: kernel/rcu/update.c
373 :external:
374 367
375.. kernel-doc:: include/linux/srcu.h 368.. kernel-doc:: include/linux/srcu.h
376 :external:
377 369
378.. kernel-doc:: kernel/rcu/srcutree.c 370.. kernel-doc:: kernel/rcu/srcutree.c
379 :external:
380 371
381.. kernel-doc:: include/linux/rculist_bl.h 372.. kernel-doc:: include/linux/rculist_bl.h
382 :external:
383 373
384.. kernel-doc:: include/linux/rculist.h 374.. kernel-doc:: include/linux/rculist.h
385 :external:
386 375
387.. kernel-doc:: include/linux/rculist_nulls.h 376.. kernel-doc:: include/linux/rculist_nulls.h
388 :external:
389 377
390.. kernel-doc:: include/linux/rcu_sync.h 378.. kernel-doc:: include/linux/rcu_sync.h
391 :external:
392 379
393.. kernel-doc:: kernel/rcu/sync.c 380.. kernel-doc:: kernel/rcu/sync.c
394 :external:
395 381
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 82fc399fcd33..61e43cc3ed17 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -25,6 +25,7 @@ Below are the essential guides that every developer should read.
25 submitting-patches 25 submitting-patches
26 coding-style 26 coding-style
27 email-clients 27 email-clients
28 kernel-enforcement-statement
28 29
29Other guides to the community that are of interest to most developers are: 30Other guides to the community that are of interest to most developers are:
30 31
diff --git a/Documentation/process/kernel-enforcement-statement.rst b/Documentation/process/kernel-enforcement-statement.rst
new file mode 100644
index 000000000000..1e23d4227337
--- /dev/null
+++ b/Documentation/process/kernel-enforcement-statement.rst
@@ -0,0 +1,147 @@
1Linux Kernel Enforcement Statement
2----------------------------------
3
4As developers of the Linux kernel, we have a keen interest in how our software
5is used and how the license for our software is enforced. Compliance with the
6reciprocal sharing obligations of GPL-2.0 is critical to the long-term
7sustainability of our software and community.
8
9Although there is a right to enforce the separate copyright interests in the
10contributions made to our community, we share an interest in ensuring that
11individual enforcement actions are conducted in a manner that benefits our
12community and do not have an unintended negative impact on the health and
13growth of our software ecosystem. In order to deter unhelpful enforcement
14actions, we agree that it is in the best interests of our development
15community to undertake the following commitment to users of the Linux kernel
16on behalf of ourselves and any successors to our copyright interests:
17
18 Notwithstanding the termination provisions of the GPL-2.0, we agree that
19 it is in the best interests of our development community to adopt the
20 following provisions of GPL-3.0 as additional permissions under our
21 license with respect to any non-defensive assertion of rights under the
22 license.
23
24 However, if you cease all violation of this License, then your license
25 from a particular copyright holder is reinstated (a) provisionally,
26 unless and until the copyright holder explicitly and finally
27 terminates your license, and (b) permanently, if the copyright holder
28 fails to notify you of the violation by some reasonable means prior to
29 60 days after the cessation.
30
31 Moreover, your license from a particular copyright holder is
32 reinstated permanently if the copyright holder notifies you of the
33 violation by some reasonable means, this is the first time you have
34 received notice of violation of this License (for any work) from that
35 copyright holder, and you cure the violation prior to 30 days after
36 your receipt of the notice.
37
38Our intent in providing these assurances is to encourage more use of the
39software. We want companies and individuals to use, modify and distribute
40this software. We want to work with users in an open and transparent way to
41eliminate any uncertainty about our expectations regarding compliance or
42enforcement that might limit adoption of our software. We view legal action
43as a last resort, to be initiated only when other community efforts have
44failed to resolve the problem.
45
46Finally, once a non-compliance issue is resolved, we hope the user will feel
47welcome to join us in our efforts on this project. Working together, we will
48be stronger.
49
50Except where noted below, we speak only for ourselves, and not for any company
51we might work for today, have in the past, or will in the future.
52
53 - Bjorn Andersson (Linaro)
54 - Andrea Arcangeli (Red Hat)
55 - Neil Armstrong
56 - Jens Axboe
57 - Pablo Neira Ayuso
58 - Khalid Aziz
59 - Ralf Baechle
60 - Felipe Balbi
61 - Arnd Bergmann
62 - Ard Biesheuvel
63 - Paolo Bonzini (Red Hat)
64 - Christian Borntraeger
65 - Mark Brown (Linaro)
66 - Paul Burton
67 - Javier Martinez Canillas
68 - Rob Clark
69 - Jonathan Corbet
70 - Vivien Didelot (Savoir-faire Linux)
71 - Hans de Goede (Red Hat)
72 - Mel Gorman (SUSE)
73 - Sven Eckelmann
74 - Alex Elder (Linaro)
75 - Fabio Estevam
76 - Larry Finger
77 - Bhumika Goyal
78 - Andy Gross
79 - Juergen Gross
80 - Shawn Guo
81 - Ulf Hansson
82 - Tejun Heo
83 - Rob Herring
84 - Masami Hiramatsu
85 - Michal Hocko
86 - Simon Horman
87 - Johan Hovold (Hovold Consulting AB)
88 - Christophe JAILLET
89 - Olof Johansson
90 - Lee Jones (Linaro)
91 - Heiner Kallweit
92 - Srinivas Kandagatla
93 - Jan Kara
94 - Shuah Khan (Samsung)
95 - David Kershner
96 - Jaegeuk Kim
97 - Namhyung Kim
98 - Colin Ian King
99 - Jeff Kirsher
100 - Greg Kroah-Hartman (Linux Foundation)
101 - Christian König
102 - Vinod Koul
103 - Krzysztof Kozlowski
104 - Viresh Kumar
105 - Aneesh Kumar K.V
106 - Julia Lawall
107 - Doug Ledford (Red Hat)
108 - Chuck Lever (Oracle)
109 - Daniel Lezcano
110 - Shaohua Li
111 - Xin Long (Red Hat)
112 - Tony Luck
113 - Mike Marshall
114 - Chris Mason
115 - Paul E. McKenney
116 - David S. Miller
117 - Ingo Molnar
118 - Kuninori Morimoto
119 - Borislav Petkov
120 - Jiri Pirko
121 - Josh Poimboeuf
122 - Sebastian Reichel (Collabora)
123 - Guenter Roeck
124 - Joerg Roedel
125 - Leon Romanovsky
126 - Steven Rostedt (VMware)
127 - Ivan Safonov
128 - Ivan Safonov
129 - Anna Schumaker
130 - Jes Sorensen
131 - K.Y. Srinivasan
132 - Heiko Stuebner
133 - Jiri Kosina (SUSE)
134 - Dmitry Torokhov
135 - Linus Torvalds
136 - Thierry Reding
137 - Rik van Riel
138 - Geert Uytterhoeven (Glider bvba)
139 - Daniel Vetter
140 - Linus Walleij
141 - Richard Weinberger
142 - Dan Williams
143 - Rafael J. Wysocki
144 - Arvind Yadav
145 - Masahiro Yamada
146 - Wei Yongjun
147 - Lv Zheng
diff --git a/MAINTAINERS b/MAINTAINERS
index f218fe1e43fe..e3a7ca9d2783 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5351,9 +5351,7 @@ M: "J. Bruce Fields" <bfields@fieldses.org>
5351L: linux-fsdevel@vger.kernel.org 5351L: linux-fsdevel@vger.kernel.org
5352S: Maintained 5352S: Maintained
5353F: include/linux/fcntl.h 5353F: include/linux/fcntl.h
5354F: include/linux/fs.h
5355F: include/uapi/linux/fcntl.h 5354F: include/uapi/linux/fcntl.h
5356F: include/uapi/linux/fs.h
5357F: fs/fcntl.c 5355F: fs/fcntl.c
5358F: fs/locks.c 5356F: fs/locks.c
5359 5357
@@ -5362,6 +5360,8 @@ M: Alexander Viro <viro@zeniv.linux.org.uk>
5362L: linux-fsdevel@vger.kernel.org 5360L: linux-fsdevel@vger.kernel.org
5363S: Maintained 5361S: Maintained
5364F: fs/* 5362F: fs/*
5363F: include/linux/fs.h
5364F: include/uapi/linux/fs.h
5365 5365
5366FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER 5366FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
5367M: Riku Voipio <riku.voipio@iki.fi> 5367M: Riku Voipio <riku.voipio@iki.fi>
@@ -7576,7 +7576,7 @@ F: arch/mips/include/asm/kvm*
7576F: arch/mips/kvm/ 7576F: arch/mips/kvm/
7577 7577
7578KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc) 7578KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc)
7579M: Alexander Graf <agraf@suse.com> 7579M: Paul Mackerras <paulus@ozlabs.org>
7580L: kvm-ppc@vger.kernel.org 7580L: kvm-ppc@vger.kernel.org
7581W: http://www.linux-kvm.org/ 7581W: http://www.linux-kvm.org/
7582T: git git://github.com/agraf/linux-2.6.git 7582T: git git://github.com/agraf/linux-2.6.git
diff --git a/Makefile b/Makefile
index 2835863bdd5a..46bfb0ed2257 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 14 2PATCHLEVEL = 14
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc4 4EXTRAVERSION = -rc5
5NAME = Fearless Coyote 5NAME = Fearless Coyote
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -933,7 +933,11 @@ ifdef CONFIG_STACK_VALIDATION
933 ifeq ($(has_libelf),1) 933 ifeq ($(has_libelf),1)
934 objtool_target := tools/objtool FORCE 934 objtool_target := tools/objtool FORCE
935 else 935 else
936 $(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev, libelf-devel or elfutils-libelf-devel") 936 ifdef CONFIG_ORC_UNWINDER
937 $(error "Cannot generate ORC metadata for CONFIG_ORC_UNWINDER=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
938 else
939 $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
940 endif
937 SKIP_STACK_VALIDATION := 1 941 SKIP_STACK_VALIDATION := 1
938 export SKIP_STACK_VALIDATION 942 export SKIP_STACK_VALIDATION
939 endif 943 endif
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 47d3a1ab08d2..817e5cfef83a 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -131,7 +131,7 @@ endif
131KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm 131KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
132KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float 132KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
133 133
134CHECKFLAGS += -D__arm__ 134CHECKFLAGS += -D__arm__ -m32
135 135
136#Default value 136#Default value
137head-y := arch/arm/kernel/head$(MMUEXT).o 137head-y := arch/arm/kernel/head$(MMUEXT).o
diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S
index 5392ee63338f..8f6e37177de1 100644
--- a/arch/arm/boot/compressed/debug.S
+++ b/arch/arm/boot/compressed/debug.S
@@ -23,7 +23,11 @@ ENTRY(putc)
23 strb r0, [r1] 23 strb r0, [r1]
24 mov r0, #0x03 @ SYS_WRITEC 24 mov r0, #0x03 @ SYS_WRITEC
25 ARM( svc #0x123456 ) 25 ARM( svc #0x123456 )
26#ifdef CONFIG_CPU_V7M
27 THUMB( bkpt #0xab )
28#else
26 THUMB( svc #0xab ) 29 THUMB( svc #0xab )
30#endif
27 mov pc, lr 31 mov pc, lr
28 .align 2 32 .align 2
291: .word _GLOBAL_OFFSET_TABLE_ - . 331: .word _GLOBAL_OFFSET_TABLE_ - .
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 7ff0811e61db..4960722aab32 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -178,7 +178,7 @@
178 }; 178 };
179 179
180 i2c0: i2c@11000 { 180 i2c0: i2c@11000 {
181 compatible = "marvell,mv64xxx-i2c"; 181 compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c";
182 reg = <0x11000 0x20>; 182 reg = <0x11000 0x20>;
183 #address-cells = <1>; 183 #address-cells = <1>;
184 #size-cells = <0>; 184 #size-cells = <0>;
@@ -189,7 +189,7 @@
189 }; 189 };
190 190
191 i2c1: i2c@11100 { 191 i2c1: i2c@11100 {
192 compatible = "marvell,mv64xxx-i2c"; 192 compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c";
193 reg = <0x11100 0x20>; 193 reg = <0x11100 0x20>;
194 #address-cells = <1>; 194 #address-cells = <1>;
195 #size-cells = <0>; 195 #size-cells = <0>;
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
index 63a5af898165..cf0087b4c9e1 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
@@ -67,8 +67,8 @@
67 pinctrl-0 = <&pinctrl_macb0_default>; 67 pinctrl-0 = <&pinctrl_macb0_default>;
68 phy-mode = "rmii"; 68 phy-mode = "rmii";
69 69
70 ethernet-phy@1 { 70 ethernet-phy@0 {
71 reg = <0x1>; 71 reg = <0x0>;
72 interrupt-parent = <&pioA>; 72 interrupt-parent = <&pioA>;
73 interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>; 73 interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
74 pinctrl-names = "default"; 74 pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index c7e9ccf2bc87..cbc26001247b 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -309,7 +309,7 @@
309 vddana-supply = <&vdd_3v3_lp_reg>; 309 vddana-supply = <&vdd_3v3_lp_reg>;
310 vref-supply = <&vdd_3v3_lp_reg>; 310 vref-supply = <&vdd_3v3_lp_reg>;
311 pinctrl-names = "default"; 311 pinctrl-names = "default";
312 pinctrl-0 = <&pinctrl_adc_default>; 312 pinctrl-0 = <&pinctrl_adc_default &pinctrl_adtrg_default>;
313 status = "okay"; 313 status = "okay";
314 }; 314 };
315 315
@@ -340,6 +340,20 @@
340 bias-disable; 340 bias-disable;
341 }; 341 };
342 342
343 /*
344 * The ADTRG pin can work on any edge type.
345 * In here it's being pulled up, so need to
346 * connect it to ground to get an edge e.g.
347 * Trigger can be configured on falling, rise
348 * or any edge, and the pull-up can be changed
349 * to pull-down or left floating according to
350 * needs.
351 */
352 pinctrl_adtrg_default: adtrg_default {
353 pinmux = <PIN_PD31__ADTRG>;
354 bias-pull-up;
355 };
356
343 pinctrl_charger_chglev: charger_chglev { 357 pinctrl_charger_chglev: charger_chglev {
344 pinmux = <PIN_PA12__GPIO>; 358 pinmux = <PIN_PA12__GPIO>;
345 bias-disable; 359 bias-disable;
diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
index 82651c3eb682..b8565fc33eea 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
@@ -18,12 +18,9 @@
18 compatible = "raspberrypi,model-zero-w", "brcm,bcm2835"; 18 compatible = "raspberrypi,model-zero-w", "brcm,bcm2835";
19 model = "Raspberry Pi Zero W"; 19 model = "Raspberry Pi Zero W";
20 20
21 /* Needed by firmware to properly init UARTs */ 21 chosen {
22 aliases { 22 /* 8250 auxiliary UART instead of pl011 */
23 uart0 = "/soc/serial@7e201000"; 23 stdout-path = "serial1:115200n8";
24 uart1 = "/soc/serial@7e215040";
25 serial0 = "/soc/serial@7e201000";
26 serial1 = "/soc/serial@7e215040";
27 }; 24 };
28 25
29 leds { 26 leds {
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
index 20725ca487f3..c71a0d73d2a2 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
@@ -8,6 +8,11 @@
8 compatible = "raspberrypi,3-model-b", "brcm,bcm2837"; 8 compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
9 model = "Raspberry Pi 3 Model B"; 9 model = "Raspberry Pi 3 Model B";
10 10
11 chosen {
12 /* 8250 auxiliary UART instead of pl011 */
13 stdout-path = "serial1:115200n8";
14 };
15
11 memory { 16 memory {
12 reg = <0 0x40000000>; 17 reg = <0 0x40000000>;
13 }; 18 };
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 431dcfc900c0..013431e3d7c3 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -20,8 +20,13 @@
20 #address-cells = <1>; 20 #address-cells = <1>;
21 #size-cells = <1>; 21 #size-cells = <1>;
22 22
23 aliases {
24 serial0 = &uart0;
25 serial1 = &uart1;
26 };
27
23 chosen { 28 chosen {
24 bootargs = "earlyprintk console=ttyAMA0"; 29 stdout-path = "serial0:115200n8";
25 }; 30 };
26 31
27 thermal-zones { 32 thermal-zones {
diff --git a/arch/arm/boot/dts/gemini.dtsi b/arch/arm/boot/dts/gemini.dtsi
index c68e8d430234..f0d178c77153 100644
--- a/arch/arm/boot/dts/gemini.dtsi
+++ b/arch/arm/boot/dts/gemini.dtsi
@@ -145,11 +145,12 @@
145 }; 145 };
146 146
147 watchdog@41000000 { 147 watchdog@41000000 {
148 compatible = "cortina,gemini-watchdog"; 148 compatible = "cortina,gemini-watchdog", "faraday,ftwdt010";
149 reg = <0x41000000 0x1000>; 149 reg = <0x41000000 0x1000>;
150 interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; 150 interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
151 resets = <&syscon GEMINI_RESET_WDOG>; 151 resets = <&syscon GEMINI_RESET_WDOG>;
152 clocks = <&syscon GEMINI_CLK_APB>; 152 clocks = <&syscon GEMINI_CLK_APB>;
153 clock-names = "PCLK";
153 }; 154 };
154 155
155 uart0: serial@42000000 { 156 uart0: serial@42000000 {
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index f46814a7ea44..4d308d17f040 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -144,10 +144,10 @@
144 interrupt-names = "msi"; 144 interrupt-names = "msi";
145 #interrupt-cells = <1>; 145 #interrupt-cells = <1>;
146 interrupt-map-mask = <0 0 0 0x7>; 146 interrupt-map-mask = <0 0 0 0x7>;
147 interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>, 147 interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
148 <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, 148 <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
149 <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, 149 <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
150 <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>; 150 <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
151 clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>, 151 clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
152 <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>, 152 <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
153 <&clks IMX7D_PCIE_PHY_ROOT_CLK>; 153 <&clks IMX7D_PCIE_PHY_ROOT_CLK>;
diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi
index 1f4c795d3f72..da7b3237bfe9 100644
--- a/arch/arm/boot/dts/moxart.dtsi
+++ b/arch/arm/boot/dts/moxart.dtsi
@@ -87,9 +87,10 @@
87 }; 87 };
88 88
89 watchdog: watchdog@98500000 { 89 watchdog: watchdog@98500000 {
90 compatible = "moxa,moxart-watchdog"; 90 compatible = "moxa,moxart-watchdog", "faraday,ftwdt010";
91 reg = <0x98500000 0x10>; 91 reg = <0x98500000 0x10>;
92 clocks = <&clk_apb>; 92 clocks = <&clk_apb>;
93 clock-names = "PCLK";
93 }; 94 };
94 95
95 sdhci: sdhci@98e00000 { 96 sdhci: sdhci@98e00000 {
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 38d2216c7ead..b1a26b42d190 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -1430,6 +1430,7 @@
1430 atmel,min-sample-rate-hz = <200000>; 1430 atmel,min-sample-rate-hz = <200000>;
1431 atmel,max-sample-rate-hz = <20000000>; 1431 atmel,max-sample-rate-hz = <20000000>;
1432 atmel,startup-time-ms = <4>; 1432 atmel,startup-time-ms = <4>;
1433 atmel,trigger-edge-type = <IRQ_TYPE_EDGE_RISING>;
1433 status = "disabled"; 1434 status = "disabled";
1434 }; 1435 };
1435 1436
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index b147cb0dc14b..eef072a21acc 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -311,8 +311,8 @@
311 #size-cells = <0>; 311 #size-cells = <0>;
312 reg = <0>; 312 reg = <0>;
313 313
314 tcon1_in_drc1: endpoint@0 { 314 tcon1_in_drc1: endpoint@1 {
315 reg = <0>; 315 reg = <1>;
316 remote-endpoint = <&drc1_out_tcon1>; 316 remote-endpoint = <&drc1_out_tcon1>;
317 }; 317 };
318 }; 318 };
@@ -1012,8 +1012,8 @@
1012 #size-cells = <0>; 1012 #size-cells = <0>;
1013 reg = <1>; 1013 reg = <1>;
1014 1014
1015 be1_out_drc1: endpoint@0 { 1015 be1_out_drc1: endpoint@1 {
1016 reg = <0>; 1016 reg = <1>;
1017 remote-endpoint = <&drc1_in_be1>; 1017 remote-endpoint = <&drc1_in_be1>;
1018 }; 1018 };
1019 }; 1019 };
@@ -1042,8 +1042,8 @@
1042 #size-cells = <0>; 1042 #size-cells = <0>;
1043 reg = <0>; 1043 reg = <0>;
1044 1044
1045 drc1_in_be1: endpoint@0 { 1045 drc1_in_be1: endpoint@1 {
1046 reg = <0>; 1046 reg = <1>;
1047 remote-endpoint = <&be1_out_drc1>; 1047 remote-endpoint = <&be1_out_drc1>;
1048 }; 1048 };
1049 }; 1049 };
@@ -1053,8 +1053,8 @@
1053 #size-cells = <0>; 1053 #size-cells = <0>;
1054 reg = <1>; 1054 reg = <1>;
1055 1055
1056 drc1_out_tcon1: endpoint@0 { 1056 drc1_out_tcon1: endpoint@1 {
1057 reg = <0>; 1057 reg = <1>;
1058 remote-endpoint = <&tcon1_in_drc1>; 1058 remote-endpoint = <&tcon1_in_drc1>;
1059 }; 1059 };
1060 }; 1060 };
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index ea9646cc2a0e..0a498cb3fad8 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -115,7 +115,11 @@ ENTRY(printascii)
115 mov r1, r0 115 mov r1, r0
116 mov r0, #0x04 @ SYS_WRITE0 116 mov r0, #0x04 @ SYS_WRITE0
117 ARM( svc #0x123456 ) 117 ARM( svc #0x123456 )
118#ifdef CONFIG_CPU_V7M
119 THUMB( bkpt #0xab )
120#else
118 THUMB( svc #0xab ) 121 THUMB( svc #0xab )
122#endif
119 ret lr 123 ret lr
120ENDPROC(printascii) 124ENDPROC(printascii)
121 125
@@ -124,7 +128,11 @@ ENTRY(printch)
124 strb r0, [r1] 128 strb r0, [r1]
125 mov r0, #0x03 @ SYS_WRITEC 129 mov r0, #0x03 @ SYS_WRITEC
126 ARM( svc #0x123456 ) 130 ARM( svc #0x123456 )
131#ifdef CONFIG_CPU_V7M
132 THUMB( bkpt #0xab )
133#else
127 THUMB( svc #0xab ) 134 THUMB( svc #0xab )
135#endif
128 ret lr 136 ret lr
129ENDPROC(printch) 137ENDPROC(printch)
130 138
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 71a34e8c345a..57058ac46f49 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -32,6 +32,7 @@
32#include <asm/mach/arch.h> 32#include <asm/mach/arch.h>
33 33
34#include "db8500-regs.h" 34#include "db8500-regs.h"
35#include "pm_domains.h"
35 36
36static int __init ux500_l2x0_unlock(void) 37static int __init ux500_l2x0_unlock(void)
37{ 38{
@@ -157,6 +158,9 @@ static const struct of_device_id u8500_local_bus_nodes[] = {
157 158
158static void __init u8500_init_machine(void) 159static void __init u8500_init_machine(void)
159{ 160{
161 /* Initialize ux500 power domains */
162 ux500_pm_domains_init();
163
160 /* automatically probe child nodes of dbx5x0 devices */ 164 /* automatically probe child nodes of dbx5x0 devices */
161 if (of_machine_is_compatible("st-ericsson,u8540")) 165 if (of_machine_is_compatible("st-ericsson,u8540"))
162 of_platform_populate(NULL, u8500_local_bus_nodes, 166 of_platform_populate(NULL, u8500_local_bus_nodes,
diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
index a970e7fcba9e..f6c33a0c1c61 100644
--- a/arch/arm/mach-ux500/pm.c
+++ b/arch/arm/mach-ux500/pm.c
@@ -19,7 +19,6 @@
19#include <linux/of_address.h> 19#include <linux/of_address.h>
20 20
21#include "db8500-regs.h" 21#include "db8500-regs.h"
22#include "pm_domains.h"
23 22
24/* ARM WFI Standby signal register */ 23/* ARM WFI Standby signal register */
25#define PRCM_ARM_WFI_STANDBY (prcmu_base + 0x130) 24#define PRCM_ARM_WFI_STANDBY (prcmu_base + 0x130)
@@ -203,7 +202,4 @@ void __init ux500_pm_init(u32 phy_base, u32 size)
203 202
204 /* Set up ux500 suspend callbacks. */ 203 /* Set up ux500 suspend callbacks. */
205 suspend_set_ops(UX500_SUSPEND_OPS); 204 suspend_set_ops(UX500_SUSPEND_OPS);
206
207 /* Initialize ux500 power domains */
208 ux500_pm_domains_init();
209} 205}
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 3b8e728cc944..91537d90f5f5 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -344,6 +344,11 @@ void __init arm_mm_memblock_reserve(void)
344 * reserved here. 344 * reserved here.
345 */ 345 */
346#endif 346#endif
347 /*
348 * In any case, always ensure address 0 is never used as many things
349 * get very confused if 0 is returned as a legitimate address.
350 */
351 memblock_reserve(0, 1);
347} 352}
348 353
349void __init adjust_lowmem_bounds(void) 354void __init adjust_lowmem_bounds(void)
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index caf8b6fbe5e3..d06e34b5d192 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -61,13 +61,6 @@
61 chosen { 61 chosen {
62 stdout-path = "serial0:115200n8"; 62 stdout-path = "serial0:115200n8";
63 }; 63 };
64
65 reg_vcc3v3: vcc3v3 {
66 compatible = "regulator-fixed";
67 regulator-name = "vcc3v3";
68 regulator-min-microvolt = <3300000>;
69 regulator-max-microvolt = <3300000>;
70 };
71}; 64};
72 65
73&ehci0 { 66&ehci0 {
@@ -91,7 +84,7 @@
91&mmc0 { 84&mmc0 {
92 pinctrl-names = "default"; 85 pinctrl-names = "default";
93 pinctrl-0 = <&mmc0_pins>; 86 pinctrl-0 = <&mmc0_pins>;
94 vmmc-supply = <&reg_vcc3v3>; 87 vmmc-supply = <&reg_dcdc1>;
95 cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; 88 cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
96 cd-inverted; 89 cd-inverted;
97 disable-wp; 90 disable-wp;
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index 8263a8a504a8..f2aa2a81de4d 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -336,7 +336,7 @@
336 /* non-prefetchable memory */ 336 /* non-prefetchable memory */
337 0x82000000 0 0xf6000000 0 0xf6000000 0 0xf00000>; 337 0x82000000 0 0xf6000000 0 0xf6000000 0 0xf00000>;
338 interrupt-map-mask = <0 0 0 0>; 338 interrupt-map-mask = <0 0 0 0>;
339 interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; 339 interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
340 interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; 340 interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
341 num-lanes = <1>; 341 num-lanes = <1>;
342 clocks = <&cpm_clk 1 13>; 342 clocks = <&cpm_clk 1 13>;
@@ -362,7 +362,7 @@
362 /* non-prefetchable memory */ 362 /* non-prefetchable memory */
363 0x82000000 0 0xf7000000 0 0xf7000000 0 0xf00000>; 363 0x82000000 0 0xf7000000 0 0xf7000000 0 0xf00000>;
364 interrupt-map-mask = <0 0 0 0>; 364 interrupt-map-mask = <0 0 0 0>;
365 interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; 365 interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
366 interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; 366 interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
367 367
368 num-lanes = <1>; 368 num-lanes = <1>;
@@ -389,7 +389,7 @@
389 /* non-prefetchable memory */ 389 /* non-prefetchable memory */
390 0x82000000 0 0xf8000000 0 0xf8000000 0 0xf00000>; 390 0x82000000 0 0xf8000000 0 0xf8000000 0 0xf00000>;
391 interrupt-map-mask = <0 0 0 0>; 391 interrupt-map-mask = <0 0 0 0>;
392 interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; 392 interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
393 interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; 393 interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
394 394
395 num-lanes = <1>; 395 num-lanes = <1>;
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index b71ee6c83668..4fe70323abb3 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -335,7 +335,7 @@
335 /* non-prefetchable memory */ 335 /* non-prefetchable memory */
336 0x82000000 0 0xfa000000 0 0xfa000000 0 0xf00000>; 336 0x82000000 0 0xfa000000 0 0xfa000000 0 0xf00000>;
337 interrupt-map-mask = <0 0 0 0>; 337 interrupt-map-mask = <0 0 0 0>;
338 interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; 338 interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
339 interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; 339 interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
340 num-lanes = <1>; 340 num-lanes = <1>;
341 clocks = <&cps_clk 1 13>; 341 clocks = <&cps_clk 1 13>;
@@ -361,7 +361,7 @@
361 /* non-prefetchable memory */ 361 /* non-prefetchable memory */
362 0x82000000 0 0xfb000000 0 0xfb000000 0 0xf00000>; 362 0x82000000 0 0xfb000000 0 0xfb000000 0 0xf00000>;
363 interrupt-map-mask = <0 0 0 0>; 363 interrupt-map-mask = <0 0 0 0>;
364 interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; 364 interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
365 interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; 365 interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
366 366
367 num-lanes = <1>; 367 num-lanes = <1>;
@@ -388,7 +388,7 @@
388 /* non-prefetchable memory */ 388 /* non-prefetchable memory */
389 0x82000000 0 0xfc000000 0 0xfc000000 0 0xf00000>; 389 0x82000000 0 0xfc000000 0 0xfc000000 0 0xf00000>;
390 interrupt-map-mask = <0 0 0 0>; 390 interrupt-map-mask = <0 0 0 0>;
391 interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; 391 interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
392 interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; 392 interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
393 393
394 num-lanes = <1>; 394 num-lanes = <1>;
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index 4786c67b5e65..d9d885006a8e 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -62,6 +62,7 @@
62 brightness-levels = <256 128 64 16 8 4 0>; 62 brightness-levels = <256 128 64 16 8 4 0>;
63 default-brightness-level = <6>; 63 default-brightness-level = <6>;
64 64
65 power-supply = <&reg_12v>;
65 enable-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>; 66 enable-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
66 }; 67 };
67 68
@@ -83,6 +84,15 @@
83 regulator-always-on; 84 regulator-always-on;
84 }; 85 };
85 86
87 reg_12v: regulator2 {
88 compatible = "regulator-fixed";
89 regulator-name = "fixed-12V";
90 regulator-min-microvolt = <12000000>;
91 regulator-max-microvolt = <12000000>;
92 regulator-boot-on;
93 regulator-always-on;
94 };
95
86 rsnd_ak4613: sound { 96 rsnd_ak4613: sound {
87 compatible = "simple-audio-card"; 97 compatible = "simple-audio-card";
88 98
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 6d615cb6e64d..41d61840fb99 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -582,7 +582,7 @@
582 vop_mmu: iommu@ff373f00 { 582 vop_mmu: iommu@ff373f00 {
583 compatible = "rockchip,iommu"; 583 compatible = "rockchip,iommu";
584 reg = <0x0 0xff373f00 0x0 0x100>; 584 reg = <0x0 0xff373f00 0x0 0x100>;
585 interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH 0>; 585 interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
586 interrupt-names = "vop_mmu"; 586 interrupt-names = "vop_mmu";
587 #iommu-cells = <0>; 587 #iommu-cells = <0>;
588 status = "disabled"; 588 status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 19fbaa5e7bdd..1070c8264c13 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -740,7 +740,7 @@
740 iep_mmu: iommu@ff900800 { 740 iep_mmu: iommu@ff900800 {
741 compatible = "rockchip,iommu"; 741 compatible = "rockchip,iommu";
742 reg = <0x0 0xff900800 0x0 0x100>; 742 reg = <0x0 0xff900800 0x0 0x100>;
743 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>; 743 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
744 interrupt-names = "iep_mmu"; 744 interrupt-names = "iep_mmu";
745 #iommu-cells = <0>; 745 #iommu-cells = <0>;
746 status = "disabled"; 746 status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
index 7fd4bfcaa38e..fef82274a39d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
@@ -371,10 +371,10 @@
371 regulator-always-on; 371 regulator-always-on;
372 regulator-boot-on; 372 regulator-boot-on;
373 regulator-min-microvolt = <1800000>; 373 regulator-min-microvolt = <1800000>;
374 regulator-max-microvolt = <3300000>; 374 regulator-max-microvolt = <3000000>;
375 regulator-state-mem { 375 regulator-state-mem {
376 regulator-on-in-suspend; 376 regulator-on-in-suspend;
377 regulator-suspend-microvolt = <3300000>; 377 regulator-suspend-microvolt = <3000000>;
378 }; 378 };
379 }; 379 };
380 380
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index 53ff3d191a1d..910628d18add 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -325,12 +325,12 @@
325 vcc_sd: LDO_REG4 { 325 vcc_sd: LDO_REG4 {
326 regulator-name = "vcc_sd"; 326 regulator-name = "vcc_sd";
327 regulator-min-microvolt = <1800000>; 327 regulator-min-microvolt = <1800000>;
328 regulator-max-microvolt = <3300000>; 328 regulator-max-microvolt = <3000000>;
329 regulator-always-on; 329 regulator-always-on;
330 regulator-boot-on; 330 regulator-boot-on;
331 regulator-state-mem { 331 regulator-state-mem {
332 regulator-on-in-suspend; 332 regulator-on-in-suspend;
333 regulator-suspend-microvolt = <3300000>; 333 regulator-suspend-microvolt = <3000000>;
334 }; 334 };
335 }; 335 };
336 336
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
index 6c30bb02210d..0f873c897d0d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
@@ -315,10 +315,10 @@
315 regulator-always-on; 315 regulator-always-on;
316 regulator-boot-on; 316 regulator-boot-on;
317 regulator-min-microvolt = <1800000>; 317 regulator-min-microvolt = <1800000>;
318 regulator-max-microvolt = <3300000>; 318 regulator-max-microvolt = <3000000>;
319 regulator-state-mem { 319 regulator-state-mem {
320 regulator-on-in-suspend; 320 regulator-on-in-suspend;
321 regulator-suspend-microvolt = <3300000>; 321 regulator-suspend-microvolt = <3000000>;
322 }; 322 };
323 }; 323 };
324 324
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 903f3bf48419..7e25c5cc353a 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -155,14 +155,16 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
155 return __cmpxchg_small(ptr, old, new, size); 155 return __cmpxchg_small(ptr, old, new, size);
156 156
157 case 4: 157 case 4:
158 return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new); 158 return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr,
159 (u32)old, new);
159 160
160 case 8: 161 case 8:
161 /* lld/scd are only available for MIPS64 */ 162 /* lld/scd are only available for MIPS64 */
162 if (!IS_ENABLED(CONFIG_64BIT)) 163 if (!IS_ENABLED(CONFIG_64BIT))
163 return __cmpxchg_called_with_bad_pointer(); 164 return __cmpxchg_called_with_bad_pointer();
164 165
165 return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, old, new); 166 return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr,
167 (u64)old, new);
166 168
167 default: 169 default:
168 return __cmpxchg_called_with_bad_pointer(); 170 return __cmpxchg_called_with_bad_pointer();
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
index 100f23dfa438..ac584c5823d0 100644
--- a/arch/mips/loongson32/common/platform.c
+++ b/arch/mips/loongson32/common/platform.c
@@ -183,18 +183,20 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
183} 183}
184 184
185static struct plat_stmmacenet_data ls1x_eth0_pdata = { 185static struct plat_stmmacenet_data ls1x_eth0_pdata = {
186 .bus_id = 0, 186 .bus_id = 0,
187 .phy_addr = -1, 187 .phy_addr = -1,
188#if defined(CONFIG_LOONGSON1_LS1B) 188#if defined(CONFIG_LOONGSON1_LS1B)
189 .interface = PHY_INTERFACE_MODE_MII, 189 .interface = PHY_INTERFACE_MODE_MII,
190#elif defined(CONFIG_LOONGSON1_LS1C) 190#elif defined(CONFIG_LOONGSON1_LS1C)
191 .interface = PHY_INTERFACE_MODE_RMII, 191 .interface = PHY_INTERFACE_MODE_RMII,
192#endif 192#endif
193 .mdio_bus_data = &ls1x_mdio_bus_data, 193 .mdio_bus_data = &ls1x_mdio_bus_data,
194 .dma_cfg = &ls1x_eth_dma_cfg, 194 .dma_cfg = &ls1x_eth_dma_cfg,
195 .has_gmac = 1, 195 .has_gmac = 1,
196 .tx_coe = 1, 196 .tx_coe = 1,
197 .init = ls1x_eth_mux_init, 197 .rx_queues_to_use = 1,
198 .tx_queues_to_use = 1,
199 .init = ls1x_eth_mux_init,
198}; 200};
199 201
200static struct resource ls1x_eth0_resources[] = { 202static struct resource ls1x_eth0_resources[] = {
@@ -222,14 +224,16 @@ struct platform_device ls1x_eth0_pdev = {
222 224
223#ifdef CONFIG_LOONGSON1_LS1B 225#ifdef CONFIG_LOONGSON1_LS1B
224static struct plat_stmmacenet_data ls1x_eth1_pdata = { 226static struct plat_stmmacenet_data ls1x_eth1_pdata = {
225 .bus_id = 1, 227 .bus_id = 1,
226 .phy_addr = -1, 228 .phy_addr = -1,
227 .interface = PHY_INTERFACE_MODE_MII, 229 .interface = PHY_INTERFACE_MODE_MII,
228 .mdio_bus_data = &ls1x_mdio_bus_data, 230 .mdio_bus_data = &ls1x_mdio_bus_data,
229 .dma_cfg = &ls1x_eth_dma_cfg, 231 .dma_cfg = &ls1x_eth_dma_cfg,
230 .has_gmac = 1, 232 .has_gmac = 1,
231 .tx_coe = 1, 233 .tx_coe = 1,
232 .init = ls1x_eth_mux_init, 234 .rx_queues_to_use = 1,
235 .tx_queues_to_use = 1,
236 .init = ls1x_eth_mux_init,
233}; 237};
234 238
235static struct resource ls1x_eth1_resources[] = { 239static struct resource ls1x_eth1_resources[] = {
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 192542dbd972..16d9ef5a78c5 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -2558,7 +2558,6 @@ dcopuop:
2558 break; 2558 break;
2559 default: 2559 default:
2560 /* Reserved R6 ops */ 2560 /* Reserved R6 ops */
2561 pr_err("Reserved MIPS R6 CMP.condn.S operation\n");
2562 return SIGILL; 2561 return SIGILL;
2563 } 2562 }
2564 } 2563 }
@@ -2719,7 +2718,6 @@ dcopuop:
2719 break; 2718 break;
2720 default: 2719 default:
2721 /* Reserved R6 ops */ 2720 /* Reserved R6 ops */
2722 pr_err("Reserved MIPS R6 CMP.condn.D operation\n");
2723 return SIGILL; 2721 return SIGILL;
2724 } 2722 }
2725 } 2723 }
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 7646891c4e9b..01b7a87ea678 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -667,7 +667,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
667{ 667{
668 int src, dst, r, td, ts, mem_off, b_off; 668 int src, dst, r, td, ts, mem_off, b_off;
669 bool need_swap, did_move, cmp_eq; 669 bool need_swap, did_move, cmp_eq;
670 unsigned int target; 670 unsigned int target = 0;
671 u64 t64; 671 u64 t64;
672 s64 t64s; 672 s64 t64s;
673 int bpf_op = BPF_OP(insn->code); 673 int bpf_op = BPF_OP(insn->code);
diff --git a/arch/mips/tools/generic-board-config.sh b/arch/mips/tools/generic-board-config.sh
index 5c4f93687039..654d652d7fa1 100755
--- a/arch/mips/tools/generic-board-config.sh
+++ b/arch/mips/tools/generic-board-config.sh
@@ -30,8 +30,6 @@ cfg="$4"
30boards_origin="$5" 30boards_origin="$5"
31shift 5 31shift 5
32 32
33cd "${srctree}"
34
35# Only print Skipping... lines if the user explicitly specified BOARDS=. In the 33# Only print Skipping... lines if the user explicitly specified BOARDS=. In the
36# general case it only serves to obscure the useful output about what actually 34# general case it only serves to obscure the useful output about what actually
37# was included. 35# was included.
@@ -48,7 +46,7 @@ environment*)
48esac 46esac
49 47
50for board in $@; do 48for board in $@; do
51 board_cfg="arch/mips/configs/generic/board-${board}.config" 49 board_cfg="${srctree}/arch/mips/configs/generic/board-${board}.config"
52 if [ ! -f "${board_cfg}" ]; then 50 if [ ! -f "${board_cfg}" ]; then
53 echo "WARNING: Board config '${board_cfg}' not found" 51 echo "WARNING: Board config '${board_cfg}' not found"
54 continue 52 continue
@@ -84,7 +82,7 @@ for board in $@; do
84 done || continue 82 done || continue
85 83
86 # Merge this board config fragment into our final config file 84 # Merge this board config fragment into our final config file
87 ./scripts/kconfig/merge_config.sh \ 85 ${srctree}/scripts/kconfig/merge_config.sh \
88 -m -O ${objtree} ${cfg} ${board_cfg} \ 86 -m -O ${objtree} ${cfg} ${board_cfg} \
89 | grep -Ev '^(#|Using)' 87 | grep -Ev '^(#|Using)'
90done 88done
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index c6d6272a934f..7baa2265d439 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -35,12 +35,12 @@ EXPORT_SYMBOL(memset);
35EXPORT_SYMBOL(__xchg8); 35EXPORT_SYMBOL(__xchg8);
36EXPORT_SYMBOL(__xchg32); 36EXPORT_SYMBOL(__xchg32);
37EXPORT_SYMBOL(__cmpxchg_u32); 37EXPORT_SYMBOL(__cmpxchg_u32);
38EXPORT_SYMBOL(__cmpxchg_u64);
38#ifdef CONFIG_SMP 39#ifdef CONFIG_SMP
39EXPORT_SYMBOL(__atomic_hash); 40EXPORT_SYMBOL(__atomic_hash);
40#endif 41#endif
41#ifdef CONFIG_64BIT 42#ifdef CONFIG_64BIT
42EXPORT_SYMBOL(__xchg64); 43EXPORT_SYMBOL(__xchg64);
43EXPORT_SYMBOL(__cmpxchg_u64);
44#endif 44#endif
45 45
46#include <linux/uaccess.h> 46#include <linux/uaccess.h>
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 23de307c3052..41e60a9c7db2 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -742,7 +742,7 @@ lws_compare_and_swap_2:
74210: ldd 0(%r25), %r25 74210: ldd 0(%r25), %r25
74311: ldd 0(%r24), %r24 74311: ldd 0(%r24), %r24
744#else 744#else
745 /* Load new value into r22/r23 - high/low */ 745 /* Load old value into r22/r23 - high/low */
74610: ldw 0(%r25), %r22 74610: ldw 0(%r25), %r22
74711: ldw 4(%r25), %r23 74711: ldw 4(%r25), %r23
748 /* Load new value into fr4 for atomic store later */ 748 /* Load new value into fr4 for atomic store later */
@@ -834,11 +834,11 @@ cas2_action:
834 copy %r0, %r28 834 copy %r0, %r28
835#else 835#else
836 /* Compare first word */ 836 /* Compare first word */
83719: ldw,ma 0(%r26), %r29 83719: ldw 0(%r26), %r29
838 sub,= %r29, %r22, %r0 838 sub,= %r29, %r22, %r0
839 b,n cas2_end 839 b,n cas2_end
840 /* Compare second word */ 840 /* Compare second word */
84120: ldw,ma 4(%r26), %r29 84120: ldw 4(%r26), %r29
842 sub,= %r29, %r23, %r0 842 sub,= %r29, %r23, %r0
843 b,n cas2_end 843 b,n cas2_end
844 /* Perform the store */ 844 /* Perform the store */
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 2d956aa0a38a..8c0105a49839 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -253,7 +253,10 @@ static int __init init_cr16_clocksource(void)
253 cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; 253 cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
254 254
255 for_each_online_cpu(cpu) { 255 for_each_online_cpu(cpu) {
256 if (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc) 256 if (cpu == 0)
257 continue;
258 if ((cpu0_loc != 0) &&
259 (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
257 continue; 260 continue;
258 261
259 clocksource_cr16.name = "cr16_unstable"; 262 clocksource_cr16.name = "cr16_unstable";
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index c98e90b4ea7b..b4e2b7165f79 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -181,34 +181,25 @@ _GLOBAL(ftrace_stub)
181 * - we have no stack frame and can not allocate one 181 * - we have no stack frame and can not allocate one
182 * - LR points back to the original caller (in A) 182 * - LR points back to the original caller (in A)
183 * - CTR holds the new NIP in C 183 * - CTR holds the new NIP in C
184 * - r0 & r12 are free 184 * - r0, r11 & r12 are free
185 *
186 * r0 can't be used as the base register for a DS-form load or store, so
187 * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
188 */ 185 */
189livepatch_handler: 186livepatch_handler:
190 CURRENT_THREAD_INFO(r12, r1) 187 CURRENT_THREAD_INFO(r12, r1)
191 188
192 /* Save stack pointer into r0 */
193 mr r0, r1
194
195 /* Allocate 3 x 8 bytes */ 189 /* Allocate 3 x 8 bytes */
196 ld r1, TI_livepatch_sp(r12) 190 ld r11, TI_livepatch_sp(r12)
197 addi r1, r1, 24 191 addi r11, r11, 24
198 std r1, TI_livepatch_sp(r12) 192 std r11, TI_livepatch_sp(r12)
199 193
200 /* Save toc & real LR on livepatch stack */ 194 /* Save toc & real LR on livepatch stack */
201 std r2, -24(r1) 195 std r2, -24(r11)
202 mflr r12 196 mflr r12
203 std r12, -16(r1) 197 std r12, -16(r11)
204 198
205 /* Store stack end marker */ 199 /* Store stack end marker */
206 lis r12, STACK_END_MAGIC@h 200 lis r12, STACK_END_MAGIC@h
207 ori r12, r12, STACK_END_MAGIC@l 201 ori r12, r12, STACK_END_MAGIC@l
208 std r12, -8(r1) 202 std r12, -8(r11)
209
210 /* Restore real stack pointer */
211 mr r1, r0
212 203
213 /* Put ctr in r12 for global entry and branch there */ 204 /* Put ctr in r12 for global entry and branch there */
214 mfctr r12 205 mfctr r12
@@ -216,36 +207,30 @@ livepatch_handler:
216 207
217 /* 208 /*
218 * Now we are returning from the patched function to the original 209 * Now we are returning from the patched function to the original
219 * caller A. We are free to use r0 and r12, and we can use r2 until we 210 * caller A. We are free to use r11, r12 and we can use r2 until we
220 * restore it. 211 * restore it.
221 */ 212 */
222 213
223 CURRENT_THREAD_INFO(r12, r1) 214 CURRENT_THREAD_INFO(r12, r1)
224 215
225 /* Save stack pointer into r0 */ 216 ld r11, TI_livepatch_sp(r12)
226 mr r0, r1
227
228 ld r1, TI_livepatch_sp(r12)
229 217
230 /* Check stack marker hasn't been trashed */ 218 /* Check stack marker hasn't been trashed */
231 lis r2, STACK_END_MAGIC@h 219 lis r2, STACK_END_MAGIC@h
232 ori r2, r2, STACK_END_MAGIC@l 220 ori r2, r2, STACK_END_MAGIC@l
233 ld r12, -8(r1) 221 ld r12, -8(r11)
2341: tdne r12, r2 2221: tdne r12, r2
235 EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0 223 EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
236 224
237 /* Restore LR & toc from livepatch stack */ 225 /* Restore LR & toc from livepatch stack */
238 ld r12, -16(r1) 226 ld r12, -16(r11)
239 mtlr r12 227 mtlr r12
240 ld r2, -24(r1) 228 ld r2, -24(r11)
241 229
242 /* Pop livepatch stack frame */ 230 /* Pop livepatch stack frame */
243 CURRENT_THREAD_INFO(r12, r0) 231 CURRENT_THREAD_INFO(r12, r1)
244 subi r1, r1, 24 232 subi r11, r11, 24
245 std r1, TI_livepatch_sp(r12) 233 std r11, TI_livepatch_sp(r12)
246
247 /* Restore real stack pointer */
248 mr r1, r0
249 234
250 /* Return to original caller of live patched function */ 235 /* Return to original caller of live patched function */
251 blr 236 blr
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 5e8418c28bd8..f208f560aecd 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1684,11 +1684,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1684 * Logical instructions 1684 * Logical instructions
1685 */ 1685 */
1686 case 26: /* cntlzw */ 1686 case 26: /* cntlzw */
1687 op->val = __builtin_clz((unsigned int) regs->gpr[rd]); 1687 val = (unsigned int) regs->gpr[rd];
1688 op->val = ( val ? __builtin_clz(val) : 32 );
1688 goto logical_done; 1689 goto logical_done;
1689#ifdef __powerpc64__ 1690#ifdef __powerpc64__
1690 case 58: /* cntlzd */ 1691 case 58: /* cntlzd */
1691 op->val = __builtin_clzl(regs->gpr[rd]); 1692 val = regs->gpr[rd];
1693 op->val = ( val ? __builtin_clzl(val) : 64 );
1692 goto logical_done; 1694 goto logical_done;
1693#endif 1695#endif
1694 case 28: /* and */ 1696 case 28: /* and */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b95c584ce19d..a51df9ef529d 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1438,7 +1438,6 @@ out:
1438 1438
1439int arch_update_cpu_topology(void) 1439int arch_update_cpu_topology(void)
1440{ 1440{
1441 lockdep_assert_cpus_held();
1442 return numa_update_cpu_topology(true); 1441 return numa_update_cpu_topology(true);
1443} 1442}
1444 1443
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 9ccac86f3463..88126245881b 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -399,6 +399,20 @@ static void nest_imc_counters_release(struct perf_event *event)
399 399
400 /* Take the mutex lock for this node and then decrement the reference count */ 400 /* Take the mutex lock for this node and then decrement the reference count */
401 mutex_lock(&ref->lock); 401 mutex_lock(&ref->lock);
402 if (ref->refc == 0) {
403 /*
404 * The scenario where this is true is, when perf session is
405 * started, followed by offlining of all cpus in a given node.
406 *
407 * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
408 * function set the ref->count to zero, if the cpu which is
409 * about to offline is the last cpu in a given node and make
410 * an OPAL call to disable the engine in that node.
411 *
412 */
413 mutex_unlock(&ref->lock);
414 return;
415 }
402 ref->refc--; 416 ref->refc--;
403 if (ref->refc == 0) { 417 if (ref->refc == 0) {
404 rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST, 418 rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
@@ -523,8 +537,8 @@ static int core_imc_mem_init(int cpu, int size)
523 537
524 /* We need only vbase for core counters */ 538 /* We need only vbase for core counters */
525 mem_info->vbase = page_address(alloc_pages_node(phys_id, 539 mem_info->vbase = page_address(alloc_pages_node(phys_id,
526 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, 540 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
527 get_order(size))); 541 __GFP_NOWARN, get_order(size)));
528 if (!mem_info->vbase) 542 if (!mem_info->vbase)
529 return -ENOMEM; 543 return -ENOMEM;
530 544
@@ -646,6 +660,20 @@ static void core_imc_counters_release(struct perf_event *event)
646 return; 660 return;
647 661
648 mutex_lock(&ref->lock); 662 mutex_lock(&ref->lock);
663 if (ref->refc == 0) {
664 /*
665 * The scenario where this is true is, when perf session is
666 * started, followed by offlining of all cpus in a given core.
667 *
668 * In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
669 * function set the ref->count to zero, if the cpu which is
670 * about to offline is the last cpu in a given core and make
671 * an OPAL call to disable the engine in that core.
672 *
673 */
674 mutex_unlock(&ref->lock);
675 return;
676 }
649 ref->refc--; 677 ref->refc--;
650 if (ref->refc == 0) { 678 if (ref->refc == 0) {
651 rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, 679 rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
@@ -763,8 +791,8 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
763 * free the memory in cpu offline path. 791 * free the memory in cpu offline path.
764 */ 792 */
765 local_mem = page_address(alloc_pages_node(phys_id, 793 local_mem = page_address(alloc_pages_node(phys_id,
766 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, 794 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
767 get_order(size))); 795 __GFP_NOWARN, get_order(size)));
768 if (!local_mem) 796 if (!local_mem)
769 return -ENOMEM; 797 return -ENOMEM;
770 798
@@ -1148,7 +1176,8 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
1148 } 1176 }
1149 1177
1150 /* Only free the attr_groups which are dynamically allocated */ 1178 /* Only free the attr_groups which are dynamically allocated */
1151 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs); 1179 if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
1180 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
1152 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]); 1181 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
1153 kfree(pmu_ptr); 1182 kfree(pmu_ptr);
1154 return; 1183 return;
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index afa46a7406ea..04e042edbab7 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -27,6 +27,7 @@ CONFIG_NET=y
27CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 27CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
28CONFIG_DEVTMPFS=y 28CONFIG_DEVTMPFS=y
29# CONFIG_FIRMWARE_IN_KERNEL is not set 29# CONFIG_FIRMWARE_IN_KERNEL is not set
30CONFIG_BLK_DEV_RAM=y
30# CONFIG_BLK_DEV_XPRAM is not set 31# CONFIG_BLK_DEV_XPRAM is not set
31# CONFIG_DCSSBLK is not set 32# CONFIG_DCSSBLK is not set
32# CONFIG_DASD is not set 33# CONFIG_DASD is not set
@@ -59,6 +60,7 @@ CONFIG_CONFIGFS_FS=y
59# CONFIG_NETWORK_FILESYSTEMS is not set 60# CONFIG_NETWORK_FILESYSTEMS is not set
60CONFIG_PRINTK_TIME=y 61CONFIG_PRINTK_TIME=y
61CONFIG_DEBUG_INFO=y 62CONFIG_DEBUG_INFO=y
63CONFIG_DEBUG_FS=y
62CONFIG_DEBUG_KERNEL=y 64CONFIG_DEBUG_KERNEL=y
63CONFIG_PANIC_ON_OOPS=y 65CONFIG_PANIC_ON_OOPS=y
64# CONFIG_SCHED_DEBUG is not set 66# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1cee6753d47a..495ff6959dec 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -293,7 +293,10 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
293 lc->lpp = LPP_MAGIC; 293 lc->lpp = LPP_MAGIC;
294 lc->current_pid = tsk->pid; 294 lc->current_pid = tsk->pid;
295 lc->user_timer = tsk->thread.user_timer; 295 lc->user_timer = tsk->thread.user_timer;
296 lc->guest_timer = tsk->thread.guest_timer;
296 lc->system_timer = tsk->thread.system_timer; 297 lc->system_timer = tsk->thread.system_timer;
298 lc->hardirq_timer = tsk->thread.hardirq_timer;
299 lc->softirq_timer = tsk->thread.softirq_timer;
297 lc->steal_timer = 0; 300 lc->steal_timer = 0;
298} 301}
299 302
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 8a13d468635a..50e0d2bc4528 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -176,7 +176,7 @@
176/* 176/*
177 * This is a sneaky trick to help the unwinder find pt_regs on the stack. The 177 * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
178 * frame pointer is replaced with an encoded pointer to pt_regs. The encoding 178 * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
179 * is just setting the LSB, which makes it an invalid stack address and is also 179 * is just clearing the MSB, which makes it an invalid stack address and is also
180 * a signal to the unwinder that it's a pt_regs pointer in disguise. 180 * a signal to the unwinder that it's a pt_regs pointer in disguise.
181 * 181 *
182 * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the 182 * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
@@ -185,7 +185,7 @@
185.macro ENCODE_FRAME_POINTER 185.macro ENCODE_FRAME_POINTER
186#ifdef CONFIG_FRAME_POINTER 186#ifdef CONFIG_FRAME_POINTER
187 mov %esp, %ebp 187 mov %esp, %ebp
188 orl $0x1, %ebp 188 andl $0x7fffffff, %ebp
189#endif 189#endif
190.endm 190.endm
191 191
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 1c5390f1cf09..d45e06346f14 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -822,7 +822,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
822 pmus[i].type = type; 822 pmus[i].type = type;
823 pmus[i].boxes = kzalloc(size, GFP_KERNEL); 823 pmus[i].boxes = kzalloc(size, GFP_KERNEL);
824 if (!pmus[i].boxes) 824 if (!pmus[i].boxes)
825 return -ENOMEM; 825 goto err;
826 } 826 }
827 827
828 type->pmus = pmus; 828 type->pmus = pmus;
@@ -836,7 +836,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
836 attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) + 836 attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
837 sizeof(*attr_group), GFP_KERNEL); 837 sizeof(*attr_group), GFP_KERNEL);
838 if (!attr_group) 838 if (!attr_group)
839 return -ENOMEM; 839 goto err;
840 840
841 attrs = (struct attribute **)(attr_group + 1); 841 attrs = (struct attribute **)(attr_group + 1);
842 attr_group->name = "events"; 842 attr_group->name = "events";
@@ -849,7 +849,15 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
849 } 849 }
850 850
851 type->pmu_group = &uncore_pmu_attr_group; 851 type->pmu_group = &uncore_pmu_attr_group;
852
852 return 0; 853 return 0;
854
855err:
856 for (i = 0; i < type->num_boxes; i++)
857 kfree(pmus[i].boxes);
858 kfree(pmus);
859
860 return -ENOMEM;
853} 861}
854 862
855static int __init 863static int __init
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 1a8eb550c40f..a5db63f728a2 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -85,6 +85,8 @@ EXPORT_SYMBOL_GPL(hyperv_cs);
85u32 *hv_vp_index; 85u32 *hv_vp_index;
86EXPORT_SYMBOL_GPL(hv_vp_index); 86EXPORT_SYMBOL_GPL(hv_vp_index);
87 87
88u32 hv_max_vp_index;
89
88static int hv_cpu_init(unsigned int cpu) 90static int hv_cpu_init(unsigned int cpu)
89{ 91{
90 u64 msr_vp_index; 92 u64 msr_vp_index;
@@ -93,6 +95,9 @@ static int hv_cpu_init(unsigned int cpu)
93 95
94 hv_vp_index[smp_processor_id()] = msr_vp_index; 96 hv_vp_index[smp_processor_id()] = msr_vp_index;
95 97
98 if (msr_vp_index > hv_max_vp_index)
99 hv_max_vp_index = msr_vp_index;
100
96 return 0; 101 return 0;
97} 102}
98 103
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index 39e7f6e50919..9cc9e1c1e2db 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -36,9 +36,9 @@ struct hv_flush_pcpu_ex {
36/* Each gva in gva_list encodes up to 4096 pages to flush */ 36/* Each gva in gva_list encodes up to 4096 pages to flush */
37#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE) 37#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
38 38
39static struct hv_flush_pcpu __percpu *pcpu_flush; 39static struct hv_flush_pcpu __percpu **pcpu_flush;
40 40
41static struct hv_flush_pcpu_ex __percpu *pcpu_flush_ex; 41static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex;
42 42
43/* 43/*
44 * Fills in gva_list starting from offset. Returns the number of items added. 44 * Fills in gva_list starting from offset. Returns the number of items added.
@@ -76,6 +76,18 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
76{ 76{
77 int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; 77 int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
78 78
79 /* valid_bank_mask can represent up to 64 banks */
80 if (hv_max_vp_index / 64 >= 64)
81 return 0;
82
83 /*
84 * Clear all banks up to the maximum possible bank as hv_flush_pcpu_ex
85 * structs are not cleared between calls, we risk flushing unneeded
86 * vCPUs otherwise.
87 */
88 for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
89 flush->hv_vp_set.bank_contents[vcpu_bank] = 0;
90
79 /* 91 /*
80 * Some banks may end up being empty but this is acceptable. 92 * Some banks may end up being empty but this is acceptable.
81 */ 93 */
@@ -83,11 +95,6 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
83 vcpu = hv_cpu_number_to_vp_number(cpu); 95 vcpu = hv_cpu_number_to_vp_number(cpu);
84 vcpu_bank = vcpu / 64; 96 vcpu_bank = vcpu / 64;
85 vcpu_offset = vcpu % 64; 97 vcpu_offset = vcpu % 64;
86
87 /* valid_bank_mask can represent up to 64 banks */
88 if (vcpu_bank >= 64)
89 return 0;
90
91 __set_bit(vcpu_offset, (unsigned long *) 98 __set_bit(vcpu_offset, (unsigned long *)
92 &flush->hv_vp_set.bank_contents[vcpu_bank]); 99 &flush->hv_vp_set.bank_contents[vcpu_bank]);
93 if (vcpu_bank >= nr_bank) 100 if (vcpu_bank >= nr_bank)
@@ -102,6 +109,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
102 const struct flush_tlb_info *info) 109 const struct flush_tlb_info *info)
103{ 110{
104 int cpu, vcpu, gva_n, max_gvas; 111 int cpu, vcpu, gva_n, max_gvas;
112 struct hv_flush_pcpu **flush_pcpu;
105 struct hv_flush_pcpu *flush; 113 struct hv_flush_pcpu *flush;
106 u64 status = U64_MAX; 114 u64 status = U64_MAX;
107 unsigned long flags; 115 unsigned long flags;
@@ -116,7 +124,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
116 124
117 local_irq_save(flags); 125 local_irq_save(flags);
118 126
119 flush = this_cpu_ptr(pcpu_flush); 127 flush_pcpu = this_cpu_ptr(pcpu_flush);
128
129 if (unlikely(!*flush_pcpu))
130 *flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
131
132 flush = *flush_pcpu;
133
134 if (unlikely(!flush)) {
135 local_irq_restore(flags);
136 goto do_native;
137 }
120 138
121 if (info->mm) { 139 if (info->mm) {
122 flush->address_space = virt_to_phys(info->mm->pgd); 140 flush->address_space = virt_to_phys(info->mm->pgd);
@@ -173,6 +191,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
173 const struct flush_tlb_info *info) 191 const struct flush_tlb_info *info)
174{ 192{
175 int nr_bank = 0, max_gvas, gva_n; 193 int nr_bank = 0, max_gvas, gva_n;
194 struct hv_flush_pcpu_ex **flush_pcpu;
176 struct hv_flush_pcpu_ex *flush; 195 struct hv_flush_pcpu_ex *flush;
177 u64 status = U64_MAX; 196 u64 status = U64_MAX;
178 unsigned long flags; 197 unsigned long flags;
@@ -187,7 +206,17 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
187 206
188 local_irq_save(flags); 207 local_irq_save(flags);
189 208
190 flush = this_cpu_ptr(pcpu_flush_ex); 209 flush_pcpu = this_cpu_ptr(pcpu_flush_ex);
210
211 if (unlikely(!*flush_pcpu))
212 *flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
213
214 flush = *flush_pcpu;
215
216 if (unlikely(!flush)) {
217 local_irq_restore(flags);
218 goto do_native;
219 }
191 220
192 if (info->mm) { 221 if (info->mm) {
193 flush->address_space = virt_to_phys(info->mm->pgd); 222 flush->address_space = virt_to_phys(info->mm->pgd);
@@ -222,18 +251,18 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
222 flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY; 251 flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
223 status = hv_do_rep_hypercall( 252 status = hv_do_rep_hypercall(
224 HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX, 253 HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
225 0, nr_bank + 2, flush, NULL); 254 0, nr_bank, flush, NULL);
226 } else if (info->end && 255 } else if (info->end &&
227 ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) { 256 ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
228 status = hv_do_rep_hypercall( 257 status = hv_do_rep_hypercall(
229 HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX, 258 HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
230 0, nr_bank + 2, flush, NULL); 259 0, nr_bank, flush, NULL);
231 } else { 260 } else {
232 gva_n = fill_gva_list(flush->gva_list, nr_bank, 261 gva_n = fill_gva_list(flush->gva_list, nr_bank,
233 info->start, info->end); 262 info->start, info->end);
234 status = hv_do_rep_hypercall( 263 status = hv_do_rep_hypercall(
235 HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX, 264 HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
236 gva_n, nr_bank + 2, flush, NULL); 265 gva_n, nr_bank, flush, NULL);
237 } 266 }
238 267
239 local_irq_restore(flags); 268 local_irq_restore(flags);
@@ -266,7 +295,7 @@ void hyper_alloc_mmu(void)
266 return; 295 return;
267 296
268 if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) 297 if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
269 pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE); 298 pcpu_flush = alloc_percpu(struct hv_flush_pcpu *);
270 else 299 else
271 pcpu_flush_ex = __alloc_percpu(PAGE_SIZE, PAGE_SIZE); 300 pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *);
272} 301}
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index e7636bac7372..6c98821fef5e 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -62,8 +62,10 @@
62#define new_len2 145f-144f 62#define new_len2 145f-144f
63 63
64/* 64/*
65 * max without conditionals. Idea adapted from: 65 * gas compatible max based on the idea from:
66 * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax 66 * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
67 *
68 * The additional "-" is needed because gas uses a "true" value of -1.
67 */ 69 */
68#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b))))) 70#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
69 71
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index c096624137ae..ccbe24e697c4 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -103,12 +103,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
103 alt_end_marker ":\n" 103 alt_end_marker ":\n"
104 104
105/* 105/*
106 * max without conditionals. Idea adapted from: 106 * gas compatible max based on the idea from:
107 * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax 107 * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
108 * 108 *
109 * The additional "-" is needed because gas works with s32s. 109 * The additional "-" is needed because gas uses a "true" value of -1.
110 */ 110 */
111#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))" 111#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
112 112
113/* 113/*
114 * Pad the second replacement alternative with additional NOPs if it is 114 * Pad the second replacement alternative with additional NOPs if it is
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 181264989db5..8edac1de2e35 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -187,7 +187,6 @@ struct mca_msr_regs {
187 187
188extern struct mce_vendor_flags mce_flags; 188extern struct mce_vendor_flags mce_flags;
189 189
190extern struct mca_config mca_cfg;
191extern struct mca_msr_regs msr_ops; 190extern struct mca_msr_regs msr_ops;
192 191
193enum mce_notifier_prios { 192enum mce_notifier_prios {
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index c120b5db178a..3c856a15b98e 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -126,13 +126,7 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
126 DEBUG_LOCKS_WARN_ON(preemptible()); 126 DEBUG_LOCKS_WARN_ON(preemptible());
127} 127}
128 128
129static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 129void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
130{
131 int cpu = smp_processor_id();
132
133 if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
134 cpumask_clear_cpu(cpu, mm_cpumask(mm));
135}
136 130
137static inline int init_new_context(struct task_struct *tsk, 131static inline int init_new_context(struct task_struct *tsk,
138 struct mm_struct *mm) 132 struct mm_struct *mm)
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 738503e1f80c..530f448fddaf 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -289,6 +289,7 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
289 * to this information. 289 * to this information.
290 */ 290 */
291extern u32 *hv_vp_index; 291extern u32 *hv_vp_index;
292extern u32 hv_max_vp_index;
292 293
293/** 294/**
294 * hv_cpu_number_to_vp_number() - Map CPU to VP. 295 * hv_cpu_number_to_vp_number() - Map CPU to VP.
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 4893abf7f74f..d362161d3291 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -83,6 +83,13 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
83#endif 83#endif
84 84
85/* 85/*
86 * If tlb_use_lazy_mode is true, then we try to avoid switching CR3 to point
87 * to init_mm when we switch to a kernel thread (e.g. the idle thread). If
88 * it's false, then we immediately switch CR3 when entering a kernel thread.
89 */
90DECLARE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
91
92/*
86 * 6 because 6 should be plenty and struct tlb_state will fit in 93 * 6 because 6 should be plenty and struct tlb_state will fit in
87 * two cache lines. 94 * two cache lines.
88 */ 95 */
@@ -105,6 +112,23 @@ struct tlb_state {
105 u16 next_asid; 112 u16 next_asid;
106 113
107 /* 114 /*
115 * We can be in one of several states:
116 *
117 * - Actively using an mm. Our CPU's bit will be set in
118 * mm_cpumask(loaded_mm) and is_lazy == false;
119 *
120 * - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
121 * will not be set in mm_cpumask(&init_mm) and is_lazy == false.
122 *
123 * - Lazily using a real mm. loaded_mm != &init_mm, our bit
124 * is set in mm_cpumask(loaded_mm), but is_lazy == true.
125 * We're heuristically guessing that the CR3 load we
126 * skipped more than makes up for the overhead added by
127 * lazy mode.
128 */
129 bool is_lazy;
130
131 /*
108 * Access to this CR4 shadow and to H/W CR4 is protected by 132 * Access to this CR4 shadow and to H/W CR4 is protected by
109 * disabling interrupts when modifying either one. 133 * disabling interrupts when modifying either one.
110 */ 134 */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index d705c769f77d..ff891772c9f8 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -573,11 +573,21 @@ static u32 bdx_deadline_rev(void)
573 return ~0U; 573 return ~0U;
574} 574}
575 575
576static u32 skx_deadline_rev(void)
577{
578 switch (boot_cpu_data.x86_mask) {
579 case 0x03: return 0x01000136;
580 case 0x04: return 0x02000014;
581 }
582
583 return ~0U;
584}
585
576static const struct x86_cpu_id deadline_match[] = { 586static const struct x86_cpu_id deadline_match[] = {
577 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev), 587 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev),
578 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020), 588 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020),
579 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev), 589 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev),
580 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_X, 0x02000014), 590 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X, skx_deadline_rev),
581 591
582 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE, 0x22), 592 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE, 0x22),
583 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20), 593 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20),
@@ -600,7 +610,8 @@ static void apic_check_deadline_errata(void)
600 const struct x86_cpu_id *m; 610 const struct x86_cpu_id *m;
601 u32 rev; 611 u32 rev;
602 612
603 if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) 613 if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
614 boot_cpu_has(X86_FEATURE_HYPERVISOR))
604 return; 615 return;
605 616
606 m = x86_match_cpu(deadline_match); 617 m = x86_match_cpu(deadline_match);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 098530a93bb7..debb974fd17d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -1,3 +1,6 @@
1#ifndef __X86_MCE_INTERNAL_H__
2#define __X86_MCE_INTERNAL_H__
3
1#include <linux/device.h> 4#include <linux/device.h>
2#include <asm/mce.h> 5#include <asm/mce.h>
3 6
@@ -108,3 +111,7 @@ static inline void mce_work_trigger(void) { }
108static inline void mce_register_injector_chain(struct notifier_block *nb) { } 111static inline void mce_register_injector_chain(struct notifier_block *nb) { }
109static inline void mce_unregister_injector_chain(struct notifier_block *nb) { } 112static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
110#endif 113#endif
114
115extern struct mca_config mca_cfg;
116
117#endif /* __X86_MCE_INTERNAL_H__ */
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 40e28ed77fbf..486f640b02ef 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -28,6 +28,8 @@
28#include <asm/msr.h> 28#include <asm/msr.h>
29#include <asm/trace/irq_vectors.h> 29#include <asm/trace/irq_vectors.h>
30 30
31#include "mce-internal.h"
32
31#define NR_BLOCKS 5 33#define NR_BLOCKS 5
32#define THRESHOLD_MAX 0xFFF 34#define THRESHOLD_MAX 0xFFF
33#define INT_TYPE_APIC 0x00020000 35#define INT_TYPE_APIC 0x00020000
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 86e8f0b2537b..c4fa4a85d4cb 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -122,9 +122,6 @@ static bool __init check_loader_disabled_bsp(void)
122 bool *res = &dis_ucode_ldr; 122 bool *res = &dis_ucode_ldr;
123#endif 123#endif
124 124
125 if (!have_cpuid_p())
126 return *res;
127
128 /* 125 /*
129 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not 126 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
130 * completely accurate as xen pv guests don't see that CPUID bit set but 127 * completely accurate as xen pv guests don't see that CPUID bit set but
@@ -166,24 +163,36 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name)
166void __init load_ucode_bsp(void) 163void __init load_ucode_bsp(void)
167{ 164{
168 unsigned int cpuid_1_eax; 165 unsigned int cpuid_1_eax;
166 bool intel = true;
169 167
170 if (check_loader_disabled_bsp()) 168 if (!have_cpuid_p())
171 return; 169 return;
172 170
173 cpuid_1_eax = native_cpuid_eax(1); 171 cpuid_1_eax = native_cpuid_eax(1);
174 172
175 switch (x86_cpuid_vendor()) { 173 switch (x86_cpuid_vendor()) {
176 case X86_VENDOR_INTEL: 174 case X86_VENDOR_INTEL:
177 if (x86_family(cpuid_1_eax) >= 6) 175 if (x86_family(cpuid_1_eax) < 6)
178 load_ucode_intel_bsp(); 176 return;
179 break; 177 break;
178
180 case X86_VENDOR_AMD: 179 case X86_VENDOR_AMD:
181 if (x86_family(cpuid_1_eax) >= 0x10) 180 if (x86_family(cpuid_1_eax) < 0x10)
182 load_ucode_amd_bsp(cpuid_1_eax); 181 return;
182 intel = false;
183 break; 183 break;
184
184 default: 185 default:
185 break; 186 return;
186 } 187 }
188
189 if (check_loader_disabled_bsp())
190 return;
191
192 if (intel)
193 load_ucode_intel_bsp();
194 else
195 load_ucode_amd_bsp(cpuid_1_eax);
187} 196}
188 197
189static bool check_loader_disabled_ap(void) 198static bool check_loader_disabled_ap(void)
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index db2182d63ed0..3fc0f9a794cb 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -3,6 +3,15 @@
3 3
4/* Kprobes and Optprobes common header */ 4/* Kprobes and Optprobes common header */
5 5
6#include <asm/asm.h>
7
8#ifdef CONFIG_FRAME_POINTER
9# define SAVE_RBP_STRING " push %" _ASM_BP "\n" \
10 " mov %" _ASM_SP ", %" _ASM_BP "\n"
11#else
12# define SAVE_RBP_STRING " push %" _ASM_BP "\n"
13#endif
14
6#ifdef CONFIG_X86_64 15#ifdef CONFIG_X86_64
7#define SAVE_REGS_STRING \ 16#define SAVE_REGS_STRING \
8 /* Skip cs, ip, orig_ax. */ \ 17 /* Skip cs, ip, orig_ax. */ \
@@ -17,7 +26,7 @@
17 " pushq %r10\n" \ 26 " pushq %r10\n" \
18 " pushq %r11\n" \ 27 " pushq %r11\n" \
19 " pushq %rbx\n" \ 28 " pushq %rbx\n" \
20 " pushq %rbp\n" \ 29 SAVE_RBP_STRING \
21 " pushq %r12\n" \ 30 " pushq %r12\n" \
22 " pushq %r13\n" \ 31 " pushq %r13\n" \
23 " pushq %r14\n" \ 32 " pushq %r14\n" \
@@ -48,7 +57,7 @@
48 " pushl %es\n" \ 57 " pushl %es\n" \
49 " pushl %ds\n" \ 58 " pushl %ds\n" \
50 " pushl %eax\n" \ 59 " pushl %eax\n" \
51 " pushl %ebp\n" \ 60 SAVE_RBP_STRING \
52 " pushl %edi\n" \ 61 " pushl %edi\n" \
53 " pushl %esi\n" \ 62 " pushl %esi\n" \
54 " pushl %edx\n" \ 63 " pushl %edx\n" \
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index f0153714ddac..0742491cbb73 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1080,8 +1080,6 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
1080 * raw stack chunk with redzones: 1080 * raw stack chunk with redzones:
1081 */ 1081 */
1082 __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr)); 1082 __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
1083 regs->flags &= ~X86_EFLAGS_IF;
1084 trace_hardirqs_off();
1085 regs->ip = (unsigned long)(jp->entry); 1083 regs->ip = (unsigned long)(jp->entry);
1086 1084
1087 /* 1085 /*
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 54180fa6f66f..add33f600531 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -105,6 +105,10 @@ void __noreturn machine_real_restart(unsigned int type)
105 load_cr3(initial_page_table); 105 load_cr3(initial_page_table);
106#else 106#else
107 write_cr3(real_mode_header->trampoline_pgd); 107 write_cr3(real_mode_header->trampoline_pgd);
108
109 /* Exiting long mode will fail if CR4.PCIDE is set. */
110 if (static_cpu_has(X86_FEATURE_PCID))
111 cr4_clear_bits(X86_CR4_PCIDE);
108#endif 112#endif
109 113
110 /* Jump to the identity-mapped low memory code */ 114 /* Jump to the identity-mapped low memory code */
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index d145a0b1f529..3dc26f95d46e 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -44,7 +44,8 @@ static void unwind_dump(struct unwind_state *state)
44 state->stack_info.type, state->stack_info.next_sp, 44 state->stack_info.type, state->stack_info.next_sp,
45 state->stack_mask, state->graph_idx); 45 state->stack_mask, state->graph_idx);
46 46
47 for (sp = state->orig_sp; sp; sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { 47 for (sp = PTR_ALIGN(state->orig_sp, sizeof(long)); sp;
48 sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
48 if (get_stack_info(sp, state->task, &stack_info, &visit_mask)) 49 if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
49 break; 50 break;
50 51
@@ -174,6 +175,7 @@ static bool is_last_task_frame(struct unwind_state *state)
174 * This determines if the frame pointer actually contains an encoded pointer to 175 * This determines if the frame pointer actually contains an encoded pointer to
175 * pt_regs on the stack. See ENCODE_FRAME_POINTER. 176 * pt_regs on the stack. See ENCODE_FRAME_POINTER.
176 */ 177 */
178#ifdef CONFIG_X86_64
177static struct pt_regs *decode_frame_pointer(unsigned long *bp) 179static struct pt_regs *decode_frame_pointer(unsigned long *bp)
178{ 180{
179 unsigned long regs = (unsigned long)bp; 181 unsigned long regs = (unsigned long)bp;
@@ -183,6 +185,23 @@ static struct pt_regs *decode_frame_pointer(unsigned long *bp)
183 185
184 return (struct pt_regs *)(regs & ~0x1); 186 return (struct pt_regs *)(regs & ~0x1);
185} 187}
188#else
189static struct pt_regs *decode_frame_pointer(unsigned long *bp)
190{
191 unsigned long regs = (unsigned long)bp;
192
193 if (regs & 0x80000000)
194 return NULL;
195
196 return (struct pt_regs *)(regs | 0x80000000);
197}
198#endif
199
200#ifdef CONFIG_X86_32
201#define KERNEL_REGS_SIZE (sizeof(struct pt_regs) - 2*sizeof(long))
202#else
203#define KERNEL_REGS_SIZE (sizeof(struct pt_regs))
204#endif
186 205
187static bool update_stack_state(struct unwind_state *state, 206static bool update_stack_state(struct unwind_state *state,
188 unsigned long *next_bp) 207 unsigned long *next_bp)
@@ -202,7 +221,7 @@ static bool update_stack_state(struct unwind_state *state,
202 regs = decode_frame_pointer(next_bp); 221 regs = decode_frame_pointer(next_bp);
203 if (regs) { 222 if (regs) {
204 frame = (unsigned long *)regs; 223 frame = (unsigned long *)regs;
205 len = regs_size(regs); 224 len = KERNEL_REGS_SIZE;
206 state->got_irq = true; 225 state->got_irq = true;
207 } else { 226 } else {
208 frame = next_bp; 227 frame = next_bp;
@@ -226,6 +245,14 @@ static bool update_stack_state(struct unwind_state *state,
226 frame < prev_frame_end) 245 frame < prev_frame_end)
227 return false; 246 return false;
228 247
248 /*
249 * On 32-bit with user mode regs, make sure the last two regs are safe
250 * to access:
251 */
252 if (IS_ENABLED(CONFIG_X86_32) && regs && user_mode(regs) &&
253 !on_stack(info, frame, len + 2*sizeof(long)))
254 return false;
255
229 /* Move state to the next frame: */ 256 /* Move state to the next frame: */
230 if (regs) { 257 if (regs) {
231 state->regs = regs; 258 state->regs = regs;
@@ -328,6 +355,13 @@ bad_address:
328 state->regs->sp < (unsigned long)task_pt_regs(state->task)) 355 state->regs->sp < (unsigned long)task_pt_regs(state->task))
329 goto the_end; 356 goto the_end;
330 357
358 /*
359 * There are some known frame pointer issues on 32-bit. Disable
360 * unwinder warnings on 32-bit until it gets objtool support.
361 */
362 if (IS_ENABLED(CONFIG_X86_32))
363 goto the_end;
364
331 if (state->regs) { 365 if (state->regs) {
332 printk_deferred_once(KERN_WARNING 366 printk_deferred_once(KERN_WARNING
333 "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n", 367 "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 106d4a029a8a..7a69cf053711 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3974,19 +3974,19 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
3974 unsigned level, unsigned gpte) 3974 unsigned level, unsigned gpte)
3975{ 3975{
3976 /* 3976 /*
3977 * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
3978 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
3979 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
3980 */
3981 gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
3982
3983 /*
3984 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level. 3977 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
3985 * If it is clear, there are no large pages at this level, so clear 3978 * If it is clear, there are no large pages at this level, so clear
3986 * PT_PAGE_SIZE_MASK in gpte if that is the case. 3979 * PT_PAGE_SIZE_MASK in gpte if that is the case.
3987 */ 3980 */
3988 gpte &= level - mmu->last_nonleaf_level; 3981 gpte &= level - mmu->last_nonleaf_level;
3989 3982
3983 /*
3984 * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
3985 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
3986 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
3987 */
3988 gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
3989
3990 return gpte & PT_PAGE_SIZE_MASK; 3990 return gpte & PT_PAGE_SIZE_MASK;
3991} 3991}
3992 3992
@@ -4555,6 +4555,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
4555 4555
4556 update_permission_bitmask(vcpu, context, true); 4556 update_permission_bitmask(vcpu, context, true);
4557 update_pkru_bitmask(vcpu, context, true); 4557 update_pkru_bitmask(vcpu, context, true);
4558 update_last_nonleaf_level(vcpu, context);
4558 reset_rsvds_bits_mask_ept(vcpu, context, execonly); 4559 reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4559 reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); 4560 reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
4560} 4561}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 86b68dc5a649..f18d1f8d332b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -334,10 +334,11 @@ retry_walk:
334 --walker->level; 334 --walker->level;
335 335
336 index = PT_INDEX(addr, walker->level); 336 index = PT_INDEX(addr, walker->level);
337
338 table_gfn = gpte_to_gfn(pte); 337 table_gfn = gpte_to_gfn(pte);
339 offset = index * sizeof(pt_element_t); 338 offset = index * sizeof(pt_element_t);
340 pte_gpa = gfn_to_gpa(table_gfn) + offset; 339 pte_gpa = gfn_to_gpa(table_gfn) + offset;
340
341 BUG_ON(walker->level < 1);
341 walker->table_gfn[walker->level - 1] = table_gfn; 342 walker->table_gfn[walker->level - 1] = table_gfn;
342 walker->pte_gpa[walker->level - 1] = pte_gpa; 343 walker->pte_gpa[walker->level - 1] = pte_gpa;
343 344
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a2b804e10c95..95a01609d7ee 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11297,7 +11297,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
11297 11297
11298 /* Same as above - no reason to call set_cr4_guest_host_mask(). */ 11298 /* Same as above - no reason to call set_cr4_guest_host_mask(). */
11299 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 11299 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
11300 kvm_set_cr4(vcpu, vmcs12->host_cr4); 11300 vmx_set_cr4(vcpu, vmcs12->host_cr4);
11301 11301
11302 nested_ept_uninit_mmu_context(vcpu); 11302 nested_ept_uninit_mmu_context(vcpu);
11303 11303
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 72bf8c01c6e3..e1f095884386 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,5 +1,12 @@
1# Kernel does not boot with instrumentation of tlb.c. 1# Kernel does not boot with instrumentation of tlb.c and mem_encrypt.c
2KCOV_INSTRUMENT_tlb.o := n 2KCOV_INSTRUMENT_tlb.o := n
3KCOV_INSTRUMENT_mem_encrypt.o := n
4
5KASAN_SANITIZE_mem_encrypt.o := n
6
7ifdef CONFIG_FUNCTION_TRACER
8CFLAGS_REMOVE_mem_encrypt.o = -pg
9endif
3 10
4obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ 11obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
5 pat.o pgtable.o physaddr.o setup_nx.o tlb.o 12 pat.o pgtable.o physaddr.o setup_nx.o tlb.o
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 49d9778376d7..658bf0090565 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -30,6 +30,8 @@
30 30
31atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); 31atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
32 32
33DEFINE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
34
33static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, 35static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
34 u16 *new_asid, bool *need_flush) 36 u16 *new_asid, bool *need_flush)
35{ 37{
@@ -80,7 +82,7 @@ void leave_mm(int cpu)
80 return; 82 return;
81 83
82 /* Warn if we're not lazy. */ 84 /* Warn if we're not lazy. */
83 WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))); 85 WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));
84 86
85 switch_mm(NULL, &init_mm, NULL); 87 switch_mm(NULL, &init_mm, NULL);
86} 88}
@@ -142,45 +144,24 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
142 __flush_tlb_all(); 144 __flush_tlb_all();
143 } 145 }
144#endif 146#endif
147 this_cpu_write(cpu_tlbstate.is_lazy, false);
145 148
146 if (real_prev == next) { 149 if (real_prev == next) {
147 VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != 150 VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
148 next->context.ctx_id); 151 next->context.ctx_id);
149 152
150 if (cpumask_test_cpu(cpu, mm_cpumask(next))) {
151 /*
152 * There's nothing to do: we weren't lazy, and we
153 * aren't changing our mm. We don't need to flush
154 * anything, nor do we need to update CR3, CR4, or
155 * LDTR.
156 */
157 return;
158 }
159
160 /* Resume remote flushes and then read tlb_gen. */
161 cpumask_set_cpu(cpu, mm_cpumask(next));
162 next_tlb_gen = atomic64_read(&next->context.tlb_gen);
163
164 if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) <
165 next_tlb_gen) {
166 /*
167 * Ideally, we'd have a flush_tlb() variant that
168 * takes the known CR3 value as input. This would
169 * be faster on Xen PV and on hypothetical CPUs
170 * on which INVPCID is fast.
171 */
172 this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen,
173 next_tlb_gen);
174 write_cr3(build_cr3(next, prev_asid));
175 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
176 TLB_FLUSH_ALL);
177 }
178
179 /* 153 /*
180 * We just exited lazy mode, which means that CR4 and/or LDTR 154 * We don't currently support having a real mm loaded without
181 * may be stale. (Changes to the required CR4 and LDTR states 155 * our cpu set in mm_cpumask(). We have all the bookkeeping
182 * are not reflected in tlb_gen.) 156 * in place to figure out whether we would need to flush
157 * if our cpu were cleared in mm_cpumask(), but we don't
158 * currently use it.
183 */ 159 */
160 if (WARN_ON_ONCE(real_prev != &init_mm &&
161 !cpumask_test_cpu(cpu, mm_cpumask(next))))
162 cpumask_set_cpu(cpu, mm_cpumask(next));
163
164 return;
184 } else { 165 } else {
185 u16 new_asid; 166 u16 new_asid;
186 bool need_flush; 167 bool need_flush;
@@ -199,10 +180,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
199 } 180 }
200 181
201 /* Stop remote flushes for the previous mm */ 182 /* Stop remote flushes for the previous mm */
202 if (cpumask_test_cpu(cpu, mm_cpumask(real_prev))) 183 VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
203 cpumask_clear_cpu(cpu, mm_cpumask(real_prev)); 184 real_prev != &init_mm);
204 185 cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
205 VM_WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
206 186
207 /* 187 /*
208 * Start remote flushes and then read tlb_gen. 188 * Start remote flushes and then read tlb_gen.
@@ -233,6 +213,37 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
233} 213}
234 214
235/* 215/*
216 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
217 * kernel thread or other context without an mm. Acceptable implementations
218 * include doing nothing whatsoever, switching to init_mm, or various clever
219 * lazy tricks to try to minimize TLB flushes.
220 *
221 * The scheduler reserves the right to call enter_lazy_tlb() several times
222 * in a row. It will notify us that we're going back to a real mm by
223 * calling switch_mm_irqs_off().
224 */
225void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
226{
227 if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
228 return;
229
230 if (static_branch_unlikely(&tlb_use_lazy_mode)) {
231 /*
232 * There's a significant optimization that may be possible
233 * here. We have accurate enough TLB flush tracking that we
234 * don't need to maintain coherence of TLB per se when we're
235 * lazy. We do, however, need to maintain coherence of
236 * paging-structure caches. We could, in principle, leave our
237 * old mm loaded and only switch to init_mm when
238 * tlb_remove_page() happens.
239 */
240 this_cpu_write(cpu_tlbstate.is_lazy, true);
241 } else {
242 switch_mm(NULL, &init_mm, NULL);
243 }
244}
245
246/*
236 * Call this when reinitializing a CPU. It fixes the following potential 247 * Call this when reinitializing a CPU. It fixes the following potential
237 * problems: 248 * problems:
238 * 249 *
@@ -303,16 +314,20 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
303 /* This code cannot presently handle being reentered. */ 314 /* This code cannot presently handle being reentered. */
304 VM_WARN_ON(!irqs_disabled()); 315 VM_WARN_ON(!irqs_disabled());
305 316
317 if (unlikely(loaded_mm == &init_mm))
318 return;
319
306 VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) != 320 VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
307 loaded_mm->context.ctx_id); 321 loaded_mm->context.ctx_id);
308 322
309 if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))) { 323 if (this_cpu_read(cpu_tlbstate.is_lazy)) {
310 /* 324 /*
311 * We're in lazy mode -- don't flush. We can get here on 325 * We're in lazy mode. We need to at least flush our
312 * remote flushes due to races and on local flushes if a 326 * paging-structure cache to avoid speculatively reading
313 * kernel thread coincidentally flushes the mm it's lazily 327 * garbage into our TLB. Since switching to init_mm is barely
314 * still using. 328 * slower than a minimal flush, just switch to init_mm.
315 */ 329 */
330 switch_mm_irqs_off(NULL, &init_mm, NULL);
316 return; 331 return;
317 } 332 }
318 333
@@ -611,3 +626,57 @@ static int __init create_tlb_single_page_flush_ceiling(void)
611 return 0; 626 return 0;
612} 627}
613late_initcall(create_tlb_single_page_flush_ceiling); 628late_initcall(create_tlb_single_page_flush_ceiling);
629
630static ssize_t tlblazy_read_file(struct file *file, char __user *user_buf,
631 size_t count, loff_t *ppos)
632{
633 char buf[2];
634
635 buf[0] = static_branch_likely(&tlb_use_lazy_mode) ? '1' : '0';
636 buf[1] = '\n';
637
638 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
639}
640
641static ssize_t tlblazy_write_file(struct file *file,
642 const char __user *user_buf, size_t count, loff_t *ppos)
643{
644 bool val;
645
646 if (kstrtobool_from_user(user_buf, count, &val))
647 return -EINVAL;
648
649 if (val)
650 static_branch_enable(&tlb_use_lazy_mode);
651 else
652 static_branch_disable(&tlb_use_lazy_mode);
653
654 return count;
655}
656
657static const struct file_operations fops_tlblazy = {
658 .read = tlblazy_read_file,
659 .write = tlblazy_write_file,
660 .llseek = default_llseek,
661};
662
663static int __init init_tlb_use_lazy_mode(void)
664{
665 if (boot_cpu_has(X86_FEATURE_PCID)) {
666 /*
667 * Heuristic: with PCID on, switching to and from
668 * init_mm is reasonably fast, but remote flush IPIs
669 * as expensive as ever, so turn off lazy TLB mode.
670 *
671 * We can't do this in setup_pcid() because static keys
672 * haven't been initialized yet, and it would blow up
673 * badly.
674 */
675 static_branch_disable(&tlb_use_lazy_mode);
676 }
677
678 debugfs_create_file("tlb_use_lazy_mode", S_IRUSR | S_IWUSR,
679 arch_debugfs_dir, NULL, &fops_tlblazy);
680 return 0;
681}
682late_initcall(init_tlb_use_lazy_mode);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0e7ef69e8531..d669e9d89001 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -93,11 +93,11 @@ int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
93 int rc; 93 int rc;
94 94
95 rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE, 95 rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
96 "x86/xen/hvm_guest:prepare", 96 "x86/xen/guest:prepare",
97 cpu_up_prepare_cb, cpu_dead_cb); 97 cpu_up_prepare_cb, cpu_dead_cb);
98 if (rc >= 0) { 98 if (rc >= 0) {
99 rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 99 rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
100 "x86/xen/hvm_guest:online", 100 "x86/xen/guest:online",
101 xen_cpu_up_online, NULL); 101 xen_cpu_up_online, NULL);
102 if (rc < 0) 102 if (rc < 0)
103 cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE); 103 cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
diff --git a/block/bio.c b/block/bio.c
index b38e962fa83e..101c2a9b5481 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1239,8 +1239,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
1239 */ 1239 */
1240 bmd->is_our_pages = map_data ? 0 : 1; 1240 bmd->is_our_pages = map_data ? 0 : 1;
1241 memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs); 1241 memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
1242 iov_iter_init(&bmd->iter, iter->type, bmd->iov, 1242 bmd->iter = *iter;
1243 iter->nr_segs, iter->count); 1243 bmd->iter.iov = bmd->iov;
1244 1244
1245 ret = -ENOMEM; 1245 ret = -ENOMEM;
1246 bio = bio_kmalloc(gfp_mask, nr_pages); 1246 bio = bio_kmalloc(gfp_mask, nr_pages);
@@ -1331,6 +1331,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
1331 int ret, offset; 1331 int ret, offset;
1332 struct iov_iter i; 1332 struct iov_iter i;
1333 struct iovec iov; 1333 struct iovec iov;
1334 struct bio_vec *bvec;
1334 1335
1335 iov_for_each(iov, i, *iter) { 1336 iov_for_each(iov, i, *iter) {
1336 unsigned long uaddr = (unsigned long) iov.iov_base; 1337 unsigned long uaddr = (unsigned long) iov.iov_base;
@@ -1375,7 +1376,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
1375 ret = get_user_pages_fast(uaddr, local_nr_pages, 1376 ret = get_user_pages_fast(uaddr, local_nr_pages,
1376 (iter->type & WRITE) != WRITE, 1377 (iter->type & WRITE) != WRITE,
1377 &pages[cur_page]); 1378 &pages[cur_page]);
1378 if (ret < local_nr_pages) { 1379 if (unlikely(ret < local_nr_pages)) {
1380 for (j = cur_page; j < page_limit; j++) {
1381 if (!pages[j])
1382 break;
1383 put_page(pages[j]);
1384 }
1379 ret = -EFAULT; 1385 ret = -EFAULT;
1380 goto out_unmap; 1386 goto out_unmap;
1381 } 1387 }
@@ -1383,6 +1389,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
1383 offset = offset_in_page(uaddr); 1389 offset = offset_in_page(uaddr);
1384 for (j = cur_page; j < page_limit; j++) { 1390 for (j = cur_page; j < page_limit; j++) {
1385 unsigned int bytes = PAGE_SIZE - offset; 1391 unsigned int bytes = PAGE_SIZE - offset;
1392 unsigned short prev_bi_vcnt = bio->bi_vcnt;
1386 1393
1387 if (len <= 0) 1394 if (len <= 0)
1388 break; 1395 break;
@@ -1397,6 +1404,13 @@ struct bio *bio_map_user_iov(struct request_queue *q,
1397 bytes) 1404 bytes)
1398 break; 1405 break;
1399 1406
1407 /*
1408 * check if vector was merged with previous
1409 * drop page reference if needed
1410 */
1411 if (bio->bi_vcnt == prev_bi_vcnt)
1412 put_page(pages[j]);
1413
1400 len -= bytes; 1414 len -= bytes;
1401 offset = 0; 1415 offset = 0;
1402 } 1416 }
@@ -1423,10 +1437,8 @@ struct bio *bio_map_user_iov(struct request_queue *q,
1423 return bio; 1437 return bio;
1424 1438
1425 out_unmap: 1439 out_unmap:
1426 for (j = 0; j < nr_pages; j++) { 1440 bio_for_each_segment_all(bvec, bio, j) {
1427 if (!pages[j]) 1441 put_page(bvec->bv_page);
1428 break;
1429 put_page(pages[j]);
1430 } 1442 }
1431 out: 1443 out:
1432 kfree(pages); 1444 kfree(pages);
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index e4b0ed386bc8..39aecad286fe 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -57,6 +57,8 @@ struct key *find_asymmetric_key(struct key *keyring,
57 char *req, *p; 57 char *req, *p;
58 int len; 58 int len;
59 59
60 BUG_ON(!id_0 && !id_1);
61
60 if (id_0) { 62 if (id_0) {
61 lookup = id_0->data; 63 lookup = id_0->data;
62 len = id_0->len; 64 len = id_0->len;
@@ -105,7 +107,7 @@ struct key *find_asymmetric_key(struct key *keyring,
105 if (id_0 && id_1) { 107 if (id_0 && id_1) {
106 const struct asymmetric_key_ids *kids = asymmetric_key_ids(key); 108 const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
107 109
108 if (!kids->id[0]) { 110 if (!kids->id[1]) {
109 pr_debug("First ID matches, but second is missing\n"); 111 pr_debug("First ID matches, but second is missing\n");
110 goto reject; 112 goto reject;
111 } 113 }
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index af4cd8649117..d140d8bb2c96 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -88,6 +88,9 @@ static int pkcs7_check_authattrs(struct pkcs7_message *msg)
88 bool want = false; 88 bool want = false;
89 89
90 sinfo = msg->signed_infos; 90 sinfo = msg->signed_infos;
91 if (!sinfo)
92 goto inconsistent;
93
91 if (sinfo->authattrs) { 94 if (sinfo->authattrs) {
92 want = true; 95 want = true;
93 msg->have_authattrs = true; 96 msg->have_authattrs = true;
diff --git a/crypto/shash.c b/crypto/shash.c
index 5e31c8d776df..325a14da5827 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
41 int err; 41 int err;
42 42
43 absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); 43 absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
44 buffer = kmalloc(absize, GFP_KERNEL); 44 buffer = kmalloc(absize, GFP_ATOMIC);
45 if (!buffer) 45 if (!buffer)
46 return -ENOMEM; 46 return -ENOMEM;
47 47
@@ -275,12 +275,14 @@ static int shash_async_finup(struct ahash_request *req)
275 275
276int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) 276int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
277{ 277{
278 struct scatterlist *sg = req->src;
279 unsigned int offset = sg->offset;
280 unsigned int nbytes = req->nbytes; 278 unsigned int nbytes = req->nbytes;
279 struct scatterlist *sg;
280 unsigned int offset;
281 int err; 281 int err;
282 282
283 if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { 283 if (nbytes &&
284 (sg = req->src, offset = sg->offset,
285 nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
284 void *data; 286 void *data;
285 287
286 data = kmap_atomic(sg_page(sg)); 288 data = kmap_atomic(sg_page(sg));
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 4faa0fd53b0c..d5692e35fab1 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -426,14 +426,9 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
426 426
427static int skcipher_walk_first(struct skcipher_walk *walk) 427static int skcipher_walk_first(struct skcipher_walk *walk)
428{ 428{
429 walk->nbytes = 0;
430
431 if (WARN_ON_ONCE(in_irq())) 429 if (WARN_ON_ONCE(in_irq()))
432 return -EDEADLK; 430 return -EDEADLK;
433 431
434 if (unlikely(!walk->total))
435 return 0;
436
437 walk->buffer = NULL; 432 walk->buffer = NULL;
438 if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { 433 if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
439 int err = skcipher_copy_iv(walk); 434 int err = skcipher_copy_iv(walk);
@@ -452,10 +447,15 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
452{ 447{
453 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 448 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
454 449
450 walk->total = req->cryptlen;
451 walk->nbytes = 0;
452
453 if (unlikely(!walk->total))
454 return 0;
455
455 scatterwalk_start(&walk->in, req->src); 456 scatterwalk_start(&walk->in, req->src);
456 scatterwalk_start(&walk->out, req->dst); 457 scatterwalk_start(&walk->out, req->dst);
457 458
458 walk->total = req->cryptlen;
459 walk->iv = req->iv; 459 walk->iv = req->iv;
460 walk->oiv = req->iv; 460 walk->oiv = req->iv;
461 461
@@ -509,6 +509,11 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
509 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 509 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
510 int err; 510 int err;
511 511
512 walk->nbytes = 0;
513
514 if (unlikely(!walk->total))
515 return 0;
516
512 walk->flags &= ~SKCIPHER_WALK_PHYS; 517 walk->flags &= ~SKCIPHER_WALK_PHYS;
513 518
514 scatterwalk_start(&walk->in, req->src); 519 scatterwalk_start(&walk->in, req->src);
diff --git a/crypto/xts.c b/crypto/xts.c
index d86c11a8c882..e31828ed0046 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -554,8 +554,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
554 ctx->name[len - 1] = 0; 554 ctx->name[len - 1] = 0;
555 555
556 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, 556 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
557 "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) 557 "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
558 return -ENAMETOOLONG; 558 err = -ENAMETOOLONG;
559 goto err_drop_spawn;
560 }
559 } else 561 } else
560 goto err_drop_spawn; 562 goto err_drop_spawn;
561 563
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 3fb8ff513461..e26ea209b63e 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -571,10 +571,9 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
571 * } 571 * }
572 * } 572 * }
573 * 573 *
574 * Calling this function with index %2 return %-ENOENT and with index %3 574 * Calling this function with index %2 or index %3 return %-ENOENT. If the
575 * returns the last entry. If the property does not contain any more values 575 * property does not contain any more values %-ENOENT is returned. The NULL
576 * %-ENODATA is returned. The NULL entry must be single integer and 576 * entry must be single integer and preferably contain value %0.
577 * preferably contain value %0.
578 * 577 *
579 * Return: %0 on success, negative error code on failure. 578 * Return: %0 on success, negative error code on failure.
580 */ 579 */
@@ -590,11 +589,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
590 589
591 data = acpi_device_data_of_node(fwnode); 590 data = acpi_device_data_of_node(fwnode);
592 if (!data) 591 if (!data)
593 return -EINVAL; 592 return -ENOENT;
594 593
595 ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj); 594 ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
596 if (ret) 595 if (ret)
597 return ret; 596 return ret == -EINVAL ? -ENOENT : -EINVAL;
598 597
599 /* 598 /*
600 * The simplest case is when the value is a single reference. Just 599 * The simplest case is when the value is a single reference. Just
@@ -606,7 +605,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
606 605
607 ret = acpi_bus_get_device(obj->reference.handle, &device); 606 ret = acpi_bus_get_device(obj->reference.handle, &device);
608 if (ret) 607 if (ret)
609 return ret; 608 return ret == -ENODEV ? -EINVAL : ret;
610 609
611 args->adev = device; 610 args->adev = device;
612 args->nargs = 0; 611 args->nargs = 0;
@@ -622,8 +621,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
622 * The index argument is then used to determine which reference 621 * The index argument is then used to determine which reference
623 * the caller wants (along with the arguments). 622 * the caller wants (along with the arguments).
624 */ 623 */
625 if (obj->type != ACPI_TYPE_PACKAGE || index >= obj->package.count) 624 if (obj->type != ACPI_TYPE_PACKAGE)
626 return -EPROTO; 625 return -EINVAL;
626 if (index >= obj->package.count)
627 return -ENOENT;
627 628
628 element = obj->package.elements; 629 element = obj->package.elements;
629 end = element + obj->package.count; 630 end = element + obj->package.count;
@@ -635,7 +636,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
635 ret = acpi_bus_get_device(element->reference.handle, 636 ret = acpi_bus_get_device(element->reference.handle,
636 &device); 637 &device);
637 if (ret) 638 if (ret)
638 return -ENODEV; 639 return -EINVAL;
639 640
640 nargs = 0; 641 nargs = 0;
641 element++; 642 element++;
@@ -649,11 +650,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
649 else if (type == ACPI_TYPE_LOCAL_REFERENCE) 650 else if (type == ACPI_TYPE_LOCAL_REFERENCE)
650 break; 651 break;
651 else 652 else
652 return -EPROTO; 653 return -EINVAL;
653 } 654 }
654 655
655 if (nargs > MAX_ACPI_REFERENCE_ARGS) 656 if (nargs > MAX_ACPI_REFERENCE_ARGS)
656 return -EPROTO; 657 return -EINVAL;
657 658
658 if (idx == index) { 659 if (idx == index) {
659 args->adev = device; 660 args->adev = device;
@@ -670,13 +671,13 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
670 return -ENOENT; 671 return -ENOENT;
671 element++; 672 element++;
672 } else { 673 } else {
673 return -EPROTO; 674 return -EINVAL;
674 } 675 }
675 676
676 idx++; 677 idx++;
677 } 678 }
678 679
679 return -ENODATA; 680 return -ENOENT;
680} 681}
681EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference); 682EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
682 683
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index ab34239a76ee..0621a95b8597 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2582,6 +2582,48 @@ static bool binder_proc_transaction(struct binder_transaction *t,
2582 return true; 2582 return true;
2583} 2583}
2584 2584
2585/**
2586 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2587 * @node: struct binder_node for which to get refs
2588 * @proc: returns @node->proc if valid
2589 * @error: if no @proc then returns BR_DEAD_REPLY
2590 *
2591 * User-space normally keeps the node alive when creating a transaction
2592 * since it has a reference to the target. The local strong ref keeps it
2593 * alive if the sending process dies before the target process processes
2594 * the transaction. If the source process is malicious or has a reference
2595 * counting bug, relying on the local strong ref can fail.
2596 *
2597 * Since user-space can cause the local strong ref to go away, we also take
2598 * a tmpref on the node to ensure it survives while we are constructing
2599 * the transaction. We also need a tmpref on the proc while we are
2600 * constructing the transaction, so we take that here as well.
2601 *
2602 * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
2603 * Also sets @proc if valid. If the @node->proc is NULL indicating that the
2604 * target proc has died, @error is set to BR_DEAD_REPLY
2605 */
2606static struct binder_node *binder_get_node_refs_for_txn(
2607 struct binder_node *node,
2608 struct binder_proc **procp,
2609 uint32_t *error)
2610{
2611 struct binder_node *target_node = NULL;
2612
2613 binder_node_inner_lock(node);
2614 if (node->proc) {
2615 target_node = node;
2616 binder_inc_node_nilocked(node, 1, 0, NULL);
2617 binder_inc_node_tmpref_ilocked(node);
2618 node->proc->tmp_ref++;
2619 *procp = node->proc;
2620 } else
2621 *error = BR_DEAD_REPLY;
2622 binder_node_inner_unlock(node);
2623
2624 return target_node;
2625}
2626
2585static void binder_transaction(struct binder_proc *proc, 2627static void binder_transaction(struct binder_proc *proc,
2586 struct binder_thread *thread, 2628 struct binder_thread *thread,
2587 struct binder_transaction_data *tr, int reply, 2629 struct binder_transaction_data *tr, int reply,
@@ -2685,43 +2727,35 @@ static void binder_transaction(struct binder_proc *proc,
2685 ref = binder_get_ref_olocked(proc, tr->target.handle, 2727 ref = binder_get_ref_olocked(proc, tr->target.handle,
2686 true); 2728 true);
2687 if (ref) { 2729 if (ref) {
2688 binder_inc_node(ref->node, 1, 0, NULL); 2730 target_node = binder_get_node_refs_for_txn(
2689 target_node = ref->node; 2731 ref->node, &target_proc,
2690 } 2732 &return_error);
2691 binder_proc_unlock(proc); 2733 } else {
2692 if (target_node == NULL) {
2693 binder_user_error("%d:%d got transaction to invalid handle\n", 2734 binder_user_error("%d:%d got transaction to invalid handle\n",
2694 proc->pid, thread->pid); 2735 proc->pid, thread->pid);
2695 return_error = BR_FAILED_REPLY; 2736 return_error = BR_FAILED_REPLY;
2696 return_error_param = -EINVAL;
2697 return_error_line = __LINE__;
2698 goto err_invalid_target_handle;
2699 } 2737 }
2738 binder_proc_unlock(proc);
2700 } else { 2739 } else {
2701 mutex_lock(&context->context_mgr_node_lock); 2740 mutex_lock(&context->context_mgr_node_lock);
2702 target_node = context->binder_context_mgr_node; 2741 target_node = context->binder_context_mgr_node;
2703 if (target_node == NULL) { 2742 if (target_node)
2743 target_node = binder_get_node_refs_for_txn(
2744 target_node, &target_proc,
2745 &return_error);
2746 else
2704 return_error = BR_DEAD_REPLY; 2747 return_error = BR_DEAD_REPLY;
2705 mutex_unlock(&context->context_mgr_node_lock);
2706 return_error_line = __LINE__;
2707 goto err_no_context_mgr_node;
2708 }
2709 binder_inc_node(target_node, 1, 0, NULL);
2710 mutex_unlock(&context->context_mgr_node_lock); 2748 mutex_unlock(&context->context_mgr_node_lock);
2711 } 2749 }
2712 e->to_node = target_node->debug_id; 2750 if (!target_node) {
2713 binder_node_lock(target_node); 2751 /*
2714 target_proc = target_node->proc; 2752 * return_error is set above
2715 if (target_proc == NULL) { 2753 */
2716 binder_node_unlock(target_node); 2754 return_error_param = -EINVAL;
2717 return_error = BR_DEAD_REPLY;
2718 return_error_line = __LINE__; 2755 return_error_line = __LINE__;
2719 goto err_dead_binder; 2756 goto err_dead_binder;
2720 } 2757 }
2721 binder_inner_proc_lock(target_proc); 2758 e->to_node = target_node->debug_id;
2722 target_proc->tmp_ref++;
2723 binder_inner_proc_unlock(target_proc);
2724 binder_node_unlock(target_node);
2725 if (security_binder_transaction(proc->tsk, 2759 if (security_binder_transaction(proc->tsk,
2726 target_proc->tsk) < 0) { 2760 target_proc->tsk) < 0) {
2727 return_error = BR_FAILED_REPLY; 2761 return_error = BR_FAILED_REPLY;
@@ -3071,6 +3105,8 @@ static void binder_transaction(struct binder_proc *proc,
3071 if (target_thread) 3105 if (target_thread)
3072 binder_thread_dec_tmpref(target_thread); 3106 binder_thread_dec_tmpref(target_thread);
3073 binder_proc_dec_tmpref(target_proc); 3107 binder_proc_dec_tmpref(target_proc);
3108 if (target_node)
3109 binder_dec_node_tmpref(target_node);
3074 /* 3110 /*
3075 * write barrier to synchronize with initialization 3111 * write barrier to synchronize with initialization
3076 * of log entry 3112 * of log entry
@@ -3090,6 +3126,8 @@ err_bad_parent:
3090err_copy_data_failed: 3126err_copy_data_failed:
3091 trace_binder_transaction_failed_buffer_release(t->buffer); 3127 trace_binder_transaction_failed_buffer_release(t->buffer);
3092 binder_transaction_buffer_release(target_proc, t->buffer, offp); 3128 binder_transaction_buffer_release(target_proc, t->buffer, offp);
3129 if (target_node)
3130 binder_dec_node_tmpref(target_node);
3093 target_node = NULL; 3131 target_node = NULL;
3094 t->buffer->transaction = NULL; 3132 t->buffer->transaction = NULL;
3095 binder_alloc_free_buf(&target_proc->alloc, t->buffer); 3133 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
@@ -3104,13 +3142,14 @@ err_bad_call_stack:
3104err_empty_call_stack: 3142err_empty_call_stack:
3105err_dead_binder: 3143err_dead_binder:
3106err_invalid_target_handle: 3144err_invalid_target_handle:
3107err_no_context_mgr_node:
3108 if (target_thread) 3145 if (target_thread)
3109 binder_thread_dec_tmpref(target_thread); 3146 binder_thread_dec_tmpref(target_thread);
3110 if (target_proc) 3147 if (target_proc)
3111 binder_proc_dec_tmpref(target_proc); 3148 binder_proc_dec_tmpref(target_proc);
3112 if (target_node) 3149 if (target_node) {
3113 binder_dec_node(target_node, 1, 0); 3150 binder_dec_node(target_node, 1, 0);
3151 binder_dec_node_tmpref(target_node);
3152 }
3114 3153
3115 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 3154 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3116 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", 3155 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 3855902f2c5b..aae2402f3791 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -27,13 +27,21 @@ static struct bus_type node_subsys = {
27 27
28static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf) 28static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
29{ 29{
30 ssize_t n;
31 cpumask_var_t mask;
30 struct node *node_dev = to_node(dev); 32 struct node *node_dev = to_node(dev);
31 const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
32 33
33 /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */ 34 /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
34 BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); 35 BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
35 36
36 return cpumap_print_to_pagebuf(list, buf, mask); 37 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
38 return 0;
39
40 cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
41 n = cpumap_print_to_pagebuf(list, buf, mask);
42 free_cpumask_var(mask);
43
44 return n;
37} 45}
38 46
39static inline ssize_t node_read_cpumask(struct device *dev, 47static inline ssize_t node_read_cpumask(struct device *dev,
diff --git a/drivers/base/property.c b/drivers/base/property.c
index d0b65bbe7e15..7ed99c1b2a8b 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -21,6 +21,7 @@
21#include <linux/phy.h> 21#include <linux/phy.h>
22 22
23struct property_set { 23struct property_set {
24 struct device *dev;
24 struct fwnode_handle fwnode; 25 struct fwnode_handle fwnode;
25 const struct property_entry *properties; 26 const struct property_entry *properties;
26}; 27};
@@ -682,6 +683,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string);
682 * Caller is responsible to call fwnode_handle_put() on the returned 683 * Caller is responsible to call fwnode_handle_put() on the returned
683 * args->fwnode pointer. 684 * args->fwnode pointer.
684 * 685 *
686 * Returns: %0 on success
687 * %-ENOENT when the index is out of bounds, the index has an empty
688 * reference or the property was not found
689 * %-EINVAL on parse error
685 */ 690 */
686int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, 691int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
687 const char *prop, const char *nargs_prop, 692 const char *prop, const char *nargs_prop,
@@ -891,6 +896,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset)
891void device_remove_properties(struct device *dev) 896void device_remove_properties(struct device *dev)
892{ 897{
893 struct fwnode_handle *fwnode; 898 struct fwnode_handle *fwnode;
899 struct property_set *pset;
894 900
895 fwnode = dev_fwnode(dev); 901 fwnode = dev_fwnode(dev);
896 if (!fwnode) 902 if (!fwnode)
@@ -900,16 +906,16 @@ void device_remove_properties(struct device *dev)
900 * the pset. If there is no real firmware node (ACPI/DT) primary 906 * the pset. If there is no real firmware node (ACPI/DT) primary
901 * will hold the pset. 907 * will hold the pset.
902 */ 908 */
903 if (is_pset_node(fwnode)) { 909 pset = to_pset_node(fwnode);
910 if (pset) {
904 set_primary_fwnode(dev, NULL); 911 set_primary_fwnode(dev, NULL);
905 pset_free_set(to_pset_node(fwnode));
906 } else { 912 } else {
907 fwnode = fwnode->secondary; 913 pset = to_pset_node(fwnode->secondary);
908 if (!IS_ERR(fwnode) && is_pset_node(fwnode)) { 914 if (pset && dev == pset->dev)
909 set_secondary_fwnode(dev, NULL); 915 set_secondary_fwnode(dev, NULL);
910 pset_free_set(to_pset_node(fwnode));
911 }
912 } 916 }
917 if (pset && dev == pset->dev)
918 pset_free_set(pset);
913} 919}
914EXPORT_SYMBOL_GPL(device_remove_properties); 920EXPORT_SYMBOL_GPL(device_remove_properties);
915 921
@@ -938,6 +944,7 @@ int device_add_properties(struct device *dev,
938 944
939 p->fwnode.ops = &pset_fwnode_ops; 945 p->fwnode.ops = &pset_fwnode_ops;
940 set_secondary_fwnode(dev, &p->fwnode); 946 set_secondary_fwnode(dev, &p->fwnode);
947 p->dev = dev;
941 return 0; 948 return 0;
942} 949}
943EXPORT_SYMBOL_GPL(device_add_properties); 950EXPORT_SYMBOL_GPL(device_add_properties);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 883dfebd3014..baebbdfd74d5 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
243 struct nbd_config *config = nbd->config; 243 struct nbd_config *config = nbd->config;
244 config->blksize = blocksize; 244 config->blksize = blocksize;
245 config->bytesize = blocksize * nr_blocks; 245 config->bytesize = blocksize * nr_blocks;
246 nbd_size_update(nbd);
247} 246}
248 247
249static void nbd_complete_rq(struct request *req) 248static void nbd_complete_rq(struct request *req)
@@ -1094,6 +1093,7 @@ static int nbd_start_device(struct nbd_device *nbd)
1094 args->index = i; 1093 args->index = i;
1095 queue_work(recv_workqueue, &args->work); 1094 queue_work(recv_workqueue, &args->work);
1096 } 1095 }
1096 nbd_size_update(nbd);
1097 return error; 1097 return error;
1098} 1098}
1099 1099
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 7cedb4295e9d..64d0fc17c174 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2604,7 +2604,7 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
2604 return NULL; 2604 return NULL;
2605 *dma_handle = dma_map_single(dev, buf, s->size, dir); 2605 *dma_handle = dma_map_single(dev, buf, s->size, dir);
2606 if (dma_mapping_error(dev, *dma_handle)) { 2606 if (dma_mapping_error(dev, *dma_handle)) {
2607 kfree(buf); 2607 kmem_cache_free(s, buf);
2608 buf = NULL; 2608 buf = NULL;
2609 } 2609 }
2610 return buf; 2610 return buf;
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index c7f396903184..70db4d5638a6 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -720,7 +720,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
720 if (mbus->hw_io_coherency) 720 if (mbus->hw_io_coherency)
721 w->mbus_attr |= ATTR_HW_COHERENCY; 721 w->mbus_attr |= ATTR_HW_COHERENCY;
722 w->base = base & DDR_BASE_CS_LOW_MASK; 722 w->base = base & DDR_BASE_CS_LOW_MASK;
723 w->size = (size | ~DDR_SIZE_MASK) + 1; 723 w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
724 } 724 }
725 } 725 }
726 mvebu_mbus_dram_info.num_cs = cs; 726 mvebu_mbus_dram_info.num_cs = cs;
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index d9fbbf01062b..0f9754e07719 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -349,8 +349,6 @@ struct artpec6_crypto_aead_req_ctx {
349/* The crypto framework makes it hard to avoid this global. */ 349/* The crypto framework makes it hard to avoid this global. */
350static struct device *artpec6_crypto_dev; 350static struct device *artpec6_crypto_dev;
351 351
352static struct dentry *dbgfs_root;
353
354#ifdef CONFIG_FAULT_INJECTION 352#ifdef CONFIG_FAULT_INJECTION
355static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read); 353static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
356static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full); 354static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
@@ -2984,6 +2982,8 @@ struct dbgfs_u32 {
2984 char *desc; 2982 char *desc;
2985}; 2983};
2986 2984
2985static struct dentry *dbgfs_root;
2986
2987static void artpec6_crypto_init_debugfs(void) 2987static void artpec6_crypto_init_debugfs(void)
2988{ 2988{
2989 dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL); 2989 dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index b585ce54a802..4835dd4a9e50 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -553,9 +553,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
553{ 553{
554 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); 554 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
555 struct scatterlist sg[1], *tsg; 555 struct scatterlist sg[1], *tsg;
556 int err = 0, len = 0, reg, ncp; 556 int err = 0, len = 0, reg, ncp = 0;
557 unsigned int i; 557 unsigned int i;
558 const u32 *buffer = (const u32 *)rctx->buffer; 558 u32 *buffer = (void *)rctx->buffer;
559 559
560 rctx->sg = hdev->req->src; 560 rctx->sg = hdev->req->src;
561 rctx->total = hdev->req->nbytes; 561 rctx->total = hdev->req->nbytes;
@@ -620,10 +620,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
620 reg |= HASH_CR_DMAA; 620 reg |= HASH_CR_DMAA;
621 stm32_hash_write(hdev, HASH_CR, reg); 621 stm32_hash_write(hdev, HASH_CR, reg);
622 622
623 for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++) 623 if (ncp) {
624 stm32_hash_write(hdev, HASH_DIN, buffer[i]); 624 memset(buffer + ncp, 0,
625 625 DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
626 stm32_hash_set_nblw(hdev, ncp); 626 writesl(hdev->io_base + HASH_DIN, buffer,
627 DIV_ROUND_UP(ncp, sizeof(u32)));
628 }
629 stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32)));
627 reg = stm32_hash_read(hdev, HASH_STR); 630 reg = stm32_hash_read(hdev, HASH_STR);
628 reg |= HASH_STR_DCAL; 631 reg |= HASH_STR_DCAL;
629 stm32_hash_write(hdev, HASH_STR, reg); 632 stm32_hash_write(hdev, HASH_STR, reg);
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 66fb40d0ebdb..03830634e141 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -383,7 +383,7 @@ err_put_fd:
383 return err; 383 return err;
384} 384}
385 385
386static void sync_fill_fence_info(struct dma_fence *fence, 386static int sync_fill_fence_info(struct dma_fence *fence,
387 struct sync_fence_info *info) 387 struct sync_fence_info *info)
388{ 388{
389 strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), 389 strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
@@ -399,6 +399,8 @@ static void sync_fill_fence_info(struct dma_fence *fence,
399 test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ? 399 test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
400 ktime_to_ns(fence->timestamp) : 400 ktime_to_ns(fence->timestamp) :
401 ktime_set(0, 0); 401 ktime_set(0, 0);
402
403 return info->status;
402} 404}
403 405
404static long sync_file_ioctl_fence_info(struct sync_file *sync_file, 406static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
@@ -424,8 +426,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
424 * sync_fence_info and return the actual number of fences on 426 * sync_fence_info and return the actual number of fences on
425 * info->num_fences. 427 * info->num_fences.
426 */ 428 */
427 if (!info.num_fences) 429 if (!info.num_fences) {
430 info.status = dma_fence_is_signaled(sync_file->fence);
428 goto no_fences; 431 goto no_fences;
432 } else {
433 info.status = 1;
434 }
429 435
430 if (info.num_fences < num_fences) 436 if (info.num_fences < num_fences)
431 return -EINVAL; 437 return -EINVAL;
@@ -435,8 +441,10 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
435 if (!fence_info) 441 if (!fence_info)
436 return -ENOMEM; 442 return -ENOMEM;
437 443
438 for (i = 0; i < num_fences; i++) 444 for (i = 0; i < num_fences; i++) {
439 sync_fill_fence_info(fences[i], &fence_info[i]); 445 int status = sync_fill_fence_info(fences[i], &fence_info[i]);
446 info.status = info.status <= 0 ? info.status : status;
447 }
440 448
441 if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info, 449 if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
442 size)) { 450 size)) {
@@ -446,7 +454,6 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
446 454
447no_fences: 455no_fences:
448 sync_file_get_name(sync_file, info.name, sizeof(info.name)); 456 sync_file_get_name(sync_file, info.name, sizeof(info.name));
449 info.status = dma_fence_is_signaled(sync_file->fence);
450 info.num_fences = num_fences; 457 info.num_fences = num_fences;
451 458
452 if (copy_to_user((void __user *)arg, &info, sizeof(info))) 459 if (copy_to_user((void __user *)arg, &info, sizeof(info)))
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 32905d5606ac..55f9c62ee54b 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -212,11 +212,12 @@ struct msgdma_device {
212static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) 212static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
213{ 213{
214 struct msgdma_sw_desc *desc; 214 struct msgdma_sw_desc *desc;
215 unsigned long flags;
215 216
216 spin_lock_bh(&mdev->lock); 217 spin_lock_irqsave(&mdev->lock, flags);
217 desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); 218 desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
218 list_del(&desc->node); 219 list_del(&desc->node);
219 spin_unlock_bh(&mdev->lock); 220 spin_unlock_irqrestore(&mdev->lock, flags);
220 221
221 INIT_LIST_HEAD(&desc->tx_list); 222 INIT_LIST_HEAD(&desc->tx_list);
222 223
@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
306 struct msgdma_device *mdev = to_mdev(tx->chan); 307 struct msgdma_device *mdev = to_mdev(tx->chan);
307 struct msgdma_sw_desc *new; 308 struct msgdma_sw_desc *new;
308 dma_cookie_t cookie; 309 dma_cookie_t cookie;
310 unsigned long flags;
309 311
310 new = tx_to_desc(tx); 312 new = tx_to_desc(tx);
311 spin_lock_bh(&mdev->lock); 313 spin_lock_irqsave(&mdev->lock, flags);
312 cookie = dma_cookie_assign(tx); 314 cookie = dma_cookie_assign(tx);
313 315
314 list_add_tail(&new->node, &mdev->pending_list); 316 list_add_tail(&new->node, &mdev->pending_list);
315 spin_unlock_bh(&mdev->lock); 317 spin_unlock_irqrestore(&mdev->lock, flags);
316 318
317 return cookie; 319 return cookie;
318} 320}
@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
336 struct msgdma_extended_desc *desc; 338 struct msgdma_extended_desc *desc;
337 size_t copy; 339 size_t copy;
338 u32 desc_cnt; 340 u32 desc_cnt;
341 unsigned long irqflags;
339 342
340 desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); 343 desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
341 344
342 spin_lock_bh(&mdev->lock); 345 spin_lock_irqsave(&mdev->lock, irqflags);
343 if (desc_cnt > mdev->desc_free_cnt) { 346 if (desc_cnt > mdev->desc_free_cnt) {
344 spin_unlock_bh(&mdev->lock); 347 spin_unlock_irqrestore(&mdev->lock, irqflags);
345 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); 348 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
346 return NULL; 349 return NULL;
347 } 350 }
348 mdev->desc_free_cnt -= desc_cnt; 351 mdev->desc_free_cnt -= desc_cnt;
349 spin_unlock_bh(&mdev->lock); 352 spin_unlock_irqrestore(&mdev->lock, irqflags);
350 353
351 do { 354 do {
352 /* Allocate and populate the descriptor */ 355 /* Allocate and populate the descriptor */
@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
397 u32 desc_cnt = 0, i; 400 u32 desc_cnt = 0, i;
398 struct scatterlist *sg; 401 struct scatterlist *sg;
399 u32 stride; 402 u32 stride;
403 unsigned long irqflags;
400 404
401 for_each_sg(sgl, sg, sg_len, i) 405 for_each_sg(sgl, sg, sg_len, i)
402 desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); 406 desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
403 407
404 spin_lock_bh(&mdev->lock); 408 spin_lock_irqsave(&mdev->lock, irqflags);
405 if (desc_cnt > mdev->desc_free_cnt) { 409 if (desc_cnt > mdev->desc_free_cnt) {
406 spin_unlock_bh(&mdev->lock); 410 spin_unlock_irqrestore(&mdev->lock, irqflags);
407 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); 411 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
408 return NULL; 412 return NULL;
409 } 413 }
410 mdev->desc_free_cnt -= desc_cnt; 414 mdev->desc_free_cnt -= desc_cnt;
411 spin_unlock_bh(&mdev->lock); 415 spin_unlock_irqrestore(&mdev->lock, irqflags);
412 416
413 avail = sg_dma_len(sgl); 417 avail = sg_dma_len(sgl);
414 418
@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
566static void msgdma_issue_pending(struct dma_chan *chan) 570static void msgdma_issue_pending(struct dma_chan *chan)
567{ 571{
568 struct msgdma_device *mdev = to_mdev(chan); 572 struct msgdma_device *mdev = to_mdev(chan);
573 unsigned long flags;
569 574
570 spin_lock_bh(&mdev->lock); 575 spin_lock_irqsave(&mdev->lock, flags);
571 msgdma_start_transfer(mdev); 576 msgdma_start_transfer(mdev);
572 spin_unlock_bh(&mdev->lock); 577 spin_unlock_irqrestore(&mdev->lock, flags);
573} 578}
574 579
575/** 580/**
@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
634static void msgdma_free_chan_resources(struct dma_chan *dchan) 639static void msgdma_free_chan_resources(struct dma_chan *dchan)
635{ 640{
636 struct msgdma_device *mdev = to_mdev(dchan); 641 struct msgdma_device *mdev = to_mdev(dchan);
642 unsigned long flags;
637 643
638 spin_lock_bh(&mdev->lock); 644 spin_lock_irqsave(&mdev->lock, flags);
639 msgdma_free_descriptors(mdev); 645 msgdma_free_descriptors(mdev);
640 spin_unlock_bh(&mdev->lock); 646 spin_unlock_irqrestore(&mdev->lock, flags);
641 kfree(mdev->sw_desq); 647 kfree(mdev->sw_desq);
642} 648}
643 649
@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
682 u32 count; 688 u32 count;
683 u32 __maybe_unused size; 689 u32 __maybe_unused size;
684 u32 __maybe_unused status; 690 u32 __maybe_unused status;
691 unsigned long flags;
685 692
686 spin_lock(&mdev->lock); 693 spin_lock_irqsave(&mdev->lock, flags);
687 694
688 /* Read number of responses that are available */ 695 /* Read number of responses that are available */
689 count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); 696 count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
@@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data)
698 * bits. So we need to just drop these values. 705 * bits. So we need to just drop these values.
699 */ 706 */
700 size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); 707 size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
701 status = ioread32(mdev->resp - MSGDMA_RESP_STATUS); 708 status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
702 709
703 msgdma_complete_descriptor(mdev); 710 msgdma_complete_descriptor(mdev);
704 msgdma_chan_desc_cleanup(mdev); 711 msgdma_chan_desc_cleanup(mdev);
705 } 712 }
706 713
707 spin_unlock(&mdev->lock); 714 spin_unlock_irqrestore(&mdev->lock, flags);
708} 715}
709 716
710/** 717/**
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 3879f80a4815..a7ea20e7b8e9 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
1143 struct edma_desc *edesc; 1143 struct edma_desc *edesc;
1144 struct device *dev = chan->device->dev; 1144 struct device *dev = chan->device->dev;
1145 struct edma_chan *echan = to_edma_chan(chan); 1145 struct edma_chan *echan = to_edma_chan(chan);
1146 unsigned int width, pset_len; 1146 unsigned int width, pset_len, array_size;
1147 1147
1148 if (unlikely(!echan || !len)) 1148 if (unlikely(!echan || !len))
1149 return NULL; 1149 return NULL;
1150 1150
1151 /* Align the array size (acnt block) with the transfer properties */
1152 switch (__ffs((src | dest | len))) {
1153 case 0:
1154 array_size = SZ_32K - 1;
1155 break;
1156 case 1:
1157 array_size = SZ_32K - 2;
1158 break;
1159 default:
1160 array_size = SZ_32K - 4;
1161 break;
1162 }
1163
1151 if (len < SZ_64K) { 1164 if (len < SZ_64K) {
1152 /* 1165 /*
1153 * Transfer size less than 64K can be handled with one paRAM 1166 * Transfer size less than 64K can be handled with one paRAM
@@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
1169 * When the full_length is a multiple of 32767 one slot can be 1182 * used to complete the transfer.
1170 * used to complete the transfer. 1183 * used to complete the transfer.
1171 */ 1184 */
1172 width = SZ_32K - 1; 1185 width = array_size;
1173 pset_len = rounddown(len, width); 1186 pset_len = rounddown(len, width);
1174 /* One slot is enough for lengths multiple of (SZ_32K -1) */ 1187 /* One slot is enough for lengths multiple of (SZ_32K -1) */
1175 if (unlikely(pset_len == len)) 1188 if (unlikely(pset_len == len))
@@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
1217 } 1230 }
1218 dest += pset_len; 1231 dest += pset_len;
1219 src += pset_len; 1232 src += pset_len;
1220 pset_len = width = len % (SZ_32K - 1); 1233 pset_len = width = len % array_size;
1221 1234
1222 ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, 1235 ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
1223 width, pset_len, DMA_MEM_TO_MEM); 1236 width, pset_len, DMA_MEM_TO_MEM);
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 2f65a8fde21d..f1d04b70ee67 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
262 mutex_lock(&xbar->mutex); 262 mutex_lock(&xbar->mutex);
263 map->xbar_out = find_first_zero_bit(xbar->dma_inuse, 263 map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
264 xbar->dma_requests); 264 xbar->dma_requests);
265 mutex_unlock(&xbar->mutex);
266 if (map->xbar_out == xbar->dma_requests) { 265 if (map->xbar_out == xbar->dma_requests) {
266 mutex_unlock(&xbar->mutex);
267 dev_err(&pdev->dev, "Run out of free DMA requests\n"); 267 dev_err(&pdev->dev, "Run out of free DMA requests\n");
268 kfree(map); 268 kfree(map);
269 return ERR_PTR(-ENOMEM); 269 return ERR_PTR(-ENOMEM);
270 } 270 }
271 set_bit(map->xbar_out, xbar->dma_inuse); 271 set_bit(map->xbar_out, xbar->dma_inuse);
272 mutex_unlock(&xbar->mutex);
272 273
273 map->xbar_in = (u16)dma_spec->args[0]; 274 map->xbar_in = (u16)dma_spec->args[0];
274 275
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3388d54ba114..3f80f167ed56 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -453,7 +453,8 @@ config GPIO_TS4800
453config GPIO_THUNDERX 453config GPIO_THUNDERX
454 tristate "Cavium ThunderX/OCTEON-TX GPIO" 454 tristate "Cavium ThunderX/OCTEON-TX GPIO"
455 depends on ARCH_THUNDER || (64BIT && COMPILE_TEST) 455 depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
456 depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY 456 depends on PCI_MSI
457 select IRQ_DOMAIN_HIERARCHY
457 select IRQ_FASTEOI_HIERARCHY_HANDLERS 458 select IRQ_FASTEOI_HIERARCHY_HANDLERS
458 help 459 help
459 Say yes here to support the on-chip GPIO lines on the ThunderX 460 Say yes here to support the on-chip GPIO lines on the ThunderX
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index dbf869fb63ce..3233b72b6828 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -518,7 +518,13 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
518 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 518 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
519 irq_set_handler_locked(d, handle_level_irq); 519 irq_set_handler_locked(d, handle_level_irq);
520 else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) 520 else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
521 irq_set_handler_locked(d, handle_edge_irq); 521 /*
522 * Edge IRQs are already cleared/acked in irq_handler and
523 * do not need to be masked; as a result, the handle_edge_irq()
524 * logic is excessive here and may cause loss of interrupts.
525 * So just use handle_simple_irq.
526 */
527 irq_set_handler_locked(d, handle_simple_irq);
522 528
523 return 0; 529 return 0;
524 530
@@ -678,7 +684,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
678static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank) 684static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
679{ 685{
680 void __iomem *isr_reg = NULL; 686 void __iomem *isr_reg = NULL;
681 u32 isr; 687 u32 enabled, isr, level_mask;
682 unsigned int bit; 688 unsigned int bit;
683 struct gpio_bank *bank = gpiobank; 689 struct gpio_bank *bank = gpiobank;
684 unsigned long wa_lock_flags; 690 unsigned long wa_lock_flags;
@@ -691,23 +697,21 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
691 pm_runtime_get_sync(bank->chip.parent); 697 pm_runtime_get_sync(bank->chip.parent);
692 698
693 while (1) { 699 while (1) {
694 u32 isr_saved, level_mask = 0;
695 u32 enabled;
696
697 raw_spin_lock_irqsave(&bank->lock, lock_flags); 700 raw_spin_lock_irqsave(&bank->lock, lock_flags);
698 701
699 enabled = omap_get_gpio_irqbank_mask(bank); 702 enabled = omap_get_gpio_irqbank_mask(bank);
700 isr_saved = isr = readl_relaxed(isr_reg) & enabled; 703 isr = readl_relaxed(isr_reg) & enabled;
701 704
702 if (bank->level_mask) 705 if (bank->level_mask)
703 level_mask = bank->level_mask & enabled; 706 level_mask = bank->level_mask & enabled;
707 else
708 level_mask = 0;
704 709
705 /* clear edge sensitive interrupts before handler(s) are 710 /* clear edge sensitive interrupts before handler(s) are
706 called so that we don't miss any interrupt occurred while 711 called so that we don't miss any interrupt occurred while
707 executing them */ 712 executing them */
708 omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask); 713 if (isr & ~level_mask)
709 omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask); 714 omap_clear_gpio_irqbank(bank, isr & ~level_mask);
710 omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
711 715
712 raw_spin_unlock_irqrestore(&bank->lock, lock_flags); 716 raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
713 717
@@ -1010,7 +1014,7 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
1010 1014
1011/*---------------------------------------------------------------------*/ 1015/*---------------------------------------------------------------------*/
1012 1016
1013static void __init omap_gpio_show_rev(struct gpio_bank *bank) 1017static void omap_gpio_show_rev(struct gpio_bank *bank)
1014{ 1018{
1015 static bool called; 1019 static bool called;
1016 u32 rev; 1020 u32 rev;
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 4d2113530735..eb4528c87c0b 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -203,7 +203,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
203 203
204 if (pin <= 255) { 204 if (pin <= 255) {
205 char ev_name[5]; 205 char ev_name[5];
206 sprintf(ev_name, "_%c%02X", 206 sprintf(ev_name, "_%c%02hhX",
207 agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', 207 agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
208 pin); 208 pin);
209 if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) 209 if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 7ef6c28a34d9..bc746131987f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -834,7 +834,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
834 placement.busy_placement = &placements; 834 placement.busy_placement = &placements;
835 placements.fpfn = 0; 835 placements.fpfn = 0;
836 placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; 836 placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
837 placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 837 placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
838 838
839 r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); 839 r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
840 if (unlikely(r)) 840 if (unlikely(r))
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 97c94f9683fa..38cea6fb25a8 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
205 struct amd_sched_entity *entity) 205 struct amd_sched_entity *entity)
206{ 206{
207 struct amd_sched_rq *rq = entity->rq; 207 struct amd_sched_rq *rq = entity->rq;
208 int r;
209 208
210 if (!amd_sched_entity_is_initialized(sched, entity)) 209 if (!amd_sched_entity_is_initialized(sched, entity))
211 return; 210 return;
211
212 /** 212 /**
213 * The client will not queue more IBs during this fini, consume existing 213 * The client will not queue more IBs during this fini, consume existing
214 * queued IBs or discard them on SIGKILL 214 * queued IBs
215 */ 215 */
216 if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL) 216 wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
217 r = -ERESTARTSYS;
218 else
219 r = wait_event_killable(sched->job_scheduled,
220 amd_sched_entity_is_idle(entity));
221 amd_sched_rq_remove_entity(rq, entity);
222 if (r) {
223 struct amd_sched_job *job;
224 217
225 /* Park the kernel for a moment to make sure it isn't processing 218 amd_sched_rq_remove_entity(rq, entity);
226 * our enity.
227 */
228 kthread_park(sched->thread);
229 kthread_unpark(sched->thread);
230 while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
231 sched->ops->free_job(job);
232
233 }
234 kfifo_free(&entity->job_queue); 219 kfifo_free(&entity->job_queue);
235} 220}
236 221
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4e53aae9a1fb..0028591f3f95 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2960,6 +2960,7 @@ out:
2960 drm_modeset_backoff(&ctx); 2960 drm_modeset_backoff(&ctx);
2961 } 2961 }
2962 2962
2963 drm_atomic_state_put(state);
2963 drm_modeset_drop_locks(&ctx); 2964 drm_modeset_drop_locks(&ctx);
2964 drm_modeset_acquire_fini(&ctx); 2965 drm_modeset_acquire_fini(&ctx);
2965 2966
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index e651a58c18cf..82b72425a42f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -168,11 +168,13 @@ static struct drm_driver exynos_drm_driver = {
168static int exynos_drm_suspend(struct device *dev) 168static int exynos_drm_suspend(struct device *dev)
169{ 169{
170 struct drm_device *drm_dev = dev_get_drvdata(dev); 170 struct drm_device *drm_dev = dev_get_drvdata(dev);
171 struct exynos_drm_private *private = drm_dev->dev_private; 171 struct exynos_drm_private *private;
172 172
173 if (pm_runtime_suspended(dev) || !drm_dev) 173 if (pm_runtime_suspended(dev) || !drm_dev)
174 return 0; 174 return 0;
175 175
176 private = drm_dev->dev_private;
177
176 drm_kms_helper_poll_disable(drm_dev); 178 drm_kms_helper_poll_disable(drm_dev);
177 exynos_drm_fbdev_suspend(drm_dev); 179 exynos_drm_fbdev_suspend(drm_dev);
178 private->suspend_state = drm_atomic_helper_suspend(drm_dev); 180 private->suspend_state = drm_atomic_helper_suspend(drm_dev);
@@ -188,11 +190,12 @@ static int exynos_drm_suspend(struct device *dev)
188static int exynos_drm_resume(struct device *dev) 190static int exynos_drm_resume(struct device *dev)
189{ 191{
190 struct drm_device *drm_dev = dev_get_drvdata(dev); 192 struct drm_device *drm_dev = dev_get_drvdata(dev);
191 struct exynos_drm_private *private = drm_dev->dev_private; 193 struct exynos_drm_private *private;
192 194
193 if (pm_runtime_suspended(dev) || !drm_dev) 195 if (pm_runtime_suspended(dev) || !drm_dev)
194 return 0; 196 return 0;
195 197
198 private = drm_dev->dev_private;
196 drm_atomic_helper_resume(drm_dev, private->suspend_state); 199 drm_atomic_helper_resume(drm_dev, private->suspend_state);
197 exynos_drm_fbdev_resume(drm_dev); 200 exynos_drm_fbdev_resume(drm_dev);
198 drm_kms_helper_poll_enable(drm_dev); 201 drm_kms_helper_poll_enable(drm_dev);
@@ -427,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev)
427 430
428 kfree(drm->dev_private); 431 kfree(drm->dev_private);
429 drm->dev_private = NULL; 432 drm->dev_private = NULL;
433 dev_set_drvdata(dev, NULL);
430 434
431 drm_dev_unref(drm); 435 drm_dev_unref(drm);
432} 436}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 436377da41ba..03532dfc0cd5 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
308 308
309static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) 309static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
310{ 310{
311 struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
312 int ring_id;
313
314 kfree(vgpu->sched_data); 311 kfree(vgpu->sched_data);
315 vgpu->sched_data = NULL; 312 vgpu->sched_data = NULL;
316
317 spin_lock_bh(&scheduler->mmio_context_lock);
318 for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
319 if (scheduler->engine_owner[ring_id] == vgpu) {
320 intel_gvt_switch_mmio(vgpu, NULL, ring_id);
321 scheduler->engine_owner[ring_id] = NULL;
322 }
323 }
324 spin_unlock_bh(&scheduler->mmio_context_lock);
325} 313}
326 314
327static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) 315static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
388{ 376{
389 struct intel_gvt_workload_scheduler *scheduler = 377 struct intel_gvt_workload_scheduler *scheduler =
390 &vgpu->gvt->scheduler; 378 &vgpu->gvt->scheduler;
379 int ring_id;
391 380
392 gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); 381 gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
393 382
@@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
401 scheduler->need_reschedule = true; 390 scheduler->need_reschedule = true;
402 scheduler->current_vgpu = NULL; 391 scheduler->current_vgpu = NULL;
403 } 392 }
393
394 spin_lock_bh(&scheduler->mmio_context_lock);
395 for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
396 if (scheduler->engine_owner[ring_id] == vgpu) {
397 intel_gvt_switch_mmio(vgpu, NULL, ring_id);
398 scheduler->engine_owner[ring_id] = NULL;
399 }
400 }
401 spin_unlock_bh(&scheduler->mmio_context_lock);
404} 402}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 19404c96eeb1..32e857dc507c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2657 if (READ_ONCE(obj->mm.pages)) 2657 if (READ_ONCE(obj->mm.pages))
2658 return -ENODEV; 2658 return -ENODEV;
2659 2659
2660 if (obj->mm.madv != I915_MADV_WILLNEED)
2661 return -EFAULT;
2662
2660 /* Before the pages are instantiated the object is treated as being 2663 /* Before the pages are instantiated the object is treated as being
2661 * in the CPU domain. The pages will be clflushed as required before 2664 * in the CPU domain. The pages will be clflushed as required before
2662 * use, and we can freely write into the pages directly. If userspace 2665 * use, and we can freely write into the pages directly. If userspace
@@ -3013,10 +3016,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
3013 3016
3014static void nop_submit_request(struct drm_i915_gem_request *request) 3017static void nop_submit_request(struct drm_i915_gem_request *request)
3015{ 3018{
3019 unsigned long flags;
3020
3016 GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error)); 3021 GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
3017 dma_fence_set_error(&request->fence, -EIO); 3022 dma_fence_set_error(&request->fence, -EIO);
3018 i915_gem_request_submit(request); 3023
3024 spin_lock_irqsave(&request->engine->timeline->lock, flags);
3025 __i915_gem_request_submit(request);
3019 intel_engine_init_global_seqno(request->engine, request->global_seqno); 3026 intel_engine_init_global_seqno(request->engine, request->global_seqno);
3027 spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
3020} 3028}
3021 3029
3022static void engine_set_wedged(struct intel_engine_cs *engine) 3030static void engine_set_wedged(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 4df039ef2ce3..e161d383b526 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,21 +33,20 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include "i915_trace.h" 34#include "i915_trace.h"
35 35
36static bool ggtt_is_idle(struct drm_i915_private *dev_priv) 36static bool ggtt_is_idle(struct drm_i915_private *i915)
37{ 37{
38 struct i915_ggtt *ggtt = &dev_priv->ggtt; 38 struct intel_engine_cs *engine;
39 struct intel_engine_cs *engine; 39 enum intel_engine_id id;
40 enum intel_engine_id id;
41 40
42 for_each_engine(engine, dev_priv, id) { 41 if (i915->gt.active_requests)
43 struct intel_timeline *tl; 42 return false;
44 43
45 tl = &ggtt->base.timeline.engine[engine->id]; 44 for_each_engine(engine, i915, id) {
46 if (i915_gem_active_isset(&tl->last_request)) 45 if (engine->last_retired_context != i915->kernel_context)
47 return false; 46 return false;
48 } 47 }
49 48
50 return true; 49 return true;
51} 50}
52 51
53static int ggtt_flush(struct drm_i915_private *i915) 52static int ggtt_flush(struct drm_i915_private *i915)
@@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
157 min_size, alignment, cache_level, 156 min_size, alignment, cache_level,
158 start, end, mode); 157 start, end, mode);
159 158
160 /* Retire before we search the active list. Although we have 159 /*
160 * Retire before we search the active list. Although we have
161 * reasonable accuracy in our retirement lists, we may have 161 * reasonable accuracy in our retirement lists, we may have
162 * a stray pin (preventing eviction) that can only be resolved by 162 * a stray pin (preventing eviction) that can only be resolved by
163 * retiring. 163 * retiring.
@@ -182,7 +182,8 @@ search_again:
182 BUG_ON(ret); 182 BUG_ON(ret);
183 } 183 }
184 184
185 /* Can we unpin some objects such as idle hw contents, 185 /*
186 * Can we unpin some objects such as idle hw contents,
186 * or pending flips? But since only the GGTT has global entries 187 * or pending flips? But since only the GGTT has global entries
187 * such as scanouts, rinbuffers and contexts, we can skip the 188 * such as scanouts, rinbuffers and contexts, we can skip the
188 * purge when inspecting per-process local address spaces. 189 * purge when inspecting per-process local address spaces.
@@ -190,19 +191,33 @@ search_again:
190 if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK) 191 if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
191 return -ENOSPC; 192 return -ENOSPC;
192 193
193 if (ggtt_is_idle(dev_priv)) { 194 /*
194 /* If we still have pending pageflip completions, drop 195 * Not everything in the GGTT is tracked via VMA using
195 * back to userspace to give our workqueues time to 196 * i915_vma_move_to_active(), otherwise we could evict as required
196 * acquire our locks and unpin the old scanouts. 197 * with minimal stalling. Instead we are forced to idle the GPU and
197 */ 198 * explicitly retire outstanding requests which will then remove
198 return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC; 199 * the pinning for active objects such as contexts and ring,
199 } 200 * enabling us to evict them on the next iteration.
201 *
202 * To ensure that all user contexts are evictable, we perform
203 * a switch to the perma-pinned kernel context. This all also gives
204 * us a termination condition, when the last retired context is
205 * the kernel's there is no more we can evict.
206 */
207 if (!ggtt_is_idle(dev_priv)) {
208 ret = ggtt_flush(dev_priv);
209 if (ret)
210 return ret;
200 211
201 ret = ggtt_flush(dev_priv); 212 goto search_again;
202 if (ret) 213 }
203 return ret;
204 214
205 goto search_again; 215 /*
216 * If we still have pending pageflip completions, drop
217 * back to userspace to give our workqueues time to
218 * acquire our locks and unpin the old scanouts.
219 */
220 return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
206 221
207found: 222found:
208 /* drm_mm doesn't allow any other other operations while 223 /* drm_mm doesn't allow any other other operations while
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ed7cd9ee2c2a..c9bcc6c45012 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6998,6 +6998,7 @@ enum {
6998 */ 6998 */
6999#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19) 6999#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
7000#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14) 7000#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
7001#define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14))
7001 7002
7002#define GEN7_L3CNTLREG1 _MMIO(0xB01C) 7003#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
7003#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C 7004#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 00c6aee0a9a1..5d4cd3d00564 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1240,7 +1240,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv,
1240{ 1240{
1241 enum port port; 1241 enum port port;
1242 1242
1243 if (!HAS_DDI(dev_priv)) 1243 if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
1244 return; 1244 return;
1245 1245
1246 if (!dev_priv->vbt.child_dev_num) 1246 if (!dev_priv->vbt.child_dev_num)
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index ff9ecd211abb..b8315bca852b 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -74,7 +74,7 @@
74#define I9XX_CSC_COEFF_1_0 \ 74#define I9XX_CSC_COEFF_1_0 \
75 ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) 75 ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
76 76
77static bool crtc_state_is_legacy(struct drm_crtc_state *state) 77static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
78{ 78{
79 return !state->degamma_lut && 79 return !state->degamma_lut &&
80 !state->ctm && 80 !state->ctm &&
@@ -288,7 +288,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
288 } 288 }
289 289
290 mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0); 290 mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
291 if (!crtc_state_is_legacy(state)) { 291 if (!crtc_state_is_legacy_gamma(state)) {
292 mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | 292 mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
293 (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0); 293 (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
294 } 294 }
@@ -469,7 +469,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
469 struct intel_crtc_state *intel_state = to_intel_crtc_state(state); 469 struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
470 enum pipe pipe = to_intel_crtc(state->crtc)->pipe; 470 enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
471 471
472 if (crtc_state_is_legacy(state)) { 472 if (crtc_state_is_legacy_gamma(state)) {
473 haswell_load_luts(state); 473 haswell_load_luts(state);
474 return; 474 return;
475 } 475 }
@@ -529,7 +529,7 @@ static void glk_load_luts(struct drm_crtc_state *state)
529 529
530 glk_load_degamma_lut(state); 530 glk_load_degamma_lut(state);
531 531
532 if (crtc_state_is_legacy(state)) { 532 if (crtc_state_is_legacy_gamma(state)) {
533 haswell_load_luts(state); 533 haswell_load_luts(state);
534 return; 534 return;
535 } 535 }
@@ -551,7 +551,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
551 uint32_t i, lut_size; 551 uint32_t i, lut_size;
552 uint32_t word0, word1; 552 uint32_t word0, word1;
553 553
554 if (crtc_state_is_legacy(state)) { 554 if (crtc_state_is_legacy_gamma(state)) {
555 /* Turn off degamma/gamma on CGM block. */ 555 /* Turn off degamma/gamma on CGM block. */
556 I915_WRITE(CGM_PIPE_MODE(pipe), 556 I915_WRITE(CGM_PIPE_MODE(pipe),
557 (state->ctm ? CGM_PIPE_MODE_CSC : 0)); 557 (state->ctm ? CGM_PIPE_MODE_CSC : 0));
@@ -632,12 +632,10 @@ int intel_color_check(struct drm_crtc *crtc,
632 return 0; 632 return 0;
633 633
634 /* 634 /*
635 * We also allow no degamma lut and a gamma lut at the legacy 635 * We also allow no degamma lut/ctm and a gamma lut at the legacy
636 * size (256 entries). 636 * size (256 entries).
637 */ 637 */
638 if (!crtc_state->degamma_lut && 638 if (crtc_state_is_legacy_gamma(crtc_state))
639 crtc_state->gamma_lut &&
640 crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH)
641 return 0; 639 return 0;
642 640
643 return -EINVAL; 641 return -EINVAL;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 476681d5940c..5e5fe03b638c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
664 int *n_entries) 664 int *n_entries)
665{ 665{
666 if (IS_BROADWELL(dev_priv)) { 666 if (IS_BROADWELL(dev_priv)) {
667 *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); 667 *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
668 return hsw_ddi_translations_fdi; 668 return bdw_ddi_translations_fdi;
669 } else if (IS_HASWELL(dev_priv)) { 669 } else if (IS_HASWELL(dev_priv)) {
670 *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); 670 *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
671 return hsw_ddi_translations_fdi; 671 return hsw_ddi_translations_fdi;
@@ -2102,8 +2102,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
2102 * register writes. 2102 * register writes.
2103 */ 2103 */
2104 val = I915_READ(DPCLKA_CFGCR0); 2104 val = I915_READ(DPCLKA_CFGCR0);
2105 val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) | 2105 val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
2106 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port));
2107 I915_WRITE(DPCLKA_CFGCR0, val); 2106 I915_WRITE(DPCLKA_CFGCR0, val);
2108 } else if (IS_GEN9_BC(dev_priv)) { 2107 } else if (IS_GEN9_BC(dev_priv)) {
2109 /* DDI -> PLL mapping */ 2108 /* DDI -> PLL mapping */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 64f7b51ed97c..5c7828c52d12 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10245,13 +10245,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10245{ 10245{
10246 struct drm_i915_private *dev_priv = to_i915(dev); 10246 struct drm_i915_private *dev_priv = to_i915(dev);
10247 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10247 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10248 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 10248 enum transcoder cpu_transcoder;
10249 struct drm_display_mode *mode; 10249 struct drm_display_mode *mode;
10250 struct intel_crtc_state *pipe_config; 10250 struct intel_crtc_state *pipe_config;
10251 int htot = I915_READ(HTOTAL(cpu_transcoder)); 10251 u32 htot, hsync, vtot, vsync;
10252 int hsync = I915_READ(HSYNC(cpu_transcoder));
10253 int vtot = I915_READ(VTOTAL(cpu_transcoder));
10254 int vsync = I915_READ(VSYNC(cpu_transcoder));
10255 enum pipe pipe = intel_crtc->pipe; 10252 enum pipe pipe = intel_crtc->pipe;
10256 10253
10257 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 10254 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
@@ -10279,6 +10276,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10279 i9xx_crtc_clock_get(intel_crtc, pipe_config); 10276 i9xx_crtc_clock_get(intel_crtc, pipe_config);
10280 10277
10281 mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; 10278 mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
10279
10280 cpu_transcoder = pipe_config->cpu_transcoder;
10281 htot = I915_READ(HTOTAL(cpu_transcoder));
10282 hsync = I915_READ(HSYNC(cpu_transcoder));
10283 vtot = I915_READ(VTOTAL(cpu_transcoder));
10284 vsync = I915_READ(VSYNC(cpu_transcoder));
10285
10282 mode->hdisplay = (htot & 0xffff) + 1; 10286 mode->hdisplay = (htot & 0xffff) + 1;
10283 mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 10287 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
10284 mode->hsync_start = (hsync & 0xffff) + 1; 10288 mode->hsync_start = (hsync & 0xffff) + 1;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 64134947c0aa..203198659ab2 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2307,8 +2307,8 @@ static void edp_panel_off(struct intel_dp *intel_dp)
2307 I915_WRITE(pp_ctrl_reg, pp); 2307 I915_WRITE(pp_ctrl_reg, pp);
2308 POSTING_READ(pp_ctrl_reg); 2308 POSTING_READ(pp_ctrl_reg);
2309 2309
2310 intel_dp->panel_power_off_time = ktime_get_boottime();
2311 wait_panel_off(intel_dp); 2310 wait_panel_off(intel_dp);
2311 intel_dp->panel_power_off_time = ktime_get_boottime();
2312 2312
2313 /* We got a reference when we enabled the VDD. */ 2313 /* We got a reference when we enabled the VDD. */
2314 intel_display_power_put(dev_priv, intel_dp->aux_power_domain); 2314 intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
@@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5273 * seems sufficient to avoid this problem. 5273 * seems sufficient to avoid this problem.
5274 */ 5274 */
5275 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { 5275 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
5276 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10); 5276 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
5277 DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", 5277 DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
5278 vbt.t11_t12); 5278 vbt.t11_t12);
5279 } 5279 }
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index a2a3d93d67bd..df808a94c511 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1996,7 +1996,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1996 1996
1997 /* 3. Configure DPLL_CFGCR0 */ 1997 /* 3. Configure DPLL_CFGCR0 */
1998 /* Avoid touch CFGCR1 if HDMI mode is not enabled */ 1998 /* Avoid touch CFGCR1 if HDMI mode is not enabled */
1999 if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) { 1999 if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2000 val = pll->state.hw_state.cfgcr1; 2000 val = pll->state.hw_state.cfgcr1;
2001 I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val); 2001 I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
2002 /* 4. Reab back to ensure writes completed */ 2002 /* 4. Reab back to ensure writes completed */
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 9ab596941372..3c2d9cf22ed5 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1048,9 +1048,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1048 } 1048 }
1049 1049
1050 /* WaProgramL3SqcReg1DefaultForPerf:bxt */ 1050 /* WaProgramL3SqcReg1DefaultForPerf:bxt */
1051 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) 1051 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
1052 I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | 1052 u32 val = I915_READ(GEN8_L3SQCREG1);
1053 L3_HIGH_PRIO_CREDITS(2)); 1053 val &= ~L3_PRIO_CREDITS_MASK;
1054 val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
1055 I915_WRITE(GEN8_L3SQCREG1, val);
1056 }
1054 1057
1055 /* WaToEnableHwFixForPushConstHWBug:bxt */ 1058 /* WaToEnableHwFixForPushConstHWBug:bxt */
1056 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) 1059 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ed662937ec3c..0a09f8ff6aff 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -8245,14 +8245,17 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
8245 int high_prio_credits) 8245 int high_prio_credits)
8246{ 8246{
8247 u32 misccpctl; 8247 u32 misccpctl;
8248 u32 val;
8248 8249
8249 /* WaTempDisableDOPClkGating:bdw */ 8250 /* WaTempDisableDOPClkGating:bdw */
8250 misccpctl = I915_READ(GEN7_MISCCPCTL); 8251 misccpctl = I915_READ(GEN7_MISCCPCTL);
8251 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 8252 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
8252 8253
8253 I915_WRITE(GEN8_L3SQCREG1, 8254 val = I915_READ(GEN8_L3SQCREG1);
8254 L3_GENERAL_PRIO_CREDITS(general_prio_credits) | 8255 val &= ~L3_PRIO_CREDITS_MASK;
8255 L3_HIGH_PRIO_CREDITS(high_prio_credits)); 8256 val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
8257 val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
8258 I915_WRITE(GEN8_L3SQCREG1, val);
8256 8259
8257 /* 8260 /*
8258 * Wait at least 100 clocks before re-enabling clock gating. 8261 * Wait at least 100 clocks before re-enabling clock gating.
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index b3a087cb0860..49577eba8e7e 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -368,7 +368,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
368{ 368{
369 enum i915_power_well_id id = power_well->id; 369 enum i915_power_well_id id = power_well->id;
370 bool wait_fuses = power_well->hsw.has_fuses; 370 bool wait_fuses = power_well->hsw.has_fuses;
371 enum skl_power_gate pg; 371 enum skl_power_gate uninitialized_var(pg);
372 u32 val; 372 u32 val;
373 373
374 if (wait_fuses) { 374 if (wait_fuses) {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index dbb31a014419..deaf869374ea 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -248,7 +248,7 @@ disable_clks:
248 clk_disable_unprepare(ahb_clk); 248 clk_disable_unprepare(ahb_clk);
249disable_gdsc: 249disable_gdsc:
250 regulator_disable(gdsc_reg); 250 regulator_disable(gdsc_reg);
251 pm_runtime_put_autosuspend(dev); 251 pm_runtime_put_sync(dev);
252put_clk: 252put_clk:
253 clk_put(ahb_clk); 253 clk_put(ahb_clk);
254put_gdsc: 254put_gdsc:
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index c2bdad88447e..824067d2d427 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
83 .caps = MDP_LM_CAP_WB }, 83 .caps = MDP_LM_CAP_WB },
84 }, 84 },
85 .nb_stages = 5, 85 .nb_stages = 5,
86 .max_width = 2048,
87 .max_height = 0xFFFF,
86 }, 88 },
87 .dspp = { 89 .dspp = {
88 .count = 3, 90 .count = 3,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 6fcb58ab718c..440977677001 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
804 804
805 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); 805 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
806 806
807 pm_runtime_put_autosuspend(&pdev->dev);
808
809set_cursor: 807set_cursor:
810 ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); 808 ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
811 if (ret) { 809 if (ret) {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f15821a0d900..ea5bb0e1632c 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
610 struct dma_fence *fence; 610 struct dma_fence *fence;
611 int i, ret; 611 int i, ret;
612 612
613 if (!exclusive) {
614 /* NOTE: _reserve_shared() must happen before _add_shared_fence(),
615 * which makes this a slightly strange place to call it. OTOH this
616 * is a convenient can-fail point to hook it in. (And similar to
617 * how etnaviv and nouveau handle this.)
618 */
619 ret = reservation_object_reserve_shared(msm_obj->resv);
620 if (ret)
621 return ret;
622 }
623
624 fobj = reservation_object_get_list(msm_obj->resv); 613 fobj = reservation_object_get_list(msm_obj->resv);
625 if (!fobj || (fobj->shared_count == 0)) { 614 if (!fobj || (fobj->shared_count == 0)) {
626 fence = reservation_object_get_excl(msm_obj->resv); 615 fence = reservation_object_get_excl(msm_obj->resv);
@@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1045 } 1034 }
1046 1035
1047 vaddr = msm_gem_get_vaddr(obj); 1036 vaddr = msm_gem_get_vaddr(obj);
1048 if (!vaddr) { 1037 if (IS_ERR(vaddr)) {
1049 msm_gem_put_iova(obj, aspace); 1038 msm_gem_put_iova(obj, aspace);
1050 drm_gem_object_unreference(obj); 1039 drm_gem_object_unreference(obj);
1051 return ERR_PTR(-ENOMEM); 1040 return ERR_CAST(vaddr);
1052 } 1041 }
1053 1042
1054 if (bo) 1043 if (bo)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5d0a75d4b249..93535cac0676 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -221,7 +221,7 @@ fail:
221 return ret; 221 return ret;
222} 222}
223 223
224static int submit_fence_sync(struct msm_gem_submit *submit) 224static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
225{ 225{
226 int i, ret = 0; 226 int i, ret = 0;
227 227
@@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit)
229 struct msm_gem_object *msm_obj = submit->bos[i].obj; 229 struct msm_gem_object *msm_obj = submit->bos[i].obj;
230 bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; 230 bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
231 231
232 if (!write) {
233 /* NOTE: _reserve_shared() must happen before
234 * _add_shared_fence(), which makes this a slightly
235 * strange place to call it. OTOH this is a
236 * convenient can-fail point to hook it in.
237 */
238 ret = reservation_object_reserve_shared(msm_obj->resv);
239 if (ret)
240 return ret;
241 }
242
243 if (no_implicit)
244 continue;
245
232 ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); 246 ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
233 if (ret) 247 if (ret)
234 break; 248 break;
@@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
451 if (ret) 465 if (ret)
452 goto out; 466 goto out;
453 467
454 if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { 468 ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
455 ret = submit_fence_sync(submit); 469 if (ret)
456 if (ret) 470 goto out;
457 goto out;
458 }
459 471
460 ret = submit_pin_objects(submit); 472 ret = submit_pin_objects(submit);
461 if (ret) 473 if (ret)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index ffbff27600e0..6a887032c66a 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
718 msm_gem_put_iova(gpu->rb->bo, gpu->aspace); 718 msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
719 msm_ringbuffer_destroy(gpu->rb); 719 msm_ringbuffer_destroy(gpu->rb);
720 } 720 }
721 if (gpu->aspace) { 721
722 if (!IS_ERR_OR_NULL(gpu->aspace)) {
722 gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, 723 gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
723 NULL, 0); 724 NULL, 0);
724 msm_gem_address_space_put(gpu->aspace); 725 msm_gem_address_space_put(gpu->aspace);
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 0366b8092f97..ec56794ad039 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
111 111
112 wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); 112 wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
113 113
114 /* Note that smp_load_acquire() is not strictly required
115 * as CIRC_SPACE_TO_END() does not access the tail more
116 * than once.
117 */
114 n = min(sz, circ_space_to_end(&rd->fifo)); 118 n = min(sz, circ_space_to_end(&rd->fifo));
115 memcpy(fptr, ptr, n); 119 memcpy(fptr, ptr, n);
116 120
117 fifo->head = (fifo->head + n) & (BUF_SZ - 1); 121 smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));
118 sz -= n; 122 sz -= n;
119 ptr += n; 123 ptr += n;
120 124
@@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf,
145 if (ret) 149 if (ret)
146 goto out; 150 goto out;
147 151
152 /* Note that smp_load_acquire() is not strictly required
153 * as CIRC_CNT_TO_END() does not access the head more than
154 * once.
155 */
148 n = min_t(int, sz, circ_count_to_end(&rd->fifo)); 156 n = min_t(int, sz, circ_count_to_end(&rd->fifo));
149 if (copy_to_user(buf, fptr, n)) { 157 if (copy_to_user(buf, fptr, n)) {
150 ret = -EFAULT; 158 ret = -EFAULT;
151 goto out; 159 goto out;
152 } 160 }
153 161
154 fifo->tail = (fifo->tail + n) & (BUF_SZ - 1); 162 smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1));
155 *ppos += n; 163 *ppos += n;
156 164
157 wake_up_all(&rd->fifo_event); 165 wake_up_all(&rd->fifo_event);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index f7707849bb53..2b12d82aac15 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -223,7 +223,7 @@ void
223nouveau_fbcon_accel_save_disable(struct drm_device *dev) 223nouveau_fbcon_accel_save_disable(struct drm_device *dev)
224{ 224{
225 struct nouveau_drm *drm = nouveau_drm(dev); 225 struct nouveau_drm *drm = nouveau_drm(dev);
226 if (drm->fbcon) { 226 if (drm->fbcon && drm->fbcon->helper.fbdev) {
227 drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags; 227 drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
228 drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; 228 drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
229 } 229 }
@@ -233,7 +233,7 @@ void
233nouveau_fbcon_accel_restore(struct drm_device *dev) 233nouveau_fbcon_accel_restore(struct drm_device *dev)
234{ 234{
235 struct nouveau_drm *drm = nouveau_drm(dev); 235 struct nouveau_drm *drm = nouveau_drm(dev);
236 if (drm->fbcon) { 236 if (drm->fbcon && drm->fbcon->helper.fbdev) {
237 drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags; 237 drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
238 } 238 }
239} 239}
@@ -245,7 +245,8 @@ nouveau_fbcon_accel_fini(struct drm_device *dev)
245 struct nouveau_fbdev *fbcon = drm->fbcon; 245 struct nouveau_fbdev *fbcon = drm->fbcon;
246 if (fbcon && drm->channel) { 246 if (fbcon && drm->channel) {
247 console_lock(); 247 console_lock();
248 fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; 248 if (fbcon->helper.fbdev)
249 fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
249 console_unlock(); 250 console_unlock();
250 nouveau_channel_idle(drm->channel); 251 nouveau_channel_idle(drm->channel);
251 nvif_object_fini(&fbcon->twod); 252 nvif_object_fini(&fbcon->twod);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 2dbf62a2ac41..e4751f92b342 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -3265,11 +3265,14 @@ nv50_mstm = {
3265void 3265void
3266nv50_mstm_service(struct nv50_mstm *mstm) 3266nv50_mstm_service(struct nv50_mstm *mstm)
3267{ 3267{
3268 struct drm_dp_aux *aux = mstm->mgr.aux; 3268 struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
3269 bool handled = true; 3269 bool handled = true;
3270 int ret; 3270 int ret;
3271 u8 esi[8] = {}; 3271 u8 esi[8] = {};
3272 3272
3273 if (!aux)
3274 return;
3275
3273 while (handled) { 3276 while (handled) {
3274 ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8); 3277 ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
3275 if (ret != 8) { 3278 if (ret != 8) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index 8e2e24a74774..44e116f7880d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -39,5 +39,5 @@ int
39g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine) 39g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
40{ 40{
41 return nvkm_xtensa_new_(&g84_bsp, device, index, 41 return nvkm_xtensa_new_(&g84_bsp, device, index,
42 true, 0x103000, pengine); 42 device->chipset != 0x92, 0x103000, pengine);
43} 43}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index d06ad2c372bf..455da298227f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -241,6 +241,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
241 mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem); 241 mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
242 } 242 }
243 243
244 mmu->func->flush(vm);
245
244 nvkm_memory_del(&pgt); 246 nvkm_memory_del(&pgt);
245 } 247 }
246} 248}
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 6a573d21d3cc..658fa2d3e40c 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -405,6 +405,14 @@ int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
405 return -EINVAL; 405 return -EINVAL;
406 } 406 }
407 407
408 /*
409 * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M /
410 * i.MX53 channel arbitration locking doesn't seem to work properly.
411 * Allow enabling the lock feature on IPUv3H / i.MX6 only.
412 */
413 if (bursts && ipu->ipu_type != IPUV3H)
414 return -EINVAL;
415
408 for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) { 416 for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
409 if (channel->num == idmac_lock_en_info[i].chnum) 417 if (channel->num == idmac_lock_en_info[i].chnum)
410 break; 418 break;
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index c35f74c83065..c860a7997cb5 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -73,6 +73,14 @@
73#define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1) 73#define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1)
74#define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4) 74#define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4)
75 75
76#define IPU_PRE_STORE_ENG_STATUS 0x120
77#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK 0xffff
78#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT 0
79#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK 0x3fff
80#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT 16
81#define IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL (1 << 30)
82#define IPU_PRE_STORE_ENG_STATUS_STORE_FIELD (1 << 31)
83
76#define IPU_PRE_STORE_ENG_SIZE 0x130 84#define IPU_PRE_STORE_ENG_SIZE 0x130
77#define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0) 85#define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0)
78#define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16) 86#define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16)
@@ -93,6 +101,7 @@ struct ipu_pre {
93 dma_addr_t buffer_paddr; 101 dma_addr_t buffer_paddr;
94 void *buffer_virt; 102 void *buffer_virt;
95 bool in_use; 103 bool in_use;
104 unsigned int safe_window_end;
96}; 105};
97 106
98static DEFINE_MUTEX(ipu_pre_list_mutex); 107static DEFINE_MUTEX(ipu_pre_list_mutex);
@@ -160,6 +169,9 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
160 u32 active_bpp = info->cpp[0] >> 1; 169 u32 active_bpp = info->cpp[0] >> 1;
161 u32 val; 170 u32 val;
162 171
172 /* calculate safe window for ctrl register updates */
173 pre->safe_window_end = height - 2;
174
163 writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); 175 writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
164 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); 176 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
165 177
@@ -199,7 +211,24 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
199 211
200void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) 212void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
201{ 213{
214 unsigned long timeout = jiffies + msecs_to_jiffies(5);
215 unsigned short current_yblock;
216 u32 val;
217
202 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); 218 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
219
220 do {
221 if (time_after(jiffies, timeout)) {
222 dev_warn(pre->dev, "timeout waiting for PRE safe window\n");
223 return;
224 }
225
226 val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS);
227 current_yblock =
228 (val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) &
229 IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK;
230 } while (current_yblock == 0 || current_yblock >= pre->safe_window_end);
231
203 writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET); 232 writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET);
204} 233}
205 234
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index ecc9ea44dc50..0013ca9f72c8 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -14,6 +14,7 @@
14#include <drm/drm_fourcc.h> 14#include <drm/drm_fourcc.h>
15#include <linux/clk.h> 15#include <linux/clk.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/iopoll.h>
17#include <linux/mfd/syscon.h> 18#include <linux/mfd/syscon.h>
18#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> 19#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
19#include <linux/module.h> 20#include <linux/module.h>
@@ -329,6 +330,12 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
329 val = IPU_PRG_REG_UPDATE_REG_UPDATE; 330 val = IPU_PRG_REG_UPDATE_REG_UPDATE;
330 writel(val, prg->regs + IPU_PRG_REG_UPDATE); 331 writel(val, prg->regs + IPU_PRG_REG_UPDATE);
331 332
333 /* wait for both double buffers to be filled */
334 readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val,
335 (val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) &&
336 (val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)),
337 5, 1000);
338
332 clk_disable_unprepare(prg->clk_ipg); 339 clk_disable_unprepare(prg->clk_ipg);
333 340
334 chan->enabled = true; 341 chan->enabled = true;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 0a3117cc29e7..374301fcbc86 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -281,6 +281,7 @@ config HID_ELECOM
281 Support for ELECOM devices: 281 Support for ELECOM devices:
282 - BM084 Bluetooth Mouse 282 - BM084 Bluetooth Mouse
283 - DEFT Trackball (Wired and wireless) 283 - DEFT Trackball (Wired and wireless)
284 - HUGE Trackball (Wired and wireless)
284 285
285config HID_ELO 286config HID_ELO
286 tristate "ELO USB 4000/4500 touchscreen" 287 tristate "ELO USB 4000/4500 touchscreen"
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9bc91160819b..330ca983828b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2032,6 +2032,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
2032 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, 2032 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
2033 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, 2033 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
2034 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, 2034 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
2035 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
2036 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
2035#endif 2037#endif
2036#if IS_ENABLED(CONFIG_HID_ELO) 2038#if IS_ENABLED(CONFIG_HID_ELO)
2037 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, 2039 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index e2c7465df69f..54aeea57d209 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -3,6 +3,7 @@
3 * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> 3 * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
4 * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> 4 * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
5 * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> 5 * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
6 * Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
6 */ 7 */
7 8
8/* 9/*
@@ -32,9 +33,11 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
32 break; 33 break;
33 case USB_DEVICE_ID_ELECOM_DEFT_WIRED: 34 case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
34 case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS: 35 case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
35 /* The DEFT trackball has eight buttons, but its descriptor only 36 case USB_DEVICE_ID_ELECOM_HUGE_WIRED:
36 * reports five, disabling the three Fn buttons on the top of 37 case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS:
37 * the mouse. 38 /* The DEFT/HUGE trackball has eight buttons, but its descriptor
39 * only reports five, disabling the three Fn buttons on the top
40 * of the mouse.
38 * 41 *
39 * Apply the following diff to the descriptor: 42 * Apply the following diff to the descriptor:
40 * 43 *
@@ -62,7 +65,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
62 * End Collection, End Collection, 65 * End Collection, End Collection,
63 */ 66 */
64 if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) { 67 if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
65 hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n"); 68 hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n");
66 rdesc[13] = 8; /* Button/Variable Report Count */ 69 rdesc[13] = 8; /* Button/Variable Report Count */
67 rdesc[21] = 8; /* Button/Variable Usage Maximum */ 70 rdesc[21] = 8; /* Button/Variable Usage Maximum */
68 rdesc[29] = 0; /* Button/Constant Report Count */ 71 rdesc[29] = 0; /* Button/Constant Report Count */
@@ -76,6 +79,8 @@ static const struct hid_device_id elecom_devices[] = {
76 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, 79 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
77 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, 80 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
78 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, 81 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
82 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
83 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
79 { } 84 { }
80}; 85};
81MODULE_DEVICE_TABLE(hid, elecom_devices); 86MODULE_DEVICE_TABLE(hid, elecom_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index a98919199858..be2e005c3c51 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -368,6 +368,8 @@
368#define USB_DEVICE_ID_ELECOM_BM084 0x0061 368#define USB_DEVICE_ID_ELECOM_BM084 0x0061
369#define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe 369#define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe
370#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff 370#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff
371#define USB_DEVICE_ID_ELECOM_HUGE_WIRED 0x010c
372#define USB_DEVICE_ID_ELECOM_HUGE_WIRELESS 0x010d
371 373
372#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 374#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
373#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 375#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 089bad8a9a21..045b5da9b992 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -975,6 +975,8 @@ static int usbhid_parse(struct hid_device *hid)
975 unsigned int rsize = 0; 975 unsigned int rsize = 0;
976 char *rdesc; 976 char *rdesc;
977 int ret, n; 977 int ret, n;
978 int num_descriptors;
979 size_t offset = offsetof(struct hid_descriptor, desc);
978 980
979 quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), 981 quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
980 le16_to_cpu(dev->descriptor.idProduct)); 982 le16_to_cpu(dev->descriptor.idProduct));
@@ -997,10 +999,18 @@ static int usbhid_parse(struct hid_device *hid)
997 return -ENODEV; 999 return -ENODEV;
998 } 1000 }
999 1001
1002 if (hdesc->bLength < sizeof(struct hid_descriptor)) {
1003 dbg_hid("hid descriptor is too short\n");
1004 return -EINVAL;
1005 }
1006
1000 hid->version = le16_to_cpu(hdesc->bcdHID); 1007 hid->version = le16_to_cpu(hdesc->bcdHID);
1001 hid->country = hdesc->bCountryCode; 1008 hid->country = hdesc->bCountryCode;
1002 1009
1003 for (n = 0; n < hdesc->bNumDescriptors; n++) 1010 num_descriptors = min_t(int, hdesc->bNumDescriptors,
1011 (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
1012
1013 for (n = 0; n < num_descriptors; n++)
1004 if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT) 1014 if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
1005 rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); 1015 rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
1006 1016
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index efd5db743319..894b67ac2cae 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -640,6 +640,7 @@ void vmbus_close(struct vmbus_channel *channel)
640 */ 640 */
641 return; 641 return;
642 } 642 }
643 mutex_lock(&vmbus_connection.channel_mutex);
643 /* 644 /*
644 * Close all the sub-channels first and then close the 645 * Close all the sub-channels first and then close the
645 * primary channel. 646 * primary channel.
@@ -648,16 +649,15 @@ void vmbus_close(struct vmbus_channel *channel)
648 cur_channel = list_entry(cur, struct vmbus_channel, sc_list); 649 cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
649 vmbus_close_internal(cur_channel); 650 vmbus_close_internal(cur_channel);
650 if (cur_channel->rescind) { 651 if (cur_channel->rescind) {
651 mutex_lock(&vmbus_connection.channel_mutex); 652 hv_process_channel_removal(
652 hv_process_channel_removal(cur_channel,
653 cur_channel->offermsg.child_relid); 653 cur_channel->offermsg.child_relid);
654 mutex_unlock(&vmbus_connection.channel_mutex);
655 } 654 }
656 } 655 }
657 /* 656 /*
658 * Now close the primary. 657 * Now close the primary.
659 */ 658 */
660 vmbus_close_internal(channel); 659 vmbus_close_internal(channel);
660 mutex_unlock(&vmbus_connection.channel_mutex);
661} 661}
662EXPORT_SYMBOL_GPL(vmbus_close); 662EXPORT_SYMBOL_GPL(vmbus_close);
663 663
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index bcbb031f7263..018d2e0f8ec5 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -159,7 +159,7 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
159 159
160 160
161 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 161 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
162 162 channel->rescind = true;
163 list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, 163 list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
164 msglistentry) { 164 msglistentry) {
165 165
@@ -381,14 +381,21 @@ static void vmbus_release_relid(u32 relid)
381 true); 381 true);
382} 382}
383 383
384void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) 384void hv_process_channel_removal(u32 relid)
385{ 385{
386 unsigned long flags; 386 unsigned long flags;
387 struct vmbus_channel *primary_channel; 387 struct vmbus_channel *primary_channel, *channel;
388 388
389 BUG_ON(!channel->rescind);
390 BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); 389 BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
391 390
391 /*
392 * Make sure channel is valid as we may have raced.
393 */
394 channel = relid2channel(relid);
395 if (!channel)
396 return;
397
398 BUG_ON(!channel->rescind);
392 if (channel->target_cpu != get_cpu()) { 399 if (channel->target_cpu != get_cpu()) {
393 put_cpu(); 400 put_cpu();
394 smp_call_function_single(channel->target_cpu, 401 smp_call_function_single(channel->target_cpu,
@@ -515,6 +522,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
515 if (!fnew) { 522 if (!fnew) {
516 if (channel->sc_creation_callback != NULL) 523 if (channel->sc_creation_callback != NULL)
517 channel->sc_creation_callback(newchannel); 524 channel->sc_creation_callback(newchannel);
525 newchannel->probe_done = true;
518 return; 526 return;
519 } 527 }
520 528
@@ -834,7 +842,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
834{ 842{
835 struct vmbus_channel_rescind_offer *rescind; 843 struct vmbus_channel_rescind_offer *rescind;
836 struct vmbus_channel *channel; 844 struct vmbus_channel *channel;
837 unsigned long flags;
838 struct device *dev; 845 struct device *dev;
839 846
840 rescind = (struct vmbus_channel_rescind_offer *)hdr; 847 rescind = (struct vmbus_channel_rescind_offer *)hdr;
@@ -873,16 +880,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
873 return; 880 return;
874 } 881 }
875 882
876 spin_lock_irqsave(&channel->lock, flags);
877 channel->rescind = true;
878 spin_unlock_irqrestore(&channel->lock, flags);
879
880 /*
881 * Now that we have posted the rescind state, perform
882 * rescind related cleanup.
883 */
884 vmbus_rescind_cleanup(channel);
885
886 /* 883 /*
887 * Now wait for offer handling to complete. 884 * Now wait for offer handling to complete.
888 */ 885 */
@@ -901,6 +898,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
901 if (channel->device_obj) { 898 if (channel->device_obj) {
902 if (channel->chn_rescind_callback) { 899 if (channel->chn_rescind_callback) {
903 channel->chn_rescind_callback(channel); 900 channel->chn_rescind_callback(channel);
901 vmbus_rescind_cleanup(channel);
904 return; 902 return;
905 } 903 }
906 /* 904 /*
@@ -909,6 +907,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
909 */ 907 */
910 dev = get_device(&channel->device_obj->device); 908 dev = get_device(&channel->device_obj->device);
911 if (dev) { 909 if (dev) {
910 vmbus_rescind_cleanup(channel);
912 vmbus_device_unregister(channel->device_obj); 911 vmbus_device_unregister(channel->device_obj);
913 put_device(dev); 912 put_device(dev);
914 } 913 }
@@ -921,16 +920,16 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
921 * 1. Close all sub-channels first 920 * 1. Close all sub-channels first
922 * 2. Then close the primary channel. 921 * 2. Then close the primary channel.
923 */ 922 */
923 mutex_lock(&vmbus_connection.channel_mutex);
924 vmbus_rescind_cleanup(channel);
924 if (channel->state == CHANNEL_OPEN_STATE) { 925 if (channel->state == CHANNEL_OPEN_STATE) {
925 /* 926 /*
926 * The channel is currently not open; 927 * The channel is currently not open;
927 * it is safe for us to cleanup the channel. 928 * it is safe for us to cleanup the channel.
928 */ 929 */
929 mutex_lock(&vmbus_connection.channel_mutex); 930 hv_process_channel_removal(rescind->child_relid);
930 hv_process_channel_removal(channel,
931 channel->offermsg.child_relid);
932 mutex_unlock(&vmbus_connection.channel_mutex);
933 } 931 }
932 mutex_unlock(&vmbus_connection.channel_mutex);
934 } 933 }
935} 934}
936 935
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index a9d49f6f6501..937801ac2fe0 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -768,8 +768,7 @@ static void vmbus_device_release(struct device *device)
768 struct vmbus_channel *channel = hv_dev->channel; 768 struct vmbus_channel *channel = hv_dev->channel;
769 769
770 mutex_lock(&vmbus_connection.channel_mutex); 770 mutex_lock(&vmbus_connection.channel_mutex);
771 hv_process_channel_removal(channel, 771 hv_process_channel_removal(channel->offermsg.child_relid);
772 channel->offermsg.child_relid);
773 mutex_unlock(&vmbus_connection.channel_mutex); 772 mutex_unlock(&vmbus_connection.channel_mutex);
774 kfree(hv_dev); 773 kfree(hv_dev);
775 774
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 54a47b40546f..f96830ffd9f1 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1021,7 +1021,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
1021 } 1021 }
1022 1022
1023 dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n", 1023 dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n",
1024 rinfo->sda_gpio, rinfo->scl_gpio); 1024 rinfo->scl_gpio, rinfo->sda_gpio);
1025 1025
1026 rinfo->prepare_recovery = i2c_imx_prepare_recovery; 1026 rinfo->prepare_recovery = i2c_imx_prepare_recovery;
1027 rinfo->unprepare_recovery = i2c_imx_unprepare_recovery; 1027 rinfo->unprepare_recovery = i2c_imx_unprepare_recovery;
@@ -1100,7 +1100,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
1100 } 1100 }
1101 1101
1102 /* Request IRQ */ 1102 /* Request IRQ */
1103 ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0, 1103 ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED,
1104 pdev->name, i2c_imx); 1104 pdev->name, i2c_imx);
1105 if (ret) { 1105 if (ret) {
1106 dev_err(&pdev->dev, "can't claim irq %d\n", irq); 1106 dev_err(&pdev->dev, "can't claim irq %d\n", irq);
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 22ffcb73c185..b51adffa4841 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -340,12 +340,15 @@ static int ismt_process_desc(const struct ismt_desc *desc,
340 data->word = dma_buffer[0] | (dma_buffer[1] << 8); 340 data->word = dma_buffer[0] | (dma_buffer[1] << 8);
341 break; 341 break;
342 case I2C_SMBUS_BLOCK_DATA: 342 case I2C_SMBUS_BLOCK_DATA:
343 case I2C_SMBUS_I2C_BLOCK_DATA:
344 if (desc->rxbytes != dma_buffer[0] + 1) 343 if (desc->rxbytes != dma_buffer[0] + 1)
345 return -EMSGSIZE; 344 return -EMSGSIZE;
346 345
347 memcpy(data->block, dma_buffer, desc->rxbytes); 346 memcpy(data->block, dma_buffer, desc->rxbytes);
348 break; 347 break;
348 case I2C_SMBUS_I2C_BLOCK_DATA:
349 memcpy(&data->block[1], dma_buffer, desc->rxbytes);
350 data->block[0] = desc->rxbytes;
351 break;
349 } 352 }
350 return 0; 353 return 0;
351 } 354 }
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 1ebb5e947e0b..23c2ea2baedc 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -360,6 +360,7 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
360 unsigned long fclk_rate = 12000000; 360 unsigned long fclk_rate = 12000000;
361 unsigned long internal_clk = 0; 361 unsigned long internal_clk = 0;
362 struct clk *fclk; 362 struct clk *fclk;
363 int error;
363 364
364 if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) { 365 if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) {
365 /* 366 /*
@@ -378,6 +379,13 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
378 * do this bit unconditionally. 379 * do this bit unconditionally.
379 */ 380 */
380 fclk = clk_get(omap->dev, "fck"); 381 fclk = clk_get(omap->dev, "fck");
382 if (IS_ERR(fclk)) {
383 error = PTR_ERR(fclk);
384 dev_err(omap->dev, "could not get fck: %i\n", error);
385
386 return error;
387 }
388
381 fclk_rate = clk_get_rate(fclk); 389 fclk_rate = clk_get_rate(fclk);
382 clk_put(fclk); 390 clk_put(fclk);
383 391
@@ -410,6 +418,12 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
410 else 418 else
411 internal_clk = 4000; 419 internal_clk = 4000;
412 fclk = clk_get(omap->dev, "fck"); 420 fclk = clk_get(omap->dev, "fck");
421 if (IS_ERR(fclk)) {
422 error = PTR_ERR(fclk);
423 dev_err(omap->dev, "could not get fck: %i\n", error);
424
425 return error;
426 }
413 fclk_rate = clk_get_rate(fclk) / 1000; 427 fclk_rate = clk_get_rate(fclk) / 1000;
414 clk_put(fclk); 428 clk_put(fclk);
415 429
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 0ecdb47a23ab..174579d32e5f 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -85,6 +85,9 @@
85/* SB800 constants */ 85/* SB800 constants */
86#define SB800_PIIX4_SMB_IDX 0xcd6 86#define SB800_PIIX4_SMB_IDX 0xcd6
87 87
88#define KERNCZ_IMC_IDX 0x3e
89#define KERNCZ_IMC_DATA 0x3f
90
88/* 91/*
89 * SB800 port is selected by bits 2:1 of the smb_en register (0x2c) 92 * SB800 port is selected by bits 2:1 of the smb_en register (0x2c)
90 * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f. 93 * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f.
@@ -94,6 +97,12 @@
94#define SB800_PIIX4_PORT_IDX_ALT 0x2e 97#define SB800_PIIX4_PORT_IDX_ALT 0x2e
95#define SB800_PIIX4_PORT_IDX_SEL 0x2f 98#define SB800_PIIX4_PORT_IDX_SEL 0x2f
96#define SB800_PIIX4_PORT_IDX_MASK 0x06 99#define SB800_PIIX4_PORT_IDX_MASK 0x06
100#define SB800_PIIX4_PORT_IDX_SHIFT 1
101
102/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
103#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
104#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
105#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
97 106
98/* insmod parameters */ 107/* insmod parameters */
99 108
@@ -149,6 +158,8 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
149 */ 158 */
150static DEFINE_MUTEX(piix4_mutex_sb800); 159static DEFINE_MUTEX(piix4_mutex_sb800);
151static u8 piix4_port_sel_sb800; 160static u8 piix4_port_sel_sb800;
161static u8 piix4_port_mask_sb800;
162static u8 piix4_port_shift_sb800;
152static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = { 163static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
153 " port 0", " port 2", " port 3", " port 4" 164 " port 0", " port 2", " port 3", " port 4"
154}; 165};
@@ -159,6 +170,7 @@ struct i2c_piix4_adapdata {
159 170
160 /* SB800 */ 171 /* SB800 */
161 bool sb800_main; 172 bool sb800_main;
173 bool notify_imc;
162 u8 port; /* Port number, shifted */ 174 u8 port; /* Port number, shifted */
163}; 175};
164 176
@@ -347,7 +359,19 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
347 359
348 /* Find which register is used for port selection */ 360 /* Find which register is used for port selection */
349 if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) { 361 if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
350 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT; 362 switch (PIIX4_dev->device) {
363 case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
364 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
365 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
366 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
367 break;
368 case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
369 default:
370 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
371 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
372 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
373 break;
374 }
351 } else { 375 } else {
352 mutex_lock(&piix4_mutex_sb800); 376 mutex_lock(&piix4_mutex_sb800);
353 outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX); 377 outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);
@@ -355,6 +379,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
355 piix4_port_sel_sb800 = (port_sel & 0x01) ? 379 piix4_port_sel_sb800 = (port_sel & 0x01) ?
356 SB800_PIIX4_PORT_IDX_ALT : 380 SB800_PIIX4_PORT_IDX_ALT :
357 SB800_PIIX4_PORT_IDX; 381 SB800_PIIX4_PORT_IDX;
382 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
383 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
358 mutex_unlock(&piix4_mutex_sb800); 384 mutex_unlock(&piix4_mutex_sb800);
359 } 385 }
360 386
@@ -572,6 +598,67 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
572 return 0; 598 return 0;
573} 599}
574 600
601static uint8_t piix4_imc_read(uint8_t idx)
602{
603 outb_p(idx, KERNCZ_IMC_IDX);
604 return inb_p(KERNCZ_IMC_DATA);
605}
606
607static void piix4_imc_write(uint8_t idx, uint8_t value)
608{
609 outb_p(idx, KERNCZ_IMC_IDX);
610 outb_p(value, KERNCZ_IMC_DATA);
611}
612
613static int piix4_imc_sleep(void)
614{
615 int timeout = MAX_TIMEOUT;
616
617 if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
618 return -EBUSY;
619
620 /* clear response register */
621 piix4_imc_write(0x82, 0x00);
622 /* request ownership flag */
623 piix4_imc_write(0x83, 0xB4);
624 /* kick off IMC Mailbox command 96 */
625 piix4_imc_write(0x80, 0x96);
626
627 while (timeout--) {
628 if (piix4_imc_read(0x82) == 0xfa) {
629 release_region(KERNCZ_IMC_IDX, 2);
630 return 0;
631 }
632 usleep_range(1000, 2000);
633 }
634
635 release_region(KERNCZ_IMC_IDX, 2);
636 return -ETIMEDOUT;
637}
638
639static void piix4_imc_wakeup(void)
640{
641 int timeout = MAX_TIMEOUT;
642
643 if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
644 return;
645
646 /* clear response register */
647 piix4_imc_write(0x82, 0x00);
648 /* release ownership flag */
649 piix4_imc_write(0x83, 0xB5);
650 /* kick off IMC Mailbox command 96 */
651 piix4_imc_write(0x80, 0x96);
652
653 while (timeout--) {
654 if (piix4_imc_read(0x82) == 0xfa)
655 break;
656 usleep_range(1000, 2000);
657 }
658
659 release_region(KERNCZ_IMC_IDX, 2);
660}
661
575/* 662/*
576 * Handles access to multiple SMBus ports on the SB800. 663 * Handles access to multiple SMBus ports on the SB800.
577 * The port is selected by bits 2:1 of the smb_en register (0x2c). 664 * The port is selected by bits 2:1 of the smb_en register (0x2c).
@@ -612,12 +699,47 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
612 return -EBUSY; 699 return -EBUSY;
613 } 700 }
614 701
702 /*
703 * Notify the IMC (Integrated Micro Controller) if required.
704 * Among other responsibilities, the IMC is in charge of monitoring
705 * the System fans and temperature sensors, and act accordingly.
706 * All this is done through SMBus and can/will collide
707 * with our transactions if they are long (BLOCK_DATA).
708 * Therefore we need to request the ownership flag during those
709 * transactions.
710 */
711 if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc) {
712 int ret;
713
714 ret = piix4_imc_sleep();
715 switch (ret) {
716 case -EBUSY:
717 dev_warn(&adap->dev,
718 "IMC base address index region 0x%x already in use.\n",
719 KERNCZ_IMC_IDX);
720 break;
721 case -ETIMEDOUT:
722 dev_warn(&adap->dev,
723 "Failed to communicate with the IMC.\n");
724 break;
725 default:
726 break;
727 }
728
729 /* If IMC communication fails do not retry */
730 if (ret) {
731 dev_warn(&adap->dev,
732 "Continuing without IMC notification.\n");
733 adapdata->notify_imc = false;
734 }
735 }
736
615 outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); 737 outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
616 smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); 738 smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
617 739
618 port = adapdata->port; 740 port = adapdata->port;
619 if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != port) 741 if ((smba_en_lo & piix4_port_mask_sb800) != port)
620 outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | port, 742 outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port,
621 SB800_PIIX4_SMB_IDX + 1); 743 SB800_PIIX4_SMB_IDX + 1);
622 744
623 retval = piix4_access(adap, addr, flags, read_write, 745 retval = piix4_access(adap, addr, flags, read_write,
@@ -628,6 +750,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
628 /* Release the semaphore */ 750 /* Release the semaphore */
629 outb_p(smbslvcnt | 0x20, SMBSLVCNT); 751 outb_p(smbslvcnt | 0x20, SMBSLVCNT);
630 752
753 if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc)
754 piix4_imc_wakeup();
755
631 mutex_unlock(&piix4_mutex_sb800); 756 mutex_unlock(&piix4_mutex_sb800);
632 757
633 return retval; 758 return retval;
@@ -679,7 +804,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS];
679static struct i2c_adapter *piix4_aux_adapter; 804static struct i2c_adapter *piix4_aux_adapter;
680 805
681static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, 806static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
682 bool sb800_main, u8 port, 807 bool sb800_main, u8 port, bool notify_imc,
683 const char *name, struct i2c_adapter **padap) 808 const char *name, struct i2c_adapter **padap)
684{ 809{
685 struct i2c_adapter *adap; 810 struct i2c_adapter *adap;
@@ -706,7 +831,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
706 831
707 adapdata->smba = smba; 832 adapdata->smba = smba;
708 adapdata->sb800_main = sb800_main; 833 adapdata->sb800_main = sb800_main;
709 adapdata->port = port << 1; 834 adapdata->port = port << piix4_port_shift_sb800;
835 adapdata->notify_imc = notify_imc;
710 836
711 /* set up the sysfs linkage to our parent device */ 837 /* set up the sysfs linkage to our parent device */
712 adap->dev.parent = &dev->dev; 838 adap->dev.parent = &dev->dev;
@@ -728,14 +854,15 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
728 return 0; 854 return 0;
729} 855}
730 856
731static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba) 857static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba,
858 bool notify_imc)
732{ 859{
733 struct i2c_piix4_adapdata *adapdata; 860 struct i2c_piix4_adapdata *adapdata;
734 int port; 861 int port;
735 int retval; 862 int retval;
736 863
737 for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) { 864 for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) {
738 retval = piix4_add_adapter(dev, smba, true, port, 865 retval = piix4_add_adapter(dev, smba, true, port, notify_imc,
739 piix4_main_port_names_sb800[port], 866 piix4_main_port_names_sb800[port],
740 &piix4_main_adapters[port]); 867 &piix4_main_adapters[port]);
741 if (retval < 0) 868 if (retval < 0)
@@ -769,6 +896,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
769 dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && 896 dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
770 dev->revision >= 0x40) || 897 dev->revision >= 0x40) ||
771 dev->vendor == PCI_VENDOR_ID_AMD) { 898 dev->vendor == PCI_VENDOR_ID_AMD) {
899 bool notify_imc = false;
772 is_sb800 = true; 900 is_sb800 = true;
773 901
774 if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) { 902 if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) {
@@ -778,6 +906,20 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
778 return -EBUSY; 906 return -EBUSY;
779 } 907 }
780 908
909 if (dev->vendor == PCI_VENDOR_ID_AMD &&
910 dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) {
911 u8 imc;
912
913 /*
914 * Detect if IMC is active or not, this method is
915 * described on coreboot's AMD IMC notes
916 */
917 pci_bus_read_config_byte(dev->bus, PCI_DEVFN(0x14, 3),
918 0x40, &imc);
919 if (imc & 0x80)
920 notify_imc = true;
921 }
922
781 /* base address location etc changed in SB800 */ 923 /* base address location etc changed in SB800 */
782 retval = piix4_setup_sb800(dev, id, 0); 924 retval = piix4_setup_sb800(dev, id, 0);
783 if (retval < 0) { 925 if (retval < 0) {
@@ -789,7 +931,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
789 * Try to register multiplexed main SMBus adapter, 931 * Try to register multiplexed main SMBus adapter,
790 * give up if we can't 932 * give up if we can't
791 */ 933 */
792 retval = piix4_add_adapters_sb800(dev, retval); 934 retval = piix4_add_adapters_sb800(dev, retval, notify_imc);
793 if (retval < 0) { 935 if (retval < 0) {
794 release_region(SB800_PIIX4_SMB_IDX, 2); 936 release_region(SB800_PIIX4_SMB_IDX, 2);
795 return retval; 937 return retval;
@@ -800,7 +942,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
800 return retval; 942 return retval;
801 943
802 /* Try to register main SMBus adapter, give up if we can't */ 944 /* Try to register main SMBus adapter, give up if we can't */
803 retval = piix4_add_adapter(dev, retval, false, 0, "", 945 retval = piix4_add_adapter(dev, retval, false, 0, false, "",
804 &piix4_main_adapters[0]); 946 &piix4_main_adapters[0]);
805 if (retval < 0) 947 if (retval < 0)
806 return retval; 948 return retval;
@@ -827,7 +969,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
827 if (retval > 0) { 969 if (retval > 0) {
828 /* Try to add the aux adapter if it exists, 970 /* Try to add the aux adapter if it exists,
829 * piix4_add_adapter will clean up if this fails */ 971 * piix4_add_adapter will clean up if this fails */
830 piix4_add_adapter(dev, retval, false, 0, 972 piix4_add_adapter(dev, retval, false, 0, false,
831 is_sb800 ? piix4_aux_port_name_sb800 : "", 973 is_sb800 ? piix4_aux_port_name_sb800 : "",
832 &piix4_aux_adapter); 974 &piix4_aux_adapter);
833 } 975 }
diff --git a/drivers/input/input.c b/drivers/input/input.c
index d268fdc23c64..762bfb9487dc 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -933,58 +933,52 @@ int input_set_keycode(struct input_dev *dev,
933} 933}
934EXPORT_SYMBOL(input_set_keycode); 934EXPORT_SYMBOL(input_set_keycode);
935 935
936bool input_match_device_id(const struct input_dev *dev,
937 const struct input_device_id *id)
938{
939 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
940 if (id->bustype != dev->id.bustype)
941 return false;
942
943 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
944 if (id->vendor != dev->id.vendor)
945 return false;
946
947 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
948 if (id->product != dev->id.product)
949 return false;
950
951 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
952 if (id->version != dev->id.version)
953 return false;
954
955 if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
956 !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
957 !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
958 !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
959 !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
960 !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
961 !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
962 !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
963 !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
964 !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
965 return false;
966 }
967
968 return true;
969}
970EXPORT_SYMBOL(input_match_device_id);
971
936static const struct input_device_id *input_match_device(struct input_handler *handler, 972static const struct input_device_id *input_match_device(struct input_handler *handler,
937 struct input_dev *dev) 973 struct input_dev *dev)
938{ 974{
939 const struct input_device_id *id; 975 const struct input_device_id *id;
940 976
941 for (id = handler->id_table; id->flags || id->driver_info; id++) { 977 for (id = handler->id_table; id->flags || id->driver_info; id++) {
942 978 if (input_match_device_id(dev, id) &&
943 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS) 979 (!handler->match || handler->match(handler, dev))) {
944 if (id->bustype != dev->id.bustype)
945 continue;
946
947 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
948 if (id->vendor != dev->id.vendor)
949 continue;
950
951 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
952 if (id->product != dev->id.product)
953 continue;
954
955 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
956 if (id->version != dev->id.version)
957 continue;
958
959 if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX))
960 continue;
961
962 if (!bitmap_subset(id->keybit, dev->keybit, KEY_MAX))
963 continue;
964
965 if (!bitmap_subset(id->relbit, dev->relbit, REL_MAX))
966 continue;
967
968 if (!bitmap_subset(id->absbit, dev->absbit, ABS_MAX))
969 continue;
970
971 if (!bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX))
972 continue;
973
974 if (!bitmap_subset(id->ledbit, dev->ledbit, LED_MAX))
975 continue;
976
977 if (!bitmap_subset(id->sndbit, dev->sndbit, SND_MAX))
978 continue;
979
980 if (!bitmap_subset(id->ffbit, dev->ffbit, FF_MAX))
981 continue;
982
983 if (!bitmap_subset(id->swbit, dev->swbit, SW_MAX))
984 continue;
985
986 if (!handler->match || handler->match(handler, dev))
987 return id; 980 return id;
981 }
988 } 982 }
989 983
990 return NULL; 984 return NULL;
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 29d677c714d2..7b29a8944039 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -747,6 +747,68 @@ static void joydev_cleanup(struct joydev *joydev)
747 input_close_device(handle); 747 input_close_device(handle);
748} 748}
749 749
750/*
751 * These codes are copied from from hid-ids.h, unfortunately there is no common
752 * usb_ids/bt_ids.h header.
753 */
754#define USB_VENDOR_ID_SONY 0x054c
755#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
756#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
757#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
758#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
759
760#define USB_VENDOR_ID_THQ 0x20d6
761#define USB_DEVICE_ID_THQ_PS3_UDRAW 0xcb17
762
763#define ACCEL_DEV(vnd, prd) \
764 { \
765 .flags = INPUT_DEVICE_ID_MATCH_VENDOR | \
766 INPUT_DEVICE_ID_MATCH_PRODUCT | \
767 INPUT_DEVICE_ID_MATCH_PROPBIT, \
768 .vendor = (vnd), \
769 .product = (prd), \
770 .propbit = { BIT_MASK(INPUT_PROP_ACCELEROMETER) }, \
771 }
772
773static const struct input_device_id joydev_blacklist[] = {
774 /* Avoid touchpads and touchscreens */
775 {
776 .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
777 INPUT_DEVICE_ID_MATCH_KEYBIT,
778 .evbit = { BIT_MASK(EV_KEY) },
779 .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
780 },
781 /* Avoid tablets, digitisers and similar devices */
782 {
783 .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
784 INPUT_DEVICE_ID_MATCH_KEYBIT,
785 .evbit = { BIT_MASK(EV_KEY) },
786 .keybit = { [BIT_WORD(BTN_DIGI)] = BIT_MASK(BTN_DIGI) },
787 },
788 /* Disable accelerometers on composite devices */
789 ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER),
790 ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
791 ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
792 ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
793 ACCEL_DEV(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW),
794 { /* sentinel */ }
795};
796
797static bool joydev_dev_is_blacklisted(struct input_dev *dev)
798{
799 const struct input_device_id *id;
800
801 for (id = joydev_blacklist; id->flags; id++) {
802 if (input_match_device_id(dev, id)) {
803 dev_dbg(&dev->dev,
804 "joydev: blacklisting '%s'\n", dev->name);
805 return true;
806 }
807 }
808
809 return false;
810}
811
750static bool joydev_dev_is_absolute_mouse(struct input_dev *dev) 812static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
751{ 813{
752 DECLARE_BITMAP(jd_scratch, KEY_CNT); 814 DECLARE_BITMAP(jd_scratch, KEY_CNT);
@@ -807,12 +869,8 @@ static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
807 869
808static bool joydev_match(struct input_handler *handler, struct input_dev *dev) 870static bool joydev_match(struct input_handler *handler, struct input_dev *dev)
809{ 871{
810 /* Avoid touchpads and touchscreens */ 872 /* Disable blacklisted devices */
811 if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_TOUCH, dev->keybit)) 873 if (joydev_dev_is_blacklisted(dev))
812 return false;
813
814 /* Avoid tablets, digitisers and similar devices */
815 if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_DIGI, dev->keybit))
816 return false; 874 return false;
817 875
818 /* Avoid absolute mice */ 876 /* Avoid absolute mice */
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index e37e335e406f..6da607d3b811 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -234,14 +234,7 @@ static irqreturn_t tca8418_irq_handler(int irq, void *dev_id)
234static int tca8418_configure(struct tca8418_keypad *keypad_data, 234static int tca8418_configure(struct tca8418_keypad *keypad_data,
235 u32 rows, u32 cols) 235 u32 rows, u32 cols)
236{ 236{
237 int reg, error; 237 int reg, error = 0;
238
239 /* Write config register, if this fails assume device not present */
240 error = tca8418_write_byte(keypad_data, REG_CFG,
241 CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
242 if (error < 0)
243 return -ENODEV;
244
245 238
246 /* Assemble a mask for row and column registers */ 239 /* Assemble a mask for row and column registers */
247 reg = ~(~0 << rows); 240 reg = ~(~0 << rows);
@@ -257,6 +250,12 @@ static int tca8418_configure(struct tca8418_keypad *keypad_data,
257 error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8); 250 error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8);
258 error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16); 251 error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16);
259 252
253 if (error)
254 return error;
255
256 error = tca8418_write_byte(keypad_data, REG_CFG,
257 CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
258
260 return error; 259 return error;
261} 260}
262 261
@@ -268,6 +267,7 @@ static int tca8418_keypad_probe(struct i2c_client *client,
268 struct input_dev *input; 267 struct input_dev *input;
269 u32 rows = 0, cols = 0; 268 u32 rows = 0, cols = 0;
270 int error, row_shift, max_keys; 269 int error, row_shift, max_keys;
270 u8 reg;
271 271
272 /* Check i2c driver capabilities */ 272 /* Check i2c driver capabilities */
273 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) { 273 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
@@ -301,10 +301,10 @@ static int tca8418_keypad_probe(struct i2c_client *client,
301 keypad_data->client = client; 301 keypad_data->client = client;
302 keypad_data->row_shift = row_shift; 302 keypad_data->row_shift = row_shift;
303 303
304 /* Initialize the chip or fail if chip isn't present */ 304 /* Read key lock register, if this fails assume device not present */
305 error = tca8418_configure(keypad_data, rows, cols); 305 error = tca8418_read_byte(keypad_data, REG_KEY_LCK_EC, &reg);
306 if (error < 0) 306 if (error)
307 return error; 307 return -ENODEV;
308 308
309 /* Configure input device */ 309 /* Configure input device */
310 input = devm_input_allocate_device(dev); 310 input = devm_input_allocate_device(dev);
@@ -340,6 +340,11 @@ static int tca8418_keypad_probe(struct i2c_client *client,
340 return error; 340 return error;
341 } 341 }
342 342
343 /* Initialize the chip */
344 error = tca8418_configure(keypad_data, rows, cols);
345 if (error < 0)
346 return error;
347
343 error = input_register_device(input); 348 error = input_register_device(input);
344 if (error) { 349 if (error) {
345 dev_err(dev, "Unable to register input device, error: %d\n", 350 dev_err(dev, "Unable to register input device, error: %d\n",
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 6cee5adc3b5c..debeeaeb8812 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -403,6 +403,7 @@ static const struct platform_device_id axp_pek_id_match[] = {
403 }, 403 },
404 { /* sentinel */ } 404 { /* sentinel */ }
405}; 405};
406MODULE_DEVICE_TABLE(platform, axp_pek_id_match);
406 407
407static struct platform_driver axp20x_pek_driver = { 408static struct platform_driver axp20x_pek_driver = {
408 .probe = axp20x_pek_probe, 409 .probe = axp20x_pek_probe,
@@ -417,4 +418,3 @@ module_platform_driver(axp20x_pek_driver);
417MODULE_DESCRIPTION("axp20x Power Button"); 418MODULE_DESCRIPTION("axp20x Power Button");
418MODULE_AUTHOR("Carlo Caione <carlo@caione.org>"); 419MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
419MODULE_LICENSE("GPL"); 420MODULE_LICENSE("GPL");
420MODULE_ALIAS("platform:axp20x-pek");
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 6bf82ea8c918..ae473123583b 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
1635 return NULL; 1635 return NULL;
1636 } 1636 }
1637 1637
1638 while (buflen > 0) { 1638 while (buflen >= sizeof(*union_desc)) {
1639 union_desc = (struct usb_cdc_union_desc *)buf; 1639 union_desc = (struct usb_cdc_union_desc *)buf;
1640 1640
1641 if (union_desc->bLength > buflen) {
1642 dev_err(&intf->dev, "Too large descriptor\n");
1643 return NULL;
1644 }
1645
1641 if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE && 1646 if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
1642 union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) { 1647 union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
1643 dev_dbg(&intf->dev, "Found union header\n"); 1648 dev_dbg(&intf->dev, "Found union header\n");
1644 return union_desc; 1649
1650 if (union_desc->bLength >= sizeof(*union_desc))
1651 return union_desc;
1652
1653 dev_err(&intf->dev,
1654 "Union descriptor to short (%d vs %zd\n)",
1655 union_desc->bLength, sizeof(*union_desc));
1656 return NULL;
1645 } 1657 }
1646 1658
1647 buflen -= union_desc->bLength; 1659 buflen -= union_desc->bLength;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 5af0b7d200bc..ee5466a374bf 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1709,8 +1709,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse,
1709 .sensor_pdata = { 1709 .sensor_pdata = {
1710 .sensor_type = rmi_sensor_touchpad, 1710 .sensor_type = rmi_sensor_touchpad,
1711 .axis_align.flip_y = true, 1711 .axis_align.flip_y = true,
1712 /* to prevent cursors jumps: */ 1712 .kernel_tracking = false,
1713 .kernel_tracking = true,
1714 .topbuttonpad = topbuttonpad, 1713 .topbuttonpad = topbuttonpad,
1715 }, 1714 },
1716 .f30_data = { 1715 .f30_data = {
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 32d2762448aa..b3bbad7d2282 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -72,6 +72,9 @@ struct goodix_ts_data {
72#define GOODIX_REG_CONFIG_DATA 0x8047 72#define GOODIX_REG_CONFIG_DATA 0x8047
73#define GOODIX_REG_ID 0x8140 73#define GOODIX_REG_ID 0x8140
74 74
75#define GOODIX_BUFFER_STATUS_READY BIT(7)
76#define GOODIX_BUFFER_STATUS_TIMEOUT 20
77
75#define RESOLUTION_LOC 1 78#define RESOLUTION_LOC 1
76#define MAX_CONTACTS_LOC 5 79#define MAX_CONTACTS_LOC 5
77#define TRIGGER_LOC 6 80#define TRIGGER_LOC 6
@@ -195,35 +198,53 @@ static int goodix_get_cfg_len(u16 id)
195 198
196static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) 199static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
197{ 200{
201 unsigned long max_timeout;
198 int touch_num; 202 int touch_num;
199 int error; 203 int error;
200 204
201 error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, data, 205 /*
202 GOODIX_CONTACT_SIZE + 1); 206 * The 'buffer status' bit, which indicates that the data is valid, is
203 if (error) { 207 * not set as soon as the interrupt is raised, but slightly after.
204 dev_err(&ts->client->dev, "I2C transfer error: %d\n", error); 208 * This takes around 10 ms to happen, so we poll for 20 ms.
205 return error; 209 */
206 } 210 max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT);
211 do {
212 error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR,
213 data, GOODIX_CONTACT_SIZE + 1);
214 if (error) {
215 dev_err(&ts->client->dev, "I2C transfer error: %d\n",
216 error);
217 return error;
218 }
207 219
208 if (!(data[0] & 0x80)) 220 if (data[0] & GOODIX_BUFFER_STATUS_READY) {
209 return -EAGAIN; 221 touch_num = data[0] & 0x0f;
222 if (touch_num > ts->max_touch_num)
223 return -EPROTO;
224
225 if (touch_num > 1) {
226 data += 1 + GOODIX_CONTACT_SIZE;
227 error = goodix_i2c_read(ts->client,
228 GOODIX_READ_COOR_ADDR +
229 1 + GOODIX_CONTACT_SIZE,
230 data,
231 GOODIX_CONTACT_SIZE *
232 (touch_num - 1));
233 if (error)
234 return error;
235 }
236
237 return touch_num;
238 }
210 239
211 touch_num = data[0] & 0x0f; 240 usleep_range(1000, 2000); /* Poll every 1 - 2 ms */
212 if (touch_num > ts->max_touch_num) 241 } while (time_before(jiffies, max_timeout));
213 return -EPROTO;
214
215 if (touch_num > 1) {
216 data += 1 + GOODIX_CONTACT_SIZE;
217 error = goodix_i2c_read(ts->client,
218 GOODIX_READ_COOR_ADDR +
219 1 + GOODIX_CONTACT_SIZE,
220 data,
221 GOODIX_CONTACT_SIZE * (touch_num - 1));
222 if (error)
223 return error;
224 }
225 242
226 return touch_num; 243 /*
244 * The Goodix panel will send spurious interrupts after a
245 * 'finger up' event, which will always cause a timeout.
246 */
247 return 0;
227} 248}
228 249
229static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data) 250static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index 157fdb4bb2e8..8c6c6178ec12 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -663,12 +663,10 @@ static int stmfts_probe(struct i2c_client *client,
663 sdata->input->open = stmfts_input_open; 663 sdata->input->open = stmfts_input_open;
664 sdata->input->close = stmfts_input_close; 664 sdata->input->close = stmfts_input_close;
665 665
666 input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_X);
667 input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_Y);
666 touchscreen_parse_properties(sdata->input, true, &sdata->prop); 668 touchscreen_parse_properties(sdata->input, true, &sdata->prop);
667 669
668 input_set_abs_params(sdata->input, ABS_MT_POSITION_X, 0,
669 sdata->prop.max_x, 0, 0);
670 input_set_abs_params(sdata->input, ABS_MT_POSITION_Y, 0,
671 sdata->prop.max_y, 0, 0);
672 input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); 670 input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
673 input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0); 671 input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0);
674 input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0); 672 input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 7953381d939a..f1043ae71dcc 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -161,7 +161,7 @@ static void titsc_step_config(struct titsc *ts_dev)
161 break; 161 break;
162 case 5: 162 case 5:
163 config |= ts_dev->bit_xp | STEPCONFIG_INP_AN4 | 163 config |= ts_dev->bit_xp | STEPCONFIG_INP_AN4 |
164 ts_dev->bit_xn | ts_dev->bit_yp; 164 STEPCONFIG_XNP | STEPCONFIG_YPN;
165 break; 165 break;
166 case 8: 166 case 8:
167 config |= ts_dev->bit_yp | STEPCONFIG_INP(ts_dev->inp_xp); 167 config |= ts_dev->bit_yp | STEPCONFIG_INP(ts_dev->inp_xp);
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 51f8215877f5..8e8874d23717 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2773,14 +2773,16 @@ int __init amd_iommu_init_api(void)
2773 2773
2774int __init amd_iommu_init_dma_ops(void) 2774int __init amd_iommu_init_dma_ops(void)
2775{ 2775{
2776 swiotlb = iommu_pass_through ? 1 : 0; 2776 swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0;
2777 iommu_detected = 1; 2777 iommu_detected = 1;
2778 2778
2779 /* 2779 /*
2780 * In case we don't initialize SWIOTLB (actually the common case 2780 * In case we don't initialize SWIOTLB (actually the common case
2781 * when AMD IOMMU is enabled), make sure there are global 2781 * when AMD IOMMU is enabled and SME is not active), make sure there
2782 * dma_ops set as a fall-back for devices not handled by this 2782 * are global dma_ops set as a fall-back for devices not handled by
2783 * driver (for example non-PCI devices). 2783 * this driver (for example non-PCI devices). When SME is active,
2784 * make sure that swiotlb variable remains set so the global dma_ops
2785 * continue to be SWIOTLB.
2784 */ 2786 */
2785 if (!swiotlb) 2787 if (!swiotlb)
2786 dma_ops = &nommu_dma_ops; 2788 dma_ops = &nommu_dma_ops;
@@ -3046,6 +3048,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
3046 mutex_unlock(&domain->api_lock); 3048 mutex_unlock(&domain->api_lock);
3047 3049
3048 domain_flush_tlb_pde(domain); 3050 domain_flush_tlb_pde(domain);
3051 domain_flush_complete(domain);
3049 3052
3050 return unmap_size; 3053 return unmap_size;
3051} 3054}
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index f596fcc32898..25c2c75f5332 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -709,7 +709,7 @@ static const struct dev_pm_ops sysmmu_pm_ops = {
709 pm_runtime_force_resume) 709 pm_runtime_force_resume)
710}; 710};
711 711
712static const struct of_device_id sysmmu_of_match[] __initconst = { 712static const struct of_device_id sysmmu_of_match[] = {
713 { .compatible = "samsung,exynos-sysmmu", }, 713 { .compatible = "samsung,exynos-sysmmu", },
714 { }, 714 { },
715}; 715};
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index eed6c397d840..f8a808d45034 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -1797,12 +1797,19 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
1797 */ 1797 */
1798 switch (msg->msg[1]) { 1798 switch (msg->msg[1]) {
1799 case CEC_MSG_GET_CEC_VERSION: 1799 case CEC_MSG_GET_CEC_VERSION:
1800 case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
1801 case CEC_MSG_ABORT: 1800 case CEC_MSG_ABORT:
1802 case CEC_MSG_GIVE_DEVICE_POWER_STATUS: 1801 case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
1803 case CEC_MSG_GIVE_PHYSICAL_ADDR:
1804 case CEC_MSG_GIVE_OSD_NAME: 1802 case CEC_MSG_GIVE_OSD_NAME:
1803 /*
1804 * These messages reply with a directed message, so ignore if
1805 * the initiator is Unregistered.
1806 */
1807 if (!adap->passthrough && from_unregistered)
1808 return 0;
1809 /* Fall through */
1810 case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
1805 case CEC_MSG_GIVE_FEATURES: 1811 case CEC_MSG_GIVE_FEATURES:
1812 case CEC_MSG_GIVE_PHYSICAL_ADDR:
1806 /* 1813 /*
1807 * Skip processing these messages if the passthrough mode 1814 * Skip processing these messages if the passthrough mode
1808 * is on. 1815 * is on.
@@ -1810,7 +1817,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
1810 if (adap->passthrough) 1817 if (adap->passthrough)
1811 goto skip_processing; 1818 goto skip_processing;
1812 /* Ignore if addressing is wrong */ 1819 /* Ignore if addressing is wrong */
1813 if (is_broadcast || from_unregistered) 1820 if (is_broadcast)
1814 return 0; 1821 return 0;
1815 break; 1822 break;
1816 1823
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 2fcba1616168..9139d01ba7ed 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -141,22 +141,39 @@ struct dvb_frontend_private {
141static void dvb_frontend_invoke_release(struct dvb_frontend *fe, 141static void dvb_frontend_invoke_release(struct dvb_frontend *fe,
142 void (*release)(struct dvb_frontend *fe)); 142 void (*release)(struct dvb_frontend *fe));
143 143
144static void dvb_frontend_free(struct kref *ref) 144static void __dvb_frontend_free(struct dvb_frontend *fe)
145{ 145{
146 struct dvb_frontend *fe =
147 container_of(ref, struct dvb_frontend, refcount);
148 struct dvb_frontend_private *fepriv = fe->frontend_priv; 146 struct dvb_frontend_private *fepriv = fe->frontend_priv;
149 147
148 if (!fepriv)
149 return;
150
150 dvb_free_device(fepriv->dvbdev); 151 dvb_free_device(fepriv->dvbdev);
151 152
152 dvb_frontend_invoke_release(fe, fe->ops.release); 153 dvb_frontend_invoke_release(fe, fe->ops.release);
153 154
154 kfree(fepriv); 155 kfree(fepriv);
156 fe->frontend_priv = NULL;
157}
158
159static void dvb_frontend_free(struct kref *ref)
160{
161 struct dvb_frontend *fe =
162 container_of(ref, struct dvb_frontend, refcount);
163
164 __dvb_frontend_free(fe);
155} 165}
156 166
157static void dvb_frontend_put(struct dvb_frontend *fe) 167static void dvb_frontend_put(struct dvb_frontend *fe)
158{ 168{
159 kref_put(&fe->refcount, dvb_frontend_free); 169 /*
170 * Check if the frontend was registered, as otherwise
171 * kref was not initialized yet.
172 */
173 if (fe->frontend_priv)
174 kref_put(&fe->refcount, dvb_frontend_free);
175 else
176 __dvb_frontend_free(fe);
160} 177}
161 178
162static void dvb_frontend_get(struct dvb_frontend *fe) 179static void dvb_frontend_get(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
index 224283fe100a..4d086a7248e9 100644
--- a/drivers/media/dvb-frontends/dib3000mc.c
+++ b/drivers/media/dvb-frontends/dib3000mc.c
@@ -55,29 +55,57 @@ struct dib3000mc_state {
55 55
56static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg) 56static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg)
57{ 57{
58 u8 wb[2] = { (reg >> 8) | 0x80, reg & 0xff };
59 u8 rb[2];
60 struct i2c_msg msg[2] = { 58 struct i2c_msg msg[2] = {
61 { .addr = state->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2 }, 59 { .addr = state->i2c_addr >> 1, .flags = 0, .len = 2 },
62 { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 }, 60 { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .len = 2 },
63 }; 61 };
62 u16 word;
63 u8 *b;
64
65 b = kmalloc(4, GFP_KERNEL);
66 if (!b)
67 return 0;
68
69 b[0] = (reg >> 8) | 0x80;
70 b[1] = reg;
71 b[2] = 0;
72 b[3] = 0;
73
74 msg[0].buf = b;
75 msg[1].buf = b + 2;
64 76
65 if (i2c_transfer(state->i2c_adap, msg, 2) != 2) 77 if (i2c_transfer(state->i2c_adap, msg, 2) != 2)
66 dprintk("i2c read error on %d\n",reg); 78 dprintk("i2c read error on %d\n",reg);
67 79
68 return (rb[0] << 8) | rb[1]; 80 word = (b[2] << 8) | b[3];
81 kfree(b);
82
83 return word;
69} 84}
70 85
71static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val) 86static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val)
72{ 87{
73 u8 b[4] = {
74 (reg >> 8) & 0xff, reg & 0xff,
75 (val >> 8) & 0xff, val & 0xff,
76 };
77 struct i2c_msg msg = { 88 struct i2c_msg msg = {
78 .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4 89 .addr = state->i2c_addr >> 1, .flags = 0, .len = 4
79 }; 90 };
80 return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0; 91 int rc;
92 u8 *b;
93
94 b = kmalloc(4, GFP_KERNEL);
95 if (!b)
96 return -ENOMEM;
97
98 b[0] = reg >> 8;
99 b[1] = reg;
100 b[2] = val >> 8;
101 b[3] = val;
102
103 msg.buf = b;
104
105 rc = i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
106 kfree(b);
107
108 return rc;
81} 109}
82 110
83static int dib3000mc_identify(struct dib3000mc_state *state) 111static int dib3000mc_identify(struct dib3000mc_state *state)
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 7bec3e028bee..5553b89b804e 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -753,13 +753,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
753 struct i2c_adapter *i2c, 753 struct i2c_adapter *i2c,
754 unsigned int pll_desc_id) 754 unsigned int pll_desc_id)
755{ 755{
756 u8 b1 [] = { 0 }; 756 u8 *b1;
757 struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, 757 struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .len = 1 };
758 .buf = b1, .len = 1 };
759 struct dvb_pll_priv *priv = NULL; 758 struct dvb_pll_priv *priv = NULL;
760 int ret; 759 int ret;
761 const struct dvb_pll_desc *desc; 760 const struct dvb_pll_desc *desc;
762 761
762 b1 = kmalloc(1, GFP_KERNEL);
763 if (!b1)
764 return NULL;
765
766 b1[0] = 0;
767 msg.buf = b1;
768
763 if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) && 769 if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
764 (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list))) 770 (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
765 pll_desc_id = id[dvb_pll_devcount]; 771 pll_desc_id = id[dvb_pll_devcount];
@@ -773,15 +779,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
773 fe->ops.i2c_gate_ctrl(fe, 1); 779 fe->ops.i2c_gate_ctrl(fe, 1);
774 780
775 ret = i2c_transfer (i2c, &msg, 1); 781 ret = i2c_transfer (i2c, &msg, 1);
776 if (ret != 1) 782 if (ret != 1) {
783 kfree(b1);
777 return NULL; 784 return NULL;
785 }
778 if (fe->ops.i2c_gate_ctrl) 786 if (fe->ops.i2c_gate_ctrl)
779 fe->ops.i2c_gate_ctrl(fe, 0); 787 fe->ops.i2c_gate_ctrl(fe, 0);
780 } 788 }
781 789
782 priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL); 790 priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
783 if (priv == NULL) 791 if (!priv) {
792 kfree(b1);
784 return NULL; 793 return NULL;
794 }
785 795
786 priv->pll_i2c_address = pll_addr; 796 priv->pll_i2c_address = pll_addr;
787 priv->i2c = i2c; 797 priv->i2c = i2c;
@@ -811,6 +821,8 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
811 "insmod option" : "autodetected"); 821 "insmod option" : "autodetected");
812 } 822 }
813 823
824 kfree(b1);
825
814 return fe; 826 return fe;
815} 827}
816EXPORT_SYMBOL(dvb_pll_attach); 828EXPORT_SYMBOL(dvb_pll_attach);
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 7e7cc49b8674..3c4f7fa7b9d8 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -112,7 +112,7 @@ config VIDEO_PXA27x
112 112
113config VIDEO_QCOM_CAMSS 113config VIDEO_QCOM_CAMSS
114 tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver" 114 tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver"
115 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API 115 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
116 depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST 116 depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
117 select VIDEOBUF2_DMA_SG 117 select VIDEOBUF2_DMA_SG
118 select V4L2_FWNODE 118 select V4L2_FWNODE
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
index b21b3c2dc77f..b22d2dfcd3c2 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
@@ -2660,7 +2660,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
2660 * 2660 *
2661 * Return -EINVAL or zero on success 2661 * Return -EINVAL or zero on success
2662 */ 2662 */
2663int vfe_set_selection(struct v4l2_subdev *sd, 2663static int vfe_set_selection(struct v4l2_subdev *sd,
2664 struct v4l2_subdev_pad_config *cfg, 2664 struct v4l2_subdev_pad_config *cfg,
2665 struct v4l2_subdev_selection *sel) 2665 struct v4l2_subdev_selection *sel)
2666{ 2666{
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 68933d208063..9b2a401a4891 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -682,6 +682,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
682 hfi_session_abort(inst); 682 hfi_session_abort(inst);
683 683
684 load_scale_clocks(core); 684 load_scale_clocks(core);
685 INIT_LIST_HEAD(&inst->registeredbufs);
685 } 686 }
686 687
687 venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR); 688 venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);
diff --git a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
index 1edf667d562a..146ae6f25cdb 100644
--- a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
+++ b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
@@ -172,7 +172,8 @@ u32 s5p_cec_get_status(struct s5p_cec_dev *cec)
172{ 172{
173 u32 status = 0; 173 u32 status = 0;
174 174
175 status = readb(cec->reg + S5P_CEC_STATUS_0); 175 status = readb(cec->reg + S5P_CEC_STATUS_0) & 0xf;
176 status |= (readb(cec->reg + S5P_CEC_TX_STAT1) & 0xf) << 4;
176 status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8; 177 status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8;
177 status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16; 178 status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16;
178 status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24; 179 status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24;
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c
index 58d200e7c838..8837e2678bde 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.c
+++ b/drivers/media/platform/s5p-cec/s5p_cec.c
@@ -92,7 +92,10 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
92 dev_dbg(cec->dev, "irq received\n"); 92 dev_dbg(cec->dev, "irq received\n");
93 93
94 if (status & CEC_STATUS_TX_DONE) { 94 if (status & CEC_STATUS_TX_DONE) {
95 if (status & CEC_STATUS_TX_ERROR) { 95 if (status & CEC_STATUS_TX_NACK) {
96 dev_dbg(cec->dev, "CEC_STATUS_TX_NACK set\n");
97 cec->tx = STATE_NACK;
98 } else if (status & CEC_STATUS_TX_ERROR) {
96 dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n"); 99 dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n");
97 cec->tx = STATE_ERROR; 100 cec->tx = STATE_ERROR;
98 } else { 101 } else {
@@ -135,6 +138,12 @@ static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv)
135 cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0); 138 cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
136 cec->tx = STATE_IDLE; 139 cec->tx = STATE_IDLE;
137 break; 140 break;
141 case STATE_NACK:
142 cec_transmit_done(cec->adap,
143 CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_NACK,
144 0, 1, 0, 0);
145 cec->tx = STATE_IDLE;
146 break;
138 case STATE_ERROR: 147 case STATE_ERROR:
139 cec_transmit_done(cec->adap, 148 cec_transmit_done(cec->adap,
140 CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR, 149 CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR,
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.h b/drivers/media/platform/s5p-cec/s5p_cec.h
index 8bcd8dc1aeb9..86ded522ef27 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.h
+++ b/drivers/media/platform/s5p-cec/s5p_cec.h
@@ -35,6 +35,7 @@
35#define CEC_STATUS_TX_TRANSFERRING (1 << 1) 35#define CEC_STATUS_TX_TRANSFERRING (1 << 1)
36#define CEC_STATUS_TX_DONE (1 << 2) 36#define CEC_STATUS_TX_DONE (1 << 2)
37#define CEC_STATUS_TX_ERROR (1 << 3) 37#define CEC_STATUS_TX_ERROR (1 << 3)
38#define CEC_STATUS_TX_NACK (1 << 4)
38#define CEC_STATUS_TX_BYTES (0xFF << 8) 39#define CEC_STATUS_TX_BYTES (0xFF << 8)
39#define CEC_STATUS_RX_RUNNING (1 << 16) 40#define CEC_STATUS_RX_RUNNING (1 << 16)
40#define CEC_STATUS_RX_RECEIVING (1 << 17) 41#define CEC_STATUS_RX_RECEIVING (1 << 17)
@@ -55,6 +56,7 @@ enum cec_state {
55 STATE_IDLE, 56 STATE_IDLE,
56 STATE_BUSY, 57 STATE_BUSY,
57 STATE_DONE, 58 STATE_DONE,
59 STATE_NACK,
58 STATE_ERROR 60 STATE_ERROR
59}; 61};
60 62
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index 2e487f9a2cc3..4983eeb39f36 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -38,41 +38,74 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
38static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val) 38static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val)
39{ 39{
40 struct i2c_msg msg[2] = { 40 struct i2c_msg msg[2] = {
41 { .addr = priv->cfg->i2c_address, .flags = 0, .buf = &reg, .len = 1 }, 41 { .addr = priv->cfg->i2c_address, .flags = 0, .len = 1 },
42 { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .buf = val, .len = 1 }, 42 { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .len = 1 },
43 }; 43 };
44 int rc = 0;
45 u8 *b;
46
47 b = kmalloc(2, GFP_KERNEL);
48 if (!b)
49 return -ENOMEM;
50
51 b[0] = reg;
52 b[1] = 0;
53
54 msg[0].buf = b;
55 msg[1].buf = b + 1;
44 56
45 if (i2c_transfer(priv->i2c, msg, 2) != 2) { 57 if (i2c_transfer(priv->i2c, msg, 2) != 2) {
46 printk(KERN_WARNING "mt2060 I2C read failed\n"); 58 printk(KERN_WARNING "mt2060 I2C read failed\n");
47 return -EREMOTEIO; 59 rc = -EREMOTEIO;
48 } 60 }
49 return 0; 61 *val = b[1];
62 kfree(b);
63
64 return rc;
50} 65}
51 66
52// Writes a single register 67// Writes a single register
53static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val) 68static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val)
54{ 69{
55 u8 buf[2] = { reg, val };
56 struct i2c_msg msg = { 70 struct i2c_msg msg = {
57 .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2 71 .addr = priv->cfg->i2c_address, .flags = 0, .len = 2
58 }; 72 };
73 u8 *buf;
74 int rc = 0;
75
76 buf = kmalloc(2, GFP_KERNEL);
77 if (!buf)
78 return -ENOMEM;
79
80 buf[0] = reg;
81 buf[1] = val;
82
83 msg.buf = buf;
59 84
60 if (i2c_transfer(priv->i2c, &msg, 1) != 1) { 85 if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
61 printk(KERN_WARNING "mt2060 I2C write failed\n"); 86 printk(KERN_WARNING "mt2060 I2C write failed\n");
62 return -EREMOTEIO; 87 rc = -EREMOTEIO;
63 } 88 }
64 return 0; 89 kfree(buf);
90 return rc;
65} 91}
66 92
67// Writes a set of consecutive registers 93// Writes a set of consecutive registers
68static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len) 94static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
69{ 95{
70 int rem, val_len; 96 int rem, val_len;
71 u8 xfer_buf[16]; 97 u8 *xfer_buf;
98 int rc = 0;
72 struct i2c_msg msg = { 99 struct i2c_msg msg = {
73 .addr = priv->cfg->i2c_address, .flags = 0, .buf = xfer_buf 100 .addr = priv->cfg->i2c_address, .flags = 0
74 }; 101 };
75 102
103 xfer_buf = kmalloc(16, GFP_KERNEL);
104 if (!xfer_buf)
105 return -ENOMEM;
106
107 msg.buf = xfer_buf;
108
76 for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) { 109 for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) {
77 val_len = min_t(int, rem, priv->i2c_max_regs); 110 val_len = min_t(int, rem, priv->i2c_max_regs);
78 msg.len = 1 + val_len; 111 msg.len = 1 + val_len;
@@ -81,11 +114,13 @@ static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
81 114
82 if (i2c_transfer(priv->i2c, &msg, 1) != 1) { 115 if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
83 printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len); 116 printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len);
84 return -EREMOTEIO; 117 rc = -EREMOTEIO;
118 break;
85 } 119 }
86 } 120 }
87 121
88 return 0; 122 kfree(xfer_buf);
123 return rc;
89} 124}
90 125
91// Initialisation sequences 126// Initialisation sequences
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index c8307e8b4c16..0ccccbaf530d 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -127,6 +127,8 @@
127#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ 127#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
128#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ 128#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
129 129
130#define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */
131
130#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ 132#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
131#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ 133#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
132 134
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 4ff40d319676..78b3172c8e6e 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -93,6 +93,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
93 {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, 93 {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
94 {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, 94 {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
95 95
96 {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
97
96 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, 98 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
97 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, 99 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
98 100
@@ -226,12 +228,15 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
226 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; 228 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
227 229
228 /* 230 /*
229 * For not wake-able HW runtime pm framework 231 * ME maps runtime suspend/resume to D0i states,
230 * can't be used on pci device level. 232 * hence we need to go around native PCI runtime service which
231 * Use domain runtime pm callbacks instead. 233 * eventually brings the device into D3cold/hot state,
232 */ 234 * but the mei device cannot wake up from D3 unlike from D0i3.
233 if (!pci_dev_run_wake(pdev)) 235 * To get around the PCI device native runtime pm,
234 mei_me_set_pm_domain(dev); 236 * ME uses runtime pm domain handlers which take precedence
237 * over the driver's pm handlers.
238 */
239 mei_me_set_pm_domain(dev);
235 240
236 if (mei_pg_is_enabled(dev)) 241 if (mei_pg_is_enabled(dev))
237 pm_runtime_put_noidle(&pdev->dev); 242 pm_runtime_put_noidle(&pdev->dev);
@@ -271,8 +276,7 @@ static void mei_me_shutdown(struct pci_dev *pdev)
271 dev_dbg(&pdev->dev, "shutdown\n"); 276 dev_dbg(&pdev->dev, "shutdown\n");
272 mei_stop(dev); 277 mei_stop(dev);
273 278
274 if (!pci_dev_run_wake(pdev)) 279 mei_me_unset_pm_domain(dev);
275 mei_me_unset_pm_domain(dev);
276 280
277 mei_disable_interrupts(dev); 281 mei_disable_interrupts(dev);
278 free_irq(pdev->irq, dev); 282 free_irq(pdev->irq, dev);
@@ -300,8 +304,7 @@ static void mei_me_remove(struct pci_dev *pdev)
300 dev_dbg(&pdev->dev, "stop\n"); 304 dev_dbg(&pdev->dev, "stop\n");
301 mei_stop(dev); 305 mei_stop(dev);
302 306
303 if (!pci_dev_run_wake(pdev)) 307 mei_me_unset_pm_domain(dev);
304 mei_me_unset_pm_domain(dev);
305 308
306 mei_disable_interrupts(dev); 309 mei_disable_interrupts(dev);
307 310
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index e38a5f144373..0566f9bfa7de 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -144,12 +144,14 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
144 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; 144 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
145 145
146 /* 146 /*
147 * For not wake-able HW runtime pm framework 147 * TXE maps runtime suspend/resume to own power gating states,
148 * can't be used on pci device level. 148 * hence we need to go around native PCI runtime service which
149 * Use domain runtime pm callbacks instead. 149 * eventually brings the device into D3cold/hot state.
150 */ 150 * But the TXE device cannot wake up from D3 unlike from own
151 if (!pci_dev_run_wake(pdev)) 151 * power gating. To get around PCI device native runtime pm,
152 mei_txe_set_pm_domain(dev); 152 * TXE uses runtime pm domain handlers which take precedence.
153 */
154 mei_txe_set_pm_domain(dev);
153 155
154 pm_runtime_put_noidle(&pdev->dev); 156 pm_runtime_put_noidle(&pdev->dev);
155 157
@@ -186,8 +188,7 @@ static void mei_txe_shutdown(struct pci_dev *pdev)
186 dev_dbg(&pdev->dev, "shutdown\n"); 188 dev_dbg(&pdev->dev, "shutdown\n");
187 mei_stop(dev); 189 mei_stop(dev);
188 190
189 if (!pci_dev_run_wake(pdev)) 191 mei_txe_unset_pm_domain(dev);
190 mei_txe_unset_pm_domain(dev);
191 192
192 mei_disable_interrupts(dev); 193 mei_disable_interrupts(dev);
193 free_irq(pdev->irq, dev); 194 free_irq(pdev->irq, dev);
@@ -215,8 +216,7 @@ static void mei_txe_remove(struct pci_dev *pdev)
215 216
216 mei_stop(dev); 217 mei_stop(dev);
217 218
218 if (!pci_dev_run_wake(pdev)) 219 mei_txe_unset_pm_domain(dev);
219 mei_txe_unset_pm_domain(dev);
220 220
221 mei_disable_interrupts(dev); 221 mei_disable_interrupts(dev);
222 free_irq(pdev->irq, dev); 222 free_irq(pdev->irq, dev);
@@ -318,15 +318,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
318 else 318 else
319 ret = -EAGAIN; 319 ret = -EAGAIN;
320 320
321 /* 321 /* keep irq on we are staying in D0 */
322 * If everything is okay we're about to enter PCI low
323 * power state (D3) therefor we need to disable the
324 * interrupts towards host.
325 * However if device is not wakeable we do not enter
326 * D-low state and we need to keep the interrupt kicking
327 */
328 if (!ret && pci_dev_run_wake(pdev))
329 mei_disable_interrupts(dev);
330 322
331 dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); 323 dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);
332 324
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index d0ccc6729fd2..67d787fa3306 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -448,6 +448,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
448 int err; 448 int err;
449 u32 val; 449 u32 val;
450 450
451 intel_host->d3_retune = true;
452
451 err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); 453 err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
452 if (err) { 454 if (err) {
453 pr_debug("%s: DSM not supported, error %d\n", 455 pr_debug("%s: DSM not supported, error %d\n",
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 13f0f219d8aa..a13a4896a8bd 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -182,22 +182,23 @@
182/* FLEXCAN hardware feature flags 182/* FLEXCAN hardware feature flags
183 * 183 *
184 * Below is some version info we got: 184 * Below is some version info we got:
185 * SOC Version IP-Version Glitch- [TR]WRN_INT Memory err RTR re- 185 * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
186 * Filter? connected? detection ception in MB 186 * Filter? connected? Passive detection ception in MB
187 * MX25 FlexCAN2 03.00.00.00 no no no no 187 * MX25 FlexCAN2 03.00.00.00 no no ? no no
188 * MX28 FlexCAN2 03.00.04.00 yes yes no no 188 * MX28 FlexCAN2 03.00.04.00 yes yes no no no
189 * MX35 FlexCAN2 03.00.00.00 no no no no 189 * MX35 FlexCAN2 03.00.00.00 no no ? no no
190 * MX53 FlexCAN2 03.00.00.00 yes no no no 190 * MX53 FlexCAN2 03.00.00.00 yes no no no no
191 * MX6s FlexCAN3 10.00.12.00 yes yes no yes 191 * MX6s FlexCAN3 10.00.12.00 yes yes no no yes
192 * VF610 FlexCAN3 ? no yes yes yes? 192 * VF610 FlexCAN3 ? no yes ? yes yes?
193 * 193 *
194 * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. 194 * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
195 */ 195 */
196#define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */ 196#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1) /* [TR]WRN_INT not connected */
197#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ 197#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */
198#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ 198#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
199#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */ 199#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */
200#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ 200#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */
201#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */
201 202
202/* Structure of the message buffer */ 203/* Structure of the message buffer */
203struct flexcan_mb { 204struct flexcan_mb {
@@ -281,14 +282,17 @@ struct flexcan_priv {
281}; 282};
282 283
283static const struct flexcan_devtype_data fsl_p1010_devtype_data = { 284static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
284 .quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE, 285 .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
286 FLEXCAN_QUIRK_BROKEN_PERR_STATE,
285}; 287};
286 288
287static const struct flexcan_devtype_data fsl_imx28_devtype_data; 289static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
290 .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE,
291};
288 292
289static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { 293static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
290 .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | 294 .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
291 FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, 295 FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE,
292}; 296};
293 297
294static const struct flexcan_devtype_data fsl_vf610_devtype_data = { 298static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
@@ -335,6 +339,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
335} 339}
336#endif 340#endif
337 341
342static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
343{
344 struct flexcan_regs __iomem *regs = priv->regs;
345 u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
346
347 flexcan_write(reg_ctrl, &regs->ctrl);
348}
349
350static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
351{
352 struct flexcan_regs __iomem *regs = priv->regs;
353 u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
354
355 flexcan_write(reg_ctrl, &regs->ctrl);
356}
357
338static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) 358static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
339{ 359{
340 if (!priv->reg_xceiver) 360 if (!priv->reg_xceiver)
@@ -713,6 +733,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
713 struct flexcan_regs __iomem *regs = priv->regs; 733 struct flexcan_regs __iomem *regs = priv->regs;
714 irqreturn_t handled = IRQ_NONE; 734 irqreturn_t handled = IRQ_NONE;
715 u32 reg_iflag1, reg_esr; 735 u32 reg_iflag1, reg_esr;
736 enum can_state last_state = priv->can.state;
716 737
717 reg_iflag1 = flexcan_read(&regs->iflag1); 738 reg_iflag1 = flexcan_read(&regs->iflag1);
718 739
@@ -765,8 +786,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
765 flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr); 786 flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
766 } 787 }
767 788
768 /* state change interrupt */ 789 /* state change interrupt or broken error state quirk fix is enabled */
769 if (reg_esr & FLEXCAN_ESR_ERR_STATE) 790 if ((reg_esr & FLEXCAN_ESR_ERR_STATE) ||
791 (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
792 FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
770 flexcan_irq_state(dev, reg_esr); 793 flexcan_irq_state(dev, reg_esr);
771 794
772 /* bus error IRQ - handle if bus error reporting is activated */ 795 /* bus error IRQ - handle if bus error reporting is activated */
@@ -774,6 +797,44 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
774 (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) 797 (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
775 flexcan_irq_bus_err(dev, reg_esr); 798 flexcan_irq_bus_err(dev, reg_esr);
776 799
800 /* availability of error interrupt among state transitions in case
801 * bus error reporting is de-activated and
802 * FLEXCAN_QUIRK_BROKEN_PERR_STATE is enabled:
803 * +--------------------------------------------------------------+
804 * | +----------------------------------------------+ [stopped / |
805 * | | | sleeping] -+
806 * +-+-> active <-> warning <-> passive -> bus off -+
807 * ___________^^^^^^^^^^^^_______________________________
808 * disabled(1) enabled disabled
809 *
810 * (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled
811 */
812 if ((last_state != priv->can.state) &&
813 (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) &&
814 !(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
815 switch (priv->can.state) {
816 case CAN_STATE_ERROR_ACTIVE:
817 if (priv->devtype_data->quirks &
818 FLEXCAN_QUIRK_BROKEN_WERR_STATE)
819 flexcan_error_irq_enable(priv);
820 else
821 flexcan_error_irq_disable(priv);
822 break;
823
824 case CAN_STATE_ERROR_WARNING:
825 flexcan_error_irq_enable(priv);
826 break;
827
828 case CAN_STATE_ERROR_PASSIVE:
829 case CAN_STATE_BUS_OFF:
830 flexcan_error_irq_disable(priv);
831 break;
832
833 default:
834 break;
835 }
836 }
837
777 return handled; 838 return handled;
778} 839}
779 840
@@ -887,7 +948,7 @@ static int flexcan_chip_start(struct net_device *dev)
887 * on most Flexcan cores, too. Otherwise we don't get 948 * on most Flexcan cores, too. Otherwise we don't get
888 * any error warning or passive interrupts. 949 * any error warning or passive interrupts.
889 */ 950 */
890 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_ERR_STATE || 951 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE ||
891 priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) 952 priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
892 reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; 953 reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
893 else 954 else
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index be928ce62d32..9fdb0f0bfa06 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -333,7 +333,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
333 } 333 }
334 334
335 cf->can_id = id & ESD_IDMASK; 335 cf->can_id = id & ESD_IDMASK;
336 cf->can_dlc = get_can_dlc(msg->msg.rx.dlc); 336 cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
337 337
338 if (id & ESD_EXTID) 338 if (id & ESD_EXTID)
339 cf->can_id |= CAN_EFF_FLAG; 339 cf->can_id |= CAN_EFF_FLAG;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index afcc1312dbaf..68ac3e88a8ce 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -375,6 +375,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
375 375
376 gs_free_tx_context(txc); 376 gs_free_tx_context(txc);
377 377
378 atomic_dec(&dev->active_tx_urbs);
379
378 netif_wake_queue(netdev); 380 netif_wake_queue(netdev);
379 } 381 }
380 382
@@ -463,14 +465,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
463 urb->transfer_buffer_length, 465 urb->transfer_buffer_length,
464 urb->transfer_buffer, 466 urb->transfer_buffer,
465 urb->transfer_dma); 467 urb->transfer_dma);
466
467 atomic_dec(&dev->active_tx_urbs);
468
469 if (!netif_device_present(netdev))
470 return;
471
472 if (netif_queue_stopped(netdev))
473 netif_wake_queue(netdev);
474} 468}
475 469
476static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, 470static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 0d97311a1b26..060cb18fa659 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -743,8 +743,8 @@ static void ena_get_channels(struct net_device *netdev,
743{ 743{
744 struct ena_adapter *adapter = netdev_priv(netdev); 744 struct ena_adapter *adapter = netdev_priv(netdev);
745 745
746 channels->max_rx = ENA_MAX_NUM_IO_QUEUES; 746 channels->max_rx = adapter->num_queues;
747 channels->max_tx = ENA_MAX_NUM_IO_QUEUES; 747 channels->max_tx = adapter->num_queues;
748 channels->max_other = 0; 748 channels->max_other = 0;
749 channels->max_combined = 0; 749 channels->max_combined = 0;
750 channels->rx_count = adapter->num_queues; 750 channels->rx_count = adapter->num_queues;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 47bdbf9bdefb..5417e4da64ca 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -966,7 +966,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
966 u64_stats_update_begin(&rx_ring->syncp); 966 u64_stats_update_begin(&rx_ring->syncp);
967 rx_ring->rx_stats.bad_csum++; 967 rx_ring->rx_stats.bad_csum++;
968 u64_stats_update_end(&rx_ring->syncp); 968 u64_stats_update_end(&rx_ring->syncp);
969 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, 969 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
970 "RX IPv4 header checksum error\n"); 970 "RX IPv4 header checksum error\n");
971 return; 971 return;
972 } 972 }
@@ -979,7 +979,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
979 u64_stats_update_begin(&rx_ring->syncp); 979 u64_stats_update_begin(&rx_ring->syncp);
980 rx_ring->rx_stats.bad_csum++; 980 rx_ring->rx_stats.bad_csum++;
981 u64_stats_update_end(&rx_ring->syncp); 981 u64_stats_update_end(&rx_ring->syncp);
982 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, 982 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
983 "RX L4 checksum error\n"); 983 "RX L4 checksum error\n");
984 skb->ip_summed = CHECKSUM_NONE; 984 skb->ip_summed = CHECKSUM_NONE;
985 return; 985 return;
@@ -3051,7 +3051,8 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3051 if (ena_dev->mem_bar) 3051 if (ena_dev->mem_bar)
3052 devm_iounmap(&pdev->dev, ena_dev->mem_bar); 3052 devm_iounmap(&pdev->dev, ena_dev->mem_bar);
3053 3053
3054 devm_iounmap(&pdev->dev, ena_dev->reg_bar); 3054 if (ena_dev->reg_bar)
3055 devm_iounmap(&pdev->dev, ena_dev->reg_bar);
3055 3056
3056 release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; 3057 release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3057 pci_release_selected_regions(pdev, release_bars); 3058 pci_release_selected_regions(pdev, release_bars);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 0fdaaa643073..57e796870595 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -22,8 +22,12 @@
22 22
23#define AQ_CFG_FORCE_LEGACY_INT 0U 23#define AQ_CFG_FORCE_LEGACY_INT 0U
24 24
25#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF 1U 25#define AQ_CFG_INTERRUPT_MODERATION_OFF 0
26#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU 26#define AQ_CFG_INTERRUPT_MODERATION_ON 1
27#define AQ_CFG_INTERRUPT_MODERATION_AUTO 0xFFFFU
28
29#define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2)
30
27#define AQ_CFG_IRQ_MASK 0x1FFU 31#define AQ_CFG_IRQ_MASK 0x1FFU
28 32
29#define AQ_CFG_VECS_MAX 8U 33#define AQ_CFG_VECS_MAX 8U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a761e91471df..d5e99b468870 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -56,10 +56,6 @@ aq_ethtool_set_link_ksettings(struct net_device *ndev,
56 return aq_nic_set_link_ksettings(aq_nic, cmd); 56 return aq_nic_set_link_ksettings(aq_nic, cmd);
57} 57}
58 58
59/* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */
60static const unsigned int aq_ethtool_stat_queue_lines = 5U;
61static const unsigned int aq_ethtool_stat_queue_chars =
62 5U * ETH_GSTRING_LEN;
63static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { 59static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
64 "InPackets", 60 "InPackets",
65 "InUCast", 61 "InUCast",
@@ -83,56 +79,26 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
83 "InOctetsDma", 79 "InOctetsDma",
84 "OutOctetsDma", 80 "OutOctetsDma",
85 "InDroppedDma", 81 "InDroppedDma",
86 "Queue[0] InPackets", 82};
87 "Queue[0] OutPackets", 83
88 "Queue[0] InJumboPackets", 84static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
89 "Queue[0] InLroPackets", 85 "Queue[%d] InPackets",
90 "Queue[0] InErrors", 86 "Queue[%d] OutPackets",
91 "Queue[1] InPackets", 87 "Queue[%d] Restarts",
92 "Queue[1] OutPackets", 88 "Queue[%d] InJumboPackets",
93 "Queue[1] InJumboPackets", 89 "Queue[%d] InLroPackets",
94 "Queue[1] InLroPackets", 90 "Queue[%d] InErrors",
95 "Queue[1] InErrors",
96 "Queue[2] InPackets",
97 "Queue[2] OutPackets",
98 "Queue[2] InJumboPackets",
99 "Queue[2] InLroPackets",
100 "Queue[2] InErrors",
101 "Queue[3] InPackets",
102 "Queue[3] OutPackets",
103 "Queue[3] InJumboPackets",
104 "Queue[3] InLroPackets",
105 "Queue[3] InErrors",
106 "Queue[4] InPackets",
107 "Queue[4] OutPackets",
108 "Queue[4] InJumboPackets",
109 "Queue[4] InLroPackets",
110 "Queue[4] InErrors",
111 "Queue[5] InPackets",
112 "Queue[5] OutPackets",
113 "Queue[5] InJumboPackets",
114 "Queue[5] InLroPackets",
115 "Queue[5] InErrors",
116 "Queue[6] InPackets",
117 "Queue[6] OutPackets",
118 "Queue[6] InJumboPackets",
119 "Queue[6] InLroPackets",
120 "Queue[6] InErrors",
121 "Queue[7] InPackets",
122 "Queue[7] OutPackets",
123 "Queue[7] InJumboPackets",
124 "Queue[7] InLroPackets",
125 "Queue[7] InErrors",
126}; 91};
127 92
128static void aq_ethtool_stats(struct net_device *ndev, 93static void aq_ethtool_stats(struct net_device *ndev,
129 struct ethtool_stats *stats, u64 *data) 94 struct ethtool_stats *stats, u64 *data)
130{ 95{
131 struct aq_nic_s *aq_nic = netdev_priv(ndev); 96 struct aq_nic_s *aq_nic = netdev_priv(ndev);
97 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
132 98
133/* ASSERT: Need add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */ 99 memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
134 BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8); 100 ARRAY_SIZE(aq_ethtool_queue_stat_names) *
135 memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64)); 101 cfg->vecs) * sizeof(u64));
136 aq_nic_get_stats(aq_nic, data); 102 aq_nic_get_stats(aq_nic, data);
137} 103}
138 104
@@ -154,8 +120,8 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
154 120
155 strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "", 121 strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
156 sizeof(drvinfo->bus_info)); 122 sizeof(drvinfo->bus_info));
157 drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) - 123 drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
158 (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines; 124 cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
159 drvinfo->testinfo_len = 0; 125 drvinfo->testinfo_len = 0;
160 drvinfo->regdump_len = regs_count; 126 drvinfo->regdump_len = regs_count;
161 drvinfo->eedump_len = 0; 127 drvinfo->eedump_len = 0;
@@ -164,14 +130,25 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
164static void aq_ethtool_get_strings(struct net_device *ndev, 130static void aq_ethtool_get_strings(struct net_device *ndev,
165 u32 stringset, u8 *data) 131 u32 stringset, u8 *data)
166{ 132{
133 int i, si;
167 struct aq_nic_s *aq_nic = netdev_priv(ndev); 134 struct aq_nic_s *aq_nic = netdev_priv(ndev);
168 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); 135 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
169 136 u8 *p = data;
170 if (stringset == ETH_SS_STATS) 137
171 memcpy(data, *aq_ethtool_stat_names, 138 if (stringset == ETH_SS_STATS) {
172 sizeof(aq_ethtool_stat_names) - 139 memcpy(p, *aq_ethtool_stat_names,
173 (AQ_CFG_VECS_MAX - cfg->vecs) * 140 sizeof(aq_ethtool_stat_names));
174 aq_ethtool_stat_queue_chars); 141 p = p + sizeof(aq_ethtool_stat_names);
142 for (i = 0; i < cfg->vecs; i++) {
143 for (si = 0;
144 si < ARRAY_SIZE(aq_ethtool_queue_stat_names);
145 si++) {
146 snprintf(p, ETH_GSTRING_LEN,
147 aq_ethtool_queue_stat_names[si], i);
148 p += ETH_GSTRING_LEN;
149 }
150 }
151 }
175} 152}
176 153
177static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) 154static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
@@ -182,9 +159,8 @@ static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
182 159
183 switch (stringset) { 160 switch (stringset) {
184 case ETH_SS_STATS: 161 case ETH_SS_STATS:
185 ret = ARRAY_SIZE(aq_ethtool_stat_names) - 162 ret = ARRAY_SIZE(aq_ethtool_stat_names) +
186 (AQ_CFG_VECS_MAX - cfg->vecs) * 163 cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
187 aq_ethtool_stat_queue_lines;
188 break; 164 break;
189 default: 165 default:
190 ret = -EOPNOTSUPP; 166 ret = -EOPNOTSUPP;
@@ -245,6 +221,69 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
245 return err; 221 return err;
246} 222}
247 223
224int aq_ethtool_get_coalesce(struct net_device *ndev,
225 struct ethtool_coalesce *coal)
226{
227 struct aq_nic_s *aq_nic = netdev_priv(ndev);
228 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
229
230 if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON ||
231 cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) {
232 coal->rx_coalesce_usecs = cfg->rx_itr;
233 coal->tx_coalesce_usecs = cfg->tx_itr;
234 coal->rx_max_coalesced_frames = 0;
235 coal->tx_max_coalesced_frames = 0;
236 } else {
237 coal->rx_coalesce_usecs = 0;
238 coal->tx_coalesce_usecs = 0;
239 coal->rx_max_coalesced_frames = 1;
240 coal->tx_max_coalesced_frames = 1;
241 }
242 return 0;
243}
244
245int aq_ethtool_set_coalesce(struct net_device *ndev,
246 struct ethtool_coalesce *coal)
247{
248 struct aq_nic_s *aq_nic = netdev_priv(ndev);
249 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
250
251 /* This is not yet supported
252 */
253 if (coal->use_adaptive_rx_coalesce || coal->use_adaptive_tx_coalesce)
254 return -EOPNOTSUPP;
255
256 /* Atlantic only supports timing based coalescing
257 */
258 if (coal->rx_max_coalesced_frames > 1 ||
259 coal->rx_coalesce_usecs_irq ||
260 coal->rx_max_coalesced_frames_irq)
261 return -EOPNOTSUPP;
262
263 if (coal->tx_max_coalesced_frames > 1 ||
264 coal->tx_coalesce_usecs_irq ||
265 coal->tx_max_coalesced_frames_irq)
266 return -EOPNOTSUPP;
267
268 /* We do not support frame counting. Check this
269 */
270 if (!(coal->rx_max_coalesced_frames == !coal->rx_coalesce_usecs))
271 return -EOPNOTSUPP;
272 if (!(coal->tx_max_coalesced_frames == !coal->tx_coalesce_usecs))
273 return -EOPNOTSUPP;
274
275 if (coal->rx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX ||
276 coal->tx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX)
277 return -EINVAL;
278
279 cfg->itr = AQ_CFG_INTERRUPT_MODERATION_ON;
280
281 cfg->rx_itr = coal->rx_coalesce_usecs;
282 cfg->tx_itr = coal->tx_coalesce_usecs;
283
284 return aq_nic_update_interrupt_moderation_settings(aq_nic);
285}
286
248const struct ethtool_ops aq_ethtool_ops = { 287const struct ethtool_ops aq_ethtool_ops = {
249 .get_link = aq_ethtool_get_link, 288 .get_link = aq_ethtool_get_link,
250 .get_regs_len = aq_ethtool_get_regs_len, 289 .get_regs_len = aq_ethtool_get_regs_len,
@@ -259,4 +298,6 @@ const struct ethtool_ops aq_ethtool_ops = {
259 .get_ethtool_stats = aq_ethtool_stats, 298 .get_ethtool_stats = aq_ethtool_stats,
260 .get_link_ksettings = aq_ethtool_get_link_ksettings, 299 .get_link_ksettings = aq_ethtool_get_link_ksettings,
261 .set_link_ksettings = aq_ethtool_set_link_ksettings, 300 .set_link_ksettings = aq_ethtool_set_link_ksettings,
301 .get_coalesce = aq_ethtool_get_coalesce,
302 .set_coalesce = aq_ethtool_set_coalesce,
262}; 303};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index bf9b3f020e10..0207927dc8a6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -151,8 +151,7 @@ struct aq_hw_ops {
151 [ETH_ALEN], 151 [ETH_ALEN],
152 u32 count); 152 u32 count);
153 153
154 int (*hw_interrupt_moderation_set)(struct aq_hw_s *self, 154 int (*hw_interrupt_moderation_set)(struct aq_hw_s *self);
155 bool itr_enabled);
156 155
157 int (*hw_rss_set)(struct aq_hw_s *self, 156 int (*hw_rss_set)(struct aq_hw_s *self,
158 struct aq_rss_parameters *rss_params); 157 struct aq_rss_parameters *rss_params);
@@ -163,6 +162,8 @@ struct aq_hw_ops {
163 int (*hw_get_regs)(struct aq_hw_s *self, 162 int (*hw_get_regs)(struct aq_hw_s *self,
164 struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff); 163 struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
165 164
165 int (*hw_update_stats)(struct aq_hw_s *self);
166
166 int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, 167 int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
167 unsigned int *p_count); 168 unsigned int *p_count);
168 169
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 0a5bb4114eb4..483e97691eea 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -16,6 +16,7 @@
16#include "aq_pci_func.h" 16#include "aq_pci_func.h"
17#include "aq_nic_internal.h" 17#include "aq_nic_internal.h"
18 18
19#include <linux/moduleparam.h>
19#include <linux/netdevice.h> 20#include <linux/netdevice.h>
20#include <linux/etherdevice.h> 21#include <linux/etherdevice.h>
21#include <linux/timer.h> 22#include <linux/timer.h>
@@ -24,6 +25,18 @@
24#include <linux/tcp.h> 25#include <linux/tcp.h>
25#include <net/ip.h> 26#include <net/ip.h>
26 27
28static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
29module_param_named(aq_itr, aq_itr, uint, 0644);
30MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");
31
32static unsigned int aq_itr_tx;
33module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
34MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");
35
36static unsigned int aq_itr_rx;
37module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
38MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
39
27static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) 40static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
28{ 41{
29 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; 42 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -61,9 +74,9 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
61 74
62 cfg->is_polling = AQ_CFG_IS_POLLING_DEF; 75 cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
63 76
64 cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF; 77 cfg->itr = aq_itr;
65 cfg->itr = cfg->is_interrupt_moderation ? 78 cfg->tx_itr = aq_itr_tx;
66 AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U; 79 cfg->rx_itr = aq_itr_rx;
67 80
68 cfg->is_rss = AQ_CFG_IS_RSS_DEF; 81 cfg->is_rss = AQ_CFG_IS_RSS_DEF;
69 cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF; 82 cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
@@ -126,10 +139,12 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
126 if (err) 139 if (err)
127 return err; 140 return err;
128 141
129 if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) 142 if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
130 pr_info("%s: link change old %d new %d\n", 143 pr_info("%s: link change old %d new %d\n",
131 AQ_CFG_DRV_NAME, self->link_status.mbps, 144 AQ_CFG_DRV_NAME, self->link_status.mbps,
132 self->aq_hw->aq_link_status.mbps); 145 self->aq_hw->aq_link_status.mbps);
146 aq_nic_update_interrupt_moderation_settings(self);
147 }
133 148
134 self->link_status = self->aq_hw->aq_link_status; 149 self->link_status = self->aq_hw->aq_link_status;
135 if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) { 150 if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
@@ -164,8 +179,8 @@ static void aq_nic_service_timer_cb(unsigned long param)
164 if (err) 179 if (err)
165 goto err_exit; 180 goto err_exit;
166 181
167 self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, 182 if (self->aq_hw_ops.hw_update_stats)
168 self->aq_nic_cfg.is_interrupt_moderation); 183 self->aq_hw_ops.hw_update_stats(self->aq_hw);
169 184
170 memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); 185 memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
171 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); 186 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
@@ -334,6 +349,7 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
334 } 349 }
335 if (netif_running(ndev)) 350 if (netif_running(ndev))
336 netif_tx_disable(ndev); 351 netif_tx_disable(ndev);
352 netif_carrier_off(self->ndev);
337 353
338 for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; 354 for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
339 self->aq_vecs++) { 355 self->aq_vecs++) {
@@ -421,9 +437,8 @@ int aq_nic_start(struct aq_nic_s *self)
421 if (err < 0) 437 if (err < 0)
422 goto err_exit; 438 goto err_exit;
423 439
424 err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, 440 err = aq_nic_update_interrupt_moderation_settings(self);
425 self->aq_nic_cfg.is_interrupt_moderation); 441 if (err)
426 if (err < 0)
427 goto err_exit; 442 goto err_exit;
428 setup_timer(&self->service_timer, &aq_nic_service_timer_cb, 443 setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
429 (unsigned long)self); 444 (unsigned long)self);
@@ -645,6 +660,11 @@ err_exit:
645 return err; 660 return err;
646} 661}
647 662
663int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
664{
665 return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw);
666}
667
648int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags) 668int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
649{ 669{
650 int err = 0; 670 int err = 0;
@@ -899,6 +919,7 @@ int aq_nic_stop(struct aq_nic_s *self)
899 unsigned int i = 0U; 919 unsigned int i = 0U;
900 920
901 netif_tx_disable(self->ndev); 921 netif_tx_disable(self->ndev);
922 netif_carrier_off(self->ndev);
902 923
903 del_timer_sync(&self->service_timer); 924 del_timer_sync(&self->service_timer);
904 925
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 0ddd556ff901..4309983acdd6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -40,6 +40,8 @@ struct aq_nic_cfg_s {
40 u32 vecs; /* vecs==allocated irqs */ 40 u32 vecs; /* vecs==allocated irqs */
41 u32 irq_type; 41 u32 irq_type;
42 u32 itr; 42 u32 itr;
43 u16 rx_itr;
44 u16 tx_itr;
43 u32 num_rss_queues; 45 u32 num_rss_queues;
44 u32 mtu; 46 u32 mtu;
45 u32 ucp_0x364; 47 u32 ucp_0x364;
@@ -49,7 +51,6 @@ struct aq_nic_cfg_s {
49 u16 is_mc_list_enabled; 51 u16 is_mc_list_enabled;
50 u16 mc_list_count; 52 u16 mc_list_count;
51 bool is_autoneg; 53 bool is_autoneg;
52 bool is_interrupt_moderation;
53 bool is_polling; 54 bool is_polling;
54 bool is_rss; 55 bool is_rss;
55 bool is_lro; 56 bool is_lro;
@@ -104,5 +105,6 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
104struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self); 105struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
105u32 aq_nic_get_fw_version(struct aq_nic_s *self); 106u32 aq_nic_get_fw_version(struct aq_nic_s *self);
106int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); 107int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
108int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
107 109
108#endif /* AQ_NIC_H */ 110#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 4c6c882c6a1c..cadaa646c89f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -85,6 +85,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
85 int err = 0; 85 int err = 0;
86 unsigned int bar = 0U; 86 unsigned int bar = 0U;
87 unsigned int port = 0U; 87 unsigned int port = 0U;
88 unsigned int numvecs = 0U;
88 89
89 err = pci_enable_device(self->pdev); 90 err = pci_enable_device(self->pdev);
90 if (err < 0) 91 if (err < 0)
@@ -142,10 +143,12 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
142 } 143 }
143 } 144 }
144 145
145 /*enable interrupts */ 146 numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs);
147 numvecs = min(numvecs, num_online_cpus());
148
149 /* enable interrupts */
146#if !AQ_CFG_FORCE_LEGACY_INT 150#if !AQ_CFG_FORCE_LEGACY_INT
147 err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs, 151 err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX);
148 self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX);
149 152
150 if (err < 0) { 153 if (err < 0) {
151 err = pci_alloc_irq_vectors(self->pdev, 1, 1, 154 err = pci_alloc_irq_vectors(self->pdev, 1, 1,
@@ -153,7 +156,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
153 if (err < 0) 156 if (err < 0)
154 goto err_exit; 157 goto err_exit;
155 } 158 }
156#endif 159#endif /* AQ_CFG_FORCE_LEGACY_INT */
157 160
158 /* net device init */ 161 /* net device init */
159 for (port = 0; port < self->ports; ++port) { 162 for (port = 0; port < self->ports; ++port) {
@@ -265,6 +268,9 @@ void aq_pci_func_free(struct aq_pci_func_s *self)
265 aq_nic_ndev_free(self->port[port]); 268 aq_nic_ndev_free(self->port[port]);
266 } 269 }
267 270
271 if (self->mmio)
272 iounmap(self->mmio);
273
268 kfree(self); 274 kfree(self);
269 275
270err_exit:; 276err_exit:;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 305ff8ffac2c..5fecc9a099ef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -373,8 +373,11 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
373 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); 373 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
374 aq_vec_add_stats(self, &stats_rx, &stats_tx); 374 aq_vec_add_stats(self, &stats_rx, &stats_tx);
375 375
376 /* This data should mimic aq_ethtool_queue_stat_names structure
377 */
376 data[count] += stats_rx.packets; 378 data[count] += stats_rx.packets;
377 data[++count] += stats_tx.packets; 379 data[++count] += stats_tx.packets;
380 data[++count] += stats_tx.queue_restarts;
378 data[++count] += stats_rx.jumbo_packets; 381 data[++count] += stats_rx.jumbo_packets;
379 data[++count] += stats_rx.lro_packets; 382 data[++count] += stats_rx.lro_packets;
380 data[++count] += stats_rx.errors; 383 data[++count] += stats_rx.errors;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index c5a02df7a48b..07b3c49a16a4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -765,24 +765,23 @@ err_exit:
765 return err; 765 return err;
766} 766}
767 767
768static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self, 768static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
769 bool itr_enabled)
770{ 769{
771 unsigned int i = 0U; 770 unsigned int i = 0U;
771 u32 itr_rx;
772 772
773 if (itr_enabled && self->aq_nic_cfg->itr) { 773 if (self->aq_nic_cfg->itr) {
774 if (self->aq_nic_cfg->itr != 0xFFFFU) { 774 if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) {
775 u32 itr_ = (self->aq_nic_cfg->itr >> 1); 775 u32 itr_ = (self->aq_nic_cfg->itr >> 1);
776 776
777 itr_ = min(AQ_CFG_IRQ_MASK, itr_); 777 itr_ = min(AQ_CFG_IRQ_MASK, itr_);
778 778
779 PHAL_ATLANTIC_A0->itr_rx = 0x80000000U | 779 itr_rx = 0x80000000U | (itr_ << 0x10);
780 (itr_ << 0x10);
781 } else { 780 } else {
782 u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U); 781 u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);
783 782
784 if (n < self->aq_link_status.mbps) { 783 if (n < self->aq_link_status.mbps) {
785 PHAL_ATLANTIC_A0->itr_rx = 0U; 784 itr_rx = 0U;
786 } else { 785 } else {
787 static unsigned int hw_timers_tbl_[] = { 786 static unsigned int hw_timers_tbl_[] = {
788 0x01CU, /* 10Gbit */ 787 0x01CU, /* 10Gbit */
@@ -797,8 +796,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
797 hw_atl_utils_mbps_2_speed_index( 796 hw_atl_utils_mbps_2_speed_index(
798 self->aq_link_status.mbps); 797 self->aq_link_status.mbps);
799 798
800 PHAL_ATLANTIC_A0->itr_rx = 799 itr_rx = 0x80000000U |
801 0x80000000U |
802 (hw_timers_tbl_[speed_index] << 0x10U); 800 (hw_timers_tbl_[speed_index] << 0x10U);
803 } 801 }
804 802
@@ -806,11 +804,11 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
806 aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U); 804 aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
807 } 805 }
808 } else { 806 } else {
809 PHAL_ATLANTIC_A0->itr_rx = 0U; 807 itr_rx = 0U;
810 } 808 }
811 809
812 for (i = HW_ATL_A0_RINGS_MAX; i--;) 810 for (i = HW_ATL_A0_RINGS_MAX; i--;)
813 reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i); 811 reg_irq_thr_set(self, itr_rx, i);
814 812
815 return aq_hw_err_from_flags(self); 813 return aq_hw_err_from_flags(self);
816} 814}
@@ -885,6 +883,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
885 .hw_rss_set = hw_atl_a0_hw_rss_set, 883 .hw_rss_set = hw_atl_a0_hw_rss_set,
886 .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set, 884 .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
887 .hw_get_regs = hw_atl_utils_hw_get_regs, 885 .hw_get_regs = hw_atl_utils_hw_get_regs,
886 .hw_update_stats = hw_atl_utils_update_stats,
888 .hw_get_hw_stats = hw_atl_utils_get_hw_stats, 887 .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
889 .hw_get_fw_version = hw_atl_utils_get_fw_version, 888 .hw_get_fw_version = hw_atl_utils_get_fw_version,
890}; 889};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 21784cc39dab..ec68c20efcbd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -788,39 +788,45 @@ err_exit:
788 return err; 788 return err;
789} 789}
790 790
791static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self, 791static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
792 bool itr_enabled)
793{ 792{
794 unsigned int i = 0U; 793 unsigned int i = 0U;
794 u32 itr_tx = 2U;
795 u32 itr_rx = 2U;
795 796
796 if (itr_enabled && self->aq_nic_cfg->itr) { 797 switch (self->aq_nic_cfg->itr) {
798 case AQ_CFG_INTERRUPT_MODERATION_ON:
799 case AQ_CFG_INTERRUPT_MODERATION_AUTO:
797 tdm_tx_desc_wr_wb_irq_en_set(self, 0U); 800 tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
798 tdm_tdm_intr_moder_en_set(self, 1U); 801 tdm_tdm_intr_moder_en_set(self, 1U);
799 rdm_rx_desc_wr_wb_irq_en_set(self, 0U); 802 rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
800 rdm_rdm_intr_moder_en_set(self, 1U); 803 rdm_rdm_intr_moder_en_set(self, 1U);
801 804
802 PHAL_ATLANTIC_B0->itr_tx = 2U; 805 if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
803 PHAL_ATLANTIC_B0->itr_rx = 2U; 806 /* HW timers are in 2us units */
807 int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
808 int tx_min_timer = tx_max_timer / 2;
804 809
805 if (self->aq_nic_cfg->itr != 0xFFFFU) { 810 int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
806 unsigned int max_timer = self->aq_nic_cfg->itr / 2U; 811 int rx_min_timer = rx_max_timer / 2;
807 unsigned int min_timer = self->aq_nic_cfg->itr / 32U;
808 812
809 max_timer = min(0x1FFU, max_timer); 813 tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
810 min_timer = min(0xFFU, min_timer); 814 tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
815 rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
816 rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);
811 817
812 PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U; 818 itr_tx |= tx_min_timer << 0x8U;
813 PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U; 819 itr_tx |= tx_max_timer << 0x10U;
814 PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U; 820 itr_rx |= rx_min_timer << 0x8U;
815 PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U; 821 itr_rx |= rx_max_timer << 0x10U;
816 } else { 822 } else {
817 static unsigned int hw_atl_b0_timers_table_tx_[][2] = { 823 static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
818 {0xffU, 0xffU}, /* 10Gbit */ 824 {0xfU, 0xffU}, /* 10Gbit */
819 {0xffU, 0x1ffU}, /* 5Gbit */ 825 {0xfU, 0x1ffU}, /* 5Gbit */
820 {0xffU, 0x1ffU}, /* 5Gbit 5GS */ 826 {0xfU, 0x1ffU}, /* 5Gbit 5GS */
821 {0xffU, 0x1ffU}, /* 2.5Gbit */ 827 {0xfU, 0x1ffU}, /* 2.5Gbit */
822 {0xffU, 0x1ffU}, /* 1Gbit */ 828 {0xfU, 0x1ffU}, /* 1Gbit */
823 {0xffU, 0x1ffU}, /* 100Mbit */ 829 {0xfU, 0x1ffU}, /* 100Mbit */
824 }; 830 };
825 831
826 static unsigned int hw_atl_b0_timers_table_rx_[][2] = { 832 static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
@@ -836,34 +842,36 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
836 hw_atl_utils_mbps_2_speed_index( 842 hw_atl_utils_mbps_2_speed_index(
837 self->aq_link_status.mbps); 843 self->aq_link_status.mbps);
838 844
839 PHAL_ATLANTIC_B0->itr_tx |= 845 /* Update user visible ITR settings */
840 hw_atl_b0_timers_table_tx_[speed_index] 846 self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
841 [0] << 0x8U; /* set min timer value */ 847 [speed_index][1] * 2;
842 PHAL_ATLANTIC_B0->itr_tx |= 848 self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
843 hw_atl_b0_timers_table_tx_[speed_index] 849 [speed_index][1] * 2;
844 [1] << 0x10U; /* set max timer value */ 850
845 851 itr_tx |= hw_atl_b0_timers_table_tx_
846 PHAL_ATLANTIC_B0->itr_rx |= 852 [speed_index][0] << 0x8U;
847 hw_atl_b0_timers_table_rx_[speed_index] 853 itr_tx |= hw_atl_b0_timers_table_tx_
848 [0] << 0x8U; /* set min timer value */ 854 [speed_index][1] << 0x10U;
849 PHAL_ATLANTIC_B0->itr_rx |= 855
850 hw_atl_b0_timers_table_rx_[speed_index] 856 itr_rx |= hw_atl_b0_timers_table_rx_
851 [1] << 0x10U; /* set max timer value */ 857 [speed_index][0] << 0x8U;
858 itr_rx |= hw_atl_b0_timers_table_rx_
859 [speed_index][1] << 0x10U;
852 } 860 }
853 } else { 861 break;
862 case AQ_CFG_INTERRUPT_MODERATION_OFF:
854 tdm_tx_desc_wr_wb_irq_en_set(self, 1U); 863 tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
855 tdm_tdm_intr_moder_en_set(self, 0U); 864 tdm_tdm_intr_moder_en_set(self, 0U);
856 rdm_rx_desc_wr_wb_irq_en_set(self, 1U); 865 rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
857 rdm_rdm_intr_moder_en_set(self, 0U); 866 rdm_rdm_intr_moder_en_set(self, 0U);
858 PHAL_ATLANTIC_B0->itr_tx = 0U; 867 itr_tx = 0U;
859 PHAL_ATLANTIC_B0->itr_rx = 0U; 868 itr_rx = 0U;
869 break;
860 } 870 }
861 871
862 for (i = HW_ATL_B0_RINGS_MAX; i--;) { 872 for (i = HW_ATL_B0_RINGS_MAX; i--;) {
863 reg_tx_intr_moder_ctrl_set(self, 873 reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
864 PHAL_ATLANTIC_B0->itr_tx, i); 874 reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
865 reg_rx_intr_moder_ctrl_set(self,
866 PHAL_ATLANTIC_B0->itr_rx, i);
867 } 875 }
868 876
869 return aq_hw_err_from_flags(self); 877 return aq_hw_err_from_flags(self);
@@ -939,6 +947,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
939 .hw_rss_set = hw_atl_b0_hw_rss_set, 947 .hw_rss_set = hw_atl_b0_hw_rss_set,
940 .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, 948 .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
941 .hw_get_regs = hw_atl_utils_hw_get_regs, 949 .hw_get_regs = hw_atl_utils_hw_get_regs,
950 .hw_update_stats = hw_atl_utils_update_stats,
942 .hw_get_hw_stats = hw_atl_utils_get_hw_stats, 951 .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
943 .hw_get_fw_version = hw_atl_utils_get_fw_version, 952 .hw_get_fw_version = hw_atl_utils_get_fw_version,
944}; 953};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index fcf89e25a773..9aa2c6edfca2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -139,6 +139,9 @@
139 139
140#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U 140#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U
141 141
142#define HW_ATL_INTR_MODER_MAX 0x1FF
143#define HW_ATL_INTR_MODER_MIN 0xFF
144
142/* Hardware tx descriptor */ 145/* Hardware tx descriptor */
143struct __packed hw_atl_txd_s { 146struct __packed hw_atl_txd_s {
144 u64 buf_addr; 147 u64 buf_addr;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index bf734b32e44b..1fe016fc4bc7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -255,6 +255,15 @@ err_exit:
255 return err; 255 return err;
256} 256}
257 257
258int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
259 struct hw_aq_atl_utils_mbox_header *pmbox)
260{
261 return hw_atl_utils_fw_downld_dwords(self,
262 PHAL_ATLANTIC->mbox_addr,
263 (u32 *)(void *)pmbox,
264 sizeof(*pmbox) / sizeof(u32));
265}
266
258void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, 267void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
259 struct hw_aq_atl_utils_mbox *pmbox) 268 struct hw_aq_atl_utils_mbox *pmbox)
260{ 269{
@@ -267,9 +276,6 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
267 if (err < 0) 276 if (err < 0)
268 goto err_exit; 277 goto err_exit;
269 278
270 if (pmbox != &PHAL_ATLANTIC->mbox)
271 memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox));
272
273 if (IS_CHIP_FEATURE(REVISION_A0)) { 279 if (IS_CHIP_FEATURE(REVISION_A0)) {
274 unsigned int mtu = self->aq_nic_cfg ? 280 unsigned int mtu = self->aq_nic_cfg ?
275 self->aq_nic_cfg->mtu : 1514U; 281 self->aq_nic_cfg->mtu : 1514U;
@@ -299,17 +305,17 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
299{ 305{
300 int err = 0; 306 int err = 0;
301 u32 transaction_id = 0; 307 u32 transaction_id = 0;
308 struct hw_aq_atl_utils_mbox_header mbox;
302 309
303 if (state == MPI_RESET) { 310 if (state == MPI_RESET) {
304 hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); 311 hw_atl_utils_mpi_read_mbox(self, &mbox);
305 312
306 transaction_id = PHAL_ATLANTIC->mbox.transaction_id; 313 transaction_id = mbox.transaction_id;
307 314
308 AQ_HW_WAIT_FOR(transaction_id != 315 AQ_HW_WAIT_FOR(transaction_id !=
309 (hw_atl_utils_mpi_read_stats 316 (hw_atl_utils_mpi_read_mbox(self, &mbox),
310 (self, &PHAL_ATLANTIC->mbox), 317 mbox.transaction_id),
311 PHAL_ATLANTIC->mbox.transaction_id), 318 1000U, 100U);
312 1000U, 100U);
313 if (err < 0) 319 if (err < 0)
314 goto err_exit; 320 goto err_exit;
315 } 321 }
@@ -492,16 +498,51 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
492 return 0; 498 return 0;
493} 499}
494 500
501int hw_atl_utils_update_stats(struct aq_hw_s *self)
502{
503 struct hw_atl_s *hw_self = PHAL_ATLANTIC;
504 struct hw_aq_atl_utils_mbox mbox;
505
506 if (!self->aq_link_status.mbps)
507 return 0;
508
509 hw_atl_utils_mpi_read_stats(self, &mbox);
510
511#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
512 mbox.stats._N_ - hw_self->last_stats._N_)
513
514 AQ_SDELTA(uprc);
515 AQ_SDELTA(mprc);
516 AQ_SDELTA(bprc);
517 AQ_SDELTA(erpt);
518
519 AQ_SDELTA(uptc);
520 AQ_SDELTA(mptc);
521 AQ_SDELTA(bptc);
522 AQ_SDELTA(erpr);
523
524 AQ_SDELTA(ubrc);
525 AQ_SDELTA(ubtc);
526 AQ_SDELTA(mbrc);
527 AQ_SDELTA(mbtc);
528 AQ_SDELTA(bbrc);
529 AQ_SDELTA(bbtc);
530 AQ_SDELTA(dpc);
531
532#undef AQ_SDELTA
533
534 memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
535
536 return 0;
537}
538
495int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 539int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
496 u64 *data, unsigned int *p_count) 540 u64 *data, unsigned int *p_count)
497{ 541{
498 struct hw_atl_stats_s *stats = NULL; 542 struct hw_atl_s *hw_self = PHAL_ATLANTIC;
543 struct hw_atl_stats_s *stats = &hw_self->curr_stats;
499 int i = 0; 544 int i = 0;
500 545
501 hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
502
503 stats = &PHAL_ATLANTIC->mbox.stats;
504
505 data[i] = stats->uprc + stats->mprc + stats->bprc; 546 data[i] = stats->uprc + stats->mprc + stats->bprc;
506 data[++i] = stats->uprc; 547 data[++i] = stats->uprc;
507 data[++i] = stats->mprc; 548 data[++i] = stats->mprc;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index e0360a6b2202..c99cc690e425 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -115,19 +115,22 @@ struct __packed hw_aq_atl_utils_fw_rpc {
115 }; 115 };
116}; 116};
117 117
118struct __packed hw_aq_atl_utils_mbox { 118struct __packed hw_aq_atl_utils_mbox_header {
119 u32 version; 119 u32 version;
120 u32 transaction_id; 120 u32 transaction_id;
121 int error; 121 u32 error;
122};
123
124struct __packed hw_aq_atl_utils_mbox {
125 struct hw_aq_atl_utils_mbox_header header;
122 struct hw_atl_stats_s stats; 126 struct hw_atl_stats_s stats;
123}; 127};
124 128
125struct __packed hw_atl_s { 129struct __packed hw_atl_s {
126 struct aq_hw_s base; 130 struct aq_hw_s base;
127 struct hw_aq_atl_utils_mbox mbox; 131 struct hw_atl_stats_s last_stats;
132 struct hw_atl_stats_s curr_stats;
128 u64 speed; 133 u64 speed;
129 u32 itr_tx;
130 u32 itr_rx;
131 unsigned int chip_features; 134 unsigned int chip_features;
132 u32 fw_ver_actual; 135 u32 fw_ver_actual;
133 atomic_t dpc; 136 atomic_t dpc;
@@ -170,6 +173,9 @@ enum hal_atl_utils_fw_state_e {
170 173
171void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); 174void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
172 175
176int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
177 struct hw_aq_atl_utils_mbox_header *pmbox);
178
173void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, 179void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
174 struct hw_aq_atl_utils_mbox *pmbox); 180 struct hw_aq_atl_utils_mbox *pmbox);
175 181
@@ -199,6 +205,8 @@ int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
199 205
200int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); 206int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
201 207
208int hw_atl_utils_update_stats(struct aq_hw_s *self);
209
202int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 210int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
203 u64 *data, 211 u64 *data,
204 unsigned int *p_count); 212 unsigned int *p_count);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b31bdec26fce..24d55724ceff 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -215,6 +215,8 @@ static const u16 bnxt_async_events_arr[] = {
215 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, 215 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
216}; 216};
217 217
218static struct workqueue_struct *bnxt_pf_wq;
219
218static bool bnxt_vf_pciid(enum board_idx idx) 220static bool bnxt_vf_pciid(enum board_idx idx)
219{ 221{
220 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF); 222 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
@@ -1025,12 +1027,28 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
1025 return 0; 1027 return 0;
1026} 1028}
1027 1029
1030static void bnxt_queue_sp_work(struct bnxt *bp)
1031{
1032 if (BNXT_PF(bp))
1033 queue_work(bnxt_pf_wq, &bp->sp_task);
1034 else
1035 schedule_work(&bp->sp_task);
1036}
1037
1038static void bnxt_cancel_sp_work(struct bnxt *bp)
1039{
1040 if (BNXT_PF(bp))
1041 flush_workqueue(bnxt_pf_wq);
1042 else
1043 cancel_work_sync(&bp->sp_task);
1044}
1045
1028static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 1046static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1029{ 1047{
1030 if (!rxr->bnapi->in_reset) { 1048 if (!rxr->bnapi->in_reset) {
1031 rxr->bnapi->in_reset = true; 1049 rxr->bnapi->in_reset = true;
1032 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 1050 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1033 schedule_work(&bp->sp_task); 1051 bnxt_queue_sp_work(bp);
1034 } 1052 }
1035 rxr->rx_next_cons = 0xffff; 1053 rxr->rx_next_cons = 0xffff;
1036} 1054}
@@ -1718,7 +1736,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
1718 default: 1736 default:
1719 goto async_event_process_exit; 1737 goto async_event_process_exit;
1720 } 1738 }
1721 schedule_work(&bp->sp_task); 1739 bnxt_queue_sp_work(bp);
1722async_event_process_exit: 1740async_event_process_exit:
1723 bnxt_ulp_async_events(bp, cmpl); 1741 bnxt_ulp_async_events(bp, cmpl);
1724 return 0; 1742 return 0;
@@ -1752,7 +1770,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1752 1770
1753 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 1771 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1754 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); 1772 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1755 schedule_work(&bp->sp_task); 1773 bnxt_queue_sp_work(bp);
1756 break; 1774 break;
1757 1775
1758 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 1776 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -3449,6 +3467,12 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3449 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); 3467 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
3450} 3468}
3451 3469
3470int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3471 int timeout)
3472{
3473 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3474}
3475
3452int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 3476int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3453{ 3477{
3454 int rc; 3478 int rc;
@@ -6328,7 +6352,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6328 } 6352 }
6329 6353
6330 if (link_re_init) { 6354 if (link_re_init) {
6355 mutex_lock(&bp->link_lock);
6331 rc = bnxt_update_phy_setting(bp); 6356 rc = bnxt_update_phy_setting(bp);
6357 mutex_unlock(&bp->link_lock);
6332 if (rc) 6358 if (rc)
6333 netdev_warn(bp->dev, "failed to update phy settings\n"); 6359 netdev_warn(bp->dev, "failed to update phy settings\n");
6334 } 6360 }
@@ -6648,7 +6674,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
6648 vnic->rx_mask = mask; 6674 vnic->rx_mask = mask;
6649 6675
6650 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); 6676 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
6651 schedule_work(&bp->sp_task); 6677 bnxt_queue_sp_work(bp);
6652 } 6678 }
6653} 6679}
6654 6680
@@ -6921,7 +6947,7 @@ static void bnxt_tx_timeout(struct net_device *dev)
6921 6947
6922 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 6948 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
6923 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 6949 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
6924 schedule_work(&bp->sp_task); 6950 bnxt_queue_sp_work(bp);
6925} 6951}
6926 6952
6927#ifdef CONFIG_NET_POLL_CONTROLLER 6953#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6953,7 +6979,7 @@ static void bnxt_timer(unsigned long data)
6953 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && 6979 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
6954 bp->stats_coal_ticks) { 6980 bp->stats_coal_ticks) {
6955 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); 6981 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
6956 schedule_work(&bp->sp_task); 6982 bnxt_queue_sp_work(bp);
6957 } 6983 }
6958bnxt_restart_timer: 6984bnxt_restart_timer:
6959 mod_timer(&bp->timer, jiffies + bp->current_interval); 6985 mod_timer(&bp->timer, jiffies + bp->current_interval);
@@ -7026,30 +7052,28 @@ static void bnxt_sp_task(struct work_struct *work)
7026 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) 7052 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
7027 bnxt_hwrm_port_qstats(bp); 7053 bnxt_hwrm_port_qstats(bp);
7028 7054
7029 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
7030 * must be the last functions to be called before exiting.
7031 */
7032 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 7055 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
7033 int rc = 0; 7056 int rc;
7034 7057
7058 mutex_lock(&bp->link_lock);
7035 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 7059 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
7036 &bp->sp_event)) 7060 &bp->sp_event))
7037 bnxt_hwrm_phy_qcaps(bp); 7061 bnxt_hwrm_phy_qcaps(bp);
7038 7062
7039 bnxt_rtnl_lock_sp(bp); 7063 rc = bnxt_update_link(bp, true);
7040 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 7064 mutex_unlock(&bp->link_lock);
7041 rc = bnxt_update_link(bp, true);
7042 bnxt_rtnl_unlock_sp(bp);
7043 if (rc) 7065 if (rc)
7044 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 7066 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
7045 rc); 7067 rc);
7046 } 7068 }
7047 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 7069 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
7048 bnxt_rtnl_lock_sp(bp); 7070 mutex_lock(&bp->link_lock);
7049 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 7071 bnxt_get_port_module_status(bp);
7050 bnxt_get_port_module_status(bp); 7072 mutex_unlock(&bp->link_lock);
7051 bnxt_rtnl_unlock_sp(bp);
7052 } 7073 }
7074 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
7075 * must be the last functions to be called before exiting.
7076 */
7053 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 7077 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
7054 bnxt_reset(bp, false); 7078 bnxt_reset(bp, false);
7055 7079
@@ -7457,7 +7481,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
7457 spin_unlock_bh(&bp->ntp_fltr_lock); 7481 spin_unlock_bh(&bp->ntp_fltr_lock);
7458 7482
7459 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 7483 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
7460 schedule_work(&bp->sp_task); 7484 bnxt_queue_sp_work(bp);
7461 7485
7462 return new_fltr->sw_id; 7486 return new_fltr->sw_id;
7463 7487
@@ -7540,7 +7564,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
7540 if (bp->vxlan_port_cnt == 1) { 7564 if (bp->vxlan_port_cnt == 1) {
7541 bp->vxlan_port = ti->port; 7565 bp->vxlan_port = ti->port;
7542 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); 7566 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
7543 schedule_work(&bp->sp_task); 7567 bnxt_queue_sp_work(bp);
7544 } 7568 }
7545 break; 7569 break;
7546 case UDP_TUNNEL_TYPE_GENEVE: 7570 case UDP_TUNNEL_TYPE_GENEVE:
@@ -7557,7 +7581,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
7557 return; 7581 return;
7558 } 7582 }
7559 7583
7560 schedule_work(&bp->sp_task); 7584 bnxt_queue_sp_work(bp);
7561} 7585}
7562 7586
7563static void bnxt_udp_tunnel_del(struct net_device *dev, 7587static void bnxt_udp_tunnel_del(struct net_device *dev,
@@ -7596,7 +7620,7 @@ static void bnxt_udp_tunnel_del(struct net_device *dev,
7596 return; 7620 return;
7597 } 7621 }
7598 7622
7599 schedule_work(&bp->sp_task); 7623 bnxt_queue_sp_work(bp);
7600} 7624}
7601 7625
7602static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 7626static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -7744,7 +7768,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
7744 pci_disable_pcie_error_reporting(pdev); 7768 pci_disable_pcie_error_reporting(pdev);
7745 unregister_netdev(dev); 7769 unregister_netdev(dev);
7746 bnxt_shutdown_tc(bp); 7770 bnxt_shutdown_tc(bp);
7747 cancel_work_sync(&bp->sp_task); 7771 bnxt_cancel_sp_work(bp);
7748 bp->sp_event = 0; 7772 bp->sp_event = 0;
7749 7773
7750 bnxt_clear_int_mode(bp); 7774 bnxt_clear_int_mode(bp);
@@ -7772,6 +7796,7 @@ static int bnxt_probe_phy(struct bnxt *bp)
7772 rc); 7796 rc);
7773 return rc; 7797 return rc;
7774 } 7798 }
7799 mutex_init(&bp->link_lock);
7775 7800
7776 rc = bnxt_update_link(bp, false); 7801 rc = bnxt_update_link(bp, false);
7777 if (rc) { 7802 if (rc) {
@@ -7970,7 +7995,7 @@ static void bnxt_parse_log_pcie_link(struct bnxt *bp)
7970 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; 7995 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
7971 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; 7996 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
7972 7997
7973 if (pcie_get_minimum_link(bp->pdev, &speed, &width) || 7998 if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
7974 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) 7999 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
7975 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n"); 8000 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
7976 else 8001 else
@@ -8162,8 +8187,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8162 else 8187 else
8163 device_set_wakeup_capable(&pdev->dev, false); 8188 device_set_wakeup_capable(&pdev->dev, false);
8164 8189
8165 if (BNXT_PF(bp)) 8190 if (BNXT_PF(bp)) {
8191 if (!bnxt_pf_wq) {
8192 bnxt_pf_wq =
8193 create_singlethread_workqueue("bnxt_pf_wq");
8194 if (!bnxt_pf_wq) {
8195 dev_err(&pdev->dev, "Unable to create workqueue.\n");
8196 goto init_err_pci_clean;
8197 }
8198 }
8166 bnxt_init_tc(bp); 8199 bnxt_init_tc(bp);
8200 }
8167 8201
8168 rc = register_netdev(dev); 8202 rc = register_netdev(dev);
8169 if (rc) 8203 if (rc)
@@ -8399,4 +8433,17 @@ static struct pci_driver bnxt_pci_driver = {
8399#endif 8433#endif
8400}; 8434};
8401 8435
8402module_pci_driver(bnxt_pci_driver); 8436static int __init bnxt_init(void)
8437{
8438 return pci_register_driver(&bnxt_pci_driver);
8439}
8440
8441static void __exit bnxt_exit(void)
8442{
8443 pci_unregister_driver(&bnxt_pci_driver);
8444 if (bnxt_pf_wq)
8445 destroy_workqueue(bnxt_pf_wq);
8446}
8447
8448module_init(bnxt_init);
8449module_exit(bnxt_exit);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 7b888d4b2b55..c911e69ff25f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1290,6 +1290,10 @@ struct bnxt {
1290 unsigned long *ntp_fltr_bmap; 1290 unsigned long *ntp_fltr_bmap;
1291 int ntp_fltr_count; 1291 int ntp_fltr_count;
1292 1292
1293 /* To protect link related settings during link changes and
1294 * ethtool settings changes.
1295 */
1296 struct mutex link_lock;
1293 struct bnxt_link_info link_info; 1297 struct bnxt_link_info link_info;
1294 struct ethtool_eee eee; 1298 struct ethtool_eee eee;
1295 u32 lpi_tmr_lo; 1299 u32 lpi_tmr_lo;
@@ -1358,6 +1362,7 @@ void bnxt_set_ring_params(struct bnxt *);
1358int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); 1362int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
1359void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); 1363void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
1360int _hwrm_send_message(struct bnxt *, void *, u32, int); 1364int _hwrm_send_message(struct bnxt *, void *, u32, int);
1365int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
1361int hwrm_send_message(struct bnxt *, void *, u32, int); 1366int hwrm_send_message(struct bnxt *, void *, u32, int);
1362int hwrm_send_message_silent(struct bnxt *, void *, u32, int); 1367int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
1363int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, 1368int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index aa1f3a2c7a78..fed37cd9ae1d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -50,7 +50,9 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
50 50
51 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); 51 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
52 req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 52 req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
53 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 53
54 mutex_lock(&bp->hwrm_cmd_lock);
55 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
54 if (!rc) { 56 if (!rc) {
55 u8 *pri2cos = &resp->pri0_cos_queue_id; 57 u8 *pri2cos = &resp->pri0_cos_queue_id;
56 int i, j; 58 int i, j;
@@ -66,6 +68,7 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
66 } 68 }
67 } 69 }
68 } 70 }
71 mutex_unlock(&bp->hwrm_cmd_lock);
69 return rc; 72 return rc;
70} 73}
71 74
@@ -119,9 +122,13 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
119 int rc, i; 122 int rc, i;
120 123
121 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1); 124 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
122 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 125
123 if (rc) 126 mutex_lock(&bp->hwrm_cmd_lock);
127 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
128 if (rc) {
129 mutex_unlock(&bp->hwrm_cmd_lock);
124 return rc; 130 return rc;
131 }
125 132
126 data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id); 133 data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
127 for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) { 134 for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
@@ -143,6 +150,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
143 } 150 }
144 } 151 }
145 } 152 }
153 mutex_unlock(&bp->hwrm_cmd_lock);
146 return 0; 154 return 0;
147} 155}
148 156
@@ -240,12 +248,17 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
240 int rc; 248 int rc;
241 249
242 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1); 250 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
243 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 251
244 if (rc) 252 mutex_lock(&bp->hwrm_cmd_lock);
253 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
254 if (rc) {
255 mutex_unlock(&bp->hwrm_cmd_lock);
245 return rc; 256 return rc;
257 }
246 258
247 pri_mask = le32_to_cpu(resp->flags); 259 pri_mask = le32_to_cpu(resp->flags);
248 pfc->pfc_en = pri_mask; 260 pfc->pfc_en = pri_mask;
261 mutex_unlock(&bp->hwrm_cmd_lock);
249 return 0; 262 return 0;
250} 263}
251 264
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8eff05a3e0e4..3cbe771b3352 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1052,6 +1052,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
1052 u32 ethtool_speed; 1052 u32 ethtool_speed;
1053 1053
1054 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); 1054 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
1055 mutex_lock(&bp->link_lock);
1055 bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); 1056 bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
1056 1057
1057 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); 1058 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
@@ -1099,6 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
1099 base->port = PORT_FIBRE; 1100 base->port = PORT_FIBRE;
1100 } 1101 }
1101 base->phy_address = link_info->phy_addr; 1102 base->phy_address = link_info->phy_addr;
1103 mutex_unlock(&bp->link_lock);
1102 1104
1103 return 0; 1105 return 0;
1104} 1106}
@@ -1190,6 +1192,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
1190 if (!BNXT_SINGLE_PF(bp)) 1192 if (!BNXT_SINGLE_PF(bp))
1191 return -EOPNOTSUPP; 1193 return -EOPNOTSUPP;
1192 1194
1195 mutex_lock(&bp->link_lock);
1193 if (base->autoneg == AUTONEG_ENABLE) { 1196 if (base->autoneg == AUTONEG_ENABLE) {
1194 BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings, 1197 BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
1195 advertising); 1198 advertising);
@@ -1234,6 +1237,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
1234 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); 1237 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
1235 1238
1236set_setting_exit: 1239set_setting_exit:
1240 mutex_unlock(&bp->link_lock);
1237 return rc; 1241 return rc;
1238} 1242}
1239 1243
@@ -1805,7 +1809,8 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1805 req.dir_ordinal = cpu_to_le16(ordinal); 1809 req.dir_ordinal = cpu_to_le16(ordinal);
1806 req.dir_ext = cpu_to_le16(ext); 1810 req.dir_ext = cpu_to_le16(ext);
1807 req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; 1811 req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
1808 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 1812 mutex_lock(&bp->hwrm_cmd_lock);
1813 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1809 if (rc == 0) { 1814 if (rc == 0) {
1810 if (index) 1815 if (index)
1811 *index = le16_to_cpu(output->dir_idx); 1816 *index = le16_to_cpu(output->dir_idx);
@@ -1814,6 +1819,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1814 if (data_length) 1819 if (data_length)
1815 *data_length = le32_to_cpu(output->dir_data_length); 1820 *data_length = le32_to_cpu(output->dir_data_length);
1816 } 1821 }
1822 mutex_unlock(&bp->hwrm_cmd_lock);
1817 return rc; 1823 return rc;
1818} 1824}
1819 1825
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index d37925a8a65b..5ee18660bc33 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -502,6 +502,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
502 int rc = 0, vfs_supported; 502 int rc = 0, vfs_supported;
503 int min_rx_rings, min_tx_rings, min_rss_ctxs; 503 int min_rx_rings, min_tx_rings, min_rss_ctxs;
504 int tx_ok = 0, rx_ok = 0, rss_ok = 0; 504 int tx_ok = 0, rx_ok = 0, rss_ok = 0;
505 int avail_cp, avail_stat;
505 506
506 /* Check if we can enable requested num of vf's. At a mininum 507 /* Check if we can enable requested num of vf's. At a mininum
507 * we require 1 RX 1 TX rings for each VF. In this minimum conf 508 * we require 1 RX 1 TX rings for each VF. In this minimum conf
@@ -509,6 +510,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
509 */ 510 */
510 vfs_supported = *num_vfs; 511 vfs_supported = *num_vfs;
511 512
513 avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
514 avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
515 avail_cp = min_t(int, avail_cp, avail_stat);
516
512 while (vfs_supported) { 517 while (vfs_supported) {
513 min_rx_rings = vfs_supported; 518 min_rx_rings = vfs_supported;
514 min_tx_rings = vfs_supported; 519 min_tx_rings = vfs_supported;
@@ -523,10 +528,12 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
523 min_rx_rings) 528 min_rx_rings)
524 rx_ok = 1; 529 rx_ok = 1;
525 } 530 }
526 if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings) 531 if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
532 avail_cp < min_rx_rings)
527 rx_ok = 0; 533 rx_ok = 0;
528 534
529 if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings) 535 if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
536 avail_cp >= min_tx_rings)
530 tx_ok = 1; 537 tx_ok = 1;
531 538
532 if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs) 539 if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 963803bc6633..eafae3eb4fed 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1847,7 +1847,7 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1847 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1847 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1848 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1848 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1849 1849
1850 ns = timespec_to_ns(ts); 1850 ns = timespec64_to_ns(ts);
1851 1851
1852 spin_lock_irqsave(&lio->ptp_lock, flags); 1852 spin_lock_irqsave(&lio->ptp_lock, flags);
1853 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); 1853 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index b991703319f9..11eba8277132 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1110,11 +1110,12 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1110 * places them in a descriptor array, scrq_arr 1110 * places them in a descriptor array, scrq_arr
1111 */ 1111 */
1112 1112
1113static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, 1113static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1114 union sub_crq *scrq_arr) 1114 union sub_crq *scrq_arr)
1115{ 1115{
1116 union sub_crq hdr_desc; 1116 union sub_crq hdr_desc;
1117 int tmp_len = len; 1117 int tmp_len = len;
1118 int num_descs = 0;
1118 u8 *data, *cur; 1119 u8 *data, *cur;
1119 int tmp; 1120 int tmp;
1120 1121
@@ -1143,7 +1144,10 @@ static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1143 tmp_len -= tmp; 1144 tmp_len -= tmp;
1144 *scrq_arr = hdr_desc; 1145 *scrq_arr = hdr_desc;
1145 scrq_arr++; 1146 scrq_arr++;
1147 num_descs++;
1146 } 1148 }
1149
1150 return num_descs;
1147} 1151}
1148 1152
1149/** 1153/**
@@ -1161,16 +1165,12 @@ static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1161 int *num_entries, u8 hdr_field) 1165 int *num_entries, u8 hdr_field)
1162{ 1166{
1163 int hdr_len[3] = {0, 0, 0}; 1167 int hdr_len[3] = {0, 0, 0};
1164 int tot_len, len; 1168 int tot_len;
1165 u8 *hdr_data = txbuff->hdr_data; 1169 u8 *hdr_data = txbuff->hdr_data;
1166 1170
1167 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len, 1171 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1168 txbuff->hdr_data); 1172 txbuff->hdr_data);
1169 len = tot_len; 1173 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1170 len -= 24;
1171 if (len > 0)
1172 num_entries += len % 29 ? len / 29 + 1 : len / 29;
1173 create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1174 txbuff->indir_arr + 1); 1174 txbuff->indir_arr + 1);
1175} 1175}
1176 1176
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 151d9cfb6ea4..0ccab0a5d717 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -298,7 +298,7 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
298} 298}
299 299
300/** 300/**
301 * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking 301 * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
302 * @hw: pointer to the HW structure 302 * @hw: pointer to the HW structure
303 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) 303 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
304 * @data: word read from the Shadow RAM 304 * @data: word read from the Shadow RAM
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a23306f04e00..edbc94c4353d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1038,6 +1038,32 @@ reset_latency:
1038} 1038}
1039 1039
1040/** 1040/**
1041 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1042 * @rx_ring: rx descriptor ring to store buffers on
1043 * @old_buff: donor buffer to have page reused
1044 *
1045 * Synchronizes page for reuse by the adapter
1046 **/
1047static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1048 struct i40e_rx_buffer *old_buff)
1049{
1050 struct i40e_rx_buffer *new_buff;
1051 u16 nta = rx_ring->next_to_alloc;
1052
1053 new_buff = &rx_ring->rx_bi[nta];
1054
1055 /* update, and store next to alloc */
1056 nta++;
1057 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1058
1059 /* transfer page from old buffer to new buffer */
1060 new_buff->dma = old_buff->dma;
1061 new_buff->page = old_buff->page;
1062 new_buff->page_offset = old_buff->page_offset;
1063 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1064}
1065
1066/**
1041 * i40e_rx_is_programming_status - check for programming status descriptor 1067 * i40e_rx_is_programming_status - check for programming status descriptor
1042 * @qw: qword representing status_error_len in CPU ordering 1068 * @qw: qword representing status_error_len in CPU ordering
1043 * 1069 *
@@ -1071,15 +1097,24 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
1071 union i40e_rx_desc *rx_desc, 1097 union i40e_rx_desc *rx_desc,
1072 u64 qw) 1098 u64 qw)
1073{ 1099{
1074 u32 ntc = rx_ring->next_to_clean + 1; 1100 struct i40e_rx_buffer *rx_buffer;
1101 u32 ntc = rx_ring->next_to_clean;
1075 u8 id; 1102 u8 id;
1076 1103
1077 /* fetch, update, and store next to clean */ 1104 /* fetch, update, and store next to clean */
1105 rx_buffer = &rx_ring->rx_bi[ntc++];
1078 ntc = (ntc < rx_ring->count) ? ntc : 0; 1106 ntc = (ntc < rx_ring->count) ? ntc : 0;
1079 rx_ring->next_to_clean = ntc; 1107 rx_ring->next_to_clean = ntc;
1080 1108
1081 prefetch(I40E_RX_DESC(rx_ring, ntc)); 1109 prefetch(I40E_RX_DESC(rx_ring, ntc));
1082 1110
1111 /* place unused page back on the ring */
1112 i40e_reuse_rx_page(rx_ring, rx_buffer);
1113 rx_ring->rx_stats.page_reuse_count++;
1114
1115 /* clear contents of buffer_info */
1116 rx_buffer->page = NULL;
1117
1083 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> 1118 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1084 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; 1119 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1085 1120
@@ -1648,32 +1683,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1648} 1683}
1649 1684
1650/** 1685/**
1651 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1652 * @rx_ring: rx descriptor ring to store buffers on
1653 * @old_buff: donor buffer to have page reused
1654 *
1655 * Synchronizes page for reuse by the adapter
1656 **/
1657static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1658 struct i40e_rx_buffer *old_buff)
1659{
1660 struct i40e_rx_buffer *new_buff;
1661 u16 nta = rx_ring->next_to_alloc;
1662
1663 new_buff = &rx_ring->rx_bi[nta];
1664
1665 /* update, and store next to alloc */
1666 nta++;
1667 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1668
1669 /* transfer page from old buffer to new buffer */
1670 new_buff->dma = old_buff->dma;
1671 new_buff->page = old_buff->page;
1672 new_buff->page_offset = old_buff->page_offset;
1673 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1674}
1675
1676/**
1677 * i40e_page_is_reusable - check if any reuse is possible 1686 * i40e_page_is_reusable - check if any reuse is possible
1678 * @page: page struct to check 1687 * @page: page struct to check
1679 * 1688 *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 9d5e7cf288be..f3315bc874ad 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -96,6 +96,7 @@ struct mlxsw_core {
96 const struct mlxsw_bus *bus; 96 const struct mlxsw_bus *bus;
97 void *bus_priv; 97 void *bus_priv;
98 const struct mlxsw_bus_info *bus_info; 98 const struct mlxsw_bus_info *bus_info;
99 struct workqueue_struct *emad_wq;
99 struct list_head rx_listener_list; 100 struct list_head rx_listener_list;
100 struct list_head event_listener_list; 101 struct list_head event_listener_list;
101 struct { 102 struct {
@@ -465,7 +466,7 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
465{ 466{
466 unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); 467 unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
467 468
468 mlxsw_core_schedule_dw(&trans->timeout_dw, timeout); 469 queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
469} 470}
470 471
471static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, 472static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
@@ -587,12 +588,18 @@ static const struct mlxsw_listener mlxsw_emad_rx_listener =
587 588
588static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) 589static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
589{ 590{
591 struct workqueue_struct *emad_wq;
590 u64 tid; 592 u64 tid;
591 int err; 593 int err;
592 594
593 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) 595 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
594 return 0; 596 return 0;
595 597
598 emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
599 if (!emad_wq)
600 return -ENOMEM;
601 mlxsw_core->emad_wq = emad_wq;
602
596 /* Set the upper 32 bits of the transaction ID field to a random 603 /* Set the upper 32 bits of the transaction ID field to a random
597 * number. This allows us to discard EMADs addressed to other 604 * number. This allows us to discard EMADs addressed to other
598 * devices. 605 * devices.
@@ -619,6 +626,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
619err_emad_trap_set: 626err_emad_trap_set:
620 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, 627 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
621 mlxsw_core); 628 mlxsw_core);
629 destroy_workqueue(mlxsw_core->emad_wq);
622 return err; 630 return err;
623} 631}
624 632
@@ -631,6 +639,7 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
631 mlxsw_core->emad.use_emad = false; 639 mlxsw_core->emad.use_emad = false;
632 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, 640 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
633 mlxsw_core); 641 mlxsw_core);
642 destroy_workqueue(mlxsw_core->emad_wq);
634} 643}
635 644
636static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, 645static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index d44e673a4c4e..a3f31f425550 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -6778,6 +6778,36 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
6778 mlxsw_reg_mgpc_opcode_set(payload, opcode); 6778 mlxsw_reg_mgpc_opcode_set(payload, opcode);
6779} 6779}
6780 6780
6781/* TIGCR - Tunneling IPinIP General Configuration Register
6782 * -------------------------------------------------------
6783 * The TIGCR register is used for setting up the IPinIP Tunnel configuration.
6784 */
6785#define MLXSW_REG_TIGCR_ID 0xA801
6786#define MLXSW_REG_TIGCR_LEN 0x10
6787
6788MLXSW_REG_DEFINE(tigcr, MLXSW_REG_TIGCR_ID, MLXSW_REG_TIGCR_LEN);
6789
6790/* reg_tigcr_ipip_ttlc
6791 * For IPinIP Tunnel encapsulation: whether to copy the ttl from the packet
6792 * header.
6793 * Access: RW
6794 */
6795MLXSW_ITEM32(reg, tigcr, ttlc, 0x04, 8, 1);
6796
6797/* reg_tigcr_ipip_ttl_uc
6798 * The TTL for IPinIP Tunnel encapsulation of unicast packets if
6799 * reg_tigcr_ipip_ttlc is unset.
6800 * Access: RW
6801 */
6802MLXSW_ITEM32(reg, tigcr, ttl_uc, 0x04, 0, 8);
6803
6804static inline void mlxsw_reg_tigcr_pack(char *payload, bool ttlc, u8 ttl_uc)
6805{
6806 MLXSW_REG_ZERO(tigcr, payload);
6807 mlxsw_reg_tigcr_ttlc_set(payload, ttlc);
6808 mlxsw_reg_tigcr_ttl_uc_set(payload, ttl_uc);
6809}
6810
6781/* SBPR - Shared Buffer Pools Register 6811/* SBPR - Shared Buffer Pools Register
6782 * ----------------------------------- 6812 * -----------------------------------
6783 * The SBPR configures and retrieves the shared buffer pools and configuration. 6813 * The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -7262,6 +7292,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
7262 MLXSW_REG(mcc), 7292 MLXSW_REG(mcc),
7263 MLXSW_REG(mcda), 7293 MLXSW_REG(mcda),
7264 MLXSW_REG(mgpc), 7294 MLXSW_REG(mgpc),
7295 MLXSW_REG(tigcr),
7265 MLXSW_REG(sbpr), 7296 MLXSW_REG(sbpr),
7266 MLXSW_REG(sbcm), 7297 MLXSW_REG(sbcm),
7267 MLXSW_REG(sbpm), 7298 MLXSW_REG(sbpm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 12d471d2a90b..5f2d100e3718 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -6432,11 +6432,20 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
6432 kfree(mlxsw_sp->router->rifs); 6432 kfree(mlxsw_sp->router->rifs);
6433} 6433}
6434 6434
6435static int
6436mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
6437{
6438 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
6439
6440 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
6441 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
6442}
6443
6435static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp) 6444static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
6436{ 6445{
6437 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr; 6446 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
6438 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list); 6447 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
6439 return 0; 6448 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
6440} 6449}
6441 6450
6442static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp) 6451static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index d2f73feb8497..2c9109b09faf 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1180,10 +1180,14 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1180{ 1180{
1181 void *frag; 1181 void *frag;
1182 1182
1183 if (!dp->xdp_prog) 1183 if (!dp->xdp_prog) {
1184 frag = netdev_alloc_frag(dp->fl_bufsz); 1184 frag = netdev_alloc_frag(dp->fl_bufsz);
1185 else 1185 } else {
1186 frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD)); 1186 struct page *page;
1187
1188 page = alloc_page(GFP_KERNEL | __GFP_COLD);
1189 frag = page ? page_address(page) : NULL;
1190 }
1187 if (!frag) { 1191 if (!frag) {
1188 nn_dp_warn(dp, "Failed to alloc receive page frag\n"); 1192 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1189 return NULL; 1193 return NULL;
@@ -1203,10 +1207,14 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1203{ 1207{
1204 void *frag; 1208 void *frag;
1205 1209
1206 if (!dp->xdp_prog) 1210 if (!dp->xdp_prog) {
1207 frag = napi_alloc_frag(dp->fl_bufsz); 1211 frag = napi_alloc_frag(dp->fl_bufsz);
1208 else 1212 } else {
1209 frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD)); 1213 struct page *page;
1214
1215 page = alloc_page(GFP_ATOMIC | __GFP_COLD);
1216 frag = page ? page_address(page) : NULL;
1217 }
1210 if (!frag) { 1218 if (!frag) {
1211 nn_dp_warn(dp, "Failed to alloc receive page frag\n"); 1219 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1212 return NULL; 1220 return NULL;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 07969f06df10..dc016dfec64d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -464,7 +464,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
464 464
465 do { 465 do {
466 start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); 466 start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
467 *data++ = nn->r_vecs[i].rx_pkts; 467 data[0] = nn->r_vecs[i].rx_pkts;
468 tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; 468 tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
469 tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; 469 tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
470 tmp[2] = nn->r_vecs[i].hw_csum_rx_error; 470 tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
@@ -472,14 +472,16 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
472 472
473 do { 473 do {
474 start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); 474 start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
475 *data++ = nn->r_vecs[i].tx_pkts; 475 data[1] = nn->r_vecs[i].tx_pkts;
476 *data++ = nn->r_vecs[i].tx_busy; 476 data[2] = nn->r_vecs[i].tx_busy;
477 tmp[3] = nn->r_vecs[i].hw_csum_tx; 477 tmp[3] = nn->r_vecs[i].hw_csum_tx;
478 tmp[4] = nn->r_vecs[i].hw_csum_tx_inner; 478 tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
479 tmp[5] = nn->r_vecs[i].tx_gather; 479 tmp[5] = nn->r_vecs[i].tx_gather;
480 tmp[6] = nn->r_vecs[i].tx_lso; 480 tmp[6] = nn->r_vecs[i].tx_lso;
481 } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); 481 } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
482 482
483 data += 3;
484
483 for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) 485 for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
484 gathered_stats[j] += tmp[j]; 486 gathered_stats[j] += tmp[j];
485 } 487 }
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e03fcf914690..a3c949ea7d1a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -8491,8 +8491,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8491 rtl8168_driver_start(tp); 8491 rtl8168_driver_start(tp);
8492 } 8492 }
8493 8493
8494 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
8495
8496 if (pci_dev_run_wake(pdev)) 8494 if (pci_dev_run_wake(pdev))
8497 pm_runtime_put_noidle(&pdev->dev); 8495 pm_runtime_put_noidle(&pdev->dev);
8498 8496
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index e0ef02f9503b..4b286e27c4ca 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -275,7 +275,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
275 goto exit; 275 goto exit;
276 i++; 276 i++;
277 277
278 } while ((ret == 1) || (i < 10)); 278 } while ((ret == 1) && (i < 10));
279 279
280 if (i == 10) 280 if (i == 10)
281 ret = -EBUSY; 281 ret = -EBUSY;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 67af0bdd7f10..7516ca210855 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -34,7 +34,7 @@ int dwmac_dma_reset(void __iomem *ioaddr)
34 34
35 err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, 35 err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
36 !(value & DMA_BUS_MODE_SFT_RESET), 36 !(value & DMA_BUS_MODE_SFT_RESET),
37 100000, 10000); 37 10000, 100000);
38 if (err) 38 if (err)
39 return -EBUSY; 39 return -EBUSY;
40 40
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0e1b0a3d7b76..c7a894ead274 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -473,19 +473,18 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
473 struct dma_desc *np, struct sk_buff *skb) 473 struct dma_desc *np, struct sk_buff *skb)
474{ 474{
475 struct skb_shared_hwtstamps *shhwtstamp = NULL; 475 struct skb_shared_hwtstamps *shhwtstamp = NULL;
476 struct dma_desc *desc = p;
476 u64 ns; 477 u64 ns;
477 478
478 if (!priv->hwts_rx_en) 479 if (!priv->hwts_rx_en)
479 return; 480 return;
481 /* For GMAC4, the valid timestamp is from CTX next desc. */
482 if (priv->plat->has_gmac4)
483 desc = np;
480 484
481 /* Check if timestamp is available */ 485 /* Check if timestamp is available */
482 if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { 486 if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
483 /* For GMAC4, the valid timestamp is from CTX next desc. */ 487 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
484 if (priv->plat->has_gmac4)
485 ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
486 else
487 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
488
489 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); 488 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
490 shhwtstamp = skb_hwtstamps(skb); 489 shhwtstamp = skb_hwtstamps(skb);
491 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 490 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
@@ -1815,12 +1814,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1815{ 1814{
1816 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1815 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1817 unsigned int bytes_compl = 0, pkts_compl = 0; 1816 unsigned int bytes_compl = 0, pkts_compl = 0;
1818 unsigned int entry = tx_q->dirty_tx; 1817 unsigned int entry;
1819 1818
1820 netif_tx_lock(priv->dev); 1819 netif_tx_lock(priv->dev);
1821 1820
1822 priv->xstats.tx_clean++; 1821 priv->xstats.tx_clean++;
1823 1822
1823 entry = tx_q->dirty_tx;
1824 while (entry != tx_q->cur_tx) { 1824 while (entry != tx_q->cur_tx) {
1825 struct sk_buff *skb = tx_q->tx_skbuff[entry]; 1825 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1826 struct dma_desc *p; 1826 struct dma_desc *p;
@@ -3358,6 +3358,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3358 * them in stmmac_rx_refill() function so that 3358 * them in stmmac_rx_refill() function so that
3359 * device can reuse it. 3359 * device can reuse it.
3360 */ 3360 */
3361 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3361 rx_q->rx_skbuff[entry] = NULL; 3362 rx_q->rx_skbuff[entry] = NULL;
3362 dma_unmap_single(priv->device, 3363 dma_unmap_single(priv->device,
3363 rx_q->rx_skbuff_dma[entry], 3364 rx_q->rx_skbuff_dma[entry],
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 01f7355ad277..5ec39f113127 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -113,13 +113,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
113 113
114static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni) 114static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni)
115{ 115{
116#ifdef __BIG_ENDIAN
117 return (vni[0] == tun_id[2]) &&
118 (vni[1] == tun_id[1]) &&
119 (vni[2] == tun_id[0]);
120#else
121 return !memcmp(vni, &tun_id[5], 3); 116 return !memcmp(vni, &tun_id[5], 3);
122#endif
123} 117}
124 118
125static sa_family_t geneve_get_sk_family(struct geneve_sock *gs) 119static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 8948b6adc0c5..2c98152d1e1b 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -743,6 +743,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
743 sg_init_table(sg, ret); 743 sg_init_table(sg, ret);
744 ret = skb_to_sgvec(skb, sg, 0, skb->len); 744 ret = skb_to_sgvec(skb, sg, 0, skb->len);
745 if (unlikely(ret < 0)) { 745 if (unlikely(ret < 0)) {
746 aead_request_free(req);
746 macsec_txsa_put(tx_sa); 747 macsec_txsa_put(tx_sa);
747 kfree_skb(skb); 748 kfree_skb(skb);
748 return ERR_PTR(ret); 749 return ERR_PTR(ret);
@@ -955,6 +956,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
955 sg_init_table(sg, ret); 956 sg_init_table(sg, ret);
956 ret = skb_to_sgvec(skb, sg, 0, skb->len); 957 ret = skb_to_sgvec(skb, sg, 0, skb->len);
957 if (unlikely(ret < 0)) { 958 if (unlikely(ret < 0)) {
959 aead_request_free(req);
958 kfree_skb(skb); 960 kfree_skb(skb);
959 return ERR_PTR(ret); 961 return ERR_PTR(ret);
960 } 962 }
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2a2d058cdd40..ea29da91ea5a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2252,6 +2252,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2252 2252
2253 if (!dev) 2253 if (!dev)
2254 return -ENOMEM; 2254 return -ENOMEM;
2255 err = dev_get_valid_name(net, dev, name);
2256 if (err)
2257 goto err_free_dev;
2255 2258
2256 dev_net_set(dev, net); 2259 dev_net_set(dev, net);
2257 dev->rtnl_link_ops = &tun_link_ops; 2260 dev->rtnl_link_ops = &tun_link_ops;
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index c9c711dcd0e6..a89b5685e68b 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -652,7 +652,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
652 struct device *dev = i2400m_dev(i2400m); 652 struct device *dev = i2400m_dev(i2400m);
653 struct { 653 struct {
654 struct i2400m_bootrom_header cmd; 654 struct i2400m_bootrom_header cmd;
655 u8 cmd_payload[chunk_len]; 655 u8 cmd_payload[];
656 } __packed *buf; 656 } __packed *buf;
657 struct i2400m_bootrom_header ack; 657 struct i2400m_bootrom_header ack;
658 658
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 5cbe0ae55a07..d6dff347f896 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -486,7 +486,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
486 486
487 dev->tx_queue_len = XENVIF_QUEUE_LENGTH; 487 dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
488 488
489 dev->min_mtu = 0; 489 dev->min_mtu = ETH_MIN_MTU;
490 dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN; 490 dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
491 491
492 /* 492 /*
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 523387e71a80..8b8689c6d887 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1316,7 +1316,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1316 netdev->features |= netdev->hw_features; 1316 netdev->features |= netdev->hw_features;
1317 1317
1318 netdev->ethtool_ops = &xennet_ethtool_ops; 1318 netdev->ethtool_ops = &xennet_ethtool_ops;
1319 netdev->min_mtu = 0; 1319 netdev->min_mtu = ETH_MIN_MTU;
1320 netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE; 1320 netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1321 SET_NETDEV_DEV(netdev, &dev->dev); 1321 SET_NETDEV_DEV(netdev, &dev->dev);
1322 1322
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 260d33c0f26c..63897531cd75 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1781,8 +1781,12 @@ bool of_console_check(struct device_node *dn, char *name, int index)
1781{ 1781{
1782 if (!dn || dn != of_stdout || console_set_on_cmdline) 1782 if (!dn || dn != of_stdout || console_set_on_cmdline)
1783 return false; 1783 return false;
1784 return !add_preferred_console(name, index, 1784
1785 kstrdup(of_stdout_options, GFP_KERNEL)); 1785 /*
1786 * XXX: cast `options' to char pointer to suppress complication
1787 * warnings: printk, UART and console drivers expect char pointer.
1788 */
1789 return !add_preferred_console(name, index, (char *)of_stdout_options);
1786} 1790}
1787EXPORT_SYMBOL_GPL(of_console_check); 1791EXPORT_SYMBOL_GPL(of_console_check);
1788 1792
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index d94dd8b77abd..98258583abb0 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -44,7 +44,7 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id)
44 return -EINVAL; 44 return -EINVAL;
45} 45}
46 46
47static void of_mdiobus_register_phy(struct mii_bus *mdio, 47static int of_mdiobus_register_phy(struct mii_bus *mdio,
48 struct device_node *child, u32 addr) 48 struct device_node *child, u32 addr)
49{ 49{
50 struct phy_device *phy; 50 struct phy_device *phy;
@@ -60,9 +60,13 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
60 else 60 else
61 phy = get_phy_device(mdio, addr, is_c45); 61 phy = get_phy_device(mdio, addr, is_c45);
62 if (IS_ERR(phy)) 62 if (IS_ERR(phy))
63 return; 63 return PTR_ERR(phy);
64 64
65 rc = irq_of_parse_and_map(child, 0); 65 rc = of_irq_get(child, 0);
66 if (rc == -EPROBE_DEFER) {
67 phy_device_free(phy);
68 return rc;
69 }
66 if (rc > 0) { 70 if (rc > 0) {
67 phy->irq = rc; 71 phy->irq = rc;
68 mdio->irq[addr] = rc; 72 mdio->irq[addr] = rc;
@@ -84,22 +88,23 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
84 if (rc) { 88 if (rc) {
85 phy_device_free(phy); 89 phy_device_free(phy);
86 of_node_put(child); 90 of_node_put(child);
87 return; 91 return rc;
88 } 92 }
89 93
90 dev_dbg(&mdio->dev, "registered phy %s at address %i\n", 94 dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
91 child->name, addr); 95 child->name, addr);
96 return 0;
92} 97}
93 98
94static void of_mdiobus_register_device(struct mii_bus *mdio, 99static int of_mdiobus_register_device(struct mii_bus *mdio,
95 struct device_node *child, u32 addr) 100 struct device_node *child, u32 addr)
96{ 101{
97 struct mdio_device *mdiodev; 102 struct mdio_device *mdiodev;
98 int rc; 103 int rc;
99 104
100 mdiodev = mdio_device_create(mdio, addr); 105 mdiodev = mdio_device_create(mdio, addr);
101 if (IS_ERR(mdiodev)) 106 if (IS_ERR(mdiodev))
102 return; 107 return PTR_ERR(mdiodev);
103 108
104 /* Associate the OF node with the device structure so it 109 /* Associate the OF node with the device structure so it
105 * can be looked up later. 110 * can be looked up later.
@@ -112,11 +117,12 @@ static void of_mdiobus_register_device(struct mii_bus *mdio,
112 if (rc) { 117 if (rc) {
113 mdio_device_free(mdiodev); 118 mdio_device_free(mdiodev);
114 of_node_put(child); 119 of_node_put(child);
115 return; 120 return rc;
116 } 121 }
117 122
118 dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n", 123 dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
119 child->name, addr); 124 child->name, addr);
125 return 0;
120} 126}
121 127
122/* The following is a list of PHY compatible strings which appear in 128/* The following is a list of PHY compatible strings which appear in
@@ -219,9 +225,11 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
219 } 225 }
220 226
221 if (of_mdiobus_child_is_phy(child)) 227 if (of_mdiobus_child_is_phy(child))
222 of_mdiobus_register_phy(mdio, child, addr); 228 rc = of_mdiobus_register_phy(mdio, child, addr);
223 else 229 else
224 of_mdiobus_register_device(mdio, child, addr); 230 rc = of_mdiobus_register_device(mdio, child, addr);
231 if (rc)
232 goto unregister;
225 } 233 }
226 234
227 if (!scanphys) 235 if (!scanphys)
@@ -242,12 +250,19 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
242 dev_info(&mdio->dev, "scan phy %s at address %i\n", 250 dev_info(&mdio->dev, "scan phy %s at address %i\n",
243 child->name, addr); 251 child->name, addr);
244 252
245 if (of_mdiobus_child_is_phy(child)) 253 if (of_mdiobus_child_is_phy(child)) {
246 of_mdiobus_register_phy(mdio, child, addr); 254 rc = of_mdiobus_register_phy(mdio, child, addr);
255 if (rc)
256 goto unregister;
257 }
247 } 258 }
248 } 259 }
249 260
250 return 0; 261 return 0;
262
263unregister:
264 mdiobus_unregister(mdio);
265 return rc;
251} 266}
252EXPORT_SYMBOL(of_mdiobus_register); 267EXPORT_SYMBOL(of_mdiobus_register);
253 268
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index d507c3569a88..32771c2ced7b 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -25,7 +25,7 @@
25#include <linux/sort.h> 25#include <linux/sort.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27 27
28#define MAX_RESERVED_REGIONS 16 28#define MAX_RESERVED_REGIONS 32
29static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; 29static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
30static int reserved_mem_count; 30static int reserved_mem_count;
31 31
diff --git a/drivers/of/property.c b/drivers/of/property.c
index fbb72116e9d4..264c355ba1ff 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -954,7 +954,7 @@ of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode)
954 struct device_node *np; 954 struct device_node *np;
955 955
956 /* Get the parent of the port */ 956 /* Get the parent of the port */
957 np = of_get_next_parent(to_of_node(fwnode)); 957 np = of_get_parent(to_of_node(fwnode));
958 if (!np) 958 if (!np)
959 return NULL; 959 return NULL;
960 960
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 89f4e3d072d7..26ed0c08f209 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -935,6 +935,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
935 bridge->sysdata = pcie; 935 bridge->sysdata = pcie;
936 bridge->busnr = 0; 936 bridge->busnr = 0;
937 bridge->ops = &advk_pcie_ops; 937 bridge->ops = &advk_pcie_ops;
938 bridge->map_irq = of_irq_parse_and_map_pci;
939 bridge->swizzle_irq = pci_common_swizzle;
938 940
939 ret = pci_scan_root_bus_bridge(bridge); 941 ret = pci_scan_root_bus_bridge(bridge);
940 if (ret < 0) { 942 if (ret < 0) {
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 9c40da54f88a..1987fec1f126 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -233,6 +233,7 @@ struct tegra_msi {
233 struct msi_controller chip; 233 struct msi_controller chip;
234 DECLARE_BITMAP(used, INT_PCI_MSI_NR); 234 DECLARE_BITMAP(used, INT_PCI_MSI_NR);
235 struct irq_domain *domain; 235 struct irq_domain *domain;
236 unsigned long pages;
236 struct mutex lock; 237 struct mutex lock;
237 u64 phys; 238 u64 phys;
238 int irq; 239 int irq;
@@ -1529,22 +1530,9 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1529 goto err; 1530 goto err;
1530 } 1531 }
1531 1532
1532 /* 1533 /* setup AFI/FPCI range */
1533 * The PCI host bridge on Tegra contains some logic that intercepts 1534 msi->pages = __get_free_pages(GFP_KERNEL, 0);
1534 * MSI writes, which means that the MSI target address doesn't have 1535 msi->phys = virt_to_phys((void *)msi->pages);
1535 * to point to actual physical memory. Rather than allocating one 4
1536 * KiB page of system memory that's never used, we can simply pick
1537 * an arbitrary address within an area reserved for system memory
1538 * in the FPCI address map.
1539 *
1540 * However, in order to avoid confusion, we pick an address that
1541 * doesn't map to physical memory. The FPCI address map reserves a
1542 * 1012 GiB region for system memory and memory-mapped I/O. Since
1543 * none of the Tegra SoCs that contain this PCI host bridge can
1544 * address more than 16 GiB of system memory, the last 4 KiB of
1545 * these 1012 GiB is a good candidate.
1546 */
1547 msi->phys = 0xfcfffff000;
1548 1536
1549 afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); 1537 afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1550 afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); 1538 afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
@@ -1596,6 +1584,8 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1596 afi_writel(pcie, 0, AFI_MSI_EN_VEC6); 1584 afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1597 afi_writel(pcie, 0, AFI_MSI_EN_VEC7); 1585 afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1598 1586
1587 free_pages(msi->pages, 0);
1588
1599 if (msi->irq > 0) 1589 if (msi->irq > 0)
1600 free_irq(msi->irq, pcie); 1590 free_irq(msi->irq, pcie);
1601 1591
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 1778cf4f81c7..82cd8b08d71f 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -100,6 +100,7 @@ config PINCTRL_AMD
100 tristate "AMD GPIO pin control" 100 tristate "AMD GPIO pin control"
101 depends on GPIOLIB 101 depends on GPIOLIB
102 select GPIOLIB_IRQCHIP 102 select GPIOLIB_IRQCHIP
103 select PINMUX
103 select PINCONF 104 select PINCONF
104 select GENERIC_PINCONF 105 select GENERIC_PINCONF
105 help 106 help
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 0944310225db..ff782445dfb7 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -373,16 +373,12 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
373 unsigned long events; 373 unsigned long events;
374 unsigned offset; 374 unsigned offset;
375 unsigned gpio; 375 unsigned gpio;
376 unsigned int type;
377 376
378 events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4); 377 events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4);
379 events &= mask; 378 events &= mask;
380 events &= pc->enabled_irq_map[bank]; 379 events &= pc->enabled_irq_map[bank];
381 for_each_set_bit(offset, &events, 32) { 380 for_each_set_bit(offset, &events, 32) {
382 gpio = (32 * bank) + offset; 381 gpio = (32 * bank) + offset;
383 /* FIXME: no clue why the code looks up the type here */
384 type = pc->irq_type[gpio];
385
386 generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain, 382 generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain,
387 gpio)); 383 gpio));
388 } 384 }
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 04e929fd0ffe..fadbca907c7c 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1577,6 +1577,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1577 struct gpio_chip *chip = &pctrl->chip; 1577 struct gpio_chip *chip = &pctrl->chip;
1578 bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); 1578 bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
1579 int ret, i, offset; 1579 int ret, i, offset;
1580 int irq_base;
1580 1581
1581 *chip = chv_gpio_chip; 1582 *chip = chv_gpio_chip;
1582 1583
@@ -1622,7 +1623,18 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1622 /* Clear all interrupts */ 1623 /* Clear all interrupts */
1623 chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); 1624 chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
1624 1625
1625 ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, 1626 if (!need_valid_mask) {
1627 irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
1628 chip->ngpio, NUMA_NO_NODE);
1629 if (irq_base < 0) {
1630 dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
1631 return irq_base;
1632 }
1633 } else {
1634 irq_base = 0;
1635 }
1636
1637 ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base,
1626 handle_bad_irq, IRQ_TYPE_NONE); 1638 handle_bad_irq, IRQ_TYPE_NONE);
1627 if (ret) { 1639 if (ret) {
1628 dev_err(pctrl->dev, "failed to add IRQ chip\n"); 1640 dev_err(pctrl->dev, "failed to add IRQ chip\n");
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index d0e5d6ee882c..e2c1988cd7c0 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -523,7 +523,7 @@ int __init parse_cec_param(char *str)
523 if (*str == '=') 523 if (*str == '=')
524 str++; 524 str++;
525 525
526 if (!strncmp(str, "cec_disable", 7)) 526 if (!strcmp(str, "cec_disable"))
527 ce_arr.disabled = 1; 527 ce_arr.disabled = 1;
528 else 528 else
529 return 0; 529 return 0;
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index df63e44526ac..bf04479456a0 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -109,6 +109,7 @@ config QCOM_Q6V5_PIL
109 depends on OF && ARCH_QCOM 109 depends on OF && ARCH_QCOM
110 depends on QCOM_SMEM 110 depends on QCOM_SMEM
111 depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) 111 depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
112 depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
112 select MFD_SYSCON 113 select MFD_SYSCON
113 select QCOM_RPROC_COMMON 114 select QCOM_RPROC_COMMON
114 select QCOM_SCM 115 select QCOM_SCM
@@ -120,6 +121,7 @@ config QCOM_WCNSS_PIL
120 tristate "Qualcomm WCNSS Peripheral Image Loader" 121 tristate "Qualcomm WCNSS Peripheral Image Loader"
121 depends on OF && ARCH_QCOM 122 depends on OF && ARCH_QCOM
122 depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) 123 depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
124 depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
123 depends on QCOM_SMEM 125 depends on QCOM_SMEM
124 select QCOM_MDT_LOADER 126 select QCOM_MDT_LOADER
125 select QCOM_RPROC_COMMON 127 select QCOM_RPROC_COMMON
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 612d91403341..633268e9d550 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -264,15 +264,14 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
264 if (!(att->flags & ATT_OWN)) 264 if (!(att->flags & ATT_OWN))
265 continue; 265 continue;
266 266
267 if (b > IMX7D_RPROC_MEM_MAX) 267 if (b >= IMX7D_RPROC_MEM_MAX)
268 break; 268 break;
269 269
270 priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev, 270 priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev,
271 att->sa, att->size); 271 att->sa, att->size);
272 if (IS_ERR(priv->mem[b].cpu_addr)) { 272 if (!priv->mem[b].cpu_addr) {
273 dev_err(dev, "devm_ioremap_resource failed\n"); 273 dev_err(dev, "devm_ioremap_resource failed\n");
274 err = PTR_ERR(priv->mem[b].cpu_addr); 274 return -ENOMEM;
275 return err;
276 } 275 }
277 priv->mem[b].sys_addr = att->sa; 276 priv->mem[b].sys_addr = att->sa;
278 priv->mem[b].size = att->size; 277 priv->mem[b].size = att->size;
@@ -296,7 +295,7 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
296 return err; 295 return err;
297 } 296 }
298 297
299 if (b > IMX7D_RPROC_MEM_MAX) 298 if (b >= IMX7D_RPROC_MEM_MAX)
300 break; 299 break;
301 300
302 priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res); 301 priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res);
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index c60904ff40b8..3907bbc9c6cf 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -40,8 +40,9 @@ static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
40 struct socfpga_reset_data *data = container_of(rcdev, 40 struct socfpga_reset_data *data = container_of(rcdev,
41 struct socfpga_reset_data, 41 struct socfpga_reset_data,
42 rcdev); 42 rcdev);
43 int bank = id / BITS_PER_LONG; 43 int reg_width = sizeof(u32);
44 int offset = id % BITS_PER_LONG; 44 int bank = id / (reg_width * BITS_PER_BYTE);
45 int offset = id % (reg_width * BITS_PER_BYTE);
45 unsigned long flags; 46 unsigned long flags;
46 u32 reg; 47 u32 reg;
47 48
@@ -61,8 +62,9 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
61 struct socfpga_reset_data, 62 struct socfpga_reset_data,
62 rcdev); 63 rcdev);
63 64
64 int bank = id / BITS_PER_LONG; 65 int reg_width = sizeof(u32);
65 int offset = id % BITS_PER_LONG; 66 int bank = id / (reg_width * BITS_PER_BYTE);
67 int offset = id % (reg_width * BITS_PER_BYTE);
66 unsigned long flags; 68 unsigned long flags;
67 u32 reg; 69 u32 reg;
68 70
@@ -81,8 +83,9 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev,
81{ 83{
82 struct socfpga_reset_data *data = container_of(rcdev, 84 struct socfpga_reset_data *data = container_of(rcdev,
83 struct socfpga_reset_data, rcdev); 85 struct socfpga_reset_data, rcdev);
84 int bank = id / BITS_PER_LONG; 86 int reg_width = sizeof(u32);
85 int offset = id % BITS_PER_LONG; 87 int bank = id / (reg_width * BITS_PER_BYTE);
88 int offset = id % (reg_width * BITS_PER_BYTE);
86 u32 reg; 89 u32 reg;
87 90
88 reg = readl(data->membase + (bank * BANK_INCREMENT)); 91 reg = readl(data->membase + (bank * BANK_INCREMENT));
@@ -132,7 +135,7 @@ static int socfpga_reset_probe(struct platform_device *pdev)
132 spin_lock_init(&data->lock); 135 spin_lock_init(&data->lock);
133 136
134 data->rcdev.owner = THIS_MODULE; 137 data->rcdev.owner = THIS_MODULE;
135 data->rcdev.nr_resets = NR_BANKS * BITS_PER_LONG; 138 data->rcdev.nr_resets = NR_BANKS * (sizeof(u32) * BITS_PER_BYTE);
136 data->rcdev.ops = &socfpga_reset_ops; 139 data->rcdev.ops = &socfpga_reset_ops;
137 data->rcdev.of_node = pdev->dev.of_node; 140 data->rcdev.of_node = pdev->dev.of_node;
138 141
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 5a5e927ea50f..5dcc9bf1c5bc 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -635,19 +635,18 @@ qcom_glink_alloc_intent(struct qcom_glink *glink,
635 unsigned long flags; 635 unsigned long flags;
636 636
637 intent = kzalloc(sizeof(*intent), GFP_KERNEL); 637 intent = kzalloc(sizeof(*intent), GFP_KERNEL);
638
639 if (!intent) 638 if (!intent)
640 return NULL; 639 return NULL;
641 640
642 intent->data = kzalloc(size, GFP_KERNEL); 641 intent->data = kzalloc(size, GFP_KERNEL);
643 if (!intent->data) 642 if (!intent->data)
644 return NULL; 643 goto free_intent;
645 644
646 spin_lock_irqsave(&channel->intent_lock, flags); 645 spin_lock_irqsave(&channel->intent_lock, flags);
647 ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC); 646 ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC);
648 if (ret < 0) { 647 if (ret < 0) {
649 spin_unlock_irqrestore(&channel->intent_lock, flags); 648 spin_unlock_irqrestore(&channel->intent_lock, flags);
650 return NULL; 649 goto free_data;
651 } 650 }
652 spin_unlock_irqrestore(&channel->intent_lock, flags); 651 spin_unlock_irqrestore(&channel->intent_lock, flags);
653 652
@@ -656,6 +655,12 @@ qcom_glink_alloc_intent(struct qcom_glink *glink,
656 intent->reuse = reuseable; 655 intent->reuse = reuseable;
657 656
658 return intent; 657 return intent;
658
659free_data:
660 kfree(intent->data);
661free_intent:
662 kfree(intent);
663 return NULL;
659} 664}
660 665
661static void qcom_glink_handle_rx_done(struct qcom_glink *glink, 666static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
@@ -1197,7 +1202,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
1197 1202
1198 ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); 1203 ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
1199 if (ret) 1204 if (ret)
1200 return ret; 1205 goto unlock;
1201 1206
1202 ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ); 1207 ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ);
1203 if (!ret) { 1208 if (!ret) {
@@ -1207,6 +1212,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
1207 ret = channel->intent_req_result ? 0 : -ECANCELED; 1212 ret = channel->intent_req_result ? 0 : -ECANCELED;
1208 } 1213 }
1209 1214
1215unlock:
1210 mutex_unlock(&channel->intent_req_lock); 1216 mutex_unlock(&channel->intent_req_lock);
1211 return ret; 1217 return ret;
1212} 1218}
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 520325867e2b..31d31aad3de1 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -383,11 +383,11 @@ static void fc_rport_work(struct work_struct *work)
383 fc_rport_enter_flogi(rdata); 383 fc_rport_enter_flogi(rdata);
384 mutex_unlock(&rdata->rp_mutex); 384 mutex_unlock(&rdata->rp_mutex);
385 } else { 385 } else {
386 mutex_unlock(&rdata->rp_mutex);
386 FC_RPORT_DBG(rdata, "work delete\n"); 387 FC_RPORT_DBG(rdata, "work delete\n");
387 mutex_lock(&lport->disc.disc_mutex); 388 mutex_lock(&lport->disc.disc_mutex);
388 list_del_rcu(&rdata->peers); 389 list_del_rcu(&rdata->peers);
389 mutex_unlock(&lport->disc.disc_mutex); 390 mutex_unlock(&lport->disc.disc_mutex);
390 mutex_unlock(&rdata->rp_mutex);
391 kref_put(&rdata->kref, fc_rport_destroy); 391 kref_put(&rdata->kref, fc_rport_destroy);
392 } 392 }
393 } else { 393 } else {
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c62e8d111fd9..f8dc1601efd5 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1728,7 +1728,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
1728 1728
1729 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { 1729 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1730 reason = FAILURE_SESSION_IN_RECOVERY; 1730 reason = FAILURE_SESSION_IN_RECOVERY;
1731 sc->result = DID_REQUEUE; 1731 sc->result = DID_REQUEUE << 16;
1732 goto fault; 1732 goto fault;
1733 } 1733 }
1734 1734
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5b2437a5ea44..937209805baf 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3175,6 +3175,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3175 host->can_queue, base_vha->req, 3175 host->can_queue, base_vha->req,
3176 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 3176 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
3177 3177
3178 INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
3179
3178 if (ha->mqenable) { 3180 if (ha->mqenable) {
3179 bool mq = false; 3181 bool mq = false;
3180 bool startit = false; 3182 bool startit = false;
@@ -3223,7 +3225,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3223 */ 3225 */
3224 qla2xxx_wake_dpc(base_vha); 3226 qla2xxx_wake_dpc(base_vha);
3225 3227
3226 INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
3227 INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); 3228 INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
3228 3229
3229 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { 3230 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index bf53356f41f0..f796bd61f3f0 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1376,13 +1376,19 @@ static void __scsi_remove_target(struct scsi_target *starget)
1376 spin_lock_irqsave(shost->host_lock, flags); 1376 spin_lock_irqsave(shost->host_lock, flags);
1377 restart: 1377 restart:
1378 list_for_each_entry(sdev, &shost->__devices, siblings) { 1378 list_for_each_entry(sdev, &shost->__devices, siblings) {
1379 /*
1380 * We cannot call scsi_device_get() here, as
1381 * we might've been called from rmmod() causing
1382 * scsi_device_get() to fail the module_is_live()
1383 * check.
1384 */
1379 if (sdev->channel != starget->channel || 1385 if (sdev->channel != starget->channel ||
1380 sdev->id != starget->id || 1386 sdev->id != starget->id ||
1381 scsi_device_get(sdev)) 1387 !get_device(&sdev->sdev_gendev))
1382 continue; 1388 continue;
1383 spin_unlock_irqrestore(shost->host_lock, flags); 1389 spin_unlock_irqrestore(shost->host_lock, flags);
1384 scsi_remove_device(sdev); 1390 scsi_remove_device(sdev);
1385 scsi_device_put(sdev); 1391 put_device(&sdev->sdev_gendev);
1386 spin_lock_irqsave(shost->host_lock, flags); 1392 spin_lock_irqsave(shost->host_lock, flags);
1387 goto restart; 1393 goto restart;
1388 } 1394 }
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index cbd4495d0ff9..8c46a6d536af 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3320,6 +3320,9 @@ int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
3320{ 3320{
3321 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 3321 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
3322 3322
3323 if (WARN_ON_ONCE(!rport))
3324 return FAST_IO_FAIL;
3325
3323 return fc_block_rport(rport); 3326 return fc_block_rport(rport);
3324} 3327}
3325EXPORT_SYMBOL(fc_block_scsi_eh); 3328EXPORT_SYMBOL(fc_block_scsi_eh);
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index d96f4512224f..b55e5ebba8b4 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -400,10 +400,10 @@ static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
400 struct media_link, list); 400 struct media_link, list);
401 ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source); 401 ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source);
402 if (ret) 402 if (ret)
403 break; 403 return ret;
404 } 404 }
405 405
406 return ret; 406 return 0;
407} 407}
408 408
409/* async subdev complete notifier */ 409/* async subdev complete notifier */
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 2fe216b276e2..84a8ac2a779f 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -694,10 +694,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
694 tty_set_termios_ldisc(tty, disc); 694 tty_set_termios_ldisc(tty, disc);
695 retval = tty_ldisc_open(tty, tty->ldisc); 695 retval = tty_ldisc_open(tty, tty->ldisc);
696 if (retval) { 696 if (retval) {
697 if (!WARN_ON(disc == N_TTY)) { 697 tty_ldisc_put(tty->ldisc);
698 tty_ldisc_put(tty->ldisc); 698 tty->ldisc = NULL;
699 tty->ldisc = NULL;
700 }
701 } 699 }
702 return retval; 700 return retval;
703} 701}
@@ -752,8 +750,9 @@ void tty_ldisc_hangup(struct tty_struct *tty, bool reinit)
752 750
753 if (tty->ldisc) { 751 if (tty->ldisc) {
754 if (reinit) { 752 if (reinit) {
755 if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0) 753 if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0 &&
756 tty_ldisc_reinit(tty, N_TTY); 754 tty_ldisc_reinit(tty, N_TTY) < 0)
755 WARN_ON(tty_ldisc_reinit(tty, N_NULL) < 0);
757 } else 756 } else
758 tty_ldisc_kill(tty); 757 tty_ldisc_kill(tty);
759 } 758 }
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index dd74c99d6ce1..5d061b3d8224 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2026,6 +2026,8 @@ static DEVICE_ATTR_RO(suspended);
2026static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver) 2026static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
2027{ 2027{
2028 struct usb_composite_dev *cdev = get_gadget_data(gadget); 2028 struct usb_composite_dev *cdev = get_gadget_data(gadget);
2029 struct usb_gadget_strings *gstr = cdev->driver->strings[0];
2030 struct usb_string *dev_str = gstr->strings;
2029 2031
2030 /* composite_disconnect() must already have been called 2032 /* composite_disconnect() must already have been called
2031 * by the underlying peripheral controller driver! 2033 * by the underlying peripheral controller driver!
@@ -2045,6 +2047,9 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
2045 2047
2046 composite_dev_cleanup(cdev); 2048 composite_dev_cleanup(cdev);
2047 2049
2050 if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer)
2051 dev_str[USB_GADGET_MANUFACTURER_IDX].s = "";
2052
2048 kfree(cdev->def_manufacturer); 2053 kfree(cdev->def_manufacturer);
2049 kfree(cdev); 2054 kfree(cdev);
2050 set_gadget_data(gadget, NULL); 2055 set_gadget_data(gadget, NULL);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index a22a892de7b7..aeb9f3c40521 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1143,11 +1143,12 @@ static struct configfs_attribute *interf_grp_attrs[] = {
1143 NULL 1143 NULL
1144}; 1144};
1145 1145
1146int usb_os_desc_prepare_interf_dir(struct config_group *parent, 1146struct config_group *usb_os_desc_prepare_interf_dir(
1147 int n_interf, 1147 struct config_group *parent,
1148 struct usb_os_desc **desc, 1148 int n_interf,
1149 char **names, 1149 struct usb_os_desc **desc,
1150 struct module *owner) 1150 char **names,
1151 struct module *owner)
1151{ 1152{
1152 struct config_group *os_desc_group; 1153 struct config_group *os_desc_group;
1153 struct config_item_type *os_desc_type, *interface_type; 1154 struct config_item_type *os_desc_type, *interface_type;
@@ -1159,7 +1160,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
1159 1160
1160 char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL); 1161 char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL);
1161 if (!vlabuf) 1162 if (!vlabuf)
1162 return -ENOMEM; 1163 return ERR_PTR(-ENOMEM);
1163 1164
1164 os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group); 1165 os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group);
1165 os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type); 1166 os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type);
@@ -1184,7 +1185,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
1184 configfs_add_default_group(&d->group, os_desc_group); 1185 configfs_add_default_group(&d->group, os_desc_group);
1185 } 1186 }
1186 1187
1187 return 0; 1188 return os_desc_group;
1188} 1189}
1189EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir); 1190EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir);
1190 1191
diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h
index 36c468c4f5e9..540d5e92ed22 100644
--- a/drivers/usb/gadget/configfs.h
+++ b/drivers/usb/gadget/configfs.h
@@ -5,11 +5,12 @@
5 5
6void unregister_gadget_item(struct config_item *item); 6void unregister_gadget_item(struct config_item *item);
7 7
8int usb_os_desc_prepare_interf_dir(struct config_group *parent, 8struct config_group *usb_os_desc_prepare_interf_dir(
9 int n_interf, 9 struct config_group *parent,
10 struct usb_os_desc **desc, 10 int n_interf,
11 char **names, 11 struct usb_os_desc **desc,
12 struct module *owner); 12 char **names,
13 struct module *owner);
13 14
14static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item) 15static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item)
15{ 16{
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index e1d5853ef1e4..c7c5b3ce1d98 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -908,6 +908,7 @@ static void rndis_free_inst(struct usb_function_instance *f)
908 free_netdev(opts->net); 908 free_netdev(opts->net);
909 } 909 }
910 910
911 kfree(opts->rndis_interf_group); /* single VLA chunk */
911 kfree(opts); 912 kfree(opts);
912} 913}
913 914
@@ -916,6 +917,7 @@ static struct usb_function_instance *rndis_alloc_inst(void)
916 struct f_rndis_opts *opts; 917 struct f_rndis_opts *opts;
917 struct usb_os_desc *descs[1]; 918 struct usb_os_desc *descs[1];
918 char *names[1]; 919 char *names[1];
920 struct config_group *rndis_interf_group;
919 921
920 opts = kzalloc(sizeof(*opts), GFP_KERNEL); 922 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
921 if (!opts) 923 if (!opts)
@@ -940,8 +942,14 @@ static struct usb_function_instance *rndis_alloc_inst(void)
940 names[0] = "rndis"; 942 names[0] = "rndis";
941 config_group_init_type_name(&opts->func_inst.group, "", 943 config_group_init_type_name(&opts->func_inst.group, "",
942 &rndis_func_type); 944 &rndis_func_type);
943 usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs, 945 rndis_interf_group =
944 names, THIS_MODULE); 946 usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
947 names, THIS_MODULE);
948 if (IS_ERR(rndis_interf_group)) {
949 rndis_free_inst(&opts->func_inst);
950 return ERR_CAST(rndis_interf_group);
951 }
952 opts->rndis_interf_group = rndis_interf_group;
945 953
946 return &opts->func_inst; 954 return &opts->func_inst;
947} 955}
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h
index a35ee3c2545d..efdb7ac381d9 100644
--- a/drivers/usb/gadget/function/u_rndis.h
+++ b/drivers/usb/gadget/function/u_rndis.h
@@ -26,6 +26,7 @@ struct f_rndis_opts {
26 bool bound; 26 bool bound;
27 bool borrowed_net; 27 bool borrowed_net;
28 28
29 struct config_group *rndis_interf_group;
29 struct usb_os_desc rndis_os_desc; 30 struct usb_os_desc rndis_os_desc;
30 char rndis_ext_compat_id[16]; 31 char rndis_ext_compat_id[16];
31 32
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index b17618a55f1b..f04e91ef9e7c 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -419,6 +419,7 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
419static void set_link_state(struct dummy_hcd *dum_hcd) 419static void set_link_state(struct dummy_hcd *dum_hcd)
420{ 420{
421 struct dummy *dum = dum_hcd->dum; 421 struct dummy *dum = dum_hcd->dum;
422 unsigned int power_bit;
422 423
423 dum_hcd->active = 0; 424 dum_hcd->active = 0;
424 if (dum->pullup) 425 if (dum->pullup)
@@ -429,17 +430,19 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
429 return; 430 return;
430 431
431 set_link_state_by_speed(dum_hcd); 432 set_link_state_by_speed(dum_hcd);
433 power_bit = (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 ?
434 USB_SS_PORT_STAT_POWER : USB_PORT_STAT_POWER);
432 435
433 if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 || 436 if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
434 dum_hcd->active) 437 dum_hcd->active)
435 dum_hcd->resuming = 0; 438 dum_hcd->resuming = 0;
436 439
437 /* Currently !connected or in reset */ 440 /* Currently !connected or in reset */
438 if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 || 441 if ((dum_hcd->port_status & power_bit) == 0 ||
439 (dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) { 442 (dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
440 unsigned disconnect = USB_PORT_STAT_CONNECTION & 443 unsigned int disconnect = power_bit &
441 dum_hcd->old_status & (~dum_hcd->port_status); 444 dum_hcd->old_status & (~dum_hcd->port_status);
442 unsigned reset = USB_PORT_STAT_RESET & 445 unsigned int reset = USB_PORT_STAT_RESET &
443 (~dum_hcd->old_status) & dum_hcd->port_status; 446 (~dum_hcd->old_status) & dum_hcd->port_status;
444 447
445 /* Report reset and disconnect events to the driver */ 448 /* Report reset and disconnect events to the driver */
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index eee82ca55b7b..b3fc602b2e24 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -202,12 +202,13 @@ found:
202 return tmp; 202 return tmp;
203 } 203 }
204 204
205 if (in) { 205 if (in)
206 dev->in_pipe = usb_rcvbulkpipe(udev, 206 dev->in_pipe = usb_rcvbulkpipe(udev,
207 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 207 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
208 if (out)
208 dev->out_pipe = usb_sndbulkpipe(udev, 209 dev->out_pipe = usb_sndbulkpipe(udev,
209 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 210 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
210 } 211
211 if (iso_in) { 212 if (iso_in) {
212 dev->iso_in = &iso_in->desc; 213 dev->iso_in = &iso_in->desc;
213 dev->in_iso_pipe = usb_rcvisocpipe(udev, 214 dev->in_iso_pipe = usb_rcvisocpipe(udev,
@@ -1964,6 +1965,9 @@ test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param,
1964 int status = 0; 1965 int status = 0;
1965 struct urb *urbs[param->sglen]; 1966 struct urb *urbs[param->sglen];
1966 1967
1968 if (!param->sglen || param->iterations > UINT_MAX / param->sglen)
1969 return -EINVAL;
1970
1967 memset(&context, 0, sizeof(context)); 1971 memset(&context, 0, sizeof(context));
1968 context.count = param->iterations * param->sglen; 1972 context.count = param->iterations * param->sglen;
1969 context.dev = dev; 1973 context.dev = dev;
@@ -2087,6 +2091,8 @@ usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param)
2087 2091
2088 if (param->iterations <= 0) 2092 if (param->iterations <= 0)
2089 return -EINVAL; 2093 return -EINVAL;
2094 if (param->sglen > MAX_SGLEN)
2095 return -EINVAL;
2090 /* 2096 /*
2091 * Just a bunch of test cases that every HCD is expected to handle. 2097 * Just a bunch of test cases that every HCD is expected to handle.
2092 * 2098 *
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 5fe4a5704bde..ccc2bf5274b4 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -329,6 +329,14 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
329 unsigned long val; 329 unsigned long val;
330 void __iomem *base = phy->regs; 330 void __iomem *base = phy->regs;
331 331
332 /*
333 * The USB driver may have already initiated the phy clock
334 * disable so wait to see if the clock turns off and if not
335 * then proceed with gating the clock.
336 */
337 if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) == 0)
338 return;
339
332 if (phy->is_legacy_phy) { 340 if (phy->is_legacy_phy) {
333 val = readl(base + USB_SUSP_CTRL); 341 val = readl(base + USB_SUSP_CTRL);
334 val |= USB_SUSP_SET; 342 val |= USB_SUSP_SET;
@@ -351,6 +359,15 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
351 unsigned long val; 359 unsigned long val;
352 void __iomem *base = phy->regs; 360 void __iomem *base = phy->regs;
353 361
362 /*
363 * The USB driver may have already initiated the phy clock
364 * enable so wait to see if the clock turns on and if not
365 * then proceed with ungating the clock.
366 */
367 if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
368 USB_PHY_CLK_VALID) == 0)
369 return;
370
354 if (phy->is_legacy_phy) { 371 if (phy->is_legacy_phy) {
355 val = readl(base + USB_SUSP_CTRL); 372 val = readl(base + USB_SUSP_CTRL);
356 val |= USB_SUSP_CLR; 373 val |= USB_SUSP_CLR;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 68f26904c316..50285b01da92 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -857,9 +857,9 @@ static void xfer_work(struct work_struct *work)
857 fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero); 857 fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
858 858
859 usbhs_pipe_running(pipe, 1); 859 usbhs_pipe_running(pipe, 1);
860 usbhsf_dma_start(pipe, fifo);
861 usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans); 860 usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
862 dma_async_issue_pending(chan); 861 dma_async_issue_pending(chan);
862 usbhsf_dma_start(pipe, fifo);
863 usbhs_pipe_enable(pipe); 863 usbhs_pipe_enable(pipe);
864 864
865xfer_work_end: 865xfer_work_end:
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index fdf89800ebc3..43a862a90a77 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -186,6 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
186 tty_kref_put(tty); 186 tty_kref_put(tty);
187 reset_open_count: 187 reset_open_count:
188 port->port.count = 0; 188 port->port.count = 0;
189 info->port = NULL;
189 usb_autopm_put_interface(serial->interface); 190 usb_autopm_put_interface(serial->interface);
190 error_get_interface: 191 error_get_interface:
191 usb_serial_put(serial); 192 usb_serial_put(serial);
@@ -265,7 +266,7 @@ static struct console usbcons = {
265 266
266void usb_serial_console_disconnect(struct usb_serial *serial) 267void usb_serial_console_disconnect(struct usb_serial *serial)
267{ 268{
268 if (serial->port[0] == usbcons_info.port) { 269 if (serial->port[0] && serial->port[0] == usbcons_info.port) {
269 usb_serial_console_exit(); 270 usb_serial_console_exit();
270 usb_serial_put(serial); 271 usb_serial_put(serial);
271 } 272 }
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 2d945c9f975c..412f812522ee 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = {
177 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ 177 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
178 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 178 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
179 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ 179 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
180 { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
180 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ 181 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
181 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ 182 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
182 { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ 183 { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
@@ -352,6 +353,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
352#define CP210X_PARTNUM_CP2104 0x04 353#define CP210X_PARTNUM_CP2104 0x04
353#define CP210X_PARTNUM_CP2105 0x05 354#define CP210X_PARTNUM_CP2105 0x05
354#define CP210X_PARTNUM_CP2108 0x08 355#define CP210X_PARTNUM_CP2108 0x08
356#define CP210X_PARTNUM_UNKNOWN 0xFF
355 357
356/* CP210X_GET_COMM_STATUS returns these 0x13 bytes */ 358/* CP210X_GET_COMM_STATUS returns these 0x13 bytes */
357struct cp210x_comm_status { 359struct cp210x_comm_status {
@@ -1491,8 +1493,11 @@ static int cp210x_attach(struct usb_serial *serial)
1491 result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, 1493 result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
1492 CP210X_GET_PARTNUM, &priv->partnum, 1494 CP210X_GET_PARTNUM, &priv->partnum,
1493 sizeof(priv->partnum)); 1495 sizeof(priv->partnum));
1494 if (result < 0) 1496 if (result < 0) {
1495 goto err_free_priv; 1497 dev_warn(&serial->interface->dev,
1498 "querying part number failed\n");
1499 priv->partnum = CP210X_PARTNUM_UNKNOWN;
1500 }
1496 1501
1497 usb_set_serial_data(serial, priv); 1502 usb_set_serial_data(serial, priv);
1498 1503
@@ -1505,10 +1510,6 @@ static int cp210x_attach(struct usb_serial *serial)
1505 } 1510 }
1506 1511
1507 return 0; 1512 return 0;
1508err_free_priv:
1509 kfree(priv);
1510
1511 return result;
1512} 1513}
1513 1514
1514static void cp210x_disconnect(struct usb_serial *serial) 1515static void cp210x_disconnect(struct usb_serial *serial)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1cec03799cdf..49d1b2d4606d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1015,6 +1015,8 @@ static const struct usb_device_id id_table_combined[] = {
1015 { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, 1015 { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
1016 { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID), 1016 { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
1017 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 1017 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1018 { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
1019 { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
1018 { } /* Terminating entry */ 1020 { } /* Terminating entry */
1019}; 1021};
1020 1022
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4fcf1cecb6d7..f9d15bd62785 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -610,6 +610,13 @@
610#define ADI_GNICEPLUS_PID 0xF001 610#define ADI_GNICEPLUS_PID 0xF001
611 611
612/* 612/*
613 * Cypress WICED USB UART
614 */
615#define CYPRESS_VID 0x04B4
616#define CYPRESS_WICED_BT_USB_PID 0x009B
617#define CYPRESS_WICED_WL_USB_PID 0xF900
618
619/*
613 * Microchip Technology, Inc. 620 * Microchip Technology, Inc.
614 * 621 *
615 * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are 622 * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 54bfef13966a..ba672cf4e888 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -522,6 +522,7 @@ static void option_instat_callback(struct urb *urb);
522 522
523/* TP-LINK Incorporated products */ 523/* TP-LINK Incorporated products */
524#define TPLINK_VENDOR_ID 0x2357 524#define TPLINK_VENDOR_ID 0x2357
525#define TPLINK_PRODUCT_LTE 0x000D
525#define TPLINK_PRODUCT_MA180 0x0201 526#define TPLINK_PRODUCT_MA180 0x0201
526 527
527/* Changhong products */ 528/* Changhong products */
@@ -2011,6 +2012,7 @@ static const struct usb_device_id option_ids[] = {
2011 { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, 2012 { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
2012 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) }, 2013 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
2013 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, 2014 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
2015 { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */
2014 { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), 2016 { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
2015 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 2017 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2016 { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */ 2018 { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index ebc0beea69d6..eb9928963a53 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -174,6 +174,10 @@ static const struct usb_device_id id_table[] = {
174 {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ 174 {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
175 {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ 175 {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
176 {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ 176 {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
177 {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
178 {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
179 {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
180 {DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */
177 181
178 /* Huawei devices */ 182 /* Huawei devices */
179 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ 183 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index adaf6f6dd858..e1cbdfdb7c68 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -310,9 +310,13 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
310 310
311 p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping); 311 p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
312 312
313 if (unlikely(copied < len && !PageUptodate(page))) { 313 if (!PageUptodate(page)) {
314 copied = 0; 314 if (unlikely(copied < len)) {
315 goto out; 315 copied = 0;
316 goto out;
317 } else if (len == PAGE_SIZE) {
318 SetPageUptodate(page);
319 }
316 } 320 }
317 /* 321 /*
318 * No need to use i_size_read() here, the i_size 322 * No need to use i_size_read() here, the i_size
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 2a46762def31..a7c5a9861bef 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -596,7 +596,7 @@ static void bm_evict_inode(struct inode *inode)
596{ 596{
597 Node *e = inode->i_private; 597 Node *e = inode->i_private;
598 598
599 if (e->flags & MISC_FMT_OPEN_FILE) 599 if (e && e->flags & MISC_FMT_OPEN_FILE)
600 filp_close(e->interp_file, NULL); 600 filp_close(e->interp_file, NULL);
601 601
602 clear_inode(inode); 602 clear_inode(inode);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 93d088ffc05c..789f55e851ae 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -716,10 +716,12 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
716 716
717 set_page_writeback(page); 717 set_page_writeback(page);
718 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true); 718 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
719 if (result) 719 if (result) {
720 end_page_writeback(page); 720 end_page_writeback(page);
721 else 721 } else {
722 clean_page_buffers(page);
722 unlock_page(page); 723 unlock_page(page);
724 }
723 blk_queue_exit(bdev->bd_queue); 725 blk_queue_exit(bdev->bd_queue);
724 return result; 726 return result;
725} 727}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 35a128acfbd1..161694b66038 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1135,7 +1135,7 @@ static int btrfs_fill_super(struct super_block *sb,
1135#ifdef CONFIG_BTRFS_FS_POSIX_ACL 1135#ifdef CONFIG_BTRFS_FS_POSIX_ACL
1136 sb->s_flags |= MS_POSIXACL; 1136 sb->s_flags |= MS_POSIXACL;
1137#endif 1137#endif
1138 sb->s_flags |= MS_I_VERSION; 1138 sb->s_flags |= SB_I_VERSION;
1139 sb->s_iflags |= SB_I_CGROUPWB; 1139 sb->s_iflags |= SB_I_CGROUPWB;
1140 1140
1141 err = super_setup_bdi(sb); 1141 err = super_setup_bdi(sb);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 018c588c7ac3..8e704d12a1cf 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -109,6 +109,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
109 goto out; 109 goto out;
110 } 110 }
111 ukp = user_key_payload_locked(keyring_key); 111 ukp = user_key_payload_locked(keyring_key);
112 if (!ukp) {
113 /* key was revoked before we acquired its semaphore */
114 res = -EKEYREVOKED;
115 goto out;
116 }
112 if (ukp->datalen != sizeof(struct fscrypt_key)) { 117 if (ukp->datalen != sizeof(struct fscrypt_key)) {
113 res = -EINVAL; 118 res = -EINVAL;
114 goto out; 119 goto out;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 62cf812ed0e5..b53e66d9abd7 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -45,6 +45,12 @@
45#define DIO_PAGES 64 45#define DIO_PAGES 64
46 46
47/* 47/*
48 * Flags for dio_complete()
49 */
50#define DIO_COMPLETE_ASYNC 0x01 /* This is async IO */
51#define DIO_COMPLETE_INVALIDATE 0x02 /* Can invalidate pages */
52
53/*
48 * This code generally works in units of "dio_blocks". A dio_block is 54 * This code generally works in units of "dio_blocks". A dio_block is
49 * somewhere between the hard sector size and the filesystem block size. it 55 * somewhere between the hard sector size and the filesystem block size. it
50 * is determined on a per-invocation basis. When talking to the filesystem 56 * is determined on a per-invocation basis. When talking to the filesystem
@@ -225,7 +231,7 @@ static inline struct page *dio_get_page(struct dio *dio,
225 * filesystems can use it to hold additional state between get_block calls and 231 * filesystems can use it to hold additional state between get_block calls and
226 * dio_complete. 232 * dio_complete.
227 */ 233 */
228static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async) 234static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
229{ 235{
230 loff_t offset = dio->iocb->ki_pos; 236 loff_t offset = dio->iocb->ki_pos;
231 ssize_t transferred = 0; 237 ssize_t transferred = 0;
@@ -259,14 +265,27 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
259 if (ret == 0) 265 if (ret == 0)
260 ret = transferred; 266 ret = transferred;
261 267
268 if (dio->end_io) {
269 // XXX: ki_pos??
270 err = dio->end_io(dio->iocb, offset, ret, dio->private);
271 if (err)
272 ret = err;
273 }
274
262 /* 275 /*
263 * Try again to invalidate clean pages which might have been cached by 276 * Try again to invalidate clean pages which might have been cached by
264 * non-direct readahead, or faulted in by get_user_pages() if the source 277 * non-direct readahead, or faulted in by get_user_pages() if the source
265 * of the write was an mmap'ed region of the file we're writing. Either 278 * of the write was an mmap'ed region of the file we're writing. Either
266 * one is a pretty crazy thing to do, so we don't support it 100%. If 279 * one is a pretty crazy thing to do, so we don't support it 100%. If
267 * this invalidation fails, tough, the write still worked... 280 * this invalidation fails, tough, the write still worked...
281 *
282 * And this page cache invalidation has to be after dio->end_io(), as
283 * some filesystems convert unwritten extents to real allocations in
284 * end_io() when necessary, otherwise a racing buffer read would cache
285 * zeros from unwritten extents.
268 */ 286 */
269 if (ret > 0 && dio->op == REQ_OP_WRITE && 287 if (flags & DIO_COMPLETE_INVALIDATE &&
288 ret > 0 && dio->op == REQ_OP_WRITE &&
270 dio->inode->i_mapping->nrpages) { 289 dio->inode->i_mapping->nrpages) {
271 err = invalidate_inode_pages2_range(dio->inode->i_mapping, 290 err = invalidate_inode_pages2_range(dio->inode->i_mapping,
272 offset >> PAGE_SHIFT, 291 offset >> PAGE_SHIFT,
@@ -274,18 +293,10 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
274 WARN_ON_ONCE(err); 293 WARN_ON_ONCE(err);
275 } 294 }
276 295
277 if (dio->end_io) {
278
279 // XXX: ki_pos??
280 err = dio->end_io(dio->iocb, offset, ret, dio->private);
281 if (err)
282 ret = err;
283 }
284
285 if (!(dio->flags & DIO_SKIP_DIO_COUNT)) 296 if (!(dio->flags & DIO_SKIP_DIO_COUNT))
286 inode_dio_end(dio->inode); 297 inode_dio_end(dio->inode);
287 298
288 if (is_async) { 299 if (flags & DIO_COMPLETE_ASYNC) {
289 /* 300 /*
290 * generic_write_sync expects ki_pos to have been updated 301 * generic_write_sync expects ki_pos to have been updated
291 * already, but the submission path only does this for 302 * already, but the submission path only does this for
@@ -306,7 +317,7 @@ static void dio_aio_complete_work(struct work_struct *work)
306{ 317{
307 struct dio *dio = container_of(work, struct dio, complete_work); 318 struct dio *dio = container_of(work, struct dio, complete_work);
308 319
309 dio_complete(dio, 0, true); 320 dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
310} 321}
311 322
312static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio); 323static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
@@ -348,7 +359,7 @@ static void dio_bio_end_aio(struct bio *bio)
348 queue_work(dio->inode->i_sb->s_dio_done_wq, 359 queue_work(dio->inode->i_sb->s_dio_done_wq,
349 &dio->complete_work); 360 &dio->complete_work);
350 } else { 361 } else {
351 dio_complete(dio, 0, true); 362 dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
352 } 363 }
353 } 364 }
354} 365}
@@ -866,7 +877,8 @@ out:
866 */ 877 */
867 if (sdio->boundary) { 878 if (sdio->boundary) {
868 ret = dio_send_cur_page(dio, sdio, map_bh); 879 ret = dio_send_cur_page(dio, sdio, map_bh);
869 dio_bio_submit(dio, sdio); 880 if (sdio->bio)
881 dio_bio_submit(dio, sdio);
870 put_page(sdio->cur_page); 882 put_page(sdio->cur_page);
871 sdio->cur_page = NULL; 883 sdio->cur_page = NULL;
872 } 884 }
@@ -1359,7 +1371,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
1359 dio_await_completion(dio); 1371 dio_await_completion(dio);
1360 1372
1361 if (drop_refcount(dio) == 0) { 1373 if (drop_refcount(dio) == 0) {
1362 retval = dio_complete(dio, retval, false); 1374 retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
1363 } else 1375 } else
1364 BUG_ON(retval != -EIOCBQUEUED); 1376 BUG_ON(retval != -EIOCBQUEUED);
1365 1377
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 9c351bf757b2..3fbc0ff79699 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -84,11 +84,16 @@ struct ecryptfs_page_crypt_context {
84static inline struct ecryptfs_auth_tok * 84static inline struct ecryptfs_auth_tok *
85ecryptfs_get_encrypted_key_payload_data(struct key *key) 85ecryptfs_get_encrypted_key_payload_data(struct key *key)
86{ 86{
87 if (key->type == &key_type_encrypted) 87 struct encrypted_key_payload *payload;
88 return (struct ecryptfs_auth_tok *) 88
89 (&((struct encrypted_key_payload *)key->payload.data[0])->payload_data); 89 if (key->type != &key_type_encrypted)
90 else
91 return NULL; 90 return NULL;
91
92 payload = key->payload.data[0];
93 if (!payload)
94 return ERR_PTR(-EKEYREVOKED);
95
96 return (struct ecryptfs_auth_tok *)payload->payload_data;
92} 97}
93 98
94static inline struct key *ecryptfs_get_encrypted_key(char *sig) 99static inline struct key *ecryptfs_get_encrypted_key(char *sig)
@@ -114,12 +119,17 @@ static inline struct ecryptfs_auth_tok *
114ecryptfs_get_key_payload_data(struct key *key) 119ecryptfs_get_key_payload_data(struct key *key)
115{ 120{
116 struct ecryptfs_auth_tok *auth_tok; 121 struct ecryptfs_auth_tok *auth_tok;
122 struct user_key_payload *ukp;
117 123
118 auth_tok = ecryptfs_get_encrypted_key_payload_data(key); 124 auth_tok = ecryptfs_get_encrypted_key_payload_data(key);
119 if (!auth_tok) 125 if (auth_tok)
120 return (struct ecryptfs_auth_tok *)user_key_payload_locked(key)->data;
121 else
122 return auth_tok; 126 return auth_tok;
127
128 ukp = user_key_payload_locked(key);
129 if (!ukp)
130 return ERR_PTR(-EKEYREVOKED);
131
132 return (struct ecryptfs_auth_tok *)ukp->data;
123} 133}
124 134
125#define ECRYPTFS_MAX_KEYSET_SIZE 1024 135#define ECRYPTFS_MAX_KEYSET_SIZE 1024
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 3cf1546dca82..fa218cd64f74 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -459,7 +459,8 @@ out:
459 * @auth_tok_key: key containing the authentication token 459 * @auth_tok_key: key containing the authentication token
460 * @auth_tok: authentication token 460 * @auth_tok: authentication token
461 * 461 *
462 * Returns zero on valid auth tok; -EINVAL otherwise 462 * Returns zero on valid auth tok; -EINVAL if the payload is invalid; or
463 * -EKEYREVOKED if the key was revoked before we acquired its semaphore.
463 */ 464 */
464static int 465static int
465ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key, 466ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
@@ -468,6 +469,12 @@ ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
468 int rc = 0; 469 int rc = 0;
469 470
470 (*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key); 471 (*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
472 if (IS_ERR(*auth_tok)) {
473 rc = PTR_ERR(*auth_tok);
474 *auth_tok = NULL;
475 goto out;
476 }
477
471 if (ecryptfs_verify_version((*auth_tok)->version)) { 478 if (ecryptfs_verify_version((*auth_tok)->version)) {
472 printk(KERN_ERR "Data structure version mismatch. Userspace " 479 printk(KERN_ERR "Data structure version mismatch. Userspace "
473 "tools must match eCryptfs kernel module with major " 480 "tools must match eCryptfs kernel module with major "
diff --git a/fs/exec.c b/fs/exec.c
index 5470d3c1892a..3e14ba25f678 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1802,6 +1802,7 @@ static int do_execveat_common(int fd, struct filename *filename,
1802 /* execve succeeded */ 1802 /* execve succeeded */
1803 current->fs->in_exec = 0; 1803 current->fs->in_exec = 0;
1804 current->in_execve = 0; 1804 current->in_execve = 0;
1805 membarrier_execve(current);
1805 acct_update_integrals(current); 1806 acct_update_integrals(current);
1806 task_numa_free(current); 1807 task_numa_free(current);
1807 free_bprm(bprm); 1808 free_bprm(bprm);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b104096fce9e..b0915b734a38 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1677,7 +1677,7 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
1677 sbi->s_mount_flags |= EXT4_MF_FS_ABORTED; 1677 sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
1678 return 1; 1678 return 1;
1679 case Opt_i_version: 1679 case Opt_i_version:
1680 sb->s_flags |= MS_I_VERSION; 1680 sb->s_flags |= SB_I_VERSION;
1681 return 1; 1681 return 1;
1682 case Opt_lazytime: 1682 case Opt_lazytime:
1683 sb->s_flags |= MS_LAZYTIME; 1683 sb->s_flags |= MS_LAZYTIME;
@@ -2060,7 +2060,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
2060 SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time); 2060 SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
2061 if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) 2061 if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
2062 SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time); 2062 SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
2063 if (sb->s_flags & MS_I_VERSION) 2063 if (sb->s_flags & SB_I_VERSION)
2064 SEQ_OPTS_PUTS("i_version"); 2064 SEQ_OPTS_PUTS("i_version");
2065 if (nodefs || sbi->s_stripe) 2065 if (nodefs || sbi->s_stripe)
2066 SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe); 2066 SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 9a7c90386947..4b4a72f392be 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2525,7 +2525,7 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
2525bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); 2525bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
2526void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new); 2526void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new);
2527void stop_discard_thread(struct f2fs_sb_info *sbi); 2527void stop_discard_thread(struct f2fs_sb_info *sbi);
2528void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi); 2528void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount);
2529void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc); 2529void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc);
2530void release_discard_addrs(struct f2fs_sb_info *sbi); 2530void release_discard_addrs(struct f2fs_sb_info *sbi);
2531int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); 2531int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 621b9b3d320b..c695ff462ee6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1210,11 +1210,11 @@ void stop_discard_thread(struct f2fs_sb_info *sbi)
1210} 1210}
1211 1211
1212/* This comes from f2fs_put_super and f2fs_trim_fs */ 1212/* This comes from f2fs_put_super and f2fs_trim_fs */
1213void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi) 1213void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount)
1214{ 1214{
1215 __issue_discard_cmd(sbi, false); 1215 __issue_discard_cmd(sbi, false);
1216 __drop_discard_cmd(sbi); 1216 __drop_discard_cmd(sbi);
1217 __wait_discard_cmd(sbi, false); 1217 __wait_discard_cmd(sbi, !umount);
1218} 1218}
1219 1219
1220static void mark_discard_range_all(struct f2fs_sb_info *sbi) 1220static void mark_discard_range_all(struct f2fs_sb_info *sbi)
@@ -2244,7 +2244,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
2244 } 2244 }
2245 /* It's time to issue all the filed discards */ 2245 /* It's time to issue all the filed discards */
2246 mark_discard_range_all(sbi); 2246 mark_discard_range_all(sbi);
2247 f2fs_wait_discard_bios(sbi); 2247 f2fs_wait_discard_bios(sbi, false);
2248out: 2248out:
2249 range->len = F2FS_BLK_TO_BYTES(cpc.trimmed); 2249 range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
2250 return err; 2250 return err;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 89f61eb3d167..933c3d529e65 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -801,7 +801,7 @@ static void f2fs_put_super(struct super_block *sb)
801 } 801 }
802 802
803 /* be sure to wait for any on-going discard commands */ 803 /* be sure to wait for any on-going discard commands */
804 f2fs_wait_discard_bios(sbi); 804 f2fs_wait_discard_bios(sbi, true);
805 805
806 if (f2fs_discard_en(sbi) && !sbi->discard_blks) { 806 if (f2fs_discard_en(sbi) && !sbi->discard_blks) {
807 struct cp_control cpc = { 807 struct cp_control cpc = {
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index b5ab06fabc60..0438d4cd91ef 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -331,6 +331,13 @@ static void fscache_objlist_config(struct fscache_objlist_data *data)
331 rcu_read_lock(); 331 rcu_read_lock();
332 332
333 confkey = user_key_payload_rcu(key); 333 confkey = user_key_payload_rcu(key);
334 if (!confkey) {
335 /* key was revoked */
336 rcu_read_unlock();
337 key_put(key);
338 goto no_config;
339 }
340
334 buf = confkey->data; 341 buf = confkey->data;
335 342
336 for (len = confkey->datalen - 1; len >= 0; len--) { 343 for (len = confkey->datalen - 1; len >= 0; len--) {
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 65c88379a3a1..94a745acaef8 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1059,7 +1059,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1059 if (sb->s_flags & MS_MANDLOCK) 1059 if (sb->s_flags & MS_MANDLOCK)
1060 goto err; 1060 goto err;
1061 1061
1062 sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION); 1062 sb->s_flags &= ~(MS_NOSEC | SB_I_VERSION);
1063 1063
1064 if (!parse_fuse_opt(data, &d, is_bdev)) 1064 if (!parse_fuse_opt(data, &d, is_bdev))
1065 goto err; 1065 goto err;
diff --git a/fs/iomap.c b/fs/iomap.c
index be61cf742b5e..d4801f8dd4fd 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -714,23 +714,9 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
714{ 714{
715 struct kiocb *iocb = dio->iocb; 715 struct kiocb *iocb = dio->iocb;
716 struct inode *inode = file_inode(iocb->ki_filp); 716 struct inode *inode = file_inode(iocb->ki_filp);
717 loff_t offset = iocb->ki_pos;
717 ssize_t ret; 718 ssize_t ret;
718 719
719 /*
720 * Try again to invalidate clean pages which might have been cached by
721 * non-direct readahead, or faulted in by get_user_pages() if the source
722 * of the write was an mmap'ed region of the file we're writing. Either
723 * one is a pretty crazy thing to do, so we don't support it 100%. If
724 * this invalidation fails, tough, the write still worked...
725 */
726 if (!dio->error &&
727 (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
728 ret = invalidate_inode_pages2_range(inode->i_mapping,
729 iocb->ki_pos >> PAGE_SHIFT,
730 (iocb->ki_pos + dio->size - 1) >> PAGE_SHIFT);
731 WARN_ON_ONCE(ret);
732 }
733
734 if (dio->end_io) { 720 if (dio->end_io) {
735 ret = dio->end_io(iocb, 721 ret = dio->end_io(iocb,
736 dio->error ? dio->error : dio->size, 722 dio->error ? dio->error : dio->size,
@@ -742,12 +728,33 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
742 if (likely(!ret)) { 728 if (likely(!ret)) {
743 ret = dio->size; 729 ret = dio->size;
744 /* check for short read */ 730 /* check for short read */
745 if (iocb->ki_pos + ret > dio->i_size && 731 if (offset + ret > dio->i_size &&
746 !(dio->flags & IOMAP_DIO_WRITE)) 732 !(dio->flags & IOMAP_DIO_WRITE))
747 ret = dio->i_size - iocb->ki_pos; 733 ret = dio->i_size - offset;
748 iocb->ki_pos += ret; 734 iocb->ki_pos += ret;
749 } 735 }
750 736
737 /*
738 * Try again to invalidate clean pages which might have been cached by
739 * non-direct readahead, or faulted in by get_user_pages() if the source
740 * of the write was an mmap'ed region of the file we're writing. Either
741 * one is a pretty crazy thing to do, so we don't support it 100%. If
742 * this invalidation fails, tough, the write still worked...
743 *
744 * And this page cache invalidation has to be after dio->end_io(), as
745 * some filesystems convert unwritten extents to real allocations in
746 * end_io() when necessary, otherwise a racing buffer read would cache
747 * zeros from unwritten extents.
748 */
749 if (!dio->error &&
750 (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
751 int err;
752 err = invalidate_inode_pages2_range(inode->i_mapping,
753 offset >> PAGE_SHIFT,
754 (offset + dio->size - 1) >> PAGE_SHIFT);
755 WARN_ON_ONCE(err);
756 }
757
751 inode_dio_end(file_inode(iocb->ki_filp)); 758 inode_dio_end(file_inode(iocb->ki_filp));
752 kfree(dio); 759 kfree(dio);
753 760
diff --git a/fs/mpage.c b/fs/mpage.c
index 37bb77c1302c..c991faec70b9 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -468,6 +468,16 @@ static void clean_buffers(struct page *page, unsigned first_unmapped)
468 try_to_free_buffers(page); 468 try_to_free_buffers(page);
469} 469}
470 470
471/*
472 * For situations where we want to clean all buffers attached to a page.
473 * We don't need to calculate how many buffers are attached to the page,
474 * we just need to specify a number larger than the maximum number of buffers.
475 */
476void clean_page_buffers(struct page *page)
477{
478 clean_buffers(page, ~0U);
479}
480
471static int __mpage_writepage(struct page *page, struct writeback_control *wbc, 481static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
472 void *data) 482 void *data)
473{ 483{
@@ -605,10 +615,8 @@ alloc_new:
605 if (bio == NULL) { 615 if (bio == NULL) {
606 if (first_unmapped == blocks_per_page) { 616 if (first_unmapped == blocks_per_page) {
607 if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9), 617 if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
608 page, wbc)) { 618 page, wbc))
609 clean_buffers(page, first_unmapped);
610 goto out; 619 goto out;
611 }
612 } 620 }
613 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), 621 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
614 BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH); 622 BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
diff --git a/fs/namespace.c b/fs/namespace.c
index 3b601f115b6c..d18deb4c410b 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2825,7 +2825,8 @@ long do_mount(const char *dev_name, const char __user *dir_name,
2825 SB_MANDLOCK | 2825 SB_MANDLOCK |
2826 SB_DIRSYNC | 2826 SB_DIRSYNC |
2827 SB_SILENT | 2827 SB_SILENT |
2828 SB_POSIXACL); 2828 SB_POSIXACL |
2829 SB_I_VERSION);
2829 2830
2830 if (flags & MS_REMOUNT) 2831 if (flags & MS_REMOUNT)
2831 retval = do_remount(&path, flags, sb_flags, mnt_flags, 2832 retval = do_remount(&path, flags, sb_flags, mnt_flags,
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 3c69db7d4905..8487486ec496 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -927,6 +927,13 @@ nfsd4_secinfo_release(union nfsd4_op_u *u)
927 exp_put(u->secinfo.si_exp); 927 exp_put(u->secinfo.si_exp);
928} 928}
929 929
930static void
931nfsd4_secinfo_no_name_release(union nfsd4_op_u *u)
932{
933 if (u->secinfo_no_name.sin_exp)
934 exp_put(u->secinfo_no_name.sin_exp);
935}
936
930static __be32 937static __be32
931nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 938nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
932 union nfsd4_op_u *u) 939 union nfsd4_op_u *u)
@@ -2375,7 +2382,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
2375 }, 2382 },
2376 [OP_SECINFO_NO_NAME] = { 2383 [OP_SECINFO_NO_NAME] = {
2377 .op_func = nfsd4_secinfo_no_name, 2384 .op_func = nfsd4_secinfo_no_name,
2378 .op_release = nfsd4_secinfo_release, 2385 .op_release = nfsd4_secinfo_no_name_release,
2379 .op_flags = OP_HANDLES_WRONGSEC, 2386 .op_flags = OP_HANDLES_WRONGSEC,
2380 .op_name = "OP_SECINFO_NO_NAME", 2387 .op_name = "OP_SECINFO_NO_NAME",
2381 .op_rsize_bop = nfsd4_secinfo_rsize, 2388 .op_rsize_bop = nfsd4_secinfo_rsize,
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 50b0556a124f..52ad15192e72 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1297,21 +1297,18 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
1297 spin_lock(&dquot->dq_dqb_lock); 1297 spin_lock(&dquot->dq_dqb_lock);
1298 if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) || 1298 if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
1299 test_bit(DQ_FAKE_B, &dquot->dq_flags)) 1299 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1300 goto add; 1300 goto finish;
1301 1301
1302 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace 1302 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
1303 + space + rsv_space; 1303 + space + rsv_space;
1304 1304
1305 if (flags & DQUOT_SPACE_NOFAIL)
1306 goto add;
1307
1308 if (dquot->dq_dqb.dqb_bhardlimit && 1305 if (dquot->dq_dqb.dqb_bhardlimit &&
1309 tspace > dquot->dq_dqb.dqb_bhardlimit && 1306 tspace > dquot->dq_dqb.dqb_bhardlimit &&
1310 !ignore_hardlimit(dquot)) { 1307 !ignore_hardlimit(dquot)) {
1311 if (flags & DQUOT_SPACE_WARN) 1308 if (flags & DQUOT_SPACE_WARN)
1312 prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN); 1309 prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
1313 ret = -EDQUOT; 1310 ret = -EDQUOT;
1314 goto out; 1311 goto finish;
1315 } 1312 }
1316 1313
1317 if (dquot->dq_dqb.dqb_bsoftlimit && 1314 if (dquot->dq_dqb.dqb_bsoftlimit &&
@@ -1322,7 +1319,7 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
1322 if (flags & DQUOT_SPACE_WARN) 1319 if (flags & DQUOT_SPACE_WARN)
1323 prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN); 1320 prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
1324 ret = -EDQUOT; 1321 ret = -EDQUOT;
1325 goto out; 1322 goto finish;
1326 } 1323 }
1327 1324
1328 if (dquot->dq_dqb.dqb_bsoftlimit && 1325 if (dquot->dq_dqb.dqb_bsoftlimit &&
@@ -1338,13 +1335,21 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
1338 * be always printed 1335 * be always printed
1339 */ 1336 */
1340 ret = -EDQUOT; 1337 ret = -EDQUOT;
1341 goto out; 1338 goto finish;
1342 } 1339 }
1343 } 1340 }
1344add: 1341finish:
1345 dquot->dq_dqb.dqb_rsvspace += rsv_space; 1342 /*
1346 dquot->dq_dqb.dqb_curspace += space; 1343 * We have to be careful and go through warning generation & grace time
1347out: 1344 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
1345 * only here...
1346 */
1347 if (flags & DQUOT_SPACE_NOFAIL)
1348 ret = 0;
1349 if (!ret) {
1350 dquot->dq_dqb.dqb_rsvspace += rsv_space;
1351 dquot->dq_dqb.dqb_curspace += space;
1352 }
1348 spin_unlock(&dquot->dq_dqb_lock); 1353 spin_unlock(&dquot->dq_dqb_lock);
1349 return ret; 1354 return ret;
1350} 1355}
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 744dcaec34cc..f965ce832bc0 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1584,6 +1584,10 @@ xfs_alloc_ag_vextent_small(
1584 1584
1585 bp = xfs_btree_get_bufs(args->mp, args->tp, 1585 bp = xfs_btree_get_bufs(args->mp, args->tp,
1586 args->agno, fbno, 0); 1586 args->agno, fbno, 0);
1587 if (!bp) {
1588 error = -EFSCORRUPTED;
1589 goto error0;
1590 }
1587 xfs_trans_binval(args->tp, bp); 1591 xfs_trans_binval(args->tp, bp);
1588 } 1592 }
1589 args->len = 1; 1593 args->len = 1;
@@ -2141,6 +2145,10 @@ xfs_alloc_fix_freelist(
2141 if (error) 2145 if (error)
2142 goto out_agbp_relse; 2146 goto out_agbp_relse;
2143 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0); 2147 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
2148 if (!bp) {
2149 error = -EFSCORRUPTED;
2150 goto out_agbp_relse;
2151 }
2144 xfs_trans_binval(tp, bp); 2152 xfs_trans_binval(tp, bp);
2145 } 2153 }
2146 2154
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 044a363119be..89263797cf32 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1477,14 +1477,14 @@ xfs_bmap_isaeof(
1477 int is_empty; 1477 int is_empty;
1478 int error; 1478 int error;
1479 1479
1480 bma->aeof = 0; 1480 bma->aeof = false;
1481 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, 1481 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1482 &is_empty); 1482 &is_empty);
1483 if (error) 1483 if (error)
1484 return error; 1484 return error;
1485 1485
1486 if (is_empty) { 1486 if (is_empty) {
1487 bma->aeof = 1; 1487 bma->aeof = true;
1488 return 0; 1488 return 0;
1489 } 1489 }
1490 1490
@@ -3852,6 +3852,17 @@ xfs_trim_extent(
3852 } 3852 }
3853} 3853}
3854 3854
3855/* trim extent to within eof */
3856void
3857xfs_trim_extent_eof(
3858 struct xfs_bmbt_irec *irec,
3859 struct xfs_inode *ip)
3860
3861{
3862 xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
3863 i_size_read(VFS_I(ip))));
3864}
3865
3855/* 3866/*
3856 * Trim the returned map to the required bounds 3867 * Trim the returned map to the required bounds
3857 */ 3868 */
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 851982a5dfbc..502e0d8fb4ff 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -208,6 +208,7 @@ void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
208 208
209void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno, 209void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
210 xfs_filblks_t len); 210 xfs_filblks_t len);
211void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
211int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd); 212int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
212void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork); 213void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
213void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops, 214void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 988bb3f31446..dfd643909f85 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -1962,7 +1962,7 @@ xfs_difree_inobt(
1962 if (!(mp->m_flags & XFS_MOUNT_IKEEP) && 1962 if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
1963 rec.ir_free == XFS_INOBT_ALL_FREE && 1963 rec.ir_free == XFS_INOBT_ALL_FREE &&
1964 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { 1964 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
1965 xic->deleted = 1; 1965 xic->deleted = true;
1966 xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino); 1966 xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
1967 xic->alloc = xfs_inobt_irec_to_allocmask(&rec); 1967 xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
1968 1968
@@ -1989,7 +1989,7 @@ xfs_difree_inobt(
1989 1989
1990 xfs_difree_inode_chunk(mp, agno, &rec, dfops); 1990 xfs_difree_inode_chunk(mp, agno, &rec, dfops);
1991 } else { 1991 } else {
1992 xic->deleted = 0; 1992 xic->deleted = false;
1993 1993
1994 error = xfs_inobt_update(cur, &rec); 1994 error = xfs_inobt_update(cur, &rec);
1995 if (error) { 1995 if (error) {
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 8372e9bcd7b6..71de185735e0 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -270,6 +270,7 @@ typedef struct xfs_inode_log_format {
270 uint32_t ilf_fields; /* flags for fields logged */ 270 uint32_t ilf_fields; /* flags for fields logged */
271 uint16_t ilf_asize; /* size of attr d/ext/root */ 271 uint16_t ilf_asize; /* size of attr d/ext/root */
272 uint16_t ilf_dsize; /* size of data/ext/root */ 272 uint16_t ilf_dsize; /* size of data/ext/root */
273 uint32_t ilf_pad; /* pad for 64 bit boundary */
273 uint64_t ilf_ino; /* inode number */ 274 uint64_t ilf_ino; /* inode number */
274 union { 275 union {
275 uint32_t ilfu_rdev; /* rdev value for dev inode*/ 276 uint32_t ilfu_rdev; /* rdev value for dev inode*/
@@ -280,29 +281,17 @@ typedef struct xfs_inode_log_format {
280 int32_t ilf_boffset; /* off of inode in buffer */ 281 int32_t ilf_boffset; /* off of inode in buffer */
281} xfs_inode_log_format_t; 282} xfs_inode_log_format_t;
282 283
283typedef struct xfs_inode_log_format_32 { 284/*
284 uint16_t ilf_type; /* inode log item type */ 285 * Old 32 bit systems will log in this format without the 64 bit
285 uint16_t ilf_size; /* size of this item */ 286 * alignment padding. Recovery will detect this and convert it to the
286 uint32_t ilf_fields; /* flags for fields logged */ 287 * correct format.
287 uint16_t ilf_asize; /* size of attr d/ext/root */ 288 */
288 uint16_t ilf_dsize; /* size of data/ext/root */ 289struct xfs_inode_log_format_32 {
289 uint64_t ilf_ino; /* inode number */
290 union {
291 uint32_t ilfu_rdev; /* rdev value for dev inode*/
292 uuid_t ilfu_uuid; /* mount point value */
293 } ilf_u;
294 int64_t ilf_blkno; /* blkno of inode buffer */
295 int32_t ilf_len; /* len of inode buffer */
296 int32_t ilf_boffset; /* off of inode in buffer */
297} __attribute__((packed)) xfs_inode_log_format_32_t;
298
299typedef struct xfs_inode_log_format_64 {
300 uint16_t ilf_type; /* inode log item type */ 290 uint16_t ilf_type; /* inode log item type */
301 uint16_t ilf_size; /* size of this item */ 291 uint16_t ilf_size; /* size of this item */
302 uint32_t ilf_fields; /* flags for fields logged */ 292 uint32_t ilf_fields; /* flags for fields logged */
303 uint16_t ilf_asize; /* size of attr d/ext/root */ 293 uint16_t ilf_asize; /* size of attr d/ext/root */
304 uint16_t ilf_dsize; /* size of data/ext/root */ 294 uint16_t ilf_dsize; /* size of data/ext/root */
305 uint32_t ilf_pad; /* pad for 64 bit boundary */
306 uint64_t ilf_ino; /* inode number */ 295 uint64_t ilf_ino; /* inode number */
307 union { 296 union {
308 uint32_t ilfu_rdev; /* rdev value for dev inode*/ 297 uint32_t ilfu_rdev; /* rdev value for dev inode*/
@@ -311,7 +300,7 @@ typedef struct xfs_inode_log_format_64 {
311 int64_t ilf_blkno; /* blkno of inode buffer */ 300 int64_t ilf_blkno; /* blkno of inode buffer */
312 int32_t ilf_len; /* len of inode buffer */ 301 int32_t ilf_len; /* len of inode buffer */
313 int32_t ilf_boffset; /* off of inode in buffer */ 302 int32_t ilf_boffset; /* off of inode in buffer */
314} xfs_inode_log_format_64_t; 303} __attribute__((packed));
315 304
316 305
317/* 306/*
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 7034e17535de..3354140de07e 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -247,6 +247,8 @@ xfs_set_mode(struct inode *inode, umode_t mode)
247int 247int
248xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) 248xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
249{ 249{
250 umode_t mode;
251 bool set_mode = false;
250 int error = 0; 252 int error = 0;
251 253
252 if (!acl) 254 if (!acl)
@@ -257,16 +259,24 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
257 return error; 259 return error;
258 260
259 if (type == ACL_TYPE_ACCESS) { 261 if (type == ACL_TYPE_ACCESS) {
260 umode_t mode;
261
262 error = posix_acl_update_mode(inode, &mode, &acl); 262 error = posix_acl_update_mode(inode, &mode, &acl);
263 if (error) 263 if (error)
264 return error; 264 return error;
265 error = xfs_set_mode(inode, mode); 265 set_mode = true;
266 if (error)
267 return error;
268 } 266 }
269 267
270 set_acl: 268 set_acl:
271 return __xfs_set_acl(inode, acl, type); 269 error = __xfs_set_acl(inode, acl, type);
270 if (error)
271 return error;
272
273 /*
274 * We set the mode after successfully updating the ACL xattr because the
275 * xattr update can fail at ENOSPC and we don't want to change the mode
276 * if the ACL update hasn't been applied.
277 */
278 if (set_mode)
279 error = xfs_set_mode(inode, mode);
280
281 return error;
272} 282}
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index f18e5932aec4..a3eeaba156c5 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -446,6 +446,19 @@ xfs_imap_valid(
446{ 446{
447 offset >>= inode->i_blkbits; 447 offset >>= inode->i_blkbits;
448 448
449 /*
450 * We have to make sure the cached mapping is within EOF to protect
451 * against eofblocks trimming on file release leaving us with a stale
452 * mapping. Otherwise, a page for a subsequent file extending buffered
453 * write could get picked up by this writeback cycle and written to the
454 * wrong blocks.
455 *
456 * Note that what we really want here is a generic mapping invalidation
457 * mechanism to protect us from arbitrary extent modifying contexts, not
458 * just eofblocks.
459 */
460 xfs_trim_extent_eof(imap, XFS_I(inode));
461
449 return offset >= imap->br_startoff && 462 return offset >= imap->br_startoff &&
450 offset < imap->br_startoff + imap->br_blockcount; 463 offset < imap->br_startoff + imap->br_blockcount;
451} 464}
@@ -735,6 +748,14 @@ xfs_vm_invalidatepage(
735{ 748{
736 trace_xfs_invalidatepage(page->mapping->host, page, offset, 749 trace_xfs_invalidatepage(page->mapping->host, page, offset,
737 length); 750 length);
751
752 /*
753 * If we are invalidating the entire page, clear the dirty state from it
754 * so that we can check for attempts to release dirty cached pages in
755 * xfs_vm_releasepage().
756 */
757 if (offset == 0 && length >= PAGE_SIZE)
758 cancel_dirty_page(page);
738 block_invalidatepage(page, offset, length); 759 block_invalidatepage(page, offset, length);
739} 760}
740 761
@@ -1190,25 +1211,27 @@ xfs_vm_releasepage(
1190 * mm accommodates an old ext3 case where clean pages might not have had 1211 * mm accommodates an old ext3 case where clean pages might not have had
1191 * the dirty bit cleared. Thus, it can send actual dirty pages to 1212 * the dirty bit cleared. Thus, it can send actual dirty pages to
1192 * ->releasepage() via shrink_active_list(). Conversely, 1213 * ->releasepage() via shrink_active_list(). Conversely,
1193 * block_invalidatepage() can send pages that are still marked dirty 1214 * block_invalidatepage() can send pages that are still marked dirty but
1194 * but otherwise have invalidated buffers. 1215 * otherwise have invalidated buffers.
1195 * 1216 *
1196 * We want to release the latter to avoid unnecessary buildup of the 1217 * We want to release the latter to avoid unnecessary buildup of the
1197 * LRU, skip the former and warn if we've left any lingering 1218 * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
1198 * delalloc/unwritten buffers on clean pages. Skip pages with delalloc 1219 * that are entirely invalidated and need to be released. Hence the
1199 * or unwritten buffers and warn if the page is not dirty. Otherwise 1220 * only time we should get dirty pages here is through
1200 * try to release the buffers. 1221 * shrink_active_list() and so we can simply skip those now.
1222 *
1223 * warn if we've left any lingering delalloc/unwritten buffers on clean
1224 * or invalidated pages we are about to release.
1201 */ 1225 */
1226 if (PageDirty(page))
1227 return 0;
1228
1202 xfs_count_page_state(page, &delalloc, &unwritten); 1229 xfs_count_page_state(page, &delalloc, &unwritten);
1203 1230
1204 if (delalloc) { 1231 if (WARN_ON_ONCE(delalloc))
1205 WARN_ON_ONCE(!PageDirty(page));
1206 return 0; 1232 return 0;
1207 } 1233 if (WARN_ON_ONCE(unwritten))
1208 if (unwritten) {
1209 WARN_ON_ONCE(!PageDirty(page));
1210 return 0; 1234 return 0;
1211 }
1212 1235
1213 return try_to_free_buffers(page); 1236 return try_to_free_buffers(page);
1214} 1237}
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index ebd66b19fbfc..e3a950ed35a8 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -302,6 +302,8 @@ xfs_attr3_node_inactive(
302 &bp, XFS_ATTR_FORK); 302 &bp, XFS_ATTR_FORK);
303 if (error) 303 if (error)
304 return error; 304 return error;
305 node = bp->b_addr;
306 btree = dp->d_ops->node_tree_p(node);
305 child_fsb = be32_to_cpu(btree[i + 1].before); 307 child_fsb = be32_to_cpu(btree[i + 1].before);
306 xfs_trans_brelse(*trans, bp); 308 xfs_trans_brelse(*trans, bp);
307 } 309 }
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index e9db7fc95b70..6503cfa44262 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -84,6 +84,7 @@ xfs_zero_extent(
84 GFP_NOFS, 0); 84 GFP_NOFS, 0);
85} 85}
86 86
87#ifdef CONFIG_XFS_RT
87int 88int
88xfs_bmap_rtalloc( 89xfs_bmap_rtalloc(
89 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 90 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
@@ -190,6 +191,7 @@ xfs_bmap_rtalloc(
190 } 191 }
191 return 0; 192 return 0;
192} 193}
194#endif /* CONFIG_XFS_RT */
193 195
194/* 196/*
195 * Check if the endoff is outside the last extent. If so the caller will grow 197 * Check if the endoff is outside the last extent. If so the caller will grow
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 0eaa81dc49be..7d330b3c77c3 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -28,7 +28,20 @@ struct xfs_mount;
28struct xfs_trans; 28struct xfs_trans;
29struct xfs_bmalloca; 29struct xfs_bmalloca;
30 30
31#ifdef CONFIG_XFS_RT
31int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); 32int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
33#else /* !CONFIG_XFS_RT */
34/*
35 * Attempts to allocate RT extents when RT is disable indicates corruption and
36 * should trigger a shutdown.
37 */
38static inline int
39xfs_bmap_rtalloc(struct xfs_bmalloca *ap)
40{
41 return -EFSCORRUPTED;
42}
43#endif /* CONFIG_XFS_RT */
44
32int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, 45int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
33 int whichfork, int *eof); 46 int whichfork, int *eof);
34int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip, 47int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 309e26c9dddb..56d0e526870c 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -764,7 +764,7 @@ xfs_file_fallocate(
764 enum xfs_prealloc_flags flags = 0; 764 enum xfs_prealloc_flags flags = 0;
765 uint iolock = XFS_IOLOCK_EXCL; 765 uint iolock = XFS_IOLOCK_EXCL;
766 loff_t new_size = 0; 766 loff_t new_size = 0;
767 bool do_file_insert = 0; 767 bool do_file_insert = false;
768 768
769 if (!S_ISREG(inode->i_mode)) 769 if (!S_ISREG(inode->i_mode))
770 return -EINVAL; 770 return -EINVAL;
@@ -825,7 +825,7 @@ xfs_file_fallocate(
825 error = -EINVAL; 825 error = -EINVAL;
826 goto out_unlock; 826 goto out_unlock;
827 } 827 }
828 do_file_insert = 1; 828 do_file_insert = true;
829 } else { 829 } else {
830 flags |= XFS_PREALLOC_SET; 830 flags |= XFS_PREALLOC_SET;
831 831
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 814ed729881d..43cfc07996a4 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -367,29 +367,6 @@ xfs_getfsmap_datadev_helper(
367 return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr); 367 return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr);
368} 368}
369 369
370/* Transform a rtbitmap "record" into a fsmap */
371STATIC int
372xfs_getfsmap_rtdev_rtbitmap_helper(
373 struct xfs_trans *tp,
374 struct xfs_rtalloc_rec *rec,
375 void *priv)
376{
377 struct xfs_mount *mp = tp->t_mountp;
378 struct xfs_getfsmap_info *info = priv;
379 struct xfs_rmap_irec irec;
380 xfs_daddr_t rec_daddr;
381
382 rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
383
384 irec.rm_startblock = rec->ar_startblock;
385 irec.rm_blockcount = rec->ar_blockcount;
386 irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */
387 irec.rm_offset = 0;
388 irec.rm_flags = 0;
389
390 return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
391}
392
393/* Transform a bnobt irec into a fsmap */ 370/* Transform a bnobt irec into a fsmap */
394STATIC int 371STATIC int
395xfs_getfsmap_datadev_bnobt_helper( 372xfs_getfsmap_datadev_bnobt_helper(
@@ -475,6 +452,30 @@ xfs_getfsmap_logdev(
475 return xfs_getfsmap_helper(tp, info, &rmap, 0); 452 return xfs_getfsmap_helper(tp, info, &rmap, 0);
476} 453}
477 454
455#ifdef CONFIG_XFS_RT
456/* Transform a rtbitmap "record" into a fsmap */
457STATIC int
458xfs_getfsmap_rtdev_rtbitmap_helper(
459 struct xfs_trans *tp,
460 struct xfs_rtalloc_rec *rec,
461 void *priv)
462{
463 struct xfs_mount *mp = tp->t_mountp;
464 struct xfs_getfsmap_info *info = priv;
465 struct xfs_rmap_irec irec;
466 xfs_daddr_t rec_daddr;
467
468 rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
469
470 irec.rm_startblock = rec->ar_startblock;
471 irec.rm_blockcount = rec->ar_blockcount;
472 irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */
473 irec.rm_offset = 0;
474 irec.rm_flags = 0;
475
476 return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
477}
478
478/* Execute a getfsmap query against the realtime device. */ 479/* Execute a getfsmap query against the realtime device. */
479STATIC int 480STATIC int
480__xfs_getfsmap_rtdev( 481__xfs_getfsmap_rtdev(
@@ -561,6 +562,7 @@ xfs_getfsmap_rtdev_rtbitmap(
561 return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query, 562 return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query,
562 info); 563 info);
563} 564}
565#endif /* CONFIG_XFS_RT */
564 566
565/* Execute a getfsmap query against the regular data device. */ 567/* Execute a getfsmap query against the regular data device. */
566STATIC int 568STATIC int
@@ -795,7 +797,15 @@ xfs_getfsmap_check_keys(
795 return false; 797 return false;
796} 798}
797 799
800/*
801 * There are only two devices if we didn't configure RT devices at build time.
802 */
803#ifdef CONFIG_XFS_RT
798#define XFS_GETFSMAP_DEVS 3 804#define XFS_GETFSMAP_DEVS 3
805#else
806#define XFS_GETFSMAP_DEVS 2
807#endif /* CONFIG_XFS_RT */
808
799/* 809/*
800 * Get filesystem's extents as described in head, and format for 810 * Get filesystem's extents as described in head, and format for
801 * output. Calls formatter to fill the user's buffer until all 811 * output. Calls formatter to fill the user's buffer until all
@@ -853,10 +863,12 @@ xfs_getfsmap(
853 handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev); 863 handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev);
854 handlers[1].fn = xfs_getfsmap_logdev; 864 handlers[1].fn = xfs_getfsmap_logdev;
855 } 865 }
866#ifdef CONFIG_XFS_RT
856 if (mp->m_rtdev_targp) { 867 if (mp->m_rtdev_targp) {
857 handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev); 868 handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev);
858 handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap; 869 handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap;
859 } 870 }
871#endif /* CONFIG_XFS_RT */
860 872
861 xfs_sort(handlers, XFS_GETFSMAP_DEVS, sizeof(struct xfs_getfsmap_dev), 873 xfs_sort(handlers, XFS_GETFSMAP_DEVS, sizeof(struct xfs_getfsmap_dev),
862 xfs_getfsmap_dev_compare); 874 xfs_getfsmap_dev_compare);
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index a705f34b58fa..9bbc2d7cc8cb 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -364,6 +364,9 @@ xfs_inode_to_log_dinode(
364 to->di_dmstate = from->di_dmstate; 364 to->di_dmstate = from->di_dmstate;
365 to->di_flags = from->di_flags; 365 to->di_flags = from->di_flags;
366 366
367 /* log a dummy value to ensure log structure is fully initialised */
368 to->di_next_unlinked = NULLAGINO;
369
367 if (from->di_version == 3) { 370 if (from->di_version == 3) {
368 to->di_changecount = inode->i_version; 371 to->di_changecount = inode->i_version;
369 to->di_crtime.t_sec = from->di_crtime.t_sec; 372 to->di_crtime.t_sec = from->di_crtime.t_sec;
@@ -404,6 +407,11 @@ xfs_inode_item_format_core(
404 * the second with the on-disk inode structure, and a possible third and/or 407 * the second with the on-disk inode structure, and a possible third and/or
405 * fourth with the inode data/extents/b-tree root and inode attributes 408 * fourth with the inode data/extents/b-tree root and inode attributes
406 * data/extents/b-tree root. 409 * data/extents/b-tree root.
410 *
411 * Note: Always use the 64 bit inode log format structure so we don't
412 * leave an uninitialised hole in the format item on 64 bit systems. Log
413 * recovery on 32 bit systems handles this just fine, so there's no reason
414 * for not using an initialising the properly padded structure all the time.
407 */ 415 */
408STATIC void 416STATIC void
409xfs_inode_item_format( 417xfs_inode_item_format(
@@ -412,8 +420,8 @@ xfs_inode_item_format(
412{ 420{
413 struct xfs_inode_log_item *iip = INODE_ITEM(lip); 421 struct xfs_inode_log_item *iip = INODE_ITEM(lip);
414 struct xfs_inode *ip = iip->ili_inode; 422 struct xfs_inode *ip = iip->ili_inode;
415 struct xfs_inode_log_format *ilf;
416 struct xfs_log_iovec *vecp = NULL; 423 struct xfs_log_iovec *vecp = NULL;
424 struct xfs_inode_log_format *ilf;
417 425
418 ASSERT(ip->i_d.di_version > 1); 426 ASSERT(ip->i_d.di_version > 1);
419 427
@@ -425,7 +433,17 @@ xfs_inode_item_format(
425 ilf->ilf_boffset = ip->i_imap.im_boffset; 433 ilf->ilf_boffset = ip->i_imap.im_boffset;
426 ilf->ilf_fields = XFS_ILOG_CORE; 434 ilf->ilf_fields = XFS_ILOG_CORE;
427 ilf->ilf_size = 2; /* format + core */ 435 ilf->ilf_size = 2; /* format + core */
428 xlog_finish_iovec(lv, vecp, sizeof(struct xfs_inode_log_format)); 436
437 /*
438 * make sure we don't leak uninitialised data into the log in the case
439 * when we don't log every field in the inode.
440 */
441 ilf->ilf_dsize = 0;
442 ilf->ilf_asize = 0;
443 ilf->ilf_pad = 0;
444 uuid_copy(&ilf->ilf_u.ilfu_uuid, &uuid_null);
445
446 xlog_finish_iovec(lv, vecp, sizeof(*ilf));
429 447
430 xfs_inode_item_format_core(ip, lv, &vecp); 448 xfs_inode_item_format_core(ip, lv, &vecp);
431 xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp); 449 xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
@@ -855,44 +873,29 @@ xfs_istale_done(
855} 873}
856 874
857/* 875/*
858 * convert an xfs_inode_log_format struct from either 32 or 64 bit versions 876 * convert an xfs_inode_log_format struct from the old 32 bit version
859 * (which can have different field alignments) to the native version 877 * (which can have different field alignments) to the native 64 bit version
860 */ 878 */
861int 879int
862xfs_inode_item_format_convert( 880xfs_inode_item_format_convert(
863 xfs_log_iovec_t *buf, 881 struct xfs_log_iovec *buf,
864 xfs_inode_log_format_t *in_f) 882 struct xfs_inode_log_format *in_f)
865{ 883{
866 if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) { 884 struct xfs_inode_log_format_32 *in_f32 = buf->i_addr;
867 xfs_inode_log_format_32_t *in_f32 = buf->i_addr; 885
868 886 if (buf->i_len != sizeof(*in_f32))
869 in_f->ilf_type = in_f32->ilf_type; 887 return -EFSCORRUPTED;
870 in_f->ilf_size = in_f32->ilf_size; 888
871 in_f->ilf_fields = in_f32->ilf_fields; 889 in_f->ilf_type = in_f32->ilf_type;
872 in_f->ilf_asize = in_f32->ilf_asize; 890 in_f->ilf_size = in_f32->ilf_size;
873 in_f->ilf_dsize = in_f32->ilf_dsize; 891 in_f->ilf_fields = in_f32->ilf_fields;
874 in_f->ilf_ino = in_f32->ilf_ino; 892 in_f->ilf_asize = in_f32->ilf_asize;
875 /* copy biggest field of ilf_u */ 893 in_f->ilf_dsize = in_f32->ilf_dsize;
876 uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid); 894 in_f->ilf_ino = in_f32->ilf_ino;
877 in_f->ilf_blkno = in_f32->ilf_blkno; 895 /* copy biggest field of ilf_u */
878 in_f->ilf_len = in_f32->ilf_len; 896 uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
879 in_f->ilf_boffset = in_f32->ilf_boffset; 897 in_f->ilf_blkno = in_f32->ilf_blkno;
880 return 0; 898 in_f->ilf_len = in_f32->ilf_len;
881 } else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){ 899 in_f->ilf_boffset = in_f32->ilf_boffset;
882 xfs_inode_log_format_64_t *in_f64 = buf->i_addr; 900 return 0;
883
884 in_f->ilf_type = in_f64->ilf_type;
885 in_f->ilf_size = in_f64->ilf_size;
886 in_f->ilf_fields = in_f64->ilf_fields;
887 in_f->ilf_asize = in_f64->ilf_asize;
888 in_f->ilf_dsize = in_f64->ilf_dsize;
889 in_f->ilf_ino = in_f64->ilf_ino;
890 /* copy biggest field of ilf_u */
891 uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f64->ilf_u.ilfu_uuid);
892 in_f->ilf_blkno = in_f64->ilf_blkno;
893 in_f->ilf_len = in_f64->ilf_len;
894 in_f->ilf_boffset = in_f64->ilf_boffset;
895 return 0;
896 }
897 return -EFSCORRUPTED;
898} 901}
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index c5107c7bc4bf..dc95a49d62e7 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2515,7 +2515,7 @@ next_lv:
2515 if (lv) 2515 if (lv)
2516 vecp = lv->lv_iovecp; 2516 vecp = lv->lv_iovecp;
2517 } 2517 }
2518 if (record_cnt == 0 && ordered == false) { 2518 if (record_cnt == 0 && !ordered) {
2519 if (!lv) 2519 if (!lv)
2520 return 0; 2520 return 0;
2521 break; 2521 break;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index ea7d4b4e50d0..e9727d0a541a 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -704,7 +704,7 @@ xfs_mountfs(
704 xfs_set_maxicount(mp); 704 xfs_set_maxicount(mp);
705 705
706 /* enable fail_at_unmount as default */ 706 /* enable fail_at_unmount as default */
707 mp->m_fail_unmount = 1; 707 mp->m_fail_unmount = true;
708 708
709 error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname); 709 error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
710 if (error) 710 if (error)
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h
index 0c381d71b242..0492436a053f 100644
--- a/fs/xfs/xfs_ondisk.h
+++ b/fs/xfs/xfs_ondisk.h
@@ -134,7 +134,7 @@ xfs_check_ondisk_structs(void)
134 XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log, 28); 134 XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log, 28);
135 XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp, 8); 135 XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp, 8);
136 XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32, 52); 136 XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32, 52);
137 XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_64, 56); 137 XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format, 56);
138 XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20); 138 XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20);
139 XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16); 139 XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16);
140} 140}
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 584cf2d573ba..f663022353c0 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1637,7 +1637,7 @@ xfs_fs_fill_super(
1637 1637
1638 /* version 5 superblocks support inode version counters. */ 1638 /* version 5 superblocks support inode version counters. */
1639 if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5) 1639 if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1640 sb->s_flags |= MS_I_VERSION; 1640 sb->s_flags |= SB_I_VERSION;
1641 1641
1642 if (mp->m_flags & XFS_MOUNT_DAX) { 1642 if (mp->m_flags & XFS_MOUNT_DAX) {
1643 xfs_warn(mp, 1643 xfs_warn(mp,
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c8dae555eccf..446b24cac67d 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -232,6 +232,7 @@ int generic_write_end(struct file *, struct address_space *,
232 loff_t, unsigned, unsigned, 232 loff_t, unsigned, unsigned,
233 struct page *, void *); 233 struct page *, void *);
234void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); 234void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
235void clean_page_buffers(struct page *page);
235int cont_write_begin(struct file *, struct address_space *, loff_t, 236int cont_write_begin(struct file *, struct address_space *, loff_t,
236 unsigned, unsigned, struct page **, void **, 237 unsigned, unsigned, struct page **, void **,
237 get_block_t *, loff_t *); 238 get_block_t *, loff_t *);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 2d2db394b0ca..cdd78a7beaae 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -748,7 +748,7 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp)
748 748
749void bpf_warn_invalid_xdp_action(u32 act); 749void bpf_warn_invalid_xdp_action(u32 act);
750 750
751struct sock *do_sk_redirect_map(void); 751struct sock *do_sk_redirect_map(struct sk_buff *skb);
752 752
753#ifdef CONFIG_BPF_JIT 753#ifdef CONFIG_BPF_JIT
754extern int bpf_jit_enable; 754extern int bpf_jit_enable;
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index c458d7b7ad19..6431087816ba 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1403,7 +1403,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
1403 const int *srv_version, int srv_vercnt, 1403 const int *srv_version, int srv_vercnt,
1404 int *nego_fw_version, int *nego_srv_version); 1404 int *nego_fw_version, int *nego_srv_version);
1405 1405
1406void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); 1406void hv_process_channel_removal(u32 relid);
1407 1407
1408void vmbus_setevent(struct vmbus_channel *channel); 1408void vmbus_setevent(struct vmbus_channel *channel);
1409/* 1409/*
diff --git a/include/linux/input.h b/include/linux/input.h
index fb5e23c7ed98..7c7516eb7d76 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -234,6 +234,10 @@ struct input_dev {
234#error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match" 234#error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match"
235#endif 235#endif
236 236
237#if INPUT_PROP_MAX != INPUT_DEVICE_ID_PROP_MAX
238#error "INPUT_PROP_MAX and INPUT_DEVICE_ID_PROP_MAX do not match"
239#endif
240
237#define INPUT_DEVICE_ID_MATCH_DEVICE \ 241#define INPUT_DEVICE_ID_MATCH_DEVICE \
238 (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT) 242 (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT)
239#define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \ 243#define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \
@@ -469,6 +473,9 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke);
469int input_set_keycode(struct input_dev *dev, 473int input_set_keycode(struct input_dev *dev,
470 const struct input_keymap_entry *ke); 474 const struct input_keymap_entry *ke);
471 475
476bool input_match_device_id(const struct input_dev *dev,
477 const struct input_device_id *id);
478
472void input_enable_softrepeat(struct input_dev *dev, int delay, int period); 479void input_enable_softrepeat(struct input_dev *dev, int delay, int period);
473 480
474extern struct class input_class; 481extern struct class input_class;
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 0ad4c3044cf9..91189bb0c818 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -44,6 +44,12 @@
44 44
45#define STACK_MAGIC 0xdeadbeef 45#define STACK_MAGIC 0xdeadbeef
46 46
47/**
48 * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
49 * @x: value to repeat
50 *
51 * NOTE: @x is not checked for > 0xff; larger values produce odd results.
52 */
47#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) 53#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
48 54
49/* @a is a power of 2 value */ 55/* @a is a power of 2 value */
@@ -57,6 +63,10 @@
57#define READ 0 63#define READ 0
58#define WRITE 1 64#define WRITE 1
59 65
66/**
67 * ARRAY_SIZE - get the number of elements in array @arr
68 * @arr: array to be sized
69 */
60#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) 70#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
61 71
62#define u64_to_user_ptr(x) ( \ 72#define u64_to_user_ptr(x) ( \
@@ -76,7 +86,15 @@
76#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) 86#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
77#define round_down(x, y) ((x) & ~__round_mask(x, y)) 87#define round_down(x, y) ((x) & ~__round_mask(x, y))
78 88
89/**
90 * FIELD_SIZEOF - get the size of a struct's field
91 * @t: the target struct
92 * @f: the target struct's field
93 * Return: the size of @f in the struct definition without having a
94 * declared instance of @t.
95 */
79#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) 96#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
97
80#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP 98#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
81 99
82#define DIV_ROUND_DOWN_ULL(ll, d) \ 100#define DIV_ROUND_DOWN_ULL(ll, d) \
@@ -107,7 +125,7 @@
107/* 125/*
108 * Divide positive or negative dividend by positive or negative divisor 126 * Divide positive or negative dividend by positive or negative divisor
109 * and round to closest integer. Result is undefined for negative 127 * and round to closest integer. Result is undefined for negative
110 * divisors if he dividend variable type is unsigned and for negative 128 * divisors if the dividend variable type is unsigned and for negative
111 * dividends if the divisor variable type is unsigned. 129 * dividends if the divisor variable type is unsigned.
112 */ 130 */
113#define DIV_ROUND_CLOSEST(x, divisor)( \ 131#define DIV_ROUND_CLOSEST(x, divisor)( \
@@ -247,13 +265,13 @@ extern int _cond_resched(void);
247 * @ep_ro: right open interval endpoint 265 * @ep_ro: right open interval endpoint
248 * 266 *
249 * Perform a "reciprocal multiplication" in order to "scale" a value into 267 * Perform a "reciprocal multiplication" in order to "scale" a value into
250 * range [0, ep_ro), where the upper interval endpoint is right-open. 268 * range [0, @ep_ro), where the upper interval endpoint is right-open.
251 * This is useful, e.g. for accessing an index of an array containing 269
252 * ep_ro elements, for example. Think of it as sort of modulus, only that 270 * @ep_ro elements, for example. Think of it as sort of modulus, only that
253 * the result isn't that of modulo. ;) Note that if initial input is a 271 * the result isn't that of modulo. ;) Note that if initial input is a
254 * small value, then result will return 0. 272 * small value, then result will return 0.
255 * 273 *
256 * Return: a result based on val in interval [0, ep_ro). 274 * Return: a result based on @val in interval [0, @ep_ro).
257 */ 275 */
258static inline u32 reciprocal_scale(u32 val, u32 ep_ro) 276static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
259{ 277{
@@ -618,8 +636,8 @@ do { \
618 * trace_printk - printf formatting in the ftrace buffer 636 * trace_printk - printf formatting in the ftrace buffer
619 * @fmt: the printf format for printing 637 * @fmt: the printf format for printing
620 * 638 *
621 * Note: __trace_printk is an internal function for trace_printk and 639 * Note: __trace_printk is an internal function for trace_printk() and
622 * the @ip is passed in via the trace_printk macro. 640 * the @ip is passed in via the trace_printk() macro.
623 * 641 *
624 * This function allows a kernel developer to debug fast path sections 642 * This function allows a kernel developer to debug fast path sections
625 * that printk is not appropriate for. By scattering in various 643 * that printk is not appropriate for. By scattering in various
@@ -629,7 +647,7 @@ do { \
629 * This is intended as a debugging tool for the developer only. 647 * This is intended as a debugging tool for the developer only.
630 * Please refrain from leaving trace_printks scattered around in 648 * Please refrain from leaving trace_printks scattered around in
631 * your code. (Extra memory is used for special buffers that are 649 * your code. (Extra memory is used for special buffers that are
632 * allocated when trace_printk() is used) 650 * allocated when trace_printk() is used.)
633 * 651 *
634 * A little optimization trick is done here. If there's only one 652
635 * argument, there's no need to scan the string for printf formats. 653 * argument, there's no need to scan the string for printf formats.
@@ -681,7 +699,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
681 * the @ip is passed in via the trace_puts macro. 699 * the @ip is passed in via the trace_puts macro.
682 * 700 *
683 * This is similar to trace_printk() but is made for those really fast 701 * This is similar to trace_printk() but is made for those really fast
684 * paths that a developer wants the least amount of "Heisenbug" affects, 702 * paths that a developer wants the least amount of "Heisenbug" effects,
685 * where the processing of the print format is still too much. 703 * where the processing of the print format is still too much.
686 * 704 *
687 * This function allows a kernel developer to debug fast path sections 705 * This function allows a kernel developer to debug fast path sections
@@ -692,7 +710,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
692 * This is intended as a debugging tool for the developer only. 710 * This is intended as a debugging tool for the developer only.
693 * Please refrain from leaving trace_puts scattered around in 711 * Please refrain from leaving trace_puts scattered around in
694 * your code. (Extra memory is used for special buffers that are 712 * your code. (Extra memory is used for special buffers that are
695 * allocated when trace_puts() is used) 713 * allocated when trace_puts() is used.)
696 * 714 *
697 * Returns: 0 if nothing was written, positive # if string was. 715 * Returns: 0 if nothing was written, positive # if string was.
698 * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) 716 * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
@@ -771,6 +789,12 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
771 t2 min2 = (y); \ 789 t2 min2 = (y); \
772 (void) (&min1 == &min2); \ 790 (void) (&min1 == &min2); \
773 min1 < min2 ? min1 : min2; }) 791 min1 < min2 ? min1 : min2; })
792
793/**
794 * min - return minimum of two values of the same or compatible types
795 * @x: first value
796 * @y: second value
797 */
774#define min(x, y) \ 798#define min(x, y) \
775 __min(typeof(x), typeof(y), \ 799 __min(typeof(x), typeof(y), \
776 __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ 800 __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
@@ -781,12 +805,31 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
781 t2 max2 = (y); \ 805 t2 max2 = (y); \
782 (void) (&max1 == &max2); \ 806 (void) (&max1 == &max2); \
783 max1 > max2 ? max1 : max2; }) 807 max1 > max2 ? max1 : max2; })
808
809/**
810 * max - return maximum of two values of the same or compatible types
811 * @x: first value
812 * @y: second value
813 */
784#define max(x, y) \ 814#define max(x, y) \
785 __max(typeof(x), typeof(y), \ 815 __max(typeof(x), typeof(y), \
786 __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \ 816 __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \
787 x, y) 817 x, y)
788 818
819/**
820 * min3 - return minimum of three values
821 * @x: first value
822 * @y: second value
823 * @z: third value
824 */
789#define min3(x, y, z) min((typeof(x))min(x, y), z) 825#define min3(x, y, z) min((typeof(x))min(x, y), z)
826
827/**
828 * max3 - return maximum of three values
829 * @x: first value
830 * @y: second value
831 * @z: third value
832 */
790#define max3(x, y, z) max((typeof(x))max(x, y), z) 833#define max3(x, y, z) max((typeof(x))max(x, y), z)
791 834
792/** 835/**
@@ -805,8 +848,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
805 * @lo: lowest allowable value 848 * @lo: lowest allowable value
806 * @hi: highest allowable value 849 * @hi: highest allowable value
807 * 850 *
808 * This macro does strict typechecking of lo/hi to make sure they are of the 851 * This macro does strict typechecking of @lo/@hi to make sure they are of the
809 * same type as val. See the unnecessary pointer comparisons. 852 * same type as @val. See the unnecessary pointer comparisons.
810 */ 853 */
811#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) 854#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
812 855
@@ -816,11 +859,24 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
816 * 859 *
817 * Or not use min/max/clamp at all, of course. 860 * Or not use min/max/clamp at all, of course.
818 */ 861 */
862
863/**
864 * min_t - return minimum of two values, using the specified type
865 * @type: data type to use
866 * @x: first value
867 * @y: second value
868 */
819#define min_t(type, x, y) \ 869#define min_t(type, x, y) \
820 __min(type, type, \ 870 __min(type, type, \
821 __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ 871 __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
822 x, y) 872 x, y)
823 873
874/**
875 * max_t - return maximum of two values, using the specified type
876 * @type: data type to use
877 * @x: first value
878 * @y: second value
879 */
824#define max_t(type, x, y) \ 880#define max_t(type, x, y) \
825 __max(type, type, \ 881 __max(type, type, \
826 __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ 882 __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
@@ -834,7 +890,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
834 * @hi: maximum allowable value 890 * @hi: maximum allowable value
835 * 891 *
836 * This macro does no typechecking and uses temporary variables of type 892 * This macro does no typechecking and uses temporary variables of type
837 * 'type' to make all the comparisons. 893 * @type to make all the comparisons.
838 */ 894 */
839#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) 895#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
840 896
@@ -845,15 +901,17 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
845 * @hi: maximum allowable value 901 * @hi: maximum allowable value
846 * 902 *
847 * This macro does no typechecking and uses temporary variables of whatever 903 * This macro does no typechecking and uses temporary variables of whatever
848 * type the input argument 'val' is. This is useful when val is an unsigned 904 * type the input argument @val is. This is useful when @val is an unsigned
849 * type and min and max are literals that will otherwise be assigned a signed 905 * type and @lo and @hi are literals that will otherwise be assigned a signed
850 * integer type. 906 * integer type.
851 */ 907 */
852#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) 908#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
853 909
854 910
855/* 911/**
856 * swap - swap value of @a and @b 912 * swap - swap values of @a and @b
913 * @a: first value
914 * @b: second value
857 */ 915 */
858#define swap(a, b) \ 916#define swap(a, b) \
859 do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) 917 do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
diff --git a/include/linux/key.h b/include/linux/key.h
index e315e16b6ff8..8a15cabe928d 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -138,6 +138,11 @@ struct key_restriction {
138 struct key_type *keytype; 138 struct key_type *keytype;
139}; 139};
140 140
141enum key_state {
142 KEY_IS_UNINSTANTIATED,
143 KEY_IS_POSITIVE, /* Positively instantiated */
144};
145
141/*****************************************************************************/ 146/*****************************************************************************/
142/* 147/*
143 * authentication token / access credential / keyring 148 * authentication token / access credential / keyring
@@ -169,6 +174,7 @@ struct key {
169 * - may not match RCU dereferenced payload 174 * - may not match RCU dereferenced payload
170 * - payload should contain own length 175 * - payload should contain own length
171 */ 176 */
177 short state; /* Key state (+) or rejection error (-) */
172 178
173#ifdef KEY_DEBUGGING 179#ifdef KEY_DEBUGGING
174 unsigned magic; 180 unsigned magic;
@@ -176,18 +182,16 @@ struct key {
176#endif 182#endif
177 183
178 unsigned long flags; /* status flags (change with bitops) */ 184 unsigned long flags; /* status flags (change with bitops) */
179#define KEY_FLAG_INSTANTIATED 0 /* set if key has been instantiated */ 185#define KEY_FLAG_DEAD 0 /* set if key type has been deleted */
180#define KEY_FLAG_DEAD 1 /* set if key type has been deleted */ 186#define KEY_FLAG_REVOKED 1 /* set if key had been revoked */
181#define KEY_FLAG_REVOKED 2 /* set if key had been revoked */ 187#define KEY_FLAG_IN_QUOTA 2 /* set if key consumes quota */
182#define KEY_FLAG_IN_QUOTA 3 /* set if key consumes quota */ 188#define KEY_FLAG_USER_CONSTRUCT 3 /* set if key is being constructed in userspace */
183#define KEY_FLAG_USER_CONSTRUCT 4 /* set if key is being constructed in userspace */ 189#define KEY_FLAG_ROOT_CAN_CLEAR 4 /* set if key can be cleared by root without permission */
184#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */ 190#define KEY_FLAG_INVALIDATED 5 /* set if key has been invalidated */
185#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */ 191#define KEY_FLAG_BUILTIN 6 /* set if key is built in to the kernel */
186#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */ 192#define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */
187#define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */ 193#define KEY_FLAG_KEEP 8 /* set if key should not be removed */
188#define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */ 194#define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */
189#define KEY_FLAG_KEEP 10 /* set if key should not be removed */
190#define KEY_FLAG_UID_KEYRING 11 /* set if key is a user or user session keyring */
191 195
192 /* the key type and key description string 196 /* the key type and key description string
193 * - the desc is used to match a key against search criteria 197 * - the desc is used to match a key against search criteria
@@ -213,7 +217,6 @@ struct key {
213 struct list_head name_link; 217 struct list_head name_link;
214 struct assoc_array keys; 218 struct assoc_array keys;
215 }; 219 };
216 int reject_error;
217 }; 220 };
218 221
219 /* This is set on a keyring to restrict the addition of a link to a key 222 /* This is set on a keyring to restrict the addition of a link to a key
@@ -353,17 +356,27 @@ extern void key_set_timeout(struct key *, unsigned);
353#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */ 356#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */
354#define KEY_NEED_ALL 0x3f /* All the above permissions */ 357#define KEY_NEED_ALL 0x3f /* All the above permissions */
355 358
359static inline short key_read_state(const struct key *key)
360{
361 /* Barrier versus mark_key_instantiated(). */
362 return smp_load_acquire(&key->state);
363}
364
356/** 365/**
357 * key_is_instantiated - Determine if a key has been positively instantiated 366 * key_is_positive - Determine if a key has been positively instantiated
358 * @key: The key to check. 367 * @key: The key to check.
359 * 368 *
360 * Return true if the specified key has been positively instantiated, false 369 * Return true if the specified key has been positively instantiated, false
361 * otherwise. 370 * otherwise.
362 */ 371 */
363static inline bool key_is_instantiated(const struct key *key) 372static inline bool key_is_positive(const struct key *key)
373{
374 return key_read_state(key) == KEY_IS_POSITIVE;
375}
376
377static inline bool key_is_negative(const struct key *key)
364{ 378{
365 return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) && 379 return key_read_state(key) < 0;
366 !test_bit(KEY_FLAG_NEGATIVE, &key->flags);
367} 380}
368 381
369#define dereference_key_rcu(KEY) \ 382#define dereference_key_rcu(KEY) \
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 0d3f14fd2621..4773145246ed 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -31,8 +31,8 @@ struct mbus_dram_target_info
31 struct mbus_dram_window { 31 struct mbus_dram_window {
32 u8 cs_index; 32 u8 cs_index;
33 u8 mbus_attr; 33 u8 mbus_attr;
34 u32 base; 34 u64 base;
35 u32 size; 35 u64 size;
36 } cs[4]; 36 } cs[4];
37}; 37};
38 38
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 46f4ecf5479a..1861ea8dba77 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -445,6 +445,9 @@ struct mm_struct {
445 unsigned long flags; /* Must use atomic bitops to access the bits */ 445 unsigned long flags; /* Must use atomic bitops to access the bits */
446 446
447 struct core_state *core_state; /* coredumping support */ 447 struct core_state *core_state; /* coredumping support */
448#ifdef CONFIG_MEMBARRIER
449 atomic_t membarrier_state;
450#endif
448#ifdef CONFIG_AIO 451#ifdef CONFIG_AIO
449 spinlock_t ioctx_lock; 452 spinlock_t ioctx_lock;
450 struct kioctx_table __rcu *ioctx_table; 453 struct kioctx_table __rcu *ioctx_table;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 7625c3b81f84..d9fb7abad445 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -293,6 +293,7 @@ struct pcmcia_device_id {
293#define INPUT_DEVICE_ID_SND_MAX 0x07 293#define INPUT_DEVICE_ID_SND_MAX 0x07
294#define INPUT_DEVICE_ID_FF_MAX 0x7f 294#define INPUT_DEVICE_ID_FF_MAX 0x7f
295#define INPUT_DEVICE_ID_SW_MAX 0x0f 295#define INPUT_DEVICE_ID_SW_MAX 0x0f
296#define INPUT_DEVICE_ID_PROP_MAX 0x1f
296 297
297#define INPUT_DEVICE_ID_MATCH_BUS 1 298#define INPUT_DEVICE_ID_MATCH_BUS 1
298#define INPUT_DEVICE_ID_MATCH_VENDOR 2 299#define INPUT_DEVICE_ID_MATCH_VENDOR 2
@@ -308,6 +309,7 @@ struct pcmcia_device_id {
308#define INPUT_DEVICE_ID_MATCH_SNDBIT 0x0400 309#define INPUT_DEVICE_ID_MATCH_SNDBIT 0x0400
309#define INPUT_DEVICE_ID_MATCH_FFBIT 0x0800 310#define INPUT_DEVICE_ID_MATCH_FFBIT 0x0800
310#define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000 311#define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000
312#define INPUT_DEVICE_ID_MATCH_PROPBIT 0x2000
311 313
312struct input_device_id { 314struct input_device_id {
313 315
@@ -327,6 +329,7 @@ struct input_device_id {
327 kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1]; 329 kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1];
328 kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1]; 330 kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1];
329 kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1]; 331 kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1];
332 kernel_ulong_t propbit[INPUT_DEVICE_ID_PROP_MAX / BITS_PER_LONG + 1];
330 333
331 kernel_ulong_t driver_info; 334 kernel_ulong_t driver_info;
332}; 335};
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 4de5b08ee0fb..6c7960c8338a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3711,6 +3711,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
3711 unsigned char name_assign_type, 3711 unsigned char name_assign_type,
3712 void (*setup)(struct net_device *), 3712 void (*setup)(struct net_device *),
3713 unsigned int txqs, unsigned int rxqs); 3713 unsigned int txqs, unsigned int rxqs);
3714int dev_get_valid_name(struct net *net, struct net_device *dev,
3715 const char *name);
3716
3714#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ 3717#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
3715 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) 3718 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
3716 3719
diff --git a/include/linux/of.h b/include/linux/of.h
index cfc34117fc92..b240ed69dc96 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -734,6 +734,16 @@ static inline struct device_node *of_get_cpu_node(int cpu,
734 return NULL; 734 return NULL;
735} 735}
736 736
737static inline int of_n_addr_cells(struct device_node *np)
738{
739 return 0;
740
741}
742static inline int of_n_size_cells(struct device_node *np)
743{
744 return 0;
745}
746
737static inline int of_property_read_u64(const struct device_node *np, 747static inline int of_property_read_u64(const struct device_node *np,
738 const char *propname, u64 *out_value) 748 const char *propname, u64 *out_value)
739{ 749{
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index b1fd8bf85fdc..2bea1d5e9930 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -276,7 +276,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
276#define list_entry_rcu(ptr, type, member) \ 276#define list_entry_rcu(ptr, type, member) \
277 container_of(lockless_dereference(ptr), type, member) 277 container_of(lockless_dereference(ptr), type, member)
278 278
279/** 279/*
280 * Where are list_empty_rcu() and list_first_entry_rcu()? 280 * Where are list_empty_rcu() and list_first_entry_rcu()?
281 * 281 *
282 * Implementing those functions following their counterparts list_empty() and 282 * Implementing those functions following their counterparts list_empty() and
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index de50d8a4cf41..1a9f70d44af9 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -523,7 +523,7 @@ static inline void rcu_preempt_sleep_check(void) { }
523 * Return the value of the specified RCU-protected pointer, but omit 523 * Return the value of the specified RCU-protected pointer, but omit
524 * both the smp_read_barrier_depends() and the READ_ONCE(). This 524 * both the smp_read_barrier_depends() and the READ_ONCE(). This
525 * is useful in cases where update-side locks prevent the value of the 525 * is useful in cases where update-side locks prevent the value of the
526 * pointer from changing. Please note that this primitive does -not- 526 * pointer from changing. Please note that this primitive does *not*
527 * prevent the compiler from repeating this reference or combining it 527 * prevent the compiler from repeating this reference or combining it
528 * with other references, so it should not be used without protection 528 * with other references, so it should not be used without protection
529 * of appropriate locks. 529 * of appropriate locks.
@@ -568,7 +568,7 @@ static inline void rcu_preempt_sleep_check(void) { }
568 * is handed off from RCU to some other synchronization mechanism, for 568 * is handed off from RCU to some other synchronization mechanism, for
569 * example, reference counting or locking. In C11, it would map to 569 * example, reference counting or locking. In C11, it would map to
570 * kill_dependency(). It could be used as follows: 570 * kill_dependency(). It could be used as follows:
571 * 571 * ``
572 * rcu_read_lock(); 572 * rcu_read_lock();
573 * p = rcu_dereference(gp); 573 * p = rcu_dereference(gp);
574 * long_lived = is_long_lived(p); 574 * long_lived = is_long_lived(p);
@@ -579,6 +579,7 @@ static inline void rcu_preempt_sleep_check(void) { }
579 * p = rcu_pointer_handoff(p); 579 * p = rcu_pointer_handoff(p);
580 * } 580 * }
581 * rcu_read_unlock(); 581 * rcu_read_unlock();
582 *``
582 */ 583 */
583#define rcu_pointer_handoff(p) (p) 584#define rcu_pointer_handoff(p) (p)
584 585
@@ -778,18 +779,21 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
778 779
779/** 780/**
780 * RCU_INIT_POINTER() - initialize an RCU protected pointer 781 * RCU_INIT_POINTER() - initialize an RCU protected pointer
782 * @p: The pointer to be initialized.
783 * @v: The value to initialize the pointer to.
781 * 784 *
782 * Initialize an RCU-protected pointer in special cases where readers 785 * Initialize an RCU-protected pointer in special cases where readers
783 * do not need ordering constraints on the CPU or the compiler. These 786 * do not need ordering constraints on the CPU or the compiler. These
784 * special cases are: 787 * special cases are:
785 * 788 *
786 * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or- 789 * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
787 * 2. The caller has taken whatever steps are required to prevent 790 * 2. The caller has taken whatever steps are required to prevent
788 * RCU readers from concurrently accessing this pointer -or- 791 * RCU readers from concurrently accessing this pointer *or*
789 * 3. The referenced data structure has already been exposed to 792 * 3. The referenced data structure has already been exposed to
790 * readers either at compile time or via rcu_assign_pointer() -and- 793 * readers either at compile time or via rcu_assign_pointer() *and*
791 * a. You have not made -any- reader-visible changes to 794 *
792 * this structure since then -or- 795 * a. You have not made *any* reader-visible changes to
796 * this structure since then *or*
793 * b. It is OK for readers accessing this structure from its 797 * b. It is OK for readers accessing this structure from its
794 * new location to see the old state of the structure. (For 798 * new location to see the old state of the structure. (For
795 * example, the changes were to statistical counters or to 799 * example, the changes were to statistical counters or to
@@ -805,7 +809,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
805 * by a single external-to-structure RCU-protected pointer, then you may 809 * by a single external-to-structure RCU-protected pointer, then you may
806 * use RCU_INIT_POINTER() to initialize the internal RCU-protected 810 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
807 * pointers, but you must use rcu_assign_pointer() to initialize the 811 * pointers, but you must use rcu_assign_pointer() to initialize the
808 * external-to-structure pointer -after- you have completely initialized 812 * external-to-structure pointer *after* you have completely initialized
809 * the reader-accessible portions of the linked structure. 813 * the reader-accessible portions of the linked structure.
810 * 814 *
811 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no 815 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
@@ -819,6 +823,8 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
819 823
820/** 824/**
821 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer 825 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
826 * @p: The pointer to be initialized.
827 * @v: The value to initialize the pointer to.
822 * 828 *
823 * GCC-style initialization for an RCU-protected pointer in a structure field. 829 * GCC-style initialization for an RCU-protected pointer in a structure field.
824 */ 830 */
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index ae53e413fb13..ab9bf7b73954 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -211,4 +211,20 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
211 current->flags = (current->flags & ~PF_MEMALLOC) | flags; 211 current->flags = (current->flags & ~PF_MEMALLOC) | flags;
212} 212}
213 213
214#ifdef CONFIG_MEMBARRIER
215enum {
216 MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
217 MEMBARRIER_STATE_SWITCH_MM = (1U << 1),
218};
219
220static inline void membarrier_execve(struct task_struct *t)
221{
222 atomic_set(&t->mm->membarrier_state, 0);
223}
224#else
225static inline void membarrier_execve(struct task_struct *t)
226{
227}
228#endif
229
214#endif /* _LINUX_SCHED_MM_H */ 230#endif /* _LINUX_SCHED_MM_H */
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index d7b6dab956ec..7d065abc7a47 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -71,14 +71,6 @@ struct sched_domain_shared {
71 atomic_t ref; 71 atomic_t ref;
72 atomic_t nr_busy_cpus; 72 atomic_t nr_busy_cpus;
73 int has_idle_cores; 73 int has_idle_cores;
74
75 /*
76 * Some variables from the most recent sd_lb_stats for this domain,
77 * used by wake_affine().
78 */
79 unsigned long nr_running;
80 unsigned long load;
81 unsigned long capacity;
82}; 74};
83 75
84struct sched_domain { 76struct sched_domain {
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 39af9bc0f653..62be8966e837 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -78,6 +78,7 @@ void synchronize_srcu(struct srcu_struct *sp);
78 78
79/** 79/**
80 * srcu_read_lock_held - might we be in SRCU read-side critical section? 80 * srcu_read_lock_held - might we be in SRCU read-side critical section?
81 * @sp: The srcu_struct structure to check
81 * 82 *
82 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU 83 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
83 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, 84 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 905d769d8ddc..5f7eeab990fe 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -42,7 +42,7 @@ enum {
42#define THREAD_ALIGN THREAD_SIZE 42#define THREAD_ALIGN THREAD_SIZE
43#endif 43#endif
44 44
45#ifdef CONFIG_DEBUG_STACK_USAGE 45#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
46# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \ 46# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
47 __GFP_ZERO) 47 __GFP_ZERO)
48#else 48#else
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index aa95053dfc78..425752f768d2 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -96,7 +96,7 @@ struct inet_request_sock {
96 kmemcheck_bitfield_end(flags); 96 kmemcheck_bitfield_end(flags);
97 u32 ir_mark; 97 u32 ir_mark;
98 union { 98 union {
99 struct ip_options_rcu *opt; 99 struct ip_options_rcu __rcu *ireq_opt;
100#if IS_ENABLED(CONFIG_IPV6) 100#if IS_ENABLED(CONFIG_IPV6)
101 struct { 101 struct {
102 struct ipv6_txoptions *ipv6_opt; 102 struct ipv6_txoptions *ipv6_opt;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 1efe8365cb28..2c13484704cb 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -840,6 +840,11 @@ struct tcp_skb_cb {
840 struct inet6_skb_parm h6; 840 struct inet6_skb_parm h6;
841#endif 841#endif
842 } header; /* For incoming skbs */ 842 } header; /* For incoming skbs */
843 struct {
844 __u32 key;
845 __u32 flags;
846 struct bpf_map *map;
847 } bpf;
843 }; 848 };
844}; 849};
845 850
diff --git a/include/sound/control.h b/include/sound/control.h
index bd7246de58e7..a1f1152bc687 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -248,6 +248,9 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl,
248 void *private_data); 248 void *private_data);
249void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only); 249void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only);
250#define snd_ctl_sync_vmaster_hook(kctl) snd_ctl_sync_vmaster(kctl, true) 250#define snd_ctl_sync_vmaster_hook(kctl) snd_ctl_sync_vmaster(kctl, true)
251int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
252 int (*func)(struct snd_kcontrol *, void *),
253 void *arg);
251 254
252/* 255/*
253 * Helper functions for jack-detection controls 256 * Helper functions for jack-detection controls
diff --git a/include/sound/seq_virmidi.h b/include/sound/seq_virmidi.h
index a03acd0d398a..695257ae64ac 100644
--- a/include/sound/seq_virmidi.h
+++ b/include/sound/seq_virmidi.h
@@ -60,6 +60,7 @@ struct snd_virmidi_dev {
60 int port; /* created/attached port */ 60 int port; /* created/attached port */
61 unsigned int flags; /* SNDRV_VIRMIDI_* */ 61 unsigned int flags; /* SNDRV_VIRMIDI_* */
62 rwlock_t filelist_lock; 62 rwlock_t filelist_lock;
63 struct rw_semaphore filelist_sem;
63 struct list_head filelist; 64 struct list_head filelist;
64}; 65};
65 66
diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h
index 6d47b3249d8a..4e01ad7ffe98 100644
--- a/include/uapi/linux/membarrier.h
+++ b/include/uapi/linux/membarrier.h
@@ -52,21 +52,30 @@
52 * (non-running threads are de facto in such a 52 * (non-running threads are de facto in such a
53 * state). This only covers threads from the 53 * state). This only covers threads from the
54 * same processes as the caller thread. This 54 * same processes as the caller thread. This
55 * command returns 0. The "expedited" commands 55 * command returns 0 on success. The
56 * complete faster than the non-expedited ones, 56 * "expedited" commands complete faster than
57 * they never block, but have the downside of 57 * the non-expedited ones, they never block,
58 * causing extra overhead. 58 * but have the downside of causing extra
59 * overhead. A process needs to register its
60 * intent to use the private expedited command
61 * prior to using it, otherwise this command
62 * returns -EPERM.
63 * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
64 * Register the process intent to use
65 * MEMBARRIER_CMD_PRIVATE_EXPEDITED. Always
66 * returns 0.
59 * 67 *
60 * Command to be passed to the membarrier system call. The commands need to 68 * Command to be passed to the membarrier system call. The commands need to
61 * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to 69 * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
62 * the value 0. 70 * the value 0.
63 */ 71 */
64enum membarrier_cmd { 72enum membarrier_cmd {
65 MEMBARRIER_CMD_QUERY = 0, 73 MEMBARRIER_CMD_QUERY = 0,
66 MEMBARRIER_CMD_SHARED = (1 << 0), 74 MEMBARRIER_CMD_SHARED = (1 << 0),
67 /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */ 75 /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
68 /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */ 76 /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
69 MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3), 77 MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
78 MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
70}; 79};
71 80
72#endif /* _UAPI_LINUX_MEMBARRIER_H */ 81#endif /* _UAPI_LINUX_MEMBARRIER_H */
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 988c04c91e10..7c25426d3cf5 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -102,7 +102,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
102 array_size += (u64) attr->max_entries * elem_size * num_possible_cpus(); 102 array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
103 103
104 if (array_size >= U32_MAX - PAGE_SIZE || 104 if (array_size >= U32_MAX - PAGE_SIZE ||
105 elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) { 105 bpf_array_alloc_percpu(array)) {
106 bpf_map_area_free(array); 106 bpf_map_area_free(array);
107 return ERR_PTR(-ENOMEM); 107 return ERR_PTR(-ENOMEM);
108 } 108 }
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index e5d3de7cff2e..ebdef54bf7df 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -72,7 +72,7 @@ static LIST_HEAD(dev_map_list);
72 72
73static u64 dev_map_bitmap_size(const union bpf_attr *attr) 73static u64 dev_map_bitmap_size(const union bpf_attr *attr)
74{ 74{
75 return BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long); 75 return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
76} 76}
77 77
78static struct bpf_map *dev_map_alloc(union bpf_attr *attr) 78static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
@@ -81,6 +81,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
81 int err = -EINVAL; 81 int err = -EINVAL;
82 u64 cost; 82 u64 cost;
83 83
84 if (!capable(CAP_NET_ADMIN))
85 return ERR_PTR(-EPERM);
86
84 /* check sanity of attributes */ 87 /* check sanity of attributes */
85 if (attr->max_entries == 0 || attr->key_size != 4 || 88 if (attr->max_entries == 0 || attr->key_size != 4 ||
86 attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK) 89 attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
@@ -114,8 +117,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
114 err = -ENOMEM; 117 err = -ENOMEM;
115 118
116 /* A per cpu bitfield with a bit per possible net device */ 119 /* A per cpu bitfield with a bit per possible net device */
117 dtab->flush_needed = __alloc_percpu(dev_map_bitmap_size(attr), 120 dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
118 __alignof__(unsigned long)); 121 __alignof__(unsigned long),
122 GFP_KERNEL | __GFP_NOWARN);
119 if (!dtab->flush_needed) 123 if (!dtab->flush_needed)
120 goto free_dtab; 124 goto free_dtab;
121 125
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 919955236e63..e469e05c8e83 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -318,10 +318,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
318 */ 318 */
319 goto free_htab; 319 goto free_htab;
320 320
321 if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
322 /* make sure the size for pcpu_alloc() is reasonable */
323 goto free_htab;
324
325 htab->elem_size = sizeof(struct htab_elem) + 321 htab->elem_size = sizeof(struct htab_elem) +
326 round_up(htab->map.key_size, 8); 322 round_up(htab->map.key_size, 8);
327 if (percpu) 323 if (percpu)
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 86ec846f2d5e..eef843c3b419 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -39,6 +39,7 @@
39#include <linux/workqueue.h> 39#include <linux/workqueue.h>
40#include <linux/list.h> 40#include <linux/list.h>
41#include <net/strparser.h> 41#include <net/strparser.h>
42#include <net/tcp.h>
42 43
43#define SOCK_CREATE_FLAG_MASK \ 44#define SOCK_CREATE_FLAG_MASK \
44 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) 45 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
@@ -104,9 +105,16 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
104 return SK_DROP; 105 return SK_DROP;
105 106
106 skb_orphan(skb); 107 skb_orphan(skb);
108 /* We need to ensure that BPF metadata for maps is also cleared
109 * when we orphan the skb so that we don't have the possibility
110 * to reference a stale map.
111 */
112 TCP_SKB_CB(skb)->bpf.map = NULL;
107 skb->sk = psock->sock; 113 skb->sk = psock->sock;
108 bpf_compute_data_pointers(skb); 114 bpf_compute_data_pointers(skb);
115 preempt_disable();
109 rc = (*prog->bpf_func)(skb, prog->insnsi); 116 rc = (*prog->bpf_func)(skb, prog->insnsi);
117 preempt_enable();
110 skb->sk = NULL; 118 skb->sk = NULL;
111 119
112 return rc; 120 return rc;
@@ -117,17 +125,10 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
117 struct sock *sk; 125 struct sock *sk;
118 int rc; 126 int rc;
119 127
120 /* Because we use per cpu values to feed input from sock redirect
121 * in BPF program to do_sk_redirect_map() call we need to ensure we
122 * are not preempted. RCU read lock is not sufficient in this case
123 * with CONFIG_PREEMPT_RCU enabled so we must be explicit here.
124 */
125 preempt_disable();
126 rc = smap_verdict_func(psock, skb); 128 rc = smap_verdict_func(psock, skb);
127 switch (rc) { 129 switch (rc) {
128 case SK_REDIRECT: 130 case SK_REDIRECT:
129 sk = do_sk_redirect_map(); 131 sk = do_sk_redirect_map(skb);
130 preempt_enable();
131 if (likely(sk)) { 132 if (likely(sk)) {
132 struct smap_psock *peer = smap_psock_sk(sk); 133 struct smap_psock *peer = smap_psock_sk(sk);
133 134
@@ -144,8 +145,6 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
144 /* Fall through and free skb otherwise */ 145 /* Fall through and free skb otherwise */
145 case SK_DROP: 146 case SK_DROP:
146 default: 147 default:
147 if (rc != SK_REDIRECT)
148 preempt_enable();
149 kfree_skb(skb); 148 kfree_skb(skb);
150 } 149 }
151} 150}
@@ -490,6 +489,9 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
490 int err = -EINVAL; 489 int err = -EINVAL;
491 u64 cost; 490 u64 cost;
492 491
492 if (!capable(CAP_NET_ADMIN))
493 return ERR_PTR(-EPERM);
494
493 /* check sanity of attributes */ 495 /* check sanity of attributes */
494 if (attr->max_entries == 0 || attr->key_size != 4 || 496 if (attr->max_entries == 0 || attr->key_size != 4 ||
495 attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK) 497 attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
@@ -843,6 +845,12 @@ static int sock_map_update_elem(struct bpf_map *map,
843 return -EINVAL; 845 return -EINVAL;
844 } 846 }
845 847
848 if (skops.sk->sk_type != SOCK_STREAM ||
849 skops.sk->sk_protocol != IPPROTO_TCP) {
850 fput(socket->file);
851 return -EOPNOTSUPP;
852 }
853
846 err = sock_map_ctx_update_elem(&skops, map, key, flags); 854 err = sock_map_ctx_update_elem(&skops, map, key, flags);
847 fput(socket->file); 855 fput(socket->file);
848 return err; 856 return err;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 545b8c45a578..d906775e12c1 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1006,7 +1006,13 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
1006 /* ctx accesses must be at a fixed offset, so that we can 1006 /* ctx accesses must be at a fixed offset, so that we can
1007 * determine what type of data were returned. 1007 * determine what type of data were returned.
1008 */ 1008 */
1009 if (!tnum_is_const(reg->var_off)) { 1009 if (reg->off) {
1010 verbose(env,
1011 "dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
1012 regno, reg->off, off - reg->off);
1013 return -EACCES;
1014 }
1015 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
1010 char tn_buf[48]; 1016 char tn_buf[48];
1011 1017
1012 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 1018 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
@@ -1015,7 +1021,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
1015 tn_buf, off, size); 1021 tn_buf, off, size);
1016 return -EACCES; 1022 return -EACCES;
1017 } 1023 }
1018 off += reg->var_off.value;
1019 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type); 1024 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
1020 if (!err && t == BPF_READ && value_regno >= 0) { 1025 if (!err && t == BPF_READ && value_regno >= 0) {
1021 /* ctx access returns either a scalar, or a 1026 /* ctx access returns either a scalar, or a
@@ -2341,12 +2346,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2341 2346
2342static void find_good_pkt_pointers(struct bpf_verifier_state *state, 2347static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2343 struct bpf_reg_state *dst_reg, 2348 struct bpf_reg_state *dst_reg,
2344 enum bpf_reg_type type) 2349 enum bpf_reg_type type,
2350 bool range_right_open)
2345{ 2351{
2346 struct bpf_reg_state *regs = state->regs, *reg; 2352 struct bpf_reg_state *regs = state->regs, *reg;
2353 u16 new_range;
2347 int i; 2354 int i;
2348 2355
2349 if (dst_reg->off < 0) 2356 if (dst_reg->off < 0 ||
2357 (dst_reg->off == 0 && range_right_open))
2350 /* This doesn't give us any range */ 2358 /* This doesn't give us any range */
2351 return; 2359 return;
2352 2360
@@ -2357,9 +2365,13 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2357 */ 2365 */
2358 return; 2366 return;
2359 2367
2360 /* LLVM can generate four kind of checks: 2368 new_range = dst_reg->off;
2369 if (range_right_open)
2370 new_range--;
2371
2372 /* Examples for register markings:
2361 * 2373 *
2362 * Type 1/2: 2374 * pkt_data in dst register:
2363 * 2375 *
2364 * r2 = r3; 2376 * r2 = r3;
2365 * r2 += 8; 2377 * r2 += 8;
@@ -2376,7 +2388,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2376 * r2=pkt(id=n,off=8,r=0) 2388 * r2=pkt(id=n,off=8,r=0)
2377 * r3=pkt(id=n,off=0,r=0) 2389 * r3=pkt(id=n,off=0,r=0)
2378 * 2390 *
2379 * Type 3/4: 2391 * pkt_data in src register:
2380 * 2392 *
2381 * r2 = r3; 2393 * r2 = r3;
2382 * r2 += 8; 2394 * r2 += 8;
@@ -2394,7 +2406,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2394 * r3=pkt(id=n,off=0,r=0) 2406 * r3=pkt(id=n,off=0,r=0)
2395 * 2407 *
2396 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 2408 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
2397 * so that range of bytes [r3, r3 + 8) is safe to access. 2409 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
2410 * and [r3, r3 + 8-1) respectively is safe to access depending on
2411 * the check.
2398 */ 2412 */
2399 2413
2400 /* If our ids match, then we must have the same max_value. And we 2414 /* If our ids match, then we must have the same max_value. And we
@@ -2405,14 +2419,14 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2405 for (i = 0; i < MAX_BPF_REG; i++) 2419 for (i = 0; i < MAX_BPF_REG; i++)
2406 if (regs[i].type == type && regs[i].id == dst_reg->id) 2420 if (regs[i].type == type && regs[i].id == dst_reg->id)
2407 /* keep the maximum range already checked */ 2421 /* keep the maximum range already checked */
2408 regs[i].range = max_t(u16, regs[i].range, dst_reg->off); 2422 regs[i].range = max(regs[i].range, new_range);
2409 2423
2410 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 2424 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
2411 if (state->stack_slot_type[i] != STACK_SPILL) 2425 if (state->stack_slot_type[i] != STACK_SPILL)
2412 continue; 2426 continue;
2413 reg = &state->spilled_regs[i / BPF_REG_SIZE]; 2427 reg = &state->spilled_regs[i / BPF_REG_SIZE];
2414 if (reg->type == type && reg->id == dst_reg->id) 2428 if (reg->type == type && reg->id == dst_reg->id)
2415 reg->range = max_t(u16, reg->range, dst_reg->off); 2429 reg->range = max_t(u16, reg->range, new_range);
2416 } 2430 }
2417} 2431}
2418 2432
@@ -2776,39 +2790,71 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
2776 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && 2790 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
2777 dst_reg->type == PTR_TO_PACKET && 2791 dst_reg->type == PTR_TO_PACKET &&
2778 regs[insn->src_reg].type == PTR_TO_PACKET_END) { 2792 regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2779 find_good_pkt_pointers(this_branch, dst_reg, PTR_TO_PACKET); 2793 /* pkt_data' > pkt_end */
2794 find_good_pkt_pointers(this_branch, dst_reg,
2795 PTR_TO_PACKET, false);
2796 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
2797 dst_reg->type == PTR_TO_PACKET_END &&
2798 regs[insn->src_reg].type == PTR_TO_PACKET) {
2799 /* pkt_end > pkt_data' */
2800 find_good_pkt_pointers(other_branch, &regs[insn->src_reg],
2801 PTR_TO_PACKET, true);
2802 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
2803 dst_reg->type == PTR_TO_PACKET &&
2804 regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2805 /* pkt_data' < pkt_end */
2806 find_good_pkt_pointers(other_branch, dst_reg, PTR_TO_PACKET,
2807 true);
2780 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT && 2808 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
2809 dst_reg->type == PTR_TO_PACKET_END &&
2810 regs[insn->src_reg].type == PTR_TO_PACKET) {
2811 /* pkt_end < pkt_data' */
2812 find_good_pkt_pointers(this_branch, &regs[insn->src_reg],
2813 PTR_TO_PACKET, false);
2814 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
2781 dst_reg->type == PTR_TO_PACKET && 2815 dst_reg->type == PTR_TO_PACKET &&
2782 regs[insn->src_reg].type == PTR_TO_PACKET_END) { 2816 regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2783 find_good_pkt_pointers(other_branch, dst_reg, PTR_TO_PACKET); 2817 /* pkt_data' >= pkt_end */
2818 find_good_pkt_pointers(this_branch, dst_reg,
2819 PTR_TO_PACKET, true);
2784 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE && 2820 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
2785 dst_reg->type == PTR_TO_PACKET_END && 2821 dst_reg->type == PTR_TO_PACKET_END &&
2786 regs[insn->src_reg].type == PTR_TO_PACKET) { 2822 regs[insn->src_reg].type == PTR_TO_PACKET) {
2823 /* pkt_end >= pkt_data' */
2787 find_good_pkt_pointers(other_branch, &regs[insn->src_reg], 2824 find_good_pkt_pointers(other_branch, &regs[insn->src_reg],
2788 PTR_TO_PACKET); 2825 PTR_TO_PACKET, false);
2826 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
2827 dst_reg->type == PTR_TO_PACKET &&
2828 regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2829 /* pkt_data' <= pkt_end */
2830 find_good_pkt_pointers(other_branch, dst_reg,
2831 PTR_TO_PACKET, false);
2789 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE && 2832 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
2790 dst_reg->type == PTR_TO_PACKET_END && 2833 dst_reg->type == PTR_TO_PACKET_END &&
2791 regs[insn->src_reg].type == PTR_TO_PACKET) { 2834 regs[insn->src_reg].type == PTR_TO_PACKET) {
2835 /* pkt_end <= pkt_data' */
2792 find_good_pkt_pointers(this_branch, &regs[insn->src_reg], 2836 find_good_pkt_pointers(this_branch, &regs[insn->src_reg],
2793 PTR_TO_PACKET); 2837 PTR_TO_PACKET, true);
2794 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && 2838 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
2795 dst_reg->type == PTR_TO_PACKET_META && 2839 dst_reg->type == PTR_TO_PACKET_META &&
2796 reg_is_init_pkt_pointer(&regs[insn->src_reg], PTR_TO_PACKET)) { 2840 reg_is_init_pkt_pointer(&regs[insn->src_reg], PTR_TO_PACKET)) {
2797 find_good_pkt_pointers(this_branch, dst_reg, PTR_TO_PACKET_META); 2841 find_good_pkt_pointers(this_branch, dst_reg,
2842 PTR_TO_PACKET_META, false);
2798 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT && 2843 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
2799 dst_reg->type == PTR_TO_PACKET_META && 2844 dst_reg->type == PTR_TO_PACKET_META &&
2800 reg_is_init_pkt_pointer(&regs[insn->src_reg], PTR_TO_PACKET)) { 2845 reg_is_init_pkt_pointer(&regs[insn->src_reg], PTR_TO_PACKET)) {
2801 find_good_pkt_pointers(other_branch, dst_reg, PTR_TO_PACKET_META); 2846 find_good_pkt_pointers(other_branch, dst_reg,
2847 PTR_TO_PACKET_META, false);
2802 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE && 2848 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
2803 reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 2849 reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
2804 regs[insn->src_reg].type == PTR_TO_PACKET_META) { 2850 regs[insn->src_reg].type == PTR_TO_PACKET_META) {
2805 find_good_pkt_pointers(other_branch, &regs[insn->src_reg], 2851 find_good_pkt_pointers(other_branch, &regs[insn->src_reg],
2806 PTR_TO_PACKET_META); 2852 PTR_TO_PACKET_META, false);
2807 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE && 2853 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
2808 reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && 2854 reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
2809 regs[insn->src_reg].type == PTR_TO_PACKET_META) { 2855 regs[insn->src_reg].type == PTR_TO_PACKET_META) {
2810 find_good_pkt_pointers(this_branch, &regs[insn->src_reg], 2856 find_good_pkt_pointers(this_branch, &regs[insn->src_reg],
2811 PTR_TO_PACKET_META); 2857 PTR_TO_PACKET_META, false);
2812 } else if (is_pointer_value(env, insn->dst_reg)) { 2858 } else if (is_pointer_value(env, insn->dst_reg)) {
2813 verbose(env, "R%d pointer comparison prohibited\n", 2859 verbose(env, "R%d pointer comparison prohibited\n",
2814 insn->dst_reg); 2860 insn->dst_reg);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 902149f05381..31ee304a5844 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -662,7 +662,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
662 /* 662 /*
663 * Do not update time when cgroup is not active 663 * Do not update time when cgroup is not active
664 */ 664 */
665 if (cgrp == event->cgrp) 665 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
666 __update_cgrp_time(event->cgrp); 666 __update_cgrp_time(event->cgrp);
667} 667}
668 668
@@ -8966,6 +8966,14 @@ static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
8966 8966
8967static void free_pmu_context(struct pmu *pmu) 8967static void free_pmu_context(struct pmu *pmu)
8968{ 8968{
8969 /*
8970 * Static contexts such as perf_sw_context have a global lifetime
8971 * and may be shared between different PMUs. Avoid freeing them
8972 * when a single PMU is going away.
8973 */
8974 if (pmu->task_ctx_nr > perf_invalid_context)
8975 return;
8976
8969 mutex_lock(&pmus_lock); 8977 mutex_lock(&pmus_lock);
8970 free_percpu(pmu->pmu_cpu_context); 8978 free_percpu(pmu->pmu_cpu_context);
8971 mutex_unlock(&pmus_lock); 8979 mutex_unlock(&pmus_lock);
diff --git a/kernel/exit.c b/kernel/exit.c
index f2cd53e92147..f6cad39f35df 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1610,6 +1610,9 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
1610 if (!infop) 1610 if (!infop)
1611 return err; 1611 return err;
1612 1612
1613 if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
1614 return -EFAULT;
1615
1613 user_access_begin(); 1616 user_access_begin();
1614 unsafe_put_user(signo, &infop->si_signo, Efault); 1617 unsafe_put_user(signo, &infop->si_signo, Efault);
1615 unsafe_put_user(0, &infop->si_errno, Efault); 1618 unsafe_put_user(0, &infop->si_errno, Efault);
@@ -1735,6 +1738,9 @@ COMPAT_SYSCALL_DEFINE5(waitid,
1735 if (!infop) 1738 if (!infop)
1736 return err; 1739 return err;
1737 1740
1741 if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
1742 return -EFAULT;
1743
1738 user_access_begin(); 1744 user_access_begin();
1739 unsafe_put_user(signo, &infop->si_signo, Efault); 1745 unsafe_put_user(signo, &infop->si_signo, Efault);
1740 unsafe_put_user(0, &infop->si_errno, Efault); 1746 unsafe_put_user(0, &infop->si_errno, Efault);
diff --git a/kernel/fork.c b/kernel/fork.c
index e702cb9ffbd8..07cc743698d3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -215,6 +215,10 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
215 if (!s) 215 if (!s)
216 continue; 216 continue;
217 217
218#ifdef CONFIG_DEBUG_KMEMLEAK
219 /* Clear stale pointers from reused stack. */
220 memset(s->addr, 0, THREAD_SIZE);
221#endif
218 tsk->stack_vm_area = s; 222 tsk->stack_vm_area = s;
219 return s->addr; 223 return s->addr;
220 } 224 }
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 6fc89fd93824..5a2ef92c2782 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -265,8 +265,8 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
265 irq_setup_affinity(desc); 265 irq_setup_affinity(desc);
266 break; 266 break;
267 case IRQ_STARTUP_MANAGED: 267 case IRQ_STARTUP_MANAGED:
268 irq_do_set_affinity(d, aff, false);
268 ret = __irq_startup(desc); 269 ret = __irq_startup(desc);
269 irq_set_affinity_locked(d, aff, false);
270 break; 270 break;
271 case IRQ_STARTUP_ABORT: 271 case IRQ_STARTUP_ABORT:
272 return 0; 272 return 0;
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 638eb9c83d9f..9eb09aef0313 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -18,8 +18,34 @@
18static inline bool irq_needs_fixup(struct irq_data *d) 18static inline bool irq_needs_fixup(struct irq_data *d)
19{ 19{
20 const struct cpumask *m = irq_data_get_effective_affinity_mask(d); 20 const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
21 unsigned int cpu = smp_processor_id();
21 22
22 return cpumask_test_cpu(smp_processor_id(), m); 23#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
24 /*
25 * The cpumask_empty() check is a workaround for interrupt chips,
26 * which do not implement effective affinity, but the architecture has
27 * enabled the config switch. Use the general affinity mask instead.
28 */
29 if (cpumask_empty(m))
30 m = irq_data_get_affinity_mask(d);
31
32 /*
33 * Sanity check. If the mask is not empty when excluding the outgoing
34 * CPU then it must contain at least one online CPU. The outgoing CPU
35 * has been removed from the online mask already.
36 */
37 if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
38 cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
39 /*
40 * If this happens then there was a missed IRQ fixup at some
41 * point. Warn about it and enforce fixup.
42 */
43 pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
44 cpumask_pr_args(m), d->irq, cpu);
45 return true;
46 }
47#endif
48 return cpumask_test_cpu(cpu, m);
23} 49}
24 50
25static bool migrate_one_irq(struct irq_desc *desc) 51static bool migrate_one_irq(struct irq_desc *desc)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index d00132b5c325..4bff6a10ae8e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -168,6 +168,19 @@ void irq_set_thread_affinity(struct irq_desc *desc)
168 set_bit(IRQTF_AFFINITY, &action->thread_flags); 168 set_bit(IRQTF_AFFINITY, &action->thread_flags);
169} 169}
170 170
171static void irq_validate_effective_affinity(struct irq_data *data)
172{
173#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
174 const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
175 struct irq_chip *chip = irq_data_get_irq_chip(data);
176
177 if (!cpumask_empty(m))
178 return;
179 pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
180 chip->name, data->irq);
181#endif
182}
183
171int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, 184int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
172 bool force) 185 bool force)
173{ 186{
@@ -175,12 +188,16 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
175 struct irq_chip *chip = irq_data_get_irq_chip(data); 188 struct irq_chip *chip = irq_data_get_irq_chip(data);
176 int ret; 189 int ret;
177 190
191 if (!chip || !chip->irq_set_affinity)
192 return -EINVAL;
193
178 ret = chip->irq_set_affinity(data, mask, force); 194 ret = chip->irq_set_affinity(data, mask, force);
179 switch (ret) { 195 switch (ret) {
180 case IRQ_SET_MASK_OK: 196 case IRQ_SET_MASK_OK:
181 case IRQ_SET_MASK_OK_DONE: 197 case IRQ_SET_MASK_OK_DONE:
182 cpumask_copy(desc->irq_common_data.affinity, mask); 198 cpumask_copy(desc->irq_common_data.affinity, mask);
183 case IRQ_SET_MASK_OK_NOCOPY: 199 case IRQ_SET_MASK_OK_NOCOPY:
200 irq_validate_effective_affinity(data);
184 irq_set_thread_affinity(desc); 201 irq_set_thread_affinity(desc);
185 ret = 0; 202 ret = 0;
186 } 203 }
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index b9628e43c78f..bf8c8fd72589 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -830,6 +830,41 @@ int klp_register_patch(struct klp_patch *patch)
830} 830}
831EXPORT_SYMBOL_GPL(klp_register_patch); 831EXPORT_SYMBOL_GPL(klp_register_patch);
832 832
833/*
834 * Remove parts of patches that touch a given kernel module. The list of
835 * patches processed might be limited. When limit is NULL, all patches
836 * will be handled.
837 */
838static void klp_cleanup_module_patches_limited(struct module *mod,
839 struct klp_patch *limit)
840{
841 struct klp_patch *patch;
842 struct klp_object *obj;
843
844 list_for_each_entry(patch, &klp_patches, list) {
845 if (patch == limit)
846 break;
847
848 klp_for_each_object(patch, obj) {
849 if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
850 continue;
851
852 /*
853 * Only unpatch the module if the patch is enabled or
854 * is in transition.
855 */
856 if (patch->enabled || patch == klp_transition_patch) {
857 pr_notice("reverting patch '%s' on unloading module '%s'\n",
858 patch->mod->name, obj->mod->name);
859 klp_unpatch_object(obj);
860 }
861
862 klp_free_object_loaded(obj);
863 break;
864 }
865 }
866}
867
833int klp_module_coming(struct module *mod) 868int klp_module_coming(struct module *mod)
834{ 869{
835 int ret; 870 int ret;
@@ -894,7 +929,7 @@ err:
894 pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n", 929 pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
895 patch->mod->name, obj->mod->name, obj->mod->name); 930 patch->mod->name, obj->mod->name, obj->mod->name);
896 mod->klp_alive = false; 931 mod->klp_alive = false;
897 klp_free_object_loaded(obj); 932 klp_cleanup_module_patches_limited(mod, patch);
898 mutex_unlock(&klp_mutex); 933 mutex_unlock(&klp_mutex);
899 934
900 return ret; 935 return ret;
@@ -902,9 +937,6 @@ err:
902 937
903void klp_module_going(struct module *mod) 938void klp_module_going(struct module *mod)
904{ 939{
905 struct klp_patch *patch;
906 struct klp_object *obj;
907
908 if (WARN_ON(mod->state != MODULE_STATE_GOING && 940 if (WARN_ON(mod->state != MODULE_STATE_GOING &&
909 mod->state != MODULE_STATE_COMING)) 941 mod->state != MODULE_STATE_COMING))
910 return; 942 return;
@@ -917,25 +949,7 @@ void klp_module_going(struct module *mod)
917 */ 949 */
918 mod->klp_alive = false; 950 mod->klp_alive = false;
919 951
920 list_for_each_entry(patch, &klp_patches, list) { 952 klp_cleanup_module_patches_limited(mod, NULL);
921 klp_for_each_object(patch, obj) {
922 if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
923 continue;
924
925 /*
926 * Only unpatch the module if the patch is enabled or
927 * is in transition.
928 */
929 if (patch->enabled || patch == klp_transition_patch) {
930 pr_notice("reverting patch '%s' on unloading module '%s'\n",
931 patch->mod->name, obj->mod->name);
932 klp_unpatch_object(obj);
933 }
934
935 klp_free_object_loaded(obj);
936 break;
937 }
938 }
939 953
940 mutex_unlock(&klp_mutex); 954 mutex_unlock(&klp_mutex);
941} 955}
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 44c8d0d17170..e36e652d996f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1873,10 +1873,10 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1873 struct held_lock *next, int distance, struct stack_trace *trace, 1873 struct held_lock *next, int distance, struct stack_trace *trace,
1874 int (*save)(struct stack_trace *trace)) 1874 int (*save)(struct stack_trace *trace))
1875{ 1875{
1876 struct lock_list *uninitialized_var(target_entry);
1876 struct lock_list *entry; 1877 struct lock_list *entry;
1877 int ret;
1878 struct lock_list this; 1878 struct lock_list this;
1879 struct lock_list *uninitialized_var(target_entry); 1879 int ret;
1880 1880
1881 /* 1881 /*
1882 * Prove that the new <prev> -> <next> dependency would not 1882 * Prove that the new <prev> -> <next> dependency would not
@@ -1890,8 +1890,17 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1890 this.class = hlock_class(next); 1890 this.class = hlock_class(next);
1891 this.parent = NULL; 1891 this.parent = NULL;
1892 ret = check_noncircular(&this, hlock_class(prev), &target_entry); 1892 ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1893 if (unlikely(!ret)) 1893 if (unlikely(!ret)) {
1894 if (!trace->entries) {
1895 /*
1896 * If @save fails here, the printing might trigger
1897 * a WARN but because of the !nr_entries it should
1898 * not do bad things.
1899 */
1900 save(trace);
1901 }
1894 return print_circular_bug(&this, target_entry, next, prev, trace); 1902 return print_circular_bug(&this, target_entry, next, prev, trace);
1903 }
1895 else if (unlikely(ret < 0)) 1904 else if (unlikely(ret < 0))
1896 return print_bfs_bug(ret); 1905 return print_bfs_bug(ret);
1897 1906
@@ -1938,7 +1947,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1938 return print_bfs_bug(ret); 1947 return print_bfs_bug(ret);
1939 1948
1940 1949
1941 if (save && !save(trace)) 1950 if (!trace->entries && !save(trace))
1942 return 0; 1951 return 0;
1943 1952
1944 /* 1953 /*
@@ -1958,20 +1967,6 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1958 if (!ret) 1967 if (!ret)
1959 return 0; 1968 return 0;
1960 1969
1961 /*
1962 * Debugging printouts:
1963 */
1964 if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1965 graph_unlock();
1966 printk("\n new dependency: ");
1967 print_lock_name(hlock_class(prev));
1968 printk(KERN_CONT " => ");
1969 print_lock_name(hlock_class(next));
1970 printk(KERN_CONT "\n");
1971 dump_stack();
1972 if (!graph_lock())
1973 return 0;
1974 }
1975 return 2; 1970 return 2;
1976} 1971}
1977 1972
@@ -1986,8 +1981,12 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
1986{ 1981{
1987 int depth = curr->lockdep_depth; 1982 int depth = curr->lockdep_depth;
1988 struct held_lock *hlock; 1983 struct held_lock *hlock;
1989 struct stack_trace trace; 1984 struct stack_trace trace = {
1990 int (*save)(struct stack_trace *trace) = save_trace; 1985 .nr_entries = 0,
1986 .max_entries = 0,
1987 .entries = NULL,
1988 .skip = 0,
1989 };
1991 1990
1992 /* 1991 /*
1993 * Debugging checks. 1992 * Debugging checks.
@@ -2018,18 +2017,11 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
2018 */ 2017 */
2019 if (hlock->read != 2 && hlock->check) { 2018 if (hlock->read != 2 && hlock->check) {
2020 int ret = check_prev_add(curr, hlock, next, 2019 int ret = check_prev_add(curr, hlock, next,
2021 distance, &trace, save); 2020 distance, &trace, save_trace);
2022 if (!ret) 2021 if (!ret)
2023 return 0; 2022 return 0;
2024 2023
2025 /* 2024 /*
2026 * Stop saving stack_trace if save_trace() was
2027 * called at least once:
2028 */
2029 if (save && ret == 2)
2030 save = NULL;
2031
2032 /*
2033 * Stop after the first non-trylock entry, 2025 * Stop after the first non-trylock entry,
2034 * as non-trylock entries have added their 2026 * as non-trylock entries have added their
2035 * own direct dependencies already, so this 2027 * own direct dependencies already, so this
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 729a8706751d..6d5880089ff6 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -854,7 +854,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
854/** 854/**
855 * call_srcu() - Queue a callback for invocation after an SRCU grace period 855 * call_srcu() - Queue a callback for invocation after an SRCU grace period
856 * @sp: srcu_struct in queue the callback 856 * @sp: srcu_struct in queue the callback
857 * @head: structure to be used for queueing the SRCU callback. 857 * @rhp: structure to be used for queueing the SRCU callback.
858 * @func: function to be invoked after the SRCU grace period 858 * @func: function to be invoked after the SRCU grace period
859 * 859 *
860 * The callback function will be invoked some time after a full SRCU 860 * The callback function will be invoked some time after a full SRCU
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 50d1861f7759..3f943efcf61c 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -85,6 +85,9 @@ void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
85} 85}
86 86
87/** 87/**
88 * rcu_sync_enter_start - Force readers onto slow path for multiple updates
89 * @rsp: Pointer to rcu_sync structure to use for synchronization
90 *
88 * Must be called after rcu_sync_init() and before first use. 91 * Must be called after rcu_sync_init() and before first use.
89 * 92 *
90 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}() 93 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
@@ -142,7 +145,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
142 145
143/** 146/**
144 * rcu_sync_func() - Callback function managing reader access to fastpath 147 * rcu_sync_func() - Callback function managing reader access to fastpath
145 * @rsp: Pointer to rcu_sync structure to use for synchronization 148 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
146 * 149 *
147 * This function is passed to one of the call_rcu() functions by 150 * This function is passed to one of the call_rcu() functions by
148 * rcu_sync_exit(), so that it is invoked after a grace period following the 151 * rcu_sync_exit(), so that it is invoked after a grace period following the
@@ -158,9 +161,9 @@ void rcu_sync_enter(struct rcu_sync *rsp)
158 * rcu_sync_exit(). Otherwise, set all state back to idle so that readers 161 * rcu_sync_exit(). Otherwise, set all state back to idle so that readers
159 * can again use their fastpaths. 162 * can again use their fastpaths.
160 */ 163 */
161static void rcu_sync_func(struct rcu_head *rcu) 164static void rcu_sync_func(struct rcu_head *rhp)
162{ 165{
163 struct rcu_sync *rsp = container_of(rcu, struct rcu_sync, cb_head); 166 struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
164 unsigned long flags; 167 unsigned long flags;
165 168
166 BUG_ON(rsp->gp_state != GP_PASSED); 169 BUG_ON(rsp->gp_state != GP_PASSED);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b0ad62b0e7b8..3e3650e94ae6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3097,9 +3097,10 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
3097 * read-side critical sections have completed. call_rcu_sched() assumes 3097 * read-side critical sections have completed. call_rcu_sched() assumes
3098 * that the read-side critical sections end on enabling of preemption 3098 * that the read-side critical sections end on enabling of preemption
3099 * or on voluntary preemption. 3099 * or on voluntary preemption.
3100 * RCU read-side critical sections are delimited by : 3100 * RCU read-side critical sections are delimited by:
3101 * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR 3101 *
3102 * - anything that disables preemption. 3102 * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
3103 * - anything that disables preemption.
3103 * 3104 *
3104 * These may be nested. 3105 * These may be nested.
3105 * 3106 *
@@ -3124,11 +3125,12 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
3124 * handler. This means that read-side critical sections in process 3125 * handler. This means that read-side critical sections in process
3125 * context must not be interrupted by softirqs. This interface is to be 3126 * context must not be interrupted by softirqs. This interface is to be
3126 * used when most of the read-side critical sections are in softirq context. 3127 * used when most of the read-side critical sections are in softirq context.
3127 * RCU read-side critical sections are delimited by : 3128 * RCU read-side critical sections are delimited by:
3128 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context. 3129 *
3129 * OR 3130 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
3130 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context. 3131 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
3131 * These may be nested. 3132 *
3133 * These may be nested.
3132 * 3134 *
3133 * See the description of call_rcu() for more detailed information on 3135 * See the description of call_rcu() for more detailed information on
3134 * memory ordering guarantees. 3136 * memory ordering guarantees.
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 70ba32e08a23..d3f3094856fe 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5356,91 +5356,62 @@ static int wake_wide(struct task_struct *p)
5356 return 1; 5356 return 1;
5357} 5357}
5358 5358
5359struct llc_stats { 5359/*
5360 unsigned long nr_running; 5360 * The purpose of wake_affine() is to quickly determine on which CPU we can run
5361 unsigned long load; 5361 * soonest. For the purpose of speed we only consider the waking and previous
5362 unsigned long capacity; 5362 * CPU.
5363 int has_capacity; 5363 *
5364}; 5364 * wake_affine_idle() - only considers 'now', it check if the waking CPU is (or
5365 * will be) idle.
5366 *
5367 * wake_affine_weight() - considers the weight to reflect the average
5368 * scheduling latency of the CPUs. This seems to work
5369 * for the overloaded case.
5370 */
5365 5371
5366static bool get_llc_stats(struct llc_stats *stats, int cpu) 5372static bool
5373wake_affine_idle(struct sched_domain *sd, struct task_struct *p,
5374 int this_cpu, int prev_cpu, int sync)
5367{ 5375{
5368 struct sched_domain_shared *sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 5376 if (idle_cpu(this_cpu))
5369 5377 return true;
5370 if (!sds)
5371 return false;
5372 5378
5373 stats->nr_running = READ_ONCE(sds->nr_running); 5379 if (sync && cpu_rq(this_cpu)->nr_running == 1)
5374 stats->load = READ_ONCE(sds->load); 5380 return true;
5375 stats->capacity = READ_ONCE(sds->capacity);
5376 stats->has_capacity = stats->nr_running < per_cpu(sd_llc_size, cpu);
5377 5381
5378 return true; 5382 return false;
5379} 5383}
5380 5384
5381/*
5382 * Can a task be moved from prev_cpu to this_cpu without causing a load
5383 * imbalance that would trigger the load balancer?
5384 *
5385 * Since we're running on 'stale' values, we might in fact create an imbalance
5386 * but recomputing these values is expensive, as that'd mean iteration 2 cache
5387 * domains worth of CPUs.
5388 */
5389static bool 5385static bool
5390wake_affine_llc(struct sched_domain *sd, struct task_struct *p, 5386wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
5391 int this_cpu, int prev_cpu, int sync) 5387 int this_cpu, int prev_cpu, int sync)
5392{ 5388{
5393 struct llc_stats prev_stats, this_stats;
5394 s64 this_eff_load, prev_eff_load; 5389 s64 this_eff_load, prev_eff_load;
5395 unsigned long task_load; 5390 unsigned long task_load;
5396 5391
5397 if (!get_llc_stats(&prev_stats, prev_cpu) || 5392 this_eff_load = target_load(this_cpu, sd->wake_idx);
5398 !get_llc_stats(&this_stats, this_cpu)) 5393 prev_eff_load = source_load(prev_cpu, sd->wake_idx);
5399 return false;
5400 5394
5401 /*
5402 * If sync wakeup then subtract the (maximum possible)
5403 * effect of the currently running task from the load
5404 * of the current LLC.
5405 */
5406 if (sync) { 5395 if (sync) {
5407 unsigned long current_load = task_h_load(current); 5396 unsigned long current_load = task_h_load(current);
5408 5397
5409 /* in this case load hits 0 and this LLC is considered 'idle' */ 5398 if (current_load > this_eff_load)
5410 if (current_load > this_stats.load)
5411 return true; 5399 return true;
5412 5400
5413 this_stats.load -= current_load; 5401 this_eff_load -= current_load;
5414 } 5402 }
5415 5403
5416 /*
5417 * The has_capacity stuff is not SMT aware, but by trying to balance
5418 * the nr_running on both ends we try and fill the domain at equal
5419 * rates, thereby first consuming cores before siblings.
5420 */
5421
5422 /* if the old cache has capacity, stay there */
5423 if (prev_stats.has_capacity && prev_stats.nr_running < this_stats.nr_running+1)
5424 return false;
5425
5426 /* if this cache has capacity, come here */
5427 if (this_stats.has_capacity && this_stats.nr_running+1 < prev_stats.nr_running)
5428 return true;
5429
5430 /*
5431 * Check to see if we can move the load without causing too much
5432 * imbalance.
5433 */
5434 task_load = task_h_load(p); 5404 task_load = task_h_load(p);
5435 5405
5436 this_eff_load = 100; 5406 this_eff_load += task_load;
5437 this_eff_load *= prev_stats.capacity; 5407 if (sched_feat(WA_BIAS))
5438 5408 this_eff_load *= 100;
5439 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2; 5409 this_eff_load *= capacity_of(prev_cpu);
5440 prev_eff_load *= this_stats.capacity;
5441 5410
5442 this_eff_load *= this_stats.load + task_load; 5411 prev_eff_load -= task_load;
5443 prev_eff_load *= prev_stats.load - task_load; 5412 if (sched_feat(WA_BIAS))
5413 prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
5414 prev_eff_load *= capacity_of(this_cpu);
5444 5415
5445 return this_eff_load <= prev_eff_load; 5416 return this_eff_load <= prev_eff_load;
5446} 5417}
@@ -5449,22 +5420,13 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
5449 int prev_cpu, int sync) 5420 int prev_cpu, int sync)
5450{ 5421{
5451 int this_cpu = smp_processor_id(); 5422 int this_cpu = smp_processor_id();
5452 bool affine; 5423 bool affine = false;
5453 5424
5454 /* 5425 if (sched_feat(WA_IDLE) && !affine)
5455 * Default to no affine wakeups; wake_affine() should not effect a task 5426 affine = wake_affine_idle(sd, p, this_cpu, prev_cpu, sync);
5456 * placement the load-balancer feels inclined to undo. The conservative
5457 * option is therefore to not move tasks when they wake up.
5458 */
5459 affine = false;
5460 5427
5461 /* 5428 if (sched_feat(WA_WEIGHT) && !affine)
5462 * If the wakeup is across cache domains, try to evaluate if movement 5429 affine = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
5463 * makes sense, otherwise rely on select_idle_siblings() to do
5464 * placement inside the cache domain.
5465 */
5466 if (!cpus_share_cache(prev_cpu, this_cpu))
5467 affine = wake_affine_llc(sd, p, this_cpu, prev_cpu, sync);
5468 5430
5469 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); 5431 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
5470 if (affine) { 5432 if (affine) {
@@ -7600,7 +7562,6 @@ static inline enum fbq_type fbq_classify_rq(struct rq *rq)
7600 */ 7562 */
7601static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) 7563static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
7602{ 7564{
7603 struct sched_domain_shared *shared = env->sd->shared;
7604 struct sched_domain *child = env->sd->child; 7565 struct sched_domain *child = env->sd->child;
7605 struct sched_group *sg = env->sd->groups; 7566 struct sched_group *sg = env->sd->groups;
7606 struct sg_lb_stats *local = &sds->local_stat; 7567 struct sg_lb_stats *local = &sds->local_stat;
@@ -7672,22 +7633,6 @@ next_group:
7672 if (env->dst_rq->rd->overload != overload) 7633 if (env->dst_rq->rd->overload != overload)
7673 env->dst_rq->rd->overload = overload; 7634 env->dst_rq->rd->overload = overload;
7674 } 7635 }
7675
7676 if (!shared)
7677 return;
7678
7679 /*
7680 * Since these are sums over groups they can contain some CPUs
7681 * multiple times for the NUMA domains.
7682 *
7683 * Currently only wake_affine_llc() and find_busiest_group()
7684 * uses these numbers, only the last is affected by this problem.
7685 *
7686 * XXX fix that.
7687 */
7688 WRITE_ONCE(shared->nr_running, sds->total_running);
7689 WRITE_ONCE(shared->load, sds->total_load);
7690 WRITE_ONCE(shared->capacity, sds->total_capacity);
7691} 7636}
7692 7637
7693/** 7638/**
@@ -8098,6 +8043,13 @@ static int should_we_balance(struct lb_env *env)
8098 int cpu, balance_cpu = -1; 8043 int cpu, balance_cpu = -1;
8099 8044
8100 /* 8045 /*
8046 * Ensure the balancing environment is consistent; can happen
8047 * when the softirq triggers 'during' hotplug.
8048 */
8049 if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
8050 return 0;
8051
8052 /*
8101 * In the newly idle case, we will allow all the cpu's 8053 * In the newly idle case, we will allow all the cpu's
8102 * to do the newly idle load balance. 8054 * to do the newly idle load balance.
8103 */ 8055 */
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index d3fb15555291..319ed0e8a347 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -81,3 +81,6 @@ SCHED_FEAT(RT_RUNTIME_SHARE, true)
81SCHED_FEAT(LB_MIN, false) 81SCHED_FEAT(LB_MIN, false)
82SCHED_FEAT(ATTACH_AGE_LOAD, true) 82SCHED_FEAT(ATTACH_AGE_LOAD, true)
83 83
84SCHED_FEAT(WA_IDLE, true)
85SCHED_FEAT(WA_WEIGHT, true)
86SCHED_FEAT(WA_BIAS, true)
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index a92fddc22747..dd7908743dab 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -18,6 +18,7 @@
18#include <linux/membarrier.h> 18#include <linux/membarrier.h>
19#include <linux/tick.h> 19#include <linux/tick.h>
20#include <linux/cpumask.h> 20#include <linux/cpumask.h>
21#include <linux/atomic.h>
21 22
22#include "sched.h" /* for cpu_rq(). */ 23#include "sched.h" /* for cpu_rq(). */
23 24
@@ -26,21 +27,26 @@
26 * except MEMBARRIER_CMD_QUERY. 27 * except MEMBARRIER_CMD_QUERY.
27 */ 28 */
28#define MEMBARRIER_CMD_BITMASK \ 29#define MEMBARRIER_CMD_BITMASK \
29 (MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED) 30 (MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED \
31 | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED)
30 32
31static void ipi_mb(void *info) 33static void ipi_mb(void *info)
32{ 34{
33 smp_mb(); /* IPIs should be serializing but paranoid. */ 35 smp_mb(); /* IPIs should be serializing but paranoid. */
34} 36}
35 37
36static void membarrier_private_expedited(void) 38static int membarrier_private_expedited(void)
37{ 39{
38 int cpu; 40 int cpu;
39 bool fallback = false; 41 bool fallback = false;
40 cpumask_var_t tmpmask; 42 cpumask_var_t tmpmask;
41 43
44 if (!(atomic_read(&current->mm->membarrier_state)
45 & MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
46 return -EPERM;
47
42 if (num_online_cpus() == 1) 48 if (num_online_cpus() == 1)
43 return; 49 return 0;
44 50
45 /* 51 /*
46 * Matches memory barriers around rq->curr modification in 52 * Matches memory barriers around rq->curr modification in
@@ -94,6 +100,24 @@ static void membarrier_private_expedited(void)
94 * rq->curr modification in scheduler. 100 * rq->curr modification in scheduler.
95 */ 101 */
96 smp_mb(); /* exit from system call is not a mb */ 102 smp_mb(); /* exit from system call is not a mb */
103 return 0;
104}
105
106static void membarrier_register_private_expedited(void)
107{
108 struct task_struct *p = current;
109 struct mm_struct *mm = p->mm;
110
111 /*
112 * We need to consider threads belonging to different thread
113 * groups, which use the same mm. (CLONE_VM but not
114 * CLONE_THREAD).
115 */
116 if (atomic_read(&mm->membarrier_state)
117 & MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)
118 return;
119 atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
120 &mm->membarrier_state);
97} 121}
98 122
99/** 123/**
@@ -144,7 +168,9 @@ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
144 synchronize_sched(); 168 synchronize_sched();
145 return 0; 169 return 0;
146 case MEMBARRIER_CMD_PRIVATE_EXPEDITED: 170 case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
147 membarrier_private_expedited(); 171 return membarrier_private_expedited();
172 case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
173 membarrier_register_private_expedited();
148 return 0; 174 return 0;
149 default: 175 default:
150 return -EINVAL; 176 return -EINVAL;
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index bb3a38005b9c..0ae832e13b97 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -473,7 +473,7 @@ static long seccomp_attach_filter(unsigned int flags,
473 return 0; 473 return 0;
474} 474}
475 475
476void __get_seccomp_filter(struct seccomp_filter *filter) 476static void __get_seccomp_filter(struct seccomp_filter *filter)
477{ 477{
478 /* Reference count is bounded by the number of total processes. */ 478 /* Reference count is bounded by the number of total processes. */
479 refcount_inc(&filter->usage); 479 refcount_inc(&filter->usage);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2689b7c50c52..dfdad67d8f6c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1092,8 +1092,8 @@ config PROVE_LOCKING
1092 select DEBUG_MUTEXES 1092 select DEBUG_MUTEXES
1093 select DEBUG_RT_MUTEXES if RT_MUTEXES 1093 select DEBUG_RT_MUTEXES if RT_MUTEXES
1094 select DEBUG_LOCK_ALLOC 1094 select DEBUG_LOCK_ALLOC
1095 select LOCKDEP_CROSSRELEASE 1095 select LOCKDEP_CROSSRELEASE if BROKEN
1096 select LOCKDEP_COMPLETIONS 1096 select LOCKDEP_COMPLETIONS if BROKEN
1097 select TRACE_IRQFLAGS 1097 select TRACE_IRQFLAGS
1098 default n 1098 default n
1099 help 1099 help
@@ -1590,6 +1590,54 @@ config LATENCYTOP
1590 1590
1591source kernel/trace/Kconfig 1591source kernel/trace/Kconfig
1592 1592
1593config PROVIDE_OHCI1394_DMA_INIT
1594 bool "Remote debugging over FireWire early on boot"
1595 depends on PCI && X86
1596 help
1597 If you want to debug problems which hang or crash the kernel early
1598 on boot and the crashing machine has a FireWire port, you can use
1599 this feature to remotely access the memory of the crashed machine
1600 over FireWire. This employs remote DMA as part of the OHCI1394
1601 specification which is now the standard for FireWire controllers.
1602
1603 With remote DMA, you can monitor the printk buffer remotely using
1604 firescope and access all memory below 4GB using fireproxy from gdb.
1605 Even controlling a kernel debugger is possible using remote DMA.
1606
1607 Usage:
1608
1609 If ohci1394_dma=early is used as boot parameter, it will initialize
1610 all OHCI1394 controllers which are found in the PCI config space.
1611
1612 As all changes to the FireWire bus such as enabling and disabling
1613 devices cause a bus reset and thereby disable remote DMA for all
1614 devices, be sure to have the cable plugged and FireWire enabled on
1615 the debugging host before booting the debug target for debugging.
1616
1617 This code (~1k) is freed after boot. By then, the firewire stack
1618 in charge of the OHCI-1394 controllers should be used instead.
1619
1620 See Documentation/debugging-via-ohci1394.txt for more information.
1621
1622config DMA_API_DEBUG
1623 bool "Enable debugging of DMA-API usage"
1624 depends on HAVE_DMA_API_DEBUG
1625 help
1626 Enable this option to debug the use of the DMA API by device drivers.
1627 With this option you will be able to detect common bugs in device
1628 drivers like double-freeing of DMA mappings or freeing mappings that
1629 were never allocated.
1630
1631 This also attempts to catch cases where a page owned by DMA is
1632 accessed by the cpu in a way that could cause data corruption. For
1633 example, this enables cow_user_page() to check that the source page is
1634 not undergoing DMA.
1635
1636 This option causes a performance degradation. Use only if you want to
1637 debug device drivers and dma interactions.
1638
1639 If unsure, say N.
1640
1593menu "Runtime Testing" 1641menu "Runtime Testing"
1594 1642
1595config LKDTM 1643config LKDTM
@@ -1749,56 +1797,6 @@ config TEST_PARMAN
1749 1797
1750 If unsure, say N. 1798 If unsure, say N.
1751 1799
1752endmenu # runtime tests
1753
1754config PROVIDE_OHCI1394_DMA_INIT
1755 bool "Remote debugging over FireWire early on boot"
1756 depends on PCI && X86
1757 help
1758 If you want to debug problems which hang or crash the kernel early
1759 on boot and the crashing machine has a FireWire port, you can use
1760 this feature to remotely access the memory of the crashed machine
1761 over FireWire. This employs remote DMA as part of the OHCI1394
1762 specification which is now the standard for FireWire controllers.
1763
1764 With remote DMA, you can monitor the printk buffer remotely using
1765 firescope and access all memory below 4GB using fireproxy from gdb.
1766 Even controlling a kernel debugger is possible using remote DMA.
1767
1768 Usage:
1769
1770 If ohci1394_dma=early is used as boot parameter, it will initialize
1771 all OHCI1394 controllers which are found in the PCI config space.
1772
1773 As all changes to the FireWire bus such as enabling and disabling
1774 devices cause a bus reset and thereby disable remote DMA for all
1775 devices, be sure to have the cable plugged and FireWire enabled on
1776 the debugging host before booting the debug target for debugging.
1777
1778 This code (~1k) is freed after boot. By then, the firewire stack
1779 in charge of the OHCI-1394 controllers should be used instead.
1780
1781 See Documentation/debugging-via-ohci1394.txt for more information.
1782
1783config DMA_API_DEBUG
1784 bool "Enable debugging of DMA-API usage"
1785 depends on HAVE_DMA_API_DEBUG
1786 help
1787 Enable this option to debug the use of the DMA API by device drivers.
1788 With this option you will be able to detect common bugs in device
1789 drivers like double-freeing of DMA mappings or freeing mappings that
1790 were never allocated.
1791
1792 This also attempts to catch cases where a page owned by DMA is
1793 accessed by the cpu in a way that could cause data corruption. For
1794 example, this enables cow_user_page() to check that the source page is
1795 not undergoing DMA.
1796
1797 This option causes a performance degradation. Use only if you want to
1798 debug device drivers and dma interactions.
1799
1800 If unsure, say N.
1801
1802config TEST_LKM 1800config TEST_LKM
1803 tristate "Test module loading with 'hello world' module" 1801 tristate "Test module loading with 'hello world' module"
1804 default n 1802 default n
@@ -1873,18 +1871,6 @@ config TEST_UDELAY
1873 1871
1874 If unsure, say N. 1872 If unsure, say N.
1875 1873
1876config MEMTEST
1877 bool "Memtest"
1878 depends on HAVE_MEMBLOCK
1879 ---help---
1880 This option adds a kernel parameter 'memtest', which allows memtest
1881 to be set.
1882 memtest=0, mean disabled; -- default
1883 memtest=1, mean do 1 test pattern;
1884 ...
1885 memtest=17, mean do 17 test patterns.
1886 If you are unsure how to answer this question, answer N.
1887
1888config TEST_STATIC_KEYS 1874config TEST_STATIC_KEYS
1889 tristate "Test static keys" 1875 tristate "Test static keys"
1890 default n 1876 default n
@@ -1894,16 +1880,6 @@ config TEST_STATIC_KEYS
1894 1880
1895 If unsure, say N. 1881 If unsure, say N.
1896 1882
1897config BUG_ON_DATA_CORRUPTION
1898 bool "Trigger a BUG when data corruption is detected"
1899 select DEBUG_LIST
1900 help
1901 Select this option if the kernel should BUG when it encounters
1902 data corruption in kernel memory structures when they get checked
1903 for validity.
1904
1905 If unsure, say N.
1906
1907config TEST_KMOD 1883config TEST_KMOD
1908 tristate "kmod stress tester" 1884 tristate "kmod stress tester"
1909 default n 1885 default n
@@ -1941,6 +1917,29 @@ config TEST_DEBUG_VIRTUAL
1941 1917
1942 If unsure, say N. 1918 If unsure, say N.
1943 1919
1920endmenu # runtime tests
1921
1922config MEMTEST
1923 bool "Memtest"
1924 depends on HAVE_MEMBLOCK
1925 ---help---
1926 This option adds a kernel parameter 'memtest', which allows memtest
1927 to be set.
1928 memtest=0, mean disabled; -- default
1929 memtest=1, mean do 1 test pattern;
1930 ...
1931 memtest=17, mean do 17 test patterns.
1932 If you are unsure how to answer this question, answer N.
1933
1934config BUG_ON_DATA_CORRUPTION
1935 bool "Trigger a BUG when data corruption is detected"
1936 select DEBUG_LIST
1937 help
1938 Select this option if the kernel should BUG when it encounters
1939 data corruption in kernel memory structures when they get checked
1940 for validity.
1941
1942 If unsure, say N.
1944 1943
1945source "samples/Kconfig" 1944source "samples/Kconfig"
1946 1945
diff --git a/lib/digsig.c b/lib/digsig.c
index 03d7c63837ae..6ba6fcd92dd1 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -87,6 +87,12 @@ static int digsig_verify_rsa(struct key *key,
87 down_read(&key->sem); 87 down_read(&key->sem);
88 ukp = user_key_payload_locked(key); 88 ukp = user_key_payload_locked(key);
89 89
90 if (!ukp) {
91 /* key was revoked before we acquired its semaphore */
92 err = -EKEYREVOKED;
93 goto err1;
94 }
95
90 if (ukp->datalen < sizeof(*pkh)) 96 if (ukp->datalen < sizeof(*pkh))
91 goto err1; 97 goto err1;
92 98
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index cd0b5c964bd0..2b827b8a1d8c 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -2031,11 +2031,13 @@ void locking_selftest(void)
2031 print_testname("mixed read-lock/lock-write ABBA"); 2031 print_testname("mixed read-lock/lock-write ABBA");
2032 pr_cont(" |"); 2032 pr_cont(" |");
2033 dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK); 2033 dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
2034#ifdef CONFIG_PROVE_LOCKING
2034 /* 2035 /*
2035 * Lockdep does indeed fail here, but there's nothing we can do about 2036 * Lockdep does indeed fail here, but there's nothing we can do about
2036 * that now. Don't kill lockdep for it. 2037 * that now. Don't kill lockdep for it.
2037 */ 2038 */
2038 unexpected_testcase_failures--; 2039 unexpected_testcase_failures--;
2040#endif
2039 2041
2040 pr_cont(" |"); 2042 pr_cont(" |");
2041 dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM); 2043 dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index 5696a35184e4..69557c74ef9f 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -11,7 +11,7 @@
11 * ========================================================================== 11 * ==========================================================================
12 * 12 *
13 * A finite state machine consists of n states (struct ts_fsm_token) 13 * A finite state machine consists of n states (struct ts_fsm_token)
14 * representing the pattern as a finite automation. The data is read 14 * representing the pattern as a finite automaton. The data is read
15 * sequentially on an octet basis. Every state token specifies the number 15 * sequentially on an octet basis. Every state token specifies the number
16 * of recurrences and the type of value accepted which can be either a 16 * of recurrences and the type of value accepted which can be either a
17 * specific character or ctype based set of characters. The available 17 * specific character or ctype based set of characters. The available
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 632f783e65f1..ffbe66cbb0ed 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -27,7 +27,7 @@
27 * 27 *
28 * [1] Cormen, Leiserson, Rivest, Stein 28 * [1] Cormen, Leiserson, Rivest, Stein
29 * Introdcution to Algorithms, 2nd Edition, MIT Press 29 * Introdcution to Algorithms, 2nd Edition, MIT Press
30 * [2] See finite automation theory 30 * [2] See finite automaton theory
31 */ 31 */
32 32
33#include <linux/module.h> 33#include <linux/module.h>
diff --git a/mm/cma.c b/mm/cma.c
index c0da318c020e..022e52bd8370 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -460,7 +460,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
460 460
461 trace_cma_alloc(pfn, page, count, align); 461 trace_cma_alloc(pfn, page, count, align);
462 462
463 if (ret) { 463 if (ret && !(gfp_mask & __GFP_NOWARN)) {
464 pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n", 464 pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
465 __func__, count, ret); 465 __func__, count, ret);
466 cma_debug_show_areas(cma); 466 cma_debug_show_areas(cma);
diff --git a/mm/madvise.c b/mm/madvise.c
index 25bade36e9ca..fd70d6aabc3e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -757,6 +757,9 @@ madvise_behavior_valid(int behavior)
757 * MADV_DONTFORK - omit this area from child's address space when forking: 757 * MADV_DONTFORK - omit this area from child's address space when forking:
758 * typically, to avoid COWing pages pinned by get_user_pages(). 758 * typically, to avoid COWing pages pinned by get_user_pages().
759 * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. 759 * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
760 * MADV_WIPEONFORK - present the child process with zero-filled memory in this
761 * range after a fork.
762 * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
760 * MADV_HWPOISON - trigger memory error handler as if the given memory range 763 * MADV_HWPOISON - trigger memory error handler as if the given memory range
761 * were corrupted by unrecoverable hardware memory failure. 764 * were corrupted by unrecoverable hardware memory failure.
762 * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory. 765 * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
@@ -777,7 +780,9 @@ madvise_behavior_valid(int behavior)
777 * zero - success 780 * zero - success
778 * -EINVAL - start + len < 0, start is not page-aligned, 781 * -EINVAL - start + len < 0, start is not page-aligned,
779 * "behavior" is not a valid value, or application 782 * "behavior" is not a valid value, or application
780 * is attempting to release locked or shared pages. 783 * is attempting to release locked or shared pages,
784 * or the specified address range includes file, Huge TLB,
785 * MAP_SHARED or VMPFNMAP range.
781 * -ENOMEM - addresses in the specified range are not currently 786 * -ENOMEM - addresses in the specified range are not currently
782 * mapped, or are outside the AS of the process. 787 * mapped, or are outside the AS of the process.
783 * -EIO - an I/O error occurred while paging in data. 788 * -EIO - an I/O error occurred while paging in data.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d5f3a62887cf..661f046ad318 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5828,21 +5828,6 @@ void mem_cgroup_sk_alloc(struct sock *sk)
5828 if (!mem_cgroup_sockets_enabled) 5828 if (!mem_cgroup_sockets_enabled)
5829 return; 5829 return;
5830 5830
5831 /*
5832 * Socket cloning can throw us here with sk_memcg already
5833 * filled. It won't however, necessarily happen from
5834 * process context. So the test for root memcg given
5835 * the current task's memcg won't help us in this case.
5836 *
5837 * Respecting the original socket's memcg is a better
5838 * decision in this case.
5839 */
5840 if (sk->sk_memcg) {
5841 BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
5842 css_get(&sk->sk_memcg->css);
5843 return;
5844 }
5845
5846 rcu_read_lock(); 5831 rcu_read_lock();
5847 memcg = mem_cgroup_from_task(current); 5832 memcg = mem_cgroup_from_task(current);
5848 if (memcg == root_mem_cgroup) 5833 if (memcg == root_mem_cgroup)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 006ba625c0b8..a2af6d58a68f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1920,8 +1920,11 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1920 struct page *page; 1920 struct page *page;
1921 1921
1922 page = __alloc_pages(gfp, order, nid); 1922 page = __alloc_pages(gfp, order, nid);
1923 if (page && page_to_nid(page) == nid) 1923 if (page && page_to_nid(page) == nid) {
1924 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); 1924 preempt_disable();
1925 __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
1926 preempt_enable();
1927 }
1925 return page; 1928 return page;
1926} 1929}
1927 1930
diff --git a/mm/migrate.c b/mm/migrate.c
index 6954c1435833..e00814ca390e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2146,8 +2146,9 @@ static int migrate_vma_collect_hole(unsigned long start,
2146 unsigned long addr; 2146 unsigned long addr;
2147 2147
2148 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { 2148 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
2149 migrate->src[migrate->npages++] = MIGRATE_PFN_MIGRATE; 2149 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
2150 migrate->dst[migrate->npages] = 0; 2150 migrate->dst[migrate->npages] = 0;
2151 migrate->npages++;
2151 migrate->cpages++; 2152 migrate->cpages++;
2152 } 2153 }
2153 2154
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 6a03946469a9..53afbb919a1c 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -6,17 +6,6 @@
6 6
7#include "internal.h" 7#include "internal.h"
8 8
9static inline bool check_pmd(struct page_vma_mapped_walk *pvmw)
10{
11 pmd_t pmde;
12 /*
13 * Make sure we don't re-load pmd between present and !trans_huge check.
14 * We need a consistent view.
15 */
16 pmde = READ_ONCE(*pvmw->pmd);
17 return pmd_present(pmde) && !pmd_trans_huge(pmde);
18}
19
20static inline bool not_found(struct page_vma_mapped_walk *pvmw) 9static inline bool not_found(struct page_vma_mapped_walk *pvmw)
21{ 10{
22 page_vma_mapped_walk_done(pvmw); 11 page_vma_mapped_walk_done(pvmw);
@@ -116,6 +105,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
116 pgd_t *pgd; 105 pgd_t *pgd;
117 p4d_t *p4d; 106 p4d_t *p4d;
118 pud_t *pud; 107 pud_t *pud;
108 pmd_t pmde;
119 109
120 /* The only possible pmd mapping has been handled on last iteration */ 110 /* The only possible pmd mapping has been handled on last iteration */
121 if (pvmw->pmd && !pvmw->pte) 111 if (pvmw->pmd && !pvmw->pte)
@@ -148,7 +138,13 @@ restart:
148 if (!pud_present(*pud)) 138 if (!pud_present(*pud))
149 return false; 139 return false;
150 pvmw->pmd = pmd_offset(pud, pvmw->address); 140 pvmw->pmd = pmd_offset(pud, pvmw->address);
151 if (pmd_trans_huge(*pvmw->pmd) || is_pmd_migration_entry(*pvmw->pmd)) { 141 /*
142 * Make sure the pmd value isn't cached in a register by the
143 * compiler and used as a stale value after we've observed a
144 * subsequent update.
145 */
146 pmde = READ_ONCE(*pvmw->pmd);
147 if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
152 pvmw->ptl = pmd_lock(mm, pvmw->pmd); 148 pvmw->ptl = pmd_lock(mm, pvmw->pmd);
153 if (likely(pmd_trans_huge(*pvmw->pmd))) { 149 if (likely(pmd_trans_huge(*pvmw->pmd))) {
154 if (pvmw->flags & PVMW_MIGRATION) 150 if (pvmw->flags & PVMW_MIGRATION)
@@ -167,17 +163,15 @@ restart:
167 return not_found(pvmw); 163 return not_found(pvmw);
168 return true; 164 return true;
169 } 165 }
170 } else 166 }
171 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
172 return not_found(pvmw); 167 return not_found(pvmw);
173 } else { 168 } else {
174 /* THP pmd was split under us: handle on pte level */ 169 /* THP pmd was split under us: handle on pte level */
175 spin_unlock(pvmw->ptl); 170 spin_unlock(pvmw->ptl);
176 pvmw->ptl = NULL; 171 pvmw->ptl = NULL;
177 } 172 }
178 } else { 173 } else if (!pmd_present(pmde)) {
179 if (!check_pmd(pvmw)) 174 return false;
180 return false;
181 } 175 }
182 if (!map_pte(pvmw)) 176 if (!map_pte(pvmw))
183 goto next_pte; 177 goto next_pte;
diff --git a/mm/percpu.c b/mm/percpu.c
index aa121cef76de..a0e0c82c1e4c 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1329,7 +1329,9 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1329 * @gfp: allocation flags 1329 * @gfp: allocation flags
1330 * 1330 *
1331 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't 1331 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
1332 * contain %GFP_KERNEL, the allocation is atomic. 1332 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1333 * then no warning will be triggered on invalid or failed allocation
1334 * requests.
1333 * 1335 *
1334 * RETURNS: 1336 * RETURNS:
1335 * Percpu pointer to the allocated area on success, NULL on failure. 1337 * Percpu pointer to the allocated area on success, NULL on failure.
@@ -1337,10 +1339,11 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1337static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, 1339static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1338 gfp_t gfp) 1340 gfp_t gfp)
1339{ 1341{
1342 bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1343 bool do_warn = !(gfp & __GFP_NOWARN);
1340 static int warn_limit = 10; 1344 static int warn_limit = 10;
1341 struct pcpu_chunk *chunk; 1345 struct pcpu_chunk *chunk;
1342 const char *err; 1346 const char *err;
1343 bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1344 int slot, off, cpu, ret; 1347 int slot, off, cpu, ret;
1345 unsigned long flags; 1348 unsigned long flags;
1346 void __percpu *ptr; 1349 void __percpu *ptr;
@@ -1361,7 +1364,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1361 1364
1362 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || 1365 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
1363 !is_power_of_2(align))) { 1366 !is_power_of_2(align))) {
1364 WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n", 1367 WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1365 size, align); 1368 size, align);
1366 return NULL; 1369 return NULL;
1367 } 1370 }
@@ -1482,7 +1485,7 @@ fail_unlock:
1482fail: 1485fail:
1483 trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); 1486 trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1484 1487
1485 if (!is_atomic && warn_limit) { 1488 if (!is_atomic && do_warn && warn_limit) {
1486 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", 1489 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1487 size, align, is_atomic, err); 1490 size, align, is_atomic, err);
1488 dump_stack(); 1491 dump_stack();
@@ -1507,7 +1510,9 @@ fail:
1507 * 1510 *
1508 * Allocate zero-filled percpu area of @size bytes aligned at @align. If 1511 * Allocate zero-filled percpu area of @size bytes aligned at @align. If
1509 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can 1512 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1510 * be called from any context but is a lot more likely to fail. 1513 * be called from any context but is a lot more likely to fail. If @gfp
1514 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
1515 * allocation requests.
1511 * 1516 *
1512 * RETURNS: 1517 * RETURNS:
1513 * Percpu pointer to the allocated area on success, NULL on failure. 1518 * Percpu pointer to the allocated area on success, NULL on failure.
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ed91091d1e68..05b6803f0cce 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -39,10 +39,6 @@ struct address_space *swapper_spaces[MAX_SWAPFILES];
39static unsigned int nr_swapper_spaces[MAX_SWAPFILES]; 39static unsigned int nr_swapper_spaces[MAX_SWAPFILES];
40bool swap_vma_readahead = true; 40bool swap_vma_readahead = true;
41 41
42#define SWAP_RA_MAX_ORDER_DEFAULT 3
43
44static int swap_ra_max_order = SWAP_RA_MAX_ORDER_DEFAULT;
45
46#define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2) 42#define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2)
47#define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1) 43#define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1)
48#define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK 44#define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK
@@ -664,6 +660,13 @@ struct page *swap_readahead_detect(struct vm_fault *vmf,
664 pte_t *tpte; 660 pte_t *tpte;
665#endif 661#endif
666 662
663 max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
664 SWAP_RA_ORDER_CEILING);
665 if (max_win == 1) {
666 swap_ra->win = 1;
667 return NULL;
668 }
669
667 faddr = vmf->address; 670 faddr = vmf->address;
668 entry = pte_to_swp_entry(vmf->orig_pte); 671 entry = pte_to_swp_entry(vmf->orig_pte);
669 if ((unlikely(non_swap_entry(entry)))) 672 if ((unlikely(non_swap_entry(entry))))
@@ -672,12 +675,6 @@ struct page *swap_readahead_detect(struct vm_fault *vmf,
672 if (page) 675 if (page)
673 return page; 676 return page;
674 677
675 max_win = 1 << READ_ONCE(swap_ra_max_order);
676 if (max_win == 1) {
677 swap_ra->win = 1;
678 return NULL;
679 }
680
681 fpfn = PFN_DOWN(faddr); 678 fpfn = PFN_DOWN(faddr);
682 swap_ra_info = GET_SWAP_RA_VAL(vma); 679 swap_ra_info = GET_SWAP_RA_VAL(vma);
683 pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info)); 680 pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info));
@@ -786,32 +783,8 @@ static struct kobj_attribute vma_ra_enabled_attr =
786 __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show, 783 __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
787 vma_ra_enabled_store); 784 vma_ra_enabled_store);
788 785
789static ssize_t vma_ra_max_order_show(struct kobject *kobj,
790 struct kobj_attribute *attr, char *buf)
791{
792 return sprintf(buf, "%d\n", swap_ra_max_order);
793}
794static ssize_t vma_ra_max_order_store(struct kobject *kobj,
795 struct kobj_attribute *attr,
796 const char *buf, size_t count)
797{
798 int err, v;
799
800 err = kstrtoint(buf, 10, &v);
801 if (err || v > SWAP_RA_ORDER_CEILING || v <= 0)
802 return -EINVAL;
803
804 swap_ra_max_order = v;
805
806 return count;
807}
808static struct kobj_attribute vma_ra_max_order_attr =
809 __ATTR(vma_ra_max_order, 0644, vma_ra_max_order_show,
810 vma_ra_max_order_store);
811
812static struct attribute *swap_attrs[] = { 786static struct attribute *swap_attrs[] = {
813 &vma_ra_enabled_attr.attr, 787 &vma_ra_enabled_attr.attr,
814 &vma_ra_max_order_attr.attr,
815 NULL, 788 NULL,
816}; 789};
817 790
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8a43db6284eb..673942094328 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1695,11 +1695,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1695 for (i = 0; i < area->nr_pages; i++) { 1695 for (i = 0; i < area->nr_pages; i++) {
1696 struct page *page; 1696 struct page *page;
1697 1697
1698 if (fatal_signal_pending(current)) {
1699 area->nr_pages = i;
1700 goto fail_no_warn;
1701 }
1702
1703 if (node == NUMA_NO_NODE) 1698 if (node == NUMA_NO_NODE)
1704 page = alloc_page(alloc_mask|highmem_mask); 1699 page = alloc_page(alloc_mask|highmem_mask);
1705 else 1700 else
@@ -1723,7 +1718,6 @@ fail:
1723 warn_alloc(gfp_mask, NULL, 1718 warn_alloc(gfp_mask, NULL,
1724 "vmalloc: allocation failure, allocated %ld of %ld bytes", 1719 "vmalloc: allocation failure, allocated %ld of %ld bytes",
1725 (area->nr_pages*PAGE_SIZE), area->size); 1720 (area->nr_pages*PAGE_SIZE), area->size);
1726fail_no_warn:
1727 vfree(area->addr); 1721 vfree(area->addr);
1728 return NULL; 1722 return NULL;
1729} 1723}
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index f0e82682e071..fb61b6c79235 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -578,7 +578,7 @@ static int br_process_vlan_info(struct net_bridge *br,
578 } 578 }
579 *vinfo_last = NULL; 579 *vinfo_last = NULL;
580 580
581 return 0; 581 return err;
582 } 582 }
583 583
584 return br_vlan_info(br, p, cmd, vinfo_curr); 584 return br_vlan_info(br, p, cmd, vinfo_curr);
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 1f75e11ac35a..003b2d6d655f 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -78,7 +78,7 @@ MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
78static struct kmem_cache *rcv_cache __read_mostly; 78static struct kmem_cache *rcv_cache __read_mostly;
79 79
80/* table of registered CAN protocols */ 80/* table of registered CAN protocols */
81static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly; 81static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly;
82static DEFINE_MUTEX(proto_tab_lock); 82static DEFINE_MUTEX(proto_tab_lock);
83 83
84static atomic_t skbcounter = ATOMIC_INIT(0); 84static atomic_t skbcounter = ATOMIC_INIT(0);
@@ -788,7 +788,7 @@ int can_proto_register(const struct can_proto *cp)
788 788
789 mutex_lock(&proto_tab_lock); 789 mutex_lock(&proto_tab_lock);
790 790
791 if (proto_tab[proto]) { 791 if (rcu_access_pointer(proto_tab[proto])) {
792 pr_err("can: protocol %d already registered\n", proto); 792 pr_err("can: protocol %d already registered\n", proto);
793 err = -EBUSY; 793 err = -EBUSY;
794 } else 794 } else
@@ -812,7 +812,7 @@ void can_proto_unregister(const struct can_proto *cp)
812 int proto = cp->protocol; 812 int proto = cp->protocol;
813 813
814 mutex_lock(&proto_tab_lock); 814 mutex_lock(&proto_tab_lock);
815 BUG_ON(proto_tab[proto] != cp); 815 BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp);
816 RCU_INIT_POINTER(proto_tab[proto], NULL); 816 RCU_INIT_POINTER(proto_tab[proto], NULL);
817 mutex_unlock(&proto_tab_lock); 817 mutex_unlock(&proto_tab_lock);
818 818
@@ -875,9 +875,14 @@ static int can_pernet_init(struct net *net)
875 spin_lock_init(&net->can.can_rcvlists_lock); 875 spin_lock_init(&net->can.can_rcvlists_lock);
876 net->can.can_rx_alldev_list = 876 net->can.can_rx_alldev_list =
877 kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL); 877 kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
878 878 if (!net->can.can_rx_alldev_list)
879 goto out;
879 net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL); 880 net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL);
881 if (!net->can.can_stats)
882 goto out_free_alldev_list;
880 net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL); 883 net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL);
884 if (!net->can.can_pstats)
885 goto out_free_can_stats;
881 886
882 if (IS_ENABLED(CONFIG_PROC_FS)) { 887 if (IS_ENABLED(CONFIG_PROC_FS)) {
883 /* the statistics are updated every second (timer triggered) */ 888 /* the statistics are updated every second (timer triggered) */
@@ -892,6 +897,13 @@ static int can_pernet_init(struct net *net)
892 } 897 }
893 898
894 return 0; 899 return 0;
900
901 out_free_can_stats:
902 kfree(net->can.can_stats);
903 out_free_alldev_list:
904 kfree(net->can.can_rx_alldev_list);
905 out:
906 return -ENOMEM;
895} 907}
896 908
897static void can_pernet_exit(struct net *net) 909static void can_pernet_exit(struct net *net)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 47a8748d953a..13690334efa3 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1493,13 +1493,14 @@ static int bcm_init(struct sock *sk)
1493static int bcm_release(struct socket *sock) 1493static int bcm_release(struct socket *sock)
1494{ 1494{
1495 struct sock *sk = sock->sk; 1495 struct sock *sk = sock->sk;
1496 struct net *net = sock_net(sk); 1496 struct net *net;
1497 struct bcm_sock *bo; 1497 struct bcm_sock *bo;
1498 struct bcm_op *op, *next; 1498 struct bcm_op *op, *next;
1499 1499
1500 if (sk == NULL) 1500 if (!sk)
1501 return 0; 1501 return 0;
1502 1502
1503 net = sock_net(sk);
1503 bo = bcm_sk(sk); 1504 bo = bcm_sk(sk);
1504 1505
1505 /* remove bcm_ops, timer, rx_unregister(), etc. */ 1506 /* remove bcm_ops, timer, rx_unregister(), etc. */
diff --git a/net/core/dev.c b/net/core/dev.c
index cf5894f0e6eb..24ac9083bc13 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1149,9 +1149,8 @@ static int dev_alloc_name_ns(struct net *net,
1149 return ret; 1149 return ret;
1150} 1150}
1151 1151
1152static int dev_get_valid_name(struct net *net, 1152int dev_get_valid_name(struct net *net, struct net_device *dev,
1153 struct net_device *dev, 1153 const char *name)
1154 const char *name)
1155{ 1154{
1156 BUG_ON(!net); 1155 BUG_ON(!net);
1157 1156
@@ -1167,6 +1166,7 @@ static int dev_get_valid_name(struct net *net,
1167 1166
1168 return 0; 1167 return 0;
1169} 1168}
1169EXPORT_SYMBOL(dev_get_valid_name);
1170 1170
1171/** 1171/**
1172 * dev_change_name - change name of a device 1172 * dev_change_name - change name of a device
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 709a4e6fb447..f9c7a88cd981 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -303,7 +303,18 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
303 case SIOCSIFTXQLEN: 303 case SIOCSIFTXQLEN:
304 if (ifr->ifr_qlen < 0) 304 if (ifr->ifr_qlen < 0)
305 return -EINVAL; 305 return -EINVAL;
306 dev->tx_queue_len = ifr->ifr_qlen; 306 if (dev->tx_queue_len ^ ifr->ifr_qlen) {
307 unsigned int orig_len = dev->tx_queue_len;
308
309 dev->tx_queue_len = ifr->ifr_qlen;
310 err = call_netdevice_notifiers(
311 NETDEV_CHANGE_TX_QUEUE_LEN, dev);
312 err = notifier_to_errno(err);
313 if (err) {
314 dev->tx_queue_len = orig_len;
315 return err;
316 }
317 }
307 return 0; 318 return 0;
308 319
309 case SIOCSIFNAME: 320 case SIOCSIFNAME:
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 0c406306792a..f8fcf450a36e 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -452,7 +452,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
452EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32); 452EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32);
453 453
454/* return false if legacy contained non-0 deprecated fields 454/* return false if legacy contained non-0 deprecated fields
455 * transceiver/maxtxpkt/maxrxpkt. rest of ksettings always updated 455 * maxtxpkt/maxrxpkt. rest of ksettings always updated
456 */ 456 */
457static bool 457static bool
458convert_legacy_settings_to_link_ksettings( 458convert_legacy_settings_to_link_ksettings(
@@ -467,8 +467,7 @@ convert_legacy_settings_to_link_ksettings(
467 * deprecated legacy fields, and they should not use 467 * deprecated legacy fields, and they should not use
468 * %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS 468 * %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS
469 */ 469 */
470 if (legacy_settings->transceiver || 470 if (legacy_settings->maxtxpkt ||
471 legacy_settings->maxtxpkt ||
472 legacy_settings->maxrxpkt) 471 legacy_settings->maxrxpkt)
473 retval = false; 472 retval = false;
474 473
diff --git a/net/core/filter.c b/net/core/filter.c
index ccf62f44140a..b79c44cc8145 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1840,31 +1840,31 @@ static const struct bpf_func_proto bpf_redirect_proto = {
1840 .arg2_type = ARG_ANYTHING, 1840 .arg2_type = ARG_ANYTHING,
1841}; 1841};
1842 1842
1843BPF_CALL_3(bpf_sk_redirect_map, struct bpf_map *, map, u32, key, u64, flags) 1843BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
1844 struct bpf_map *, map, u32, key, u64, flags)
1844{ 1845{
1845 struct redirect_info *ri = this_cpu_ptr(&redirect_info); 1846 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
1846 1847
1847 if (unlikely(flags)) 1848 if (unlikely(flags))
1848 return SK_ABORTED; 1849 return SK_ABORTED;
1849 1850
1850 ri->ifindex = key; 1851 tcb->bpf.key = key;
1851 ri->flags = flags; 1852 tcb->bpf.flags = flags;
1852 ri->map = map; 1853 tcb->bpf.map = map;
1853 1854
1854 return SK_REDIRECT; 1855 return SK_REDIRECT;
1855} 1856}
1856 1857
1857struct sock *do_sk_redirect_map(void) 1858struct sock *do_sk_redirect_map(struct sk_buff *skb)
1858{ 1859{
1859 struct redirect_info *ri = this_cpu_ptr(&redirect_info); 1860 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
1860 struct sock *sk = NULL; 1861 struct sock *sk = NULL;
1861 1862
1862 if (ri->map) { 1863 if (tcb->bpf.map) {
1863 sk = __sock_map_lookup_elem(ri->map, ri->ifindex); 1864 sk = __sock_map_lookup_elem(tcb->bpf.map, tcb->bpf.key);
1864 1865
1865 ri->ifindex = 0; 1866 tcb->bpf.key = 0;
1866 ri->map = NULL; 1867 tcb->bpf.map = NULL;
1867 /* we do not clear flags for future lookup */
1868 } 1868 }
1869 1869
1870 return sk; 1870 return sk;
@@ -1874,9 +1874,10 @@ static const struct bpf_func_proto bpf_sk_redirect_map_proto = {
1874 .func = bpf_sk_redirect_map, 1874 .func = bpf_sk_redirect_map,
1875 .gpl_only = false, 1875 .gpl_only = false,
1876 .ret_type = RET_INTEGER, 1876 .ret_type = RET_INTEGER,
1877 .arg1_type = ARG_CONST_MAP_PTR, 1877 .arg1_type = ARG_PTR_TO_CTX,
1878 .arg2_type = ARG_ANYTHING, 1878 .arg2_type = ARG_CONST_MAP_PTR,
1879 .arg3_type = ARG_ANYTHING, 1879 .arg3_type = ARG_ANYTHING,
1880 .arg4_type = ARG_ANYTHING,
1880}; 1881};
1881 1882
1882BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) 1883BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
@@ -3902,7 +3903,6 @@ static bool sk_skb_is_valid_access(int off, int size,
3902 3903
3903 if (type == BPF_WRITE) { 3904 if (type == BPF_WRITE) {
3904 switch (off) { 3905 switch (off) {
3905 case bpf_ctx_range(struct __sk_buff, mark):
3906 case bpf_ctx_range(struct __sk_buff, tc_index): 3906 case bpf_ctx_range(struct __sk_buff, tc_index):
3907 case bpf_ctx_range(struct __sk_buff, priority): 3907 case bpf_ctx_range(struct __sk_buff, priority):
3908 break; 3908 break;
@@ -3912,6 +3912,8 @@ static bool sk_skb_is_valid_access(int off, int size,
3912 } 3912 }
3913 3913
3914 switch (off) { 3914 switch (off) {
3915 case bpf_ctx_range(struct __sk_buff, mark):
3916 return false;
3915 case bpf_ctx_range(struct __sk_buff, data): 3917 case bpf_ctx_range(struct __sk_buff, data):
3916 info->reg_type = PTR_TO_PACKET; 3918 info->reg_type = PTR_TO_PACKET;
3917 break; 3919 break;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 20b550d07fe3..04680a53c8dd 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1551,7 +1551,10 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1551 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 1551 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1552 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 1552 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1553 [IFLA_NET_NS_FD] = { .type = NLA_U32 }, 1553 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
1554 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, 1554 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
1555 * allow 0-length string (needed to remove an alias).
1556 */
1557 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
1555 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, 1558 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
1556 [IFLA_VF_PORTS] = { .type = NLA_NESTED }, 1559 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
1557 [IFLA_PORT_SELF] = { .type = NLA_NESTED }, 1560 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
@@ -2172,7 +2175,7 @@ static int do_setlink(const struct sk_buff *skb,
2172 dev->tx_queue_len = orig_len; 2175 dev->tx_queue_len = orig_len;
2173 goto errout; 2176 goto errout;
2174 } 2177 }
2175 status |= DO_SETLINK_NOTIFY; 2178 status |= DO_SETLINK_MODIFIED;
2176 } 2179 }
2177 } 2180 }
2178 2181
@@ -2332,7 +2335,7 @@ static int do_setlink(const struct sk_buff *skb,
2332 2335
2333errout: 2336errout:
2334 if (status & DO_SETLINK_MODIFIED) { 2337 if (status & DO_SETLINK_MODIFIED) {
2335 if (status & DO_SETLINK_NOTIFY) 2338 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
2336 netdev_state_change(dev); 2339 netdev_state_change(dev);
2337 2340
2338 if (err < 0) 2341 if (err < 0)
@@ -4373,13 +4376,17 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
4373 4376
4374 switch (event) { 4377 switch (event) {
4375 case NETDEV_REBOOT: 4378 case NETDEV_REBOOT:
4379 case NETDEV_CHANGEMTU:
4376 case NETDEV_CHANGEADDR: 4380 case NETDEV_CHANGEADDR:
4377 case NETDEV_CHANGENAME: 4381 case NETDEV_CHANGENAME:
4378 case NETDEV_FEAT_CHANGE: 4382 case NETDEV_FEAT_CHANGE:
4379 case NETDEV_BONDING_FAILOVER: 4383 case NETDEV_BONDING_FAILOVER:
4384 case NETDEV_POST_TYPE_CHANGE:
4380 case NETDEV_NOTIFY_PEERS: 4385 case NETDEV_NOTIFY_PEERS:
4386 case NETDEV_CHANGEUPPER:
4381 case NETDEV_RESEND_IGMP: 4387 case NETDEV_RESEND_IGMP:
4382 case NETDEV_CHANGEINFODATA: 4388 case NETDEV_CHANGEINFODATA:
4389 case NETDEV_CHANGE_TX_QUEUE_LEN:
4383 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event), 4390 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
4384 GFP_KERNEL, NULL); 4391 GFP_KERNEL, NULL);
4385 break; 4392 break;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 40717501cbdd..97e604d55d55 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1124,9 +1124,13 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1124 1124
1125 err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len); 1125 err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
1126 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { 1126 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
1127 struct sock *save_sk = skb->sk;
1128
1127 /* Streams do not free skb on error. Reset to prev state. */ 1129 /* Streams do not free skb on error. Reset to prev state. */
1128 msg->msg_iter = orig_iter; 1130 msg->msg_iter = orig_iter;
1131 skb->sk = sk;
1129 ___pskb_trim(skb, orig_len); 1132 ___pskb_trim(skb, orig_len);
1133 skb->sk = save_sk;
1130 return err; 1134 return err;
1131 } 1135 }
1132 1136
@@ -1895,7 +1899,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
1895 } 1899 }
1896 1900
1897 /* If we need update frag list, we are in troubles. 1901 /* If we need update frag list, we are in troubles.
1898 * Certainly, it possible to add an offset to skb data, 1902 * Certainly, it is possible to add an offset to skb data,
1899 * but taking into account that pulling is expected to 1903 * but taking into account that pulling is expected to
1900 * be very rare operation, it is worth to fight against 1904 * be very rare operation, it is worth to fight against
1901 * further bloating skb head and crucify ourselves here instead. 1905 * further bloating skb head and crucify ourselves here instead.
diff --git a/net/core/sock.c b/net/core/sock.c
index 35656a9e4e44..759400053110 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1677,12 +1677,17 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1677 newsk->sk_dst_pending_confirm = 0; 1677 newsk->sk_dst_pending_confirm = 0;
1678 newsk->sk_wmem_queued = 0; 1678 newsk->sk_wmem_queued = 0;
1679 newsk->sk_forward_alloc = 0; 1679 newsk->sk_forward_alloc = 0;
1680
1681 /* sk->sk_memcg will be populated at accept() time */
1682 newsk->sk_memcg = NULL;
1683
1680 atomic_set(&newsk->sk_drops, 0); 1684 atomic_set(&newsk->sk_drops, 0);
1681 newsk->sk_send_head = NULL; 1685 newsk->sk_send_head = NULL;
1682 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; 1686 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1683 atomic_set(&newsk->sk_zckey, 0); 1687 atomic_set(&newsk->sk_zckey, 0);
1684 1688
1685 sock_reset_flag(newsk, SOCK_DONE); 1689 sock_reset_flag(newsk, SOCK_DONE);
1690 cgroup_sk_alloc(&newsk->sk_cgrp_data);
1686 1691
1687 rcu_read_lock(); 1692 rcu_read_lock();
1688 filter = rcu_dereference(sk->sk_filter); 1693 filter = rcu_dereference(sk->sk_filter);
@@ -1714,9 +1719,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1714 newsk->sk_incoming_cpu = raw_smp_processor_id(); 1719 newsk->sk_incoming_cpu = raw_smp_processor_id();
1715 atomic64_set(&newsk->sk_cookie, 0); 1720 atomic64_set(&newsk->sk_cookie, 0);
1716 1721
1717 mem_cgroup_sk_alloc(newsk);
1718 cgroup_sk_alloc(&newsk->sk_cgrp_data);
1719
1720 /* 1722 /*
1721 * Before updating sk_refcnt, we must commit prior changes to memory 1723 * Before updating sk_refcnt, we must commit prior changes to memory
1722 * (Documentation/RCU/rculist_nulls.txt for details) 1724 * (Documentation/RCU/rculist_nulls.txt for details)
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index eed1ebf7f29d..b1e0dbea1e8c 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -36,9 +36,14 @@ int reuseport_alloc(struct sock *sk)
36 * soft irq of receive path or setsockopt from process context 36 * soft irq of receive path or setsockopt from process context
37 */ 37 */
38 spin_lock_bh(&reuseport_lock); 38 spin_lock_bh(&reuseport_lock);
39 WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb, 39
40 lockdep_is_held(&reuseport_lock)), 40 /* Allocation attempts can occur concurrently via the setsockopt path
41 "multiple allocations for the same socket"); 41 * and the bind/hash path. Nothing to do when we lose the race.
42 */
43 if (rcu_dereference_protected(sk->sk_reuseport_cb,
44 lockdep_is_held(&reuseport_lock)))
45 goto out;
46
42 reuse = __reuseport_alloc(INIT_SOCKS); 47 reuse = __reuseport_alloc(INIT_SOCKS);
43 if (!reuse) { 48 if (!reuse) {
44 spin_unlock_bh(&reuseport_lock); 49 spin_unlock_bh(&reuseport_lock);
@@ -49,6 +54,7 @@ int reuseport_alloc(struct sock *sk)
49 reuse->num_socks = 1; 54 reuse->num_socks = 1;
50 rcu_assign_pointer(sk->sk_reuseport_cb, reuse); 55 rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
51 56
57out:
52 spin_unlock_bh(&reuseport_lock); 58 spin_unlock_bh(&reuseport_lock);
53 59
54 return 0; 60 return 0;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 001c08696334..0490916864f9 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -414,8 +414,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
414 sk_daddr_set(newsk, ireq->ir_rmt_addr); 414 sk_daddr_set(newsk, ireq->ir_rmt_addr);
415 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); 415 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
416 newinet->inet_saddr = ireq->ir_loc_addr; 416 newinet->inet_saddr = ireq->ir_loc_addr;
417 newinet->inet_opt = ireq->opt; 417 RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
418 ireq->opt = NULL;
419 newinet->mc_index = inet_iif(skb); 418 newinet->mc_index = inet_iif(skb);
420 newinet->mc_ttl = ip_hdr(skb)->ttl; 419 newinet->mc_ttl = ip_hdr(skb)->ttl;
421 newinet->inet_id = jiffies; 420 newinet->inet_id = jiffies;
@@ -430,7 +429,10 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
430 if (__inet_inherit_port(sk, newsk) < 0) 429 if (__inet_inherit_port(sk, newsk) < 0)
431 goto put_and_exit; 430 goto put_and_exit;
432 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); 431 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
433 432 if (*own_req)
433 ireq->ireq_opt = NULL;
434 else
435 newinet->inet_opt = NULL;
434 return newsk; 436 return newsk;
435 437
436exit_overflow: 438exit_overflow:
@@ -441,6 +443,7 @@ exit:
441 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); 443 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
442 return NULL; 444 return NULL;
443put_and_exit: 445put_and_exit:
446 newinet->inet_opt = NULL;
444 inet_csk_prepare_forced_close(newsk); 447 inet_csk_prepare_forced_close(newsk);
445 dccp_done(newsk); 448 dccp_done(newsk);
446 goto exit; 449 goto exit;
@@ -492,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
492 ireq->ir_rmt_addr); 495 ireq->ir_rmt_addr);
493 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, 496 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
494 ireq->ir_rmt_addr, 497 ireq->ir_rmt_addr,
495 ireq->opt); 498 rcu_dereference(ireq->ireq_opt));
496 err = net_xmit_eval(err); 499 err = net_xmit_eval(err);
497 } 500 }
498 501
@@ -548,7 +551,7 @@ out:
548static void dccp_v4_reqsk_destructor(struct request_sock *req) 551static void dccp_v4_reqsk_destructor(struct request_sock *req)
549{ 552{
550 dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg); 553 dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
551 kfree(inet_rsk(req)->opt); 554 kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
552} 555}
553 556
554void dccp_syn_ack_timeout(const struct request_sock *req) 557void dccp_syn_ack_timeout(const struct request_sock *req)
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 8737412c7b27..e1d4d898a007 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -224,7 +224,7 @@ static int dns_resolver_match_preparse(struct key_match_data *match_data)
224static void dns_resolver_describe(const struct key *key, struct seq_file *m) 224static void dns_resolver_describe(const struct key *key, struct seq_file *m)
225{ 225{
226 seq_puts(m, key->description); 226 seq_puts(m, key->description);
227 if (key_is_instantiated(key)) { 227 if (key_is_positive(key)) {
228 int err = PTR_ERR(key->payload.data[dns_key_error]); 228 int err = PTR_ERR(key->payload.data[dns_key_error]);
229 229
230 if (err) 230 if (err)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 91a2557942fa..f48fe6fc7e8c 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -70,11 +70,9 @@ config IP_MULTIPLE_TABLES
70 address into account. Furthermore, the TOS (Type-Of-Service) field 70 address into account. Furthermore, the TOS (Type-Of-Service) field
71 of the packet can be used for routing decisions as well. 71 of the packet can be used for routing decisions as well.
72 72
73 If you are interested in this, please see the preliminary 73 If you need more information, see the Linux Advanced
74 documentation at <http://www.compendium.com.ar/policy-routing.txt> 74 Routing and Traffic Control documentation at
75 and <ftp://post.tepkom.ru/pub/vol2/Linux/docs/advanced-routing.tex>. 75 <http://lartc.org/howto/lartc.rpdb.html>
76 You will need supporting software from
77 <ftp://ftp.tux.org/pub/net/ip-routing/>.
78 76
79 If unsure, say N. 77 If unsure, say N.
80 78
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 2ae8f54cb321..82178cc69c96 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1951,7 +1951,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
1951 buf = NULL; 1951 buf = NULL;
1952 1952
1953 req_inet = inet_rsk(req); 1953 req_inet = inet_rsk(req);
1954 opt = xchg(&req_inet->opt, opt); 1954 opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
1955 if (opt) 1955 if (opt)
1956 kfree_rcu(opt, rcu); 1956 kfree_rcu(opt, rcu);
1957 1957
@@ -1973,11 +1973,13 @@ req_setattr_failure:
1973 * values on failure. 1973 * values on failure.
1974 * 1974 *
1975 */ 1975 */
1976static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) 1976static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
1977{ 1977{
1978 struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1);
1978 int hdr_delta = 0; 1979 int hdr_delta = 0;
1979 struct ip_options_rcu *opt = *opt_ptr;
1980 1980
1981 if (!opt || opt->opt.cipso == 0)
1982 return 0;
1981 if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { 1983 if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
1982 u8 cipso_len; 1984 u8 cipso_len;
1983 u8 cipso_off; 1985 u8 cipso_off;
@@ -2039,14 +2041,10 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
2039 */ 2041 */
2040void cipso_v4_sock_delattr(struct sock *sk) 2042void cipso_v4_sock_delattr(struct sock *sk)
2041{ 2043{
2042 int hdr_delta;
2043 struct ip_options_rcu *opt;
2044 struct inet_sock *sk_inet; 2044 struct inet_sock *sk_inet;
2045 int hdr_delta;
2045 2046
2046 sk_inet = inet_sk(sk); 2047 sk_inet = inet_sk(sk);
2047 opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
2048 if (!opt || opt->opt.cipso == 0)
2049 return;
2050 2048
2051 hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); 2049 hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
2052 if (sk_inet->is_icsk && hdr_delta > 0) { 2050 if (sk_inet->is_icsk && hdr_delta > 0) {
@@ -2066,15 +2064,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
2066 */ 2064 */
2067void cipso_v4_req_delattr(struct request_sock *req) 2065void cipso_v4_req_delattr(struct request_sock *req)
2068{ 2066{
2069 struct ip_options_rcu *opt; 2067 cipso_v4_delopt(&inet_rsk(req)->ireq_opt);
2070 struct inet_request_sock *req_inet;
2071
2072 req_inet = inet_rsk(req);
2073 opt = req_inet->opt;
2074 if (!opt || opt->opt.cipso == 0)
2075 return;
2076
2077 cipso_v4_delopt(&req_inet->opt);
2078} 2068}
2079 2069
2080/** 2070/**
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 5c965ecc96a0..ca03a1dcbc8f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -475,6 +475,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
475 } 475 }
476 spin_unlock_bh(&queue->fastopenq.lock); 476 spin_unlock_bh(&queue->fastopenq.lock);
477 } 477 }
478 mem_cgroup_sk_alloc(newsk);
478out: 479out:
479 release_sock(sk); 480 release_sock(sk);
480 if (req) 481 if (req)
@@ -537,9 +538,10 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
537{ 538{
538 const struct inet_request_sock *ireq = inet_rsk(req); 539 const struct inet_request_sock *ireq = inet_rsk(req);
539 struct net *net = read_pnet(&ireq->ireq_net); 540 struct net *net = read_pnet(&ireq->ireq_net);
540 struct ip_options_rcu *opt = ireq->opt; 541 struct ip_options_rcu *opt;
541 struct rtable *rt; 542 struct rtable *rt;
542 543
544 opt = rcu_dereference(ireq->ireq_opt);
543 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, 545 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
544 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 546 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
545 sk->sk_protocol, inet_sk_flowi_flags(sk), 547 sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -573,10 +575,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
573 struct flowi4 *fl4; 575 struct flowi4 *fl4;
574 struct rtable *rt; 576 struct rtable *rt;
575 577
578 opt = rcu_dereference(ireq->ireq_opt);
576 fl4 = &newinet->cork.fl.u.ip4; 579 fl4 = &newinet->cork.fl.u.ip4;
577 580
578 rcu_read_lock();
579 opt = rcu_dereference(newinet->inet_opt);
580 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, 581 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
581 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 582 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
582 sk->sk_protocol, inet_sk_flowi_flags(sk), 583 sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -589,13 +590,11 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
589 goto no_route; 590 goto no_route;
590 if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) 591 if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
591 goto route_err; 592 goto route_err;
592 rcu_read_unlock();
593 return &rt->dst; 593 return &rt->dst;
594 594
595route_err: 595route_err:
596 ip_rt_put(rt); 596 ip_rt_put(rt);
597no_route: 597no_route:
598 rcu_read_unlock();
599 __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); 598 __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
600 return NULL; 599 return NULL;
601} 600}
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 597bb4cfe805..e7d15fb0d94d 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -456,10 +456,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
456 return reuseport_add_sock(sk, sk2); 456 return reuseport_add_sock(sk, sk2);
457 } 457 }
458 458
459 /* Initial allocation may have already happened via setsockopt */ 459 return reuseport_alloc(sk);
460 if (!rcu_access_pointer(sk->sk_reuseport_cb))
461 return reuseport_alloc(sk);
462 return 0;
463} 460}
464 461
465int __inet_hash(struct sock *sk, struct sock *osk) 462int __inet_hash(struct sock *sk, struct sock *osk)
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index b1bb1b3a1082..77cf32a80952 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -355,7 +355,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
355 /* We throwed the options of the initial SYN away, so we hope 355 /* We throwed the options of the initial SYN away, so we hope
356 * the ACK carries the same options again (see RFC1122 4.2.3.8) 356 * the ACK carries the same options again (see RFC1122 4.2.3.8)
357 */ 357 */
358 ireq->opt = tcp_v4_save_options(sock_net(sk), skb); 358 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(sock_net(sk), skb));
359 359
360 if (security_inet_conn_request(sk, skb, req)) { 360 if (security_inet_conn_request(sk, skb, req)) {
361 reqsk_free(req); 361 reqsk_free(req);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b2390bfdc68f..ab3f12898245 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6167,7 +6167,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
6167 struct inet_request_sock *ireq = inet_rsk(req); 6167 struct inet_request_sock *ireq = inet_rsk(req);
6168 6168
6169 kmemcheck_annotate_bitfield(ireq, flags); 6169 kmemcheck_annotate_bitfield(ireq, flags);
6170 ireq->opt = NULL; 6170 ireq->ireq_opt = NULL;
6171#if IS_ENABLED(CONFIG_IPV6) 6171#if IS_ENABLED(CONFIG_IPV6)
6172 ireq->pktopts = NULL; 6172 ireq->pktopts = NULL;
6173#endif 6173#endif
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 28ca4e177047..e22439f05e46 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -877,7 +877,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
877 877
878 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, 878 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
879 ireq->ir_rmt_addr, 879 ireq->ir_rmt_addr,
880 ireq->opt); 880 rcu_dereference(ireq->ireq_opt));
881 err = net_xmit_eval(err); 881 err = net_xmit_eval(err);
882 } 882 }
883 883
@@ -889,7 +889,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
889 */ 889 */
890static void tcp_v4_reqsk_destructor(struct request_sock *req) 890static void tcp_v4_reqsk_destructor(struct request_sock *req)
891{ 891{
892 kfree(inet_rsk(req)->opt); 892 kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
893} 893}
894 894
895#ifdef CONFIG_TCP_MD5SIG 895#ifdef CONFIG_TCP_MD5SIG
@@ -1265,10 +1265,11 @@ static void tcp_v4_init_req(struct request_sock *req,
1265 struct sk_buff *skb) 1265 struct sk_buff *skb)
1266{ 1266{
1267 struct inet_request_sock *ireq = inet_rsk(req); 1267 struct inet_request_sock *ireq = inet_rsk(req);
1268 struct net *net = sock_net(sk_listener);
1268 1269
1269 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); 1270 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1270 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); 1271 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1271 ireq->opt = tcp_v4_save_options(sock_net(sk_listener), skb); 1272 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1272} 1273}
1273 1274
1274static struct dst_entry *tcp_v4_route_req(const struct sock *sk, 1275static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
@@ -1355,10 +1356,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1355 sk_daddr_set(newsk, ireq->ir_rmt_addr); 1356 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1356 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); 1357 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1357 newsk->sk_bound_dev_if = ireq->ir_iif; 1358 newsk->sk_bound_dev_if = ireq->ir_iif;
1358 newinet->inet_saddr = ireq->ir_loc_addr; 1359 newinet->inet_saddr = ireq->ir_loc_addr;
1359 inet_opt = ireq->opt; 1360 inet_opt = rcu_dereference(ireq->ireq_opt);
1360 rcu_assign_pointer(newinet->inet_opt, inet_opt); 1361 RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1361 ireq->opt = NULL;
1362 newinet->mc_index = inet_iif(skb); 1362 newinet->mc_index = inet_iif(skb);
1363 newinet->mc_ttl = ip_hdr(skb)->ttl; 1363 newinet->mc_ttl = ip_hdr(skb)->ttl;
1364 newinet->rcv_tos = ip_hdr(skb)->tos; 1364 newinet->rcv_tos = ip_hdr(skb)->tos;
@@ -1403,9 +1403,12 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1403 if (__inet_inherit_port(sk, newsk) < 0) 1403 if (__inet_inherit_port(sk, newsk) < 0)
1404 goto put_and_exit; 1404 goto put_and_exit;
1405 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); 1405 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1406 if (*own_req) 1406 if (likely(*own_req)) {
1407 tcp_move_syn(newtp, req); 1407 tcp_move_syn(newtp, req);
1408 1408 ireq->ireq_opt = NULL;
1409 } else {
1410 newinet->inet_opt = NULL;
1411 }
1409 return newsk; 1412 return newsk;
1410 1413
1411exit_overflow: 1414exit_overflow:
@@ -1416,6 +1419,7 @@ exit:
1416 tcp_listendrop(sk); 1419 tcp_listendrop(sk);
1417 return NULL; 1420 return NULL;
1418put_and_exit: 1421put_and_exit:
1422 newinet->inet_opt = NULL;
1419 inet_csk_prepare_forced_close(newsk); 1423 inet_csk_prepare_forced_close(newsk);
1420 tcp_done(newsk); 1424 tcp_done(newsk);
1421 goto exit; 1425 goto exit;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 7c9a6e4a7253..a6699af05539 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -231,10 +231,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
231 } 231 }
232 } 232 }
233 233
234 /* Initial allocation may have already happened via setsockopt */ 234 return reuseport_alloc(sk);
235 if (!rcu_access_pointer(sk->sk_reuseport_cb))
236 return reuseport_alloc(sk);
237 return 0;
238} 235}
239 236
240/** 237/**
@@ -1061,7 +1058,7 @@ back_from_confirm:
1061 /* ... which is an evident application bug. --ANK */ 1058 /* ... which is an evident application bug. --ANK */
1062 release_sock(sk); 1059 release_sock(sk);
1063 1060
1064 net_dbg_ratelimited("cork app bug 2\n"); 1061 net_dbg_ratelimited("socket already corked\n");
1065 err = -EINVAL; 1062 err = -EINVAL;
1066 goto out; 1063 goto out;
1067 } 1064 }
@@ -1144,7 +1141,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
1144 if (unlikely(!up->pending)) { 1141 if (unlikely(!up->pending)) {
1145 release_sock(sk); 1142 release_sock(sk);
1146 1143
1147 net_dbg_ratelimited("udp cork app bug 3\n"); 1144 net_dbg_ratelimited("cork failed\n");
1148 return -EINVAL; 1145 return -EINVAL;
1149 } 1146 }
1150 1147
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 8081bafe441b..15535ee327c5 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -315,6 +315,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
315 } 315 }
316 opt_space->dst1opt = fopt->dst1opt; 316 opt_space->dst1opt = fopt->dst1opt;
317 opt_space->opt_flen = fopt->opt_flen; 317 opt_space->opt_flen = fopt->opt_flen;
318 opt_space->tot_len = fopt->tot_len;
318 return opt_space; 319 return opt_space;
319} 320}
320EXPORT_SYMBOL_GPL(fl6_merge_options); 321EXPORT_SYMBOL_GPL(fl6_merge_options);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 43ca864327c7..5110a418cc4d 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1161,11 +1161,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1161 if (WARN_ON(v6_cork->opt)) 1161 if (WARN_ON(v6_cork->opt))
1162 return -EINVAL; 1162 return -EINVAL;
1163 1163
1164 v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation); 1164 v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
1165 if (unlikely(!v6_cork->opt)) 1165 if (unlikely(!v6_cork->opt))
1166 return -ENOBUFS; 1166 return -ENOBUFS;
1167 1167
1168 v6_cork->opt->tot_len = opt->tot_len; 1168 v6_cork->opt->tot_len = sizeof(*opt);
1169 v6_cork->opt->opt_flen = opt->opt_flen; 1169 v6_cork->opt->opt_flen = opt->opt_flen;
1170 v6_cork->opt->opt_nflen = opt->opt_nflen; 1170 v6_cork->opt->opt_nflen = opt->opt_nflen;
1171 1171
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index bc6e8bfc5be4..f50452b919d5 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -988,6 +988,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
988 session->name, cmd, arg); 988 session->name, cmd, arg);
989 989
990 sk = ps->sock; 990 sk = ps->sock;
991 if (!sk)
992 return -EBADR;
993
991 sock_hold(sk); 994 sock_hold(sk);
992 995
993 switch (cmd) { 996 switch (cmd) {
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index a98fc2b5e0dc..ae995c8480db 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -4,7 +4,7 @@
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net> 5 * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
6 * Copyright 2013-2014 Intel Mobile Communications GmbH 6 * Copyright 2013-2014 Intel Mobile Communications GmbH
7 * Copyright 2015 Intel Deutschland GmbH 7 * Copyright 2015-2017 Intel Deutschland GmbH
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
@@ -620,9 +620,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
620 620
621 pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; 621 pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
622 idx = key->conf.keyidx; 622 idx = key->conf.keyidx;
623 key->local = sdata->local;
624 key->sdata = sdata;
625 key->sta = sta;
626 623
627 mutex_lock(&sdata->local->key_mtx); 624 mutex_lock(&sdata->local->key_mtx);
628 625
@@ -633,6 +630,21 @@ int ieee80211_key_link(struct ieee80211_key *key,
633 else 630 else
634 old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]); 631 old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
635 632
633 /*
634 * Silently accept key re-installation without really installing the
635 * new version of the key to avoid nonce reuse or replay issues.
636 */
637 if (old_key && key->conf.keylen == old_key->conf.keylen &&
638 !memcmp(key->conf.key, old_key->conf.key, key->conf.keylen)) {
639 ieee80211_key_free_unused(key);
640 ret = 0;
641 goto out;
642 }
643
644 key->local = sdata->local;
645 key->sdata = sdata;
646 key->sta = sta;
647
636 increment_tailroom_need_count(sdata); 648 increment_tailroom_need_count(sdata);
637 649
638 ieee80211_key_replace(sdata, sta, pairwise, old_key, key); 650 ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
@@ -648,6 +660,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
648 ret = 0; 660 ret = 0;
649 } 661 }
650 662
663 out:
651 mutex_unlock(&sdata->local->key_mtx); 664 mutex_unlock(&sdata->local->key_mtx);
652 665
653 return ret; 666 return ret;
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index af3d636534ef..d30f7bd741d0 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -286,6 +286,7 @@ struct ncsi_dev_priv {
286 struct work_struct work; /* For channel management */ 286 struct work_struct work; /* For channel management */
287 struct packet_type ptype; /* NCSI packet Rx handler */ 287 struct packet_type ptype; /* NCSI packet Rx handler */
288 struct list_head node; /* Form NCSI device list */ 288 struct list_head node; /* Form NCSI device list */
289#define NCSI_MAX_VLAN_VIDS 15
289 struct list_head vlan_vids; /* List of active VLAN IDs */ 290 struct list_head vlan_vids; /* List of active VLAN IDs */
290}; 291};
291 292
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index 6898e7229285..f135938bf781 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -187,7 +187,7 @@ static struct ncsi_aen_handler {
187} ncsi_aen_handlers[] = { 187} ncsi_aen_handlers[] = {
188 { NCSI_PKT_AEN_LSC, 12, ncsi_aen_handler_lsc }, 188 { NCSI_PKT_AEN_LSC, 12, ncsi_aen_handler_lsc },
189 { NCSI_PKT_AEN_CR, 4, ncsi_aen_handler_cr }, 189 { NCSI_PKT_AEN_CR, 4, ncsi_aen_handler_cr },
190 { NCSI_PKT_AEN_HNCDSC, 4, ncsi_aen_handler_hncdsc } 190 { NCSI_PKT_AEN_HNCDSC, 8, ncsi_aen_handler_hncdsc }
191}; 191};
192 192
193int ncsi_aen_handler(struct ncsi_dev_priv *ndp, struct sk_buff *skb) 193int ncsi_aen_handler(struct ncsi_dev_priv *ndp, struct sk_buff *skb)
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 3fd3c39e6278..28c42b22b748 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -189,6 +189,7 @@ static void ncsi_channel_monitor(unsigned long data)
189 struct ncsi_channel *nc = (struct ncsi_channel *)data; 189 struct ncsi_channel *nc = (struct ncsi_channel *)data;
190 struct ncsi_package *np = nc->package; 190 struct ncsi_package *np = nc->package;
191 struct ncsi_dev_priv *ndp = np->ndp; 191 struct ncsi_dev_priv *ndp = np->ndp;
192 struct ncsi_channel_mode *ncm;
192 struct ncsi_cmd_arg nca; 193 struct ncsi_cmd_arg nca;
193 bool enabled, chained; 194 bool enabled, chained;
194 unsigned int monitor_state; 195 unsigned int monitor_state;
@@ -202,11 +203,15 @@ static void ncsi_channel_monitor(unsigned long data)
202 monitor_state = nc->monitor.state; 203 monitor_state = nc->monitor.state;
203 spin_unlock_irqrestore(&nc->lock, flags); 204 spin_unlock_irqrestore(&nc->lock, flags);
204 205
205 if (!enabled || chained) 206 if (!enabled || chained) {
207 ncsi_stop_channel_monitor(nc);
206 return; 208 return;
209 }
207 if (state != NCSI_CHANNEL_INACTIVE && 210 if (state != NCSI_CHANNEL_INACTIVE &&
208 state != NCSI_CHANNEL_ACTIVE) 211 state != NCSI_CHANNEL_ACTIVE) {
212 ncsi_stop_channel_monitor(nc);
209 return; 213 return;
214 }
210 215
211 switch (monitor_state) { 216 switch (monitor_state) {
212 case NCSI_CHANNEL_MONITOR_START: 217 case NCSI_CHANNEL_MONITOR_START:
@@ -217,28 +222,28 @@ static void ncsi_channel_monitor(unsigned long data)
217 nca.type = NCSI_PKT_CMD_GLS; 222 nca.type = NCSI_PKT_CMD_GLS;
218 nca.req_flags = 0; 223 nca.req_flags = 0;
219 ret = ncsi_xmit_cmd(&nca); 224 ret = ncsi_xmit_cmd(&nca);
220 if (ret) { 225 if (ret)
221 netdev_err(ndp->ndev.dev, "Error %d sending GLS\n", 226 netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
222 ret); 227 ret);
223 return;
224 }
225
226 break; 228 break;
227 case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX: 229 case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
228 break; 230 break;
229 default: 231 default:
230 if (!(ndp->flags & NCSI_DEV_HWA) && 232 if (!(ndp->flags & NCSI_DEV_HWA)) {
231 state == NCSI_CHANNEL_ACTIVE) {
232 ncsi_report_link(ndp, true); 233 ncsi_report_link(ndp, true);
233 ndp->flags |= NCSI_DEV_RESHUFFLE; 234 ndp->flags |= NCSI_DEV_RESHUFFLE;
234 } 235 }
235 236
237 ncsi_stop_channel_monitor(nc);
238
239 ncm = &nc->modes[NCSI_MODE_LINK];
236 spin_lock_irqsave(&nc->lock, flags); 240 spin_lock_irqsave(&nc->lock, flags);
237 nc->state = NCSI_CHANNEL_INVISIBLE; 241 nc->state = NCSI_CHANNEL_INVISIBLE;
242 ncm->data[2] &= ~0x1;
238 spin_unlock_irqrestore(&nc->lock, flags); 243 spin_unlock_irqrestore(&nc->lock, flags);
239 244
240 spin_lock_irqsave(&ndp->lock, flags); 245 spin_lock_irqsave(&ndp->lock, flags);
241 nc->state = NCSI_CHANNEL_INACTIVE; 246 nc->state = NCSI_CHANNEL_ACTIVE;
242 list_add_tail_rcu(&nc->link, &ndp->channel_queue); 247 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
243 spin_unlock_irqrestore(&ndp->lock, flags); 248 spin_unlock_irqrestore(&ndp->lock, flags);
244 ncsi_process_next_channel(ndp); 249 ncsi_process_next_channel(ndp);
@@ -732,6 +737,10 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
732 if (index < 0) { 737 if (index < 0) {
733 netdev_err(ndp->ndev.dev, 738 netdev_err(ndp->ndev.dev,
734 "Failed to add new VLAN tag, error %d\n", index); 739 "Failed to add new VLAN tag, error %d\n", index);
740 if (index == -ENOSPC)
741 netdev_err(ndp->ndev.dev,
742 "Channel %u already has all VLAN filters set\n",
743 nc->id);
735 return -1; 744 return -1;
736 } 745 }
737 746
@@ -998,12 +1007,15 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
998 struct ncsi_package *np; 1007 struct ncsi_package *np;
999 struct ncsi_channel *nc; 1008 struct ncsi_channel *nc;
1000 unsigned int cap; 1009 unsigned int cap;
1010 bool has_channel = false;
1001 1011
1002 /* The hardware arbitration is disabled if any one channel 1012 /* The hardware arbitration is disabled if any one channel
1003 * doesn't support explicitly. 1013 * doesn't support explicitly.
1004 */ 1014 */
1005 NCSI_FOR_EACH_PACKAGE(ndp, np) { 1015 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1006 NCSI_FOR_EACH_CHANNEL(np, nc) { 1016 NCSI_FOR_EACH_CHANNEL(np, nc) {
1017 has_channel = true;
1018
1007 cap = nc->caps[NCSI_CAP_GENERIC].cap; 1019 cap = nc->caps[NCSI_CAP_GENERIC].cap;
1008 if (!(cap & NCSI_CAP_GENERIC_HWA) || 1020 if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1009 (cap & NCSI_CAP_GENERIC_HWA_MASK) != 1021 (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
@@ -1014,8 +1026,13 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1014 } 1026 }
1015 } 1027 }
1016 1028
1017 ndp->flags |= NCSI_DEV_HWA; 1029 if (has_channel) {
1018 return true; 1030 ndp->flags |= NCSI_DEV_HWA;
1031 return true;
1032 }
1033
1034 ndp->flags &= ~NCSI_DEV_HWA;
1035 return false;
1019} 1036}
1020 1037
1021static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp) 1038static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
@@ -1403,7 +1420,6 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1403 1420
1404int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 1421int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1405{ 1422{
1406 struct ncsi_channel_filter *ncf;
1407 struct ncsi_dev_priv *ndp; 1423 struct ncsi_dev_priv *ndp;
1408 unsigned int n_vids = 0; 1424 unsigned int n_vids = 0;
1409 struct vlan_vid *vlan; 1425 struct vlan_vid *vlan;
@@ -1420,7 +1436,6 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1420 } 1436 }
1421 1437
1422 ndp = TO_NCSI_DEV_PRIV(nd); 1438 ndp = TO_NCSI_DEV_PRIV(nd);
1423 ncf = ndp->hot_channel->filters[NCSI_FILTER_VLAN];
1424 1439
1425 /* Add the VLAN id to our internal list */ 1440 /* Add the VLAN id to our internal list */
1426 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) { 1441 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
@@ -1431,12 +1446,11 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1431 return 0; 1446 return 0;
1432 } 1447 }
1433 } 1448 }
1434 1449 if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1435 if (n_vids >= ncf->total) { 1450 netdev_warn(dev,
1436 netdev_info(dev, 1451 "tried to add vlan id %u but NCSI max already registered (%u)\n",
1437 "NCSI Channel supports up to %u VLAN tags but %u are already set\n", 1452 vid, NCSI_MAX_VLAN_VIDS);
1438 ncf->total, n_vids); 1453 return -ENOSPC;
1439 return -EINVAL;
1440 } 1454 }
1441 1455
1442 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 1456 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 265b9a892d41..927dad4759d1 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -959,7 +959,7 @@ static struct ncsi_rsp_handler {
959 { NCSI_PKT_RSP_EGMF, 4, ncsi_rsp_handler_egmf }, 959 { NCSI_PKT_RSP_EGMF, 4, ncsi_rsp_handler_egmf },
960 { NCSI_PKT_RSP_DGMF, 4, ncsi_rsp_handler_dgmf }, 960 { NCSI_PKT_RSP_DGMF, 4, ncsi_rsp_handler_dgmf },
961 { NCSI_PKT_RSP_SNFC, 4, ncsi_rsp_handler_snfc }, 961 { NCSI_PKT_RSP_SNFC, 4, ncsi_rsp_handler_snfc },
962 { NCSI_PKT_RSP_GVI, 36, ncsi_rsp_handler_gvi }, 962 { NCSI_PKT_RSP_GVI, 40, ncsi_rsp_handler_gvi },
963 { NCSI_PKT_RSP_GC, 32, ncsi_rsp_handler_gc }, 963 { NCSI_PKT_RSP_GC, 32, ncsi_rsp_handler_gc },
964 { NCSI_PKT_RSP_GP, -1, ncsi_rsp_handler_gp }, 964 { NCSI_PKT_RSP_GP, -1, ncsi_rsp_handler_gp },
965 { NCSI_PKT_RSP_GCPS, 172, ncsi_rsp_handler_gcps }, 965 { NCSI_PKT_RSP_GCPS, 172, ncsi_rsp_handler_gcps },
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 336d9c6dcad9..767c84e10e20 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2307,6 +2307,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
2307 size_t tlvlen = 0; 2307 size_t tlvlen = 0;
2308 struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk); 2308 struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
2309 unsigned int flags = 0; 2309 unsigned int flags = 0;
2310 bool nlk_has_extack = nlk->flags & NETLINK_F_EXT_ACK;
2310 2311
2311 /* Error messages get the original request appened, unless the user 2312 /* Error messages get the original request appened, unless the user
2312 * requests to cap the error message, and get extra error data if 2313 * requests to cap the error message, and get extra error data if
@@ -2317,7 +2318,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
2317 payload += nlmsg_len(nlh); 2318 payload += nlmsg_len(nlh);
2318 else 2319 else
2319 flags |= NLM_F_CAPPED; 2320 flags |= NLM_F_CAPPED;
2320 if (nlk->flags & NETLINK_F_EXT_ACK && extack) { 2321 if (nlk_has_extack && extack) {
2321 if (extack->_msg) 2322 if (extack->_msg)
2322 tlvlen += nla_total_size(strlen(extack->_msg) + 1); 2323 tlvlen += nla_total_size(strlen(extack->_msg) + 1);
2323 if (extack->bad_attr) 2324 if (extack->bad_attr)
@@ -2326,8 +2327,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
2326 } else { 2327 } else {
2327 flags |= NLM_F_CAPPED; 2328 flags |= NLM_F_CAPPED;
2328 2329
2329 if (nlk->flags & NETLINK_F_EXT_ACK && 2330 if (nlk_has_extack && extack && extack->cookie_len)
2330 extack && extack->cookie_len)
2331 tlvlen += nla_total_size(extack->cookie_len); 2331 tlvlen += nla_total_size(extack->cookie_len);
2332 } 2332 }
2333 2333
@@ -2347,7 +2347,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
2347 errmsg->error = err; 2347 errmsg->error = err;
2348 memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh)); 2348 memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
2349 2349
2350 if (nlk->flags & NETLINK_F_EXT_ACK && extack) { 2350 if (nlk_has_extack && extack) {
2351 if (err) { 2351 if (err) {
2352 if (extack->_msg) 2352 if (extack->_msg)
2353 WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG, 2353 WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 3f5caa3fbd06..4f4fa323171d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1767,7 +1767,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1767 1767
1768out: 1768out:
1769 if (err && rollover) { 1769 if (err && rollover) {
1770 kfree(rollover); 1770 kfree_rcu(rollover, rcu);
1771 po->rollover = NULL; 1771 po->rollover = NULL;
1772 } 1772 }
1773 mutex_unlock(&fanout_mutex); 1773 mutex_unlock(&fanout_mutex);
@@ -1794,8 +1794,10 @@ static struct packet_fanout *fanout_release(struct sock *sk)
1794 else 1794 else
1795 f = NULL; 1795 f = NULL;
1796 1796
1797 if (po->rollover) 1797 if (po->rollover) {
1798 kfree_rcu(po->rollover, rcu); 1798 kfree_rcu(po->rollover, rcu);
1799 po->rollover = NULL;
1800 }
1799 } 1801 }
1800 mutex_unlock(&fanout_mutex); 1802 mutex_unlock(&fanout_mutex);
1801 1803
@@ -3849,6 +3851,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3849 void *data = &val; 3851 void *data = &val;
3850 union tpacket_stats_u st; 3852 union tpacket_stats_u st;
3851 struct tpacket_rollover_stats rstats; 3853 struct tpacket_rollover_stats rstats;
3854 struct packet_rollover *rollover;
3852 3855
3853 if (level != SOL_PACKET) 3856 if (level != SOL_PACKET)
3854 return -ENOPROTOOPT; 3857 return -ENOPROTOOPT;
@@ -3927,13 +3930,18 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3927 0); 3930 0);
3928 break; 3931 break;
3929 case PACKET_ROLLOVER_STATS: 3932 case PACKET_ROLLOVER_STATS:
3930 if (!po->rollover) 3933 rcu_read_lock();
3934 rollover = rcu_dereference(po->rollover);
3935 if (rollover) {
3936 rstats.tp_all = atomic_long_read(&rollover->num);
3937 rstats.tp_huge = atomic_long_read(&rollover->num_huge);
3938 rstats.tp_failed = atomic_long_read(&rollover->num_failed);
3939 data = &rstats;
3940 lv = sizeof(rstats);
3941 }
3942 rcu_read_unlock();
3943 if (!rollover)
3931 return -EINVAL; 3944 return -EINVAL;
3932 rstats.tp_all = atomic_long_read(&po->rollover->num);
3933 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
3934 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
3935 data = &rstats;
3936 lv = sizeof(rstats);
3937 break; 3945 break;
3938 case PACKET_TX_HAS_OFF: 3946 case PACKET_TX_HAS_OFF:
3939 val = po->tp_tx_has_off; 3947 val = po->tp_tx_has_off;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 73c980e26581..054e32872808 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -311,10 +311,11 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
311 call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len, 311 call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
312 gfp); 312 gfp);
313 /* The socket has been unlocked. */ 313 /* The socket has been unlocked. */
314 if (!IS_ERR(call)) 314 if (!IS_ERR(call)) {
315 call->notify_rx = notify_rx; 315 call->notify_rx = notify_rx;
316 mutex_unlock(&call->user_mutex);
317 }
316 318
317 mutex_unlock(&call->user_mutex);
318 _leave(" = %p", call); 319 _leave(" = %p", call);
319 return call; 320 return call;
320} 321}
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 92a07141fd07..34f10e75f3b9 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -421,7 +421,7 @@ void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
421{ 421{
422 struct dst_entry *dst; 422 struct dst_entry *dst;
423 423
424 if (!t) 424 if (sock_owned_by_user(sk) || !t)
425 return; 425 return;
426 dst = sctp_transport_dst_check(t); 426 dst = sctp_transport_dst_check(t);
427 if (dst) 427 if (dst)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 88c28421ec15..c75acdf71a6f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4978,6 +4978,10 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
4978 struct socket *sock; 4978 struct socket *sock;
4979 int err = 0; 4979 int err = 0;
4980 4980
4981 /* Do not peel off from one netns to another one. */
4982 if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
4983 return -EINVAL;
4984
4981 if (!asoc) 4985 if (!asoc)
4982 return -EINVAL; 4986 return -EINVAL;
4983 4987
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index bbac023e70d1..5583df708b8c 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -310,11 +310,15 @@ static void hvs_close_connection(struct vmbus_channel *chan)
310 struct sock *sk = get_per_channel_state(chan); 310 struct sock *sk = get_per_channel_state(chan);
311 struct vsock_sock *vsk = vsock_sk(sk); 311 struct vsock_sock *vsk = vsock_sk(sk);
312 312
313 lock_sock(sk);
314
313 sk->sk_state = TCP_CLOSE; 315 sk->sk_state = TCP_CLOSE;
314 sock_set_flag(sk, SOCK_DONE); 316 sock_set_flag(sk, SOCK_DONE);
315 vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN; 317 vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN;
316 318
317 sk->sk_state_change(sk); 319 sk->sk_state_change(sk);
320
321 release_sock(sk);
318} 322}
319 323
320static void hvs_open_connection(struct vmbus_channel *chan) 324static void hvs_open_connection(struct vmbus_channel *chan)
@@ -344,6 +348,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
344 if (!sk) 348 if (!sk)
345 return; 349 return;
346 350
351 lock_sock(sk);
347 if ((conn_from_host && sk->sk_state != TCP_LISTEN) || 352 if ((conn_from_host && sk->sk_state != TCP_LISTEN) ||
348 (!conn_from_host && sk->sk_state != TCP_SYN_SENT)) 353 (!conn_from_host && sk->sk_state != TCP_SYN_SENT))
349 goto out; 354 goto out;
@@ -395,9 +400,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
395 400
396 vsock_insert_connected(vnew); 401 vsock_insert_connected(vnew);
397 402
398 lock_sock(sk);
399 vsock_enqueue_accept(sk, new); 403 vsock_enqueue_accept(sk, new);
400 release_sock(sk);
401 } else { 404 } else {
402 sk->sk_state = TCP_ESTABLISHED; 405 sk->sk_state = TCP_ESTABLISHED;
403 sk->sk_socket->state = SS_CONNECTED; 406 sk->sk_socket->state = SS_CONNECTED;
@@ -410,6 +413,8 @@ static void hvs_open_connection(struct vmbus_channel *chan)
410out: 413out:
411 /* Release refcnt obtained when we called vsock_find_bound_socket() */ 414 /* Release refcnt obtained when we called vsock_find_bound_socket() */
412 sock_put(sk); 415 sock_put(sk);
416
417 release_sock(sk);
413} 418}
414 419
415static u32 hvs_get_local_cid(void) 420static u32 hvs_get_local_cid(void)
@@ -476,13 +481,21 @@ out:
476 481
477static void hvs_release(struct vsock_sock *vsk) 482static void hvs_release(struct vsock_sock *vsk)
478{ 483{
484 struct sock *sk = sk_vsock(vsk);
479 struct hvsock *hvs = vsk->trans; 485 struct hvsock *hvs = vsk->trans;
480 struct vmbus_channel *chan = hvs->chan; 486 struct vmbus_channel *chan;
487
488 lock_sock(sk);
489
490 sk->sk_state = SS_DISCONNECTING;
491 vsock_remove_sock(vsk);
492
493 release_sock(sk);
481 494
495 chan = hvs->chan;
482 if (chan) 496 if (chan)
483 hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN); 497 hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN);
484 498
485 vsock_remove_sock(vsk);
486} 499}
487 500
488static void hvs_destruct(struct vsock_sock *vsk) 501static void hvs_destruct(struct vsock_sock *vsk)
diff --git a/samples/sockmap/sockmap_kern.c b/samples/sockmap/sockmap_kern.c
index f9b38ef82dc2..52b0053274f4 100644
--- a/samples/sockmap/sockmap_kern.c
+++ b/samples/sockmap/sockmap_kern.c
@@ -62,7 +62,7 @@ int bpf_prog2(struct __sk_buff *skb)
62 ret = 1; 62 ret = 1;
63 63
64 bpf_printk("sockmap: %d -> %d @ %d\n", lport, bpf_ntohl(rport), ret); 64 bpf_printk("sockmap: %d -> %d @ %d\n", lport, bpf_ntohl(rport), ret);
65 return bpf_sk_redirect_map(&sock_map, ret, 0); 65 return bpf_sk_redirect_map(skb, &sock_map, ret, 0);
66} 66}
67 67
68SEC("sockops") 68SEC("sockops")
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index bc7fcf010a5b..446beb7ac48d 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -78,29 +78,37 @@ static int simple_thread_fn(void *arg)
78} 78}
79 79
80static DEFINE_MUTEX(thread_mutex); 80static DEFINE_MUTEX(thread_mutex);
81static bool simple_thread_cnt;
81 82
82int foo_bar_reg(void) 83int foo_bar_reg(void)
83{ 84{
85 mutex_lock(&thread_mutex);
86 if (simple_thread_cnt++)
87 goto out;
88
84 pr_info("Starting thread for foo_bar_fn\n"); 89 pr_info("Starting thread for foo_bar_fn\n");
85 /* 90 /*
86 * We shouldn't be able to start a trace when the module is 91 * We shouldn't be able to start a trace when the module is
87 * unloading (there's other locks to prevent that). But 92 * unloading (there's other locks to prevent that). But
88 * for consistency sake, we still take the thread_mutex. 93 * for consistency sake, we still take the thread_mutex.
89 */ 94 */
90 mutex_lock(&thread_mutex);
91 simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn"); 95 simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
96 out:
92 mutex_unlock(&thread_mutex); 97 mutex_unlock(&thread_mutex);
93 return 0; 98 return 0;
94} 99}
95 100
96void foo_bar_unreg(void) 101void foo_bar_unreg(void)
97{ 102{
98 pr_info("Killing thread for foo_bar_fn\n");
99 /* protect against module unloading */
100 mutex_lock(&thread_mutex); 103 mutex_lock(&thread_mutex);
104 if (--simple_thread_cnt)
105 goto out;
106
107 pr_info("Killing thread for foo_bar_fn\n");
101 if (simple_tsk_fn) 108 if (simple_tsk_fn)
102 kthread_stop(simple_tsk_fn); 109 kthread_stop(simple_tsk_fn);
103 simple_tsk_fn = NULL; 110 simple_tsk_fn = NULL;
111 out:
104 mutex_unlock(&thread_mutex); 112 mutex_unlock(&thread_mutex);
105} 113}
106 114
diff --git a/scripts/faddr2line b/scripts/faddr2line
index 29df825d375c..2f6ce802397d 100755
--- a/scripts/faddr2line
+++ b/scripts/faddr2line
@@ -103,11 +103,12 @@ __faddr2line() {
103 103
104 # Go through each of the object's symbols which match the func name. 104 # Go through each of the object's symbols which match the func name.
105 # In rare cases there might be duplicates. 105 # In rare cases there might be duplicates.
106 file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}')
106 while read symbol; do 107 while read symbol; do
107 local fields=($symbol) 108 local fields=($symbol)
108 local sym_base=0x${fields[0]} 109 local sym_base=0x${fields[0]}
109 local sym_type=${fields[1]} 110 local sym_type=${fields[1]}
110 local sym_end=0x${fields[3]} 111 local sym_end=${fields[3]}
111 112
112 # calculate the size 113 # calculate the size
113 local sym_size=$(($sym_end - $sym_base)) 114 local sym_size=$(($sym_end - $sym_base))
@@ -157,7 +158,7 @@ __faddr2line() {
157 addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;" 158 addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
158 DONE=1 159 DONE=1
159 160
160 done < <(nm -n $objfile | awk -v fn=$func '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, $1 }') 161 done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
161} 162}
162 163
163[[ $# -lt 2 ]] && usage 164[[ $# -lt 2 ]] && usage
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 5d554419170b..9ee9bf7fd1a2 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -158,7 +158,7 @@ static int read_symbol(FILE *in, struct sym_entry *s)
158 else if (str[0] == '$') 158 else if (str[0] == '$')
159 return -1; 159 return -1;
160 /* exclude debugging symbols */ 160 /* exclude debugging symbols */
161 else if (stype == 'N') 161 else if (stype == 'N' || stype == 'n')
162 return -1; 162 return -1;
163 163
164 /* include the type field in the symbol name, so that it gets 164 /* include the type field in the symbol name, so that it gets
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index 57263f2f8f2f..c6ad9b1585a1 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -105,6 +105,7 @@ int main(void)
105 DEVID_FIELD(input_device_id, sndbit); 105 DEVID_FIELD(input_device_id, sndbit);
106 DEVID_FIELD(input_device_id, ffbit); 106 DEVID_FIELD(input_device_id, ffbit);
107 DEVID_FIELD(input_device_id, swbit); 107 DEVID_FIELD(input_device_id, swbit);
108 DEVID_FIELD(input_device_id, propbit);
108 109
109 DEVID(eisa_device_id); 110 DEVID(eisa_device_id);
110 DEVID_FIELD(eisa_device_id, sig); 111 DEVID_FIELD(eisa_device_id, sig);
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 6ef6e63f96fd..2b9395501d81 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -761,7 +761,7 @@ static void do_input(char *alias,
761 sprintf(alias + strlen(alias), "%X,*", i); 761 sprintf(alias + strlen(alias), "%X,*", i);
762} 762}
763 763
764/* input:b0v0p0e0-eXkXrXaXmXlXsXfXwX where X is comma-separated %02X. */ 764/* input:b0v0p0e0-eXkXrXaXmXlXsXfXwXprX where X is comma-separated %02X. */
765static int do_input_entry(const char *filename, void *symval, 765static int do_input_entry(const char *filename, void *symval,
766 char *alias) 766 char *alias)
767{ 767{
@@ -779,6 +779,7 @@ static int do_input_entry(const char *filename, void *symval,
779 DEF_FIELD_ADDR(symval, input_device_id, sndbit); 779 DEF_FIELD_ADDR(symval, input_device_id, sndbit);
780 DEF_FIELD_ADDR(symval, input_device_id, ffbit); 780 DEF_FIELD_ADDR(symval, input_device_id, ffbit);
781 DEF_FIELD_ADDR(symval, input_device_id, swbit); 781 DEF_FIELD_ADDR(symval, input_device_id, swbit);
782 DEF_FIELD_ADDR(symval, input_device_id, propbit);
782 783
783 sprintf(alias, "input:"); 784 sprintf(alias, "input:");
784 785
@@ -816,6 +817,9 @@ static int do_input_entry(const char *filename, void *symval,
816 sprintf(alias + strlen(alias), "w*"); 817 sprintf(alias + strlen(alias), "w*");
817 if (flags & INPUT_DEVICE_ID_MATCH_SWBIT) 818 if (flags & INPUT_DEVICE_ID_MATCH_SWBIT)
818 do_input(alias, *swbit, 0, INPUT_DEVICE_ID_SW_MAX); 819 do_input(alias, *swbit, 0, INPUT_DEVICE_ID_SW_MAX);
820 sprintf(alias + strlen(alias), "pr*");
821 if (flags & INPUT_DEVICE_ID_MATCH_PROPBIT)
822 do_input(alias, *propbit, 0, INPUT_DEVICE_ID_PROP_MAX);
819 return 1; 823 return 1;
820} 824}
821ADD_TO_DEVTABLE("input", input_device_id, do_input_entry); 825ADD_TO_DEVTABLE("input", input_device_id, do_input_entry);
diff --git a/security/commoncap.c b/security/commoncap.c
index c25e0d27537f..fc46f5b85251 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -585,13 +585,14 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
585 struct vfs_ns_cap_data data, *nscaps = &data; 585 struct vfs_ns_cap_data data, *nscaps = &data;
586 struct vfs_cap_data *caps = (struct vfs_cap_data *) &data; 586 struct vfs_cap_data *caps = (struct vfs_cap_data *) &data;
587 kuid_t rootkuid; 587 kuid_t rootkuid;
588 struct user_namespace *fs_ns = inode->i_sb->s_user_ns; 588 struct user_namespace *fs_ns;
589 589
590 memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data)); 590 memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data));
591 591
592 if (!inode) 592 if (!inode)
593 return -ENODATA; 593 return -ENODATA;
594 594
595 fs_ns = inode->i_sb->s_user_ns;
595 size = __vfs_getxattr((struct dentry *)dentry, inode, 596 size = __vfs_getxattr((struct dentry *)dentry, inode,
596 XATTR_NAME_CAPS, &data, XATTR_CAPS_SZ); 597 XATTR_NAME_CAPS, &data, XATTR_CAPS_SZ);
597 if (size == -ENODATA || size == -EOPNOTSUPP) 598 if (size == -ENODATA || size == -EOPNOTSUPP)
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index 91eafada3164..6462e6654ccf 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -45,6 +45,7 @@ config BIG_KEYS
45 bool "Large payload keys" 45 bool "Large payload keys"
46 depends on KEYS 46 depends on KEYS
47 depends on TMPFS 47 depends on TMPFS
48 select CRYPTO
48 select CRYPTO_AES 49 select CRYPTO_AES
49 select CRYPTO_GCM 50 select CRYPTO_GCM
50 help 51 help
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index e607830b6154..929e14978c42 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -247,7 +247,7 @@ void big_key_revoke(struct key *key)
247 247
248 /* clear the quota */ 248 /* clear the quota */
249 key_payload_reserve(key, 0); 249 key_payload_reserve(key, 0);
250 if (key_is_instantiated(key) && 250 if (key_is_positive(key) &&
251 (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD) 251 (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
252 vfs_truncate(path, 0); 252 vfs_truncate(path, 0);
253} 253}
@@ -279,7 +279,7 @@ void big_key_describe(const struct key *key, struct seq_file *m)
279 279
280 seq_puts(m, key->description); 280 seq_puts(m, key->description);
281 281
282 if (key_is_instantiated(key)) 282 if (key_is_positive(key))
283 seq_printf(m, ": %zu [%s]", 283 seq_printf(m, ": %zu [%s]",
284 datalen, 284 datalen,
285 datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff"); 285 datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 69855ba0d3b3..d92cbf9687c3 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -309,6 +309,13 @@ static struct key *request_user_key(const char *master_desc, const u8 **master_k
309 309
310 down_read(&ukey->sem); 310 down_read(&ukey->sem);
311 upayload = user_key_payload_locked(ukey); 311 upayload = user_key_payload_locked(ukey);
312 if (!upayload) {
313 /* key was revoked before we acquired its semaphore */
314 up_read(&ukey->sem);
315 key_put(ukey);
316 ukey = ERR_PTR(-EKEYREVOKED);
317 goto error;
318 }
312 *master_key = upayload->data; 319 *master_key = upayload->data;
313 *master_keylen = upayload->datalen; 320 *master_keylen = upayload->datalen;
314error: 321error:
@@ -847,7 +854,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
847 size_t datalen = prep->datalen; 854 size_t datalen = prep->datalen;
848 int ret = 0; 855 int ret = 0;
849 856
850 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) 857 if (key_is_negative(key))
851 return -ENOKEY; 858 return -ENOKEY;
852 if (datalen <= 0 || datalen > 32767 || !prep->data) 859 if (datalen <= 0 || datalen > 32767 || !prep->data)
853 return -EINVAL; 860 return -EINVAL;
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 87cb260e4890..f01d48cb3de1 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -129,15 +129,15 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
129 while (!list_empty(keys)) { 129 while (!list_empty(keys)) {
130 struct key *key = 130 struct key *key =
131 list_entry(keys->next, struct key, graveyard_link); 131 list_entry(keys->next, struct key, graveyard_link);
132 short state = key->state;
133
132 list_del(&key->graveyard_link); 134 list_del(&key->graveyard_link);
133 135
134 kdebug("- %u", key->serial); 136 kdebug("- %u", key->serial);
135 key_check(key); 137 key_check(key);
136 138
137 /* Throw away the key data if the key is instantiated */ 139 /* Throw away the key data if the key is instantiated */
138 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) && 140 if (state == KEY_IS_POSITIVE && key->type->destroy)
139 !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
140 key->type->destroy)
141 key->type->destroy(key); 141 key->type->destroy(key);
142 142
143 security_key_free(key); 143 security_key_free(key);
@@ -151,7 +151,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
151 } 151 }
152 152
153 atomic_dec(&key->user->nkeys); 153 atomic_dec(&key->user->nkeys);
154 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) 154 if (state != KEY_IS_UNINSTANTIATED)
155 atomic_dec(&key->user->nikeys); 155 atomic_dec(&key->user->nikeys);
156 156
157 key_user_put(key->user); 157 key_user_put(key->user);
diff --git a/security/keys/key.c b/security/keys/key.c
index eb914a838840..83bf4b4afd49 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -402,6 +402,18 @@ int key_payload_reserve(struct key *key, size_t datalen)
402EXPORT_SYMBOL(key_payload_reserve); 402EXPORT_SYMBOL(key_payload_reserve);
403 403
404/* 404/*
405 * Change the key state to being instantiated.
406 */
407static void mark_key_instantiated(struct key *key, int reject_error)
408{
409 /* Commit the payload before setting the state; barrier versus
410 * key_read_state().
411 */
412 smp_store_release(&key->state,
413 (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
414}
415
416/*
405 * Instantiate a key and link it into the target keyring atomically. Must be 417 * Instantiate a key and link it into the target keyring atomically. Must be
406 * called with the target keyring's semaphore writelocked. The target key's 418 * called with the target keyring's semaphore writelocked. The target key's
407 * semaphore need not be locked as instantiation is serialised by 419 * semaphore need not be locked as instantiation is serialised by
@@ -424,14 +436,14 @@ static int __key_instantiate_and_link(struct key *key,
424 mutex_lock(&key_construction_mutex); 436 mutex_lock(&key_construction_mutex);
425 437
426 /* can't instantiate twice */ 438 /* can't instantiate twice */
427 if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { 439 if (key->state == KEY_IS_UNINSTANTIATED) {
428 /* instantiate the key */ 440 /* instantiate the key */
429 ret = key->type->instantiate(key, prep); 441 ret = key->type->instantiate(key, prep);
430 442
431 if (ret == 0) { 443 if (ret == 0) {
432 /* mark the key as being instantiated */ 444 /* mark the key as being instantiated */
433 atomic_inc(&key->user->nikeys); 445 atomic_inc(&key->user->nikeys);
434 set_bit(KEY_FLAG_INSTANTIATED, &key->flags); 446 mark_key_instantiated(key, 0);
435 447
436 if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) 448 if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
437 awaken = 1; 449 awaken = 1;
@@ -577,13 +589,10 @@ int key_reject_and_link(struct key *key,
577 mutex_lock(&key_construction_mutex); 589 mutex_lock(&key_construction_mutex);
578 590
579 /* can't instantiate twice */ 591 /* can't instantiate twice */
580 if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { 592 if (key->state == KEY_IS_UNINSTANTIATED) {
581 /* mark the key as being negatively instantiated */ 593 /* mark the key as being negatively instantiated */
582 atomic_inc(&key->user->nikeys); 594 atomic_inc(&key->user->nikeys);
583 key->reject_error = -error; 595 mark_key_instantiated(key, -error);
584 smp_wmb();
585 set_bit(KEY_FLAG_NEGATIVE, &key->flags);
586 set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
587 now = current_kernel_time(); 596 now = current_kernel_time();
588 key->expiry = now.tv_sec + timeout; 597 key->expiry = now.tv_sec + timeout;
589 key_schedule_gc(key->expiry + key_gc_delay); 598 key_schedule_gc(key->expiry + key_gc_delay);
@@ -752,8 +761,8 @@ static inline key_ref_t __key_update(key_ref_t key_ref,
752 761
753 ret = key->type->update(key, prep); 762 ret = key->type->update(key, prep);
754 if (ret == 0) 763 if (ret == 0)
755 /* updating a negative key instantiates it */ 764 /* Updating a negative key positively instantiates it */
756 clear_bit(KEY_FLAG_NEGATIVE, &key->flags); 765 mark_key_instantiated(key, 0);
757 766
758 up_write(&key->sem); 767 up_write(&key->sem);
759 768
@@ -936,6 +945,16 @@ error:
936 */ 945 */
937 __key_link_end(keyring, &index_key, edit); 946 __key_link_end(keyring, &index_key, edit);
938 947
948 key = key_ref_to_ptr(key_ref);
949 if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
950 ret = wait_for_key_construction(key, true);
951 if (ret < 0) {
952 key_ref_put(key_ref);
953 key_ref = ERR_PTR(ret);
954 goto error_free_prep;
955 }
956 }
957
939 key_ref = __key_update(key_ref, &prep); 958 key_ref = __key_update(key_ref, &prep);
940 goto error_free_prep; 959 goto error_free_prep;
941} 960}
@@ -986,8 +1005,8 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
986 1005
987 ret = key->type->update(key, &prep); 1006 ret = key->type->update(key, &prep);
988 if (ret == 0) 1007 if (ret == 0)
989 /* updating a negative key instantiates it */ 1008 /* Updating a negative key positively instantiates it */
990 clear_bit(KEY_FLAG_NEGATIVE, &key->flags); 1009 mark_key_instantiated(key, 0);
991 1010
992 up_write(&key->sem); 1011 up_write(&key->sem);
993 1012
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 365ff85d7e27..76d22f726ae4 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -766,10 +766,9 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
766 766
767 key = key_ref_to_ptr(key_ref); 767 key = key_ref_to_ptr(key_ref);
768 768
769 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) { 769 ret = key_read_state(key);
770 ret = -ENOKEY; 770 if (ret < 0)
771 goto error2; 771 goto error2; /* Negatively instantiated */
772 }
773 772
774 /* see if we can read it directly */ 773 /* see if we can read it directly */
775 ret = key_permission(key_ref, KEY_NEED_READ); 774 ret = key_permission(key_ref, KEY_NEED_READ);
@@ -901,7 +900,7 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
901 atomic_dec(&key->user->nkeys); 900 atomic_dec(&key->user->nkeys);
902 atomic_inc(&newowner->nkeys); 901 atomic_inc(&newowner->nkeys);
903 902
904 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { 903 if (key->state != KEY_IS_UNINSTANTIATED) {
905 atomic_dec(&key->user->nikeys); 904 atomic_dec(&key->user->nikeys);
906 atomic_inc(&newowner->nikeys); 905 atomic_inc(&newowner->nikeys);
907 } 906 }
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 4fa82a8a9c0e..a7e51f793867 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -414,7 +414,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
414 else 414 else
415 seq_puts(m, "[anon]"); 415 seq_puts(m, "[anon]");
416 416
417 if (key_is_instantiated(keyring)) { 417 if (key_is_positive(keyring)) {
418 if (keyring->keys.nr_leaves_on_tree != 0) 418 if (keyring->keys.nr_leaves_on_tree != 0)
419 seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree); 419 seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
420 else 420 else
@@ -553,7 +553,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
553{ 553{
554 struct keyring_search_context *ctx = iterator_data; 554 struct keyring_search_context *ctx = iterator_data;
555 const struct key *key = keyring_ptr_to_key(object); 555 const struct key *key = keyring_ptr_to_key(object);
556 unsigned long kflags = key->flags; 556 unsigned long kflags = READ_ONCE(key->flags);
557 short state = READ_ONCE(key->state);
557 558
558 kenter("{%d}", key->serial); 559 kenter("{%d}", key->serial);
559 560
@@ -565,6 +566,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
565 566
566 /* skip invalidated, revoked and expired keys */ 567 /* skip invalidated, revoked and expired keys */
567 if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { 568 if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
569 time_t expiry = READ_ONCE(key->expiry);
570
568 if (kflags & ((1 << KEY_FLAG_INVALIDATED) | 571 if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
569 (1 << KEY_FLAG_REVOKED))) { 572 (1 << KEY_FLAG_REVOKED))) {
570 ctx->result = ERR_PTR(-EKEYREVOKED); 573 ctx->result = ERR_PTR(-EKEYREVOKED);
@@ -572,7 +575,7 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
572 goto skipped; 575 goto skipped;
573 } 576 }
574 577
575 if (key->expiry && ctx->now.tv_sec >= key->expiry) { 578 if (expiry && ctx->now.tv_sec >= expiry) {
576 if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED)) 579 if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
577 ctx->result = ERR_PTR(-EKEYEXPIRED); 580 ctx->result = ERR_PTR(-EKEYEXPIRED);
578 kleave(" = %d [expire]", ctx->skipped_ret); 581 kleave(" = %d [expire]", ctx->skipped_ret);
@@ -597,9 +600,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
597 600
598 if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { 601 if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
599 /* we set a different error code if we pass a negative key */ 602 /* we set a different error code if we pass a negative key */
600 if (kflags & (1 << KEY_FLAG_NEGATIVE)) { 603 if (state < 0) {
601 smp_rmb(); 604 ctx->result = ERR_PTR(state);
602 ctx->result = ERR_PTR(key->reject_error);
603 kleave(" = %d [neg]", ctx->skipped_ret); 605 kleave(" = %d [neg]", ctx->skipped_ret);
604 goto skipped; 606 goto skipped;
605 } 607 }
diff --git a/security/keys/permission.c b/security/keys/permission.c
index 732cc0beffdf..a72b4dd70c8a 100644
--- a/security/keys/permission.c
+++ b/security/keys/permission.c
@@ -88,7 +88,8 @@ EXPORT_SYMBOL(key_task_permission);
88 */ 88 */
89int key_validate(const struct key *key) 89int key_validate(const struct key *key)
90{ 90{
91 unsigned long flags = key->flags; 91 unsigned long flags = READ_ONCE(key->flags);
92 time_t expiry = READ_ONCE(key->expiry);
92 93
93 if (flags & (1 << KEY_FLAG_INVALIDATED)) 94 if (flags & (1 << KEY_FLAG_INVALIDATED))
94 return -ENOKEY; 95 return -ENOKEY;
@@ -99,9 +100,9 @@ int key_validate(const struct key *key)
99 return -EKEYREVOKED; 100 return -EKEYREVOKED;
100 101
101 /* check it hasn't expired */ 102 /* check it hasn't expired */
102 if (key->expiry) { 103 if (expiry) {
103 struct timespec now = current_kernel_time(); 104 struct timespec now = current_kernel_time();
104 if (now.tv_sec >= key->expiry) 105 if (now.tv_sec >= expiry)
105 return -EKEYEXPIRED; 106 return -EKEYEXPIRED;
106 } 107 }
107 108
diff --git a/security/keys/proc.c b/security/keys/proc.c
index de834309d100..6d1fcbba1e09 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -179,9 +179,12 @@ static int proc_keys_show(struct seq_file *m, void *v)
179 struct rb_node *_p = v; 179 struct rb_node *_p = v;
180 struct key *key = rb_entry(_p, struct key, serial_node); 180 struct key *key = rb_entry(_p, struct key, serial_node);
181 struct timespec now; 181 struct timespec now;
182 time_t expiry;
182 unsigned long timo; 183 unsigned long timo;
184 unsigned long flags;
183 key_ref_t key_ref, skey_ref; 185 key_ref_t key_ref, skey_ref;
184 char xbuf[16]; 186 char xbuf[16];
187 short state;
185 int rc; 188 int rc;
186 189
187 struct keyring_search_context ctx = { 190 struct keyring_search_context ctx = {
@@ -217,12 +220,13 @@ static int proc_keys_show(struct seq_file *m, void *v)
217 rcu_read_lock(); 220 rcu_read_lock();
218 221
219 /* come up with a suitable timeout value */ 222 /* come up with a suitable timeout value */
220 if (key->expiry == 0) { 223 expiry = READ_ONCE(key->expiry);
224 if (expiry == 0) {
221 memcpy(xbuf, "perm", 5); 225 memcpy(xbuf, "perm", 5);
222 } else if (now.tv_sec >= key->expiry) { 226 } else if (now.tv_sec >= expiry) {
223 memcpy(xbuf, "expd", 5); 227 memcpy(xbuf, "expd", 5);
224 } else { 228 } else {
225 timo = key->expiry - now.tv_sec; 229 timo = expiry - now.tv_sec;
226 230
227 if (timo < 60) 231 if (timo < 60)
228 sprintf(xbuf, "%lus", timo); 232 sprintf(xbuf, "%lus", timo);
@@ -236,18 +240,21 @@ static int proc_keys_show(struct seq_file *m, void *v)
236 sprintf(xbuf, "%luw", timo / (60*60*24*7)); 240 sprintf(xbuf, "%luw", timo / (60*60*24*7));
237 } 241 }
238 242
239#define showflag(KEY, LETTER, FLAG) \ 243 state = key_read_state(key);
240 (test_bit(FLAG, &(KEY)->flags) ? LETTER : '-')
241 244
245#define showflag(FLAGS, LETTER, FLAG) \
246 ((FLAGS & (1 << FLAG)) ? LETTER : '-')
247
248 flags = READ_ONCE(key->flags);
242 seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ", 249 seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
243 key->serial, 250 key->serial,
244 showflag(key, 'I', KEY_FLAG_INSTANTIATED), 251 state != KEY_IS_UNINSTANTIATED ? 'I' : '-',
245 showflag(key, 'R', KEY_FLAG_REVOKED), 252 showflag(flags, 'R', KEY_FLAG_REVOKED),
246 showflag(key, 'D', KEY_FLAG_DEAD), 253 showflag(flags, 'D', KEY_FLAG_DEAD),
247 showflag(key, 'Q', KEY_FLAG_IN_QUOTA), 254 showflag(flags, 'Q', KEY_FLAG_IN_QUOTA),
248 showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT), 255 showflag(flags, 'U', KEY_FLAG_USER_CONSTRUCT),
249 showflag(key, 'N', KEY_FLAG_NEGATIVE), 256 state < 0 ? 'N' : '-',
250 showflag(key, 'i', KEY_FLAG_INVALIDATED), 257 showflag(flags, 'i', KEY_FLAG_INVALIDATED),
251 refcount_read(&key->usage), 258 refcount_read(&key->usage),
252 xbuf, 259 xbuf,
253 key->perm, 260 key->perm,
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 293d3598153b..740affd65ee9 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -730,7 +730,7 @@ try_again:
730 730
731 ret = -EIO; 731 ret = -EIO;
732 if (!(lflags & KEY_LOOKUP_PARTIAL) && 732 if (!(lflags & KEY_LOOKUP_PARTIAL) &&
733 !test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) 733 key_read_state(key) == KEY_IS_UNINSTANTIATED)
734 goto invalid_key; 734 goto invalid_key;
735 735
736 /* check the permissions */ 736 /* check the permissions */
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 63e63a42db3c..e8036cd0ad54 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -595,10 +595,9 @@ int wait_for_key_construction(struct key *key, bool intr)
595 intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); 595 intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
596 if (ret) 596 if (ret)
597 return -ERESTARTSYS; 597 return -ERESTARTSYS;
598 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) { 598 ret = key_read_state(key);
599 smp_rmb(); 599 if (ret < 0)
600 return key->reject_error; 600 return ret;
601 }
602 return key_validate(key); 601 return key_validate(key);
603} 602}
604EXPORT_SYMBOL(wait_for_key_construction); 603EXPORT_SYMBOL(wait_for_key_construction);
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 6ebf1af8fce9..424e1d90412e 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -73,7 +73,7 @@ static void request_key_auth_describe(const struct key *key,
73 73
74 seq_puts(m, "key:"); 74 seq_puts(m, "key:");
75 seq_puts(m, key->description); 75 seq_puts(m, key->description);
76 if (key_is_instantiated(key)) 76 if (key_is_positive(key))
77 seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len); 77 seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
78} 78}
79 79
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index ddfaebf60fc8..bd85315cbfeb 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1066,7 +1066,7 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
1066 char *datablob; 1066 char *datablob;
1067 int ret = 0; 1067 int ret = 0;
1068 1068
1069 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) 1069 if (key_is_negative(key))
1070 return -ENOKEY; 1070 return -ENOKEY;
1071 p = key->payload.data[0]; 1071 p = key->payload.data[0];
1072 if (!p->migratable) 1072 if (!p->migratable)
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 3d8c68eba516..9f558bedba23 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -114,7 +114,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
114 114
115 /* attach the new data, displacing the old */ 115 /* attach the new data, displacing the old */
116 key->expiry = prep->expiry; 116 key->expiry = prep->expiry;
117 if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags)) 117 if (key_is_positive(key))
118 zap = dereference_key_locked(key); 118 zap = dereference_key_locked(key);
119 rcu_assign_keypointer(key, prep->payload.data[0]); 119 rcu_assign_keypointer(key, prep->payload.data[0]);
120 prep->payload.data[0] = NULL; 120 prep->payload.data[0] = NULL;
@@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(user_destroy);
162void user_describe(const struct key *key, struct seq_file *m) 162void user_describe(const struct key *key, struct seq_file *m)
163{ 163{
164 seq_puts(m, key->description); 164 seq_puts(m, key->description);
165 if (key_is_instantiated(key)) 165 if (key_is_positive(key))
166 seq_printf(m, ": %u", key->datalen); 166 seq_printf(m, ": %u", key->datalen);
167} 167}
168 168
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index ea2d0ae85bd3..6c9cba2166d9 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1259,6 +1259,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
1259 struct snd_seq_port_info *info = arg; 1259 struct snd_seq_port_info *info = arg;
1260 struct snd_seq_client_port *port; 1260 struct snd_seq_client_port *port;
1261 struct snd_seq_port_callback *callback; 1261 struct snd_seq_port_callback *callback;
1262 int port_idx;
1262 1263
1263 /* it is not allowed to create the port for an another client */ 1264 /* it is not allowed to create the port for an another client */
1264 if (info->addr.client != client->number) 1265 if (info->addr.client != client->number)
@@ -1269,7 +1270,9 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
1269 return -ENOMEM; 1270 return -ENOMEM;
1270 1271
1271 if (client->type == USER_CLIENT && info->kernel) { 1272 if (client->type == USER_CLIENT && info->kernel) {
1272 snd_seq_delete_port(client, port->addr.port); 1273 port_idx = port->addr.port;
1274 snd_seq_port_unlock(port);
1275 snd_seq_delete_port(client, port_idx);
1273 return -EINVAL; 1276 return -EINVAL;
1274 } 1277 }
1275 if (client->type == KERNEL_CLIENT) { 1278 if (client->type == KERNEL_CLIENT) {
@@ -1290,6 +1293,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
1290 1293
1291 snd_seq_set_port_info(port, info); 1294 snd_seq_set_port_info(port, info);
1292 snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port); 1295 snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
1296 snd_seq_port_unlock(port);
1293 1297
1294 return 0; 1298 return 0;
1295} 1299}
diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
index 0ff7926a5a69..cda64b489e42 100644
--- a/sound/core/seq/seq_lock.c
+++ b/sound/core/seq/seq_lock.c
@@ -23,8 +23,6 @@
23#include <sound/core.h> 23#include <sound/core.h>
24#include "seq_lock.h" 24#include "seq_lock.h"
25 25
26#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
27
28/* wait until all locks are released */ 26/* wait until all locks are released */
29void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line) 27void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
30{ 28{
@@ -41,5 +39,3 @@ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
41 } 39 }
42} 40}
43EXPORT_SYMBOL(snd_use_lock_sync_helper); 41EXPORT_SYMBOL(snd_use_lock_sync_helper);
44
45#endif
diff --git a/sound/core/seq/seq_lock.h b/sound/core/seq/seq_lock.h
index 54044bc2c9ef..ac38031c370e 100644
--- a/sound/core/seq/seq_lock.h
+++ b/sound/core/seq/seq_lock.h
@@ -3,8 +3,6 @@
3 3
4#include <linux/sched.h> 4#include <linux/sched.h>
5 5
6#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
7
8typedef atomic_t snd_use_lock_t; 6typedef atomic_t snd_use_lock_t;
9 7
10/* initialize lock */ 8/* initialize lock */
@@ -20,14 +18,4 @@ typedef atomic_t snd_use_lock_t;
20void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line); 18void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line);
21#define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__) 19#define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__)
22 20
23#else /* SMP || CONFIG_SND_DEBUG */
24
25typedef spinlock_t snd_use_lock_t; /* dummy */
26#define snd_use_lock_init(lockp) /**/
27#define snd_use_lock_use(lockp) /**/
28#define snd_use_lock_free(lockp) /**/
29#define snd_use_lock_sync(lockp) /**/
30
31#endif /* SMP || CONFIG_SND_DEBUG */
32
33#endif /* __SND_SEQ_LOCK_H */ 21#endif /* __SND_SEQ_LOCK_H */
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index 0a7020c82bfc..d21ece9f8d73 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -122,7 +122,9 @@ static void port_subs_info_init(struct snd_seq_port_subs_info *grp)
122} 122}
123 123
124 124
125/* create a port, port number is returned (-1 on failure) */ 125/* create a port, port number is returned (-1 on failure);
126 * the caller needs to unref the port via snd_seq_port_unlock() appropriately
127 */
126struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client, 128struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
127 int port) 129 int port)
128{ 130{
@@ -151,6 +153,7 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
151 snd_use_lock_init(&new_port->use_lock); 153 snd_use_lock_init(&new_port->use_lock);
152 port_subs_info_init(&new_port->c_src); 154 port_subs_info_init(&new_port->c_src);
153 port_subs_info_init(&new_port->c_dest); 155 port_subs_info_init(&new_port->c_dest);
156 snd_use_lock_use(&new_port->use_lock);
154 157
155 num = port >= 0 ? port : 0; 158 num = port >= 0 ? port : 0;
156 mutex_lock(&client->ports_mutex); 159 mutex_lock(&client->ports_mutex);
@@ -165,9 +168,9 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
165 list_add_tail(&new_port->list, &p->list); 168 list_add_tail(&new_port->list, &p->list);
166 client->num_ports++; 169 client->num_ports++;
167 new_port->addr.port = num; /* store the port number in the port */ 170 new_port->addr.port = num; /* store the port number in the port */
171 sprintf(new_port->name, "port-%d", num);
168 write_unlock_irqrestore(&client->ports_lock, flags); 172 write_unlock_irqrestore(&client->ports_lock, flags);
169 mutex_unlock(&client->ports_mutex); 173 mutex_unlock(&client->ports_mutex);
170 sprintf(new_port->name, "port-%d", num);
171 174
172 return new_port; 175 return new_port;
173} 176}
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 8d93a4021c78..f48a4cd24ffc 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -77,13 +77,17 @@ static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
77 * decode input event and put to read buffer of each opened file 77 * decode input event and put to read buffer of each opened file
78 */ 78 */
79static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev, 79static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
80 struct snd_seq_event *ev) 80 struct snd_seq_event *ev,
81 bool atomic)
81{ 82{
82 struct snd_virmidi *vmidi; 83 struct snd_virmidi *vmidi;
83 unsigned char msg[4]; 84 unsigned char msg[4];
84 int len; 85 int len;
85 86
86 read_lock(&rdev->filelist_lock); 87 if (atomic)
88 read_lock(&rdev->filelist_lock);
89 else
90 down_read(&rdev->filelist_sem);
87 list_for_each_entry(vmidi, &rdev->filelist, list) { 91 list_for_each_entry(vmidi, &rdev->filelist, list) {
88 if (!vmidi->trigger) 92 if (!vmidi->trigger)
89 continue; 93 continue;
@@ -97,7 +101,10 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
97 snd_rawmidi_receive(vmidi->substream, msg, len); 101 snd_rawmidi_receive(vmidi->substream, msg, len);
98 } 102 }
99 } 103 }
100 read_unlock(&rdev->filelist_lock); 104 if (atomic)
105 read_unlock(&rdev->filelist_lock);
106 else
107 up_read(&rdev->filelist_sem);
101 108
102 return 0; 109 return 0;
103} 110}
@@ -115,7 +122,7 @@ int snd_virmidi_receive(struct snd_rawmidi *rmidi, struct snd_seq_event *ev)
115 struct snd_virmidi_dev *rdev; 122 struct snd_virmidi_dev *rdev;
116 123
117 rdev = rmidi->private_data; 124 rdev = rmidi->private_data;
118 return snd_virmidi_dev_receive_event(rdev, ev); 125 return snd_virmidi_dev_receive_event(rdev, ev, true);
119} 126}
120#endif /* 0 */ 127#endif /* 0 */
121 128
@@ -130,7 +137,7 @@ static int snd_virmidi_event_input(struct snd_seq_event *ev, int direct,
130 rdev = private_data; 137 rdev = private_data;
131 if (!(rdev->flags & SNDRV_VIRMIDI_USE)) 138 if (!(rdev->flags & SNDRV_VIRMIDI_USE))
132 return 0; /* ignored */ 139 return 0; /* ignored */
133 return snd_virmidi_dev_receive_event(rdev, ev); 140 return snd_virmidi_dev_receive_event(rdev, ev, atomic);
134} 141}
135 142
136/* 143/*
@@ -209,7 +216,6 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
209 struct snd_virmidi_dev *rdev = substream->rmidi->private_data; 216 struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
210 struct snd_rawmidi_runtime *runtime = substream->runtime; 217 struct snd_rawmidi_runtime *runtime = substream->runtime;
211 struct snd_virmidi *vmidi; 218 struct snd_virmidi *vmidi;
212 unsigned long flags;
213 219
214 vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL); 220 vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL);
215 if (vmidi == NULL) 221 if (vmidi == NULL)
@@ -223,9 +229,11 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
223 vmidi->client = rdev->client; 229 vmidi->client = rdev->client;
224 vmidi->port = rdev->port; 230 vmidi->port = rdev->port;
225 runtime->private_data = vmidi; 231 runtime->private_data = vmidi;
226 write_lock_irqsave(&rdev->filelist_lock, flags); 232 down_write(&rdev->filelist_sem);
233 write_lock_irq(&rdev->filelist_lock);
227 list_add_tail(&vmidi->list, &rdev->filelist); 234 list_add_tail(&vmidi->list, &rdev->filelist);
228 write_unlock_irqrestore(&rdev->filelist_lock, flags); 235 write_unlock_irq(&rdev->filelist_lock);
236 up_write(&rdev->filelist_sem);
229 vmidi->rdev = rdev; 237 vmidi->rdev = rdev;
230 return 0; 238 return 0;
231} 239}
@@ -264,9 +272,11 @@ static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
264 struct snd_virmidi_dev *rdev = substream->rmidi->private_data; 272 struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
265 struct snd_virmidi *vmidi = substream->runtime->private_data; 273 struct snd_virmidi *vmidi = substream->runtime->private_data;
266 274
275 down_write(&rdev->filelist_sem);
267 write_lock_irq(&rdev->filelist_lock); 276 write_lock_irq(&rdev->filelist_lock);
268 list_del(&vmidi->list); 277 list_del(&vmidi->list);
269 write_unlock_irq(&rdev->filelist_lock); 278 write_unlock_irq(&rdev->filelist_lock);
279 up_write(&rdev->filelist_sem);
270 snd_midi_event_free(vmidi->parser); 280 snd_midi_event_free(vmidi->parser);
271 substream->runtime->private_data = NULL; 281 substream->runtime->private_data = NULL;
272 kfree(vmidi); 282 kfree(vmidi);
@@ -520,6 +530,7 @@ int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmi
520 rdev->rmidi = rmidi; 530 rdev->rmidi = rmidi;
521 rdev->device = device; 531 rdev->device = device;
522 rdev->client = -1; 532 rdev->client = -1;
533 init_rwsem(&rdev->filelist_sem);
523 rwlock_init(&rdev->filelist_lock); 534 rwlock_init(&rdev->filelist_lock);
524 INIT_LIST_HEAD(&rdev->filelist); 535 INIT_LIST_HEAD(&rdev->filelist);
525 rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH; 536 rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH;
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 6c58e6f73a01..e43af18d4383 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -484,3 +484,34 @@ void snd_ctl_sync_vmaster(struct snd_kcontrol *kcontrol, bool hook_only)
484 master->hook(master->hook_private_data, master->val); 484 master->hook(master->hook_private_data, master->val);
485} 485}
486EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster); 486EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster);
487
488/**
489 * snd_ctl_apply_vmaster_slaves - Apply function to each vmaster slave
490 * @kctl: vmaster kctl element
491 * @func: function to apply
492 * @arg: optional function argument
493 *
494 * Apply the function @func to each slave kctl of the given vmaster kctl.
495 * Returns 0 if successful, or a negative error code.
496 */
497int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
498 int (*func)(struct snd_kcontrol *, void *),
499 void *arg)
500{
501 struct link_master *master;
502 struct link_slave *slave;
503 int err;
504
505 master = snd_kcontrol_chip(kctl);
506 err = master_init(master);
507 if (err < 0)
508 return err;
509 list_for_each_entry(slave, &master->slaves, list) {
510 err = func(&slave->slave, arg);
511 if (err < 0)
512 return err;
513 }
514
515 return 0;
516}
517EXPORT_SYMBOL_GPL(snd_ctl_apply_vmaster_slaves);
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 978dc1801b3a..f6d2985b2520 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -284,6 +284,11 @@ int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus)
284 dev_dbg(bus->dev, "HDA capability ID: 0x%x\n", 284 dev_dbg(bus->dev, "HDA capability ID: 0x%x\n",
285 (cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF); 285 (cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF);
286 286
287 if (cur_cap == -1) {
288 dev_dbg(bus->dev, "Invalid capability reg read\n");
289 break;
290 }
291
287 switch ((cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF) { 292 switch ((cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF) {
288 case AZX_ML_CAP_ID: 293 case AZX_ML_CAP_ID:
289 dev_dbg(bus->dev, "Found ML capability\n"); 294 dev_dbg(bus->dev, "Found ML capability\n");
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 3db26c451837..a0989d231fd0 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1803,36 +1803,6 @@ static int check_slave_present(struct hda_codec *codec,
1803 return 1; 1803 return 1;
1804} 1804}
1805 1805
1806/* guess the value corresponding to 0dB */
1807static int get_kctl_0dB_offset(struct hda_codec *codec,
1808 struct snd_kcontrol *kctl, int *step_to_check)
1809{
1810 int _tlv[4];
1811 const int *tlv = NULL;
1812 int val = -1;
1813
1814 if ((kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
1815 kctl->tlv.c == snd_hda_mixer_amp_tlv) {
1816 get_ctl_amp_tlv(kctl, _tlv);
1817 tlv = _tlv;
1818 } else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
1819 tlv = kctl->tlv.p;
1820 if (tlv && tlv[0] == SNDRV_CTL_TLVT_DB_SCALE) {
1821 int step = tlv[3];
1822 step &= ~TLV_DB_SCALE_MUTE;
1823 if (!step)
1824 return -1;
1825 if (*step_to_check && *step_to_check != step) {
1826 codec_err(codec, "Mismatching dB step for vmaster slave (%d!=%d)\n",
1827 *step_to_check, step);
1828 return -1;
1829 }
1830 *step_to_check = step;
1831 val = -tlv[2] / step;
1832 }
1833 return val;
1834}
1835
1836/* call kctl->put with the given value(s) */ 1806/* call kctl->put with the given value(s) */
1837static int put_kctl_with_value(struct snd_kcontrol *kctl, int val) 1807static int put_kctl_with_value(struct snd_kcontrol *kctl, int val)
1838{ 1808{
@@ -1847,19 +1817,58 @@ static int put_kctl_with_value(struct snd_kcontrol *kctl, int val)
1847 return 0; 1817 return 0;
1848} 1818}
1849 1819
1850/* initialize the slave volume with 0dB */ 1820struct slave_init_arg {
1851static int init_slave_0dB(struct hda_codec *codec, 1821 struct hda_codec *codec;
1852 void *data, struct snd_kcontrol *slave) 1822 int step;
1823};
1824
1825/* initialize the slave volume with 0dB via snd_ctl_apply_vmaster_slaves() */
1826static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
1853{ 1827{
1854 int offset = get_kctl_0dB_offset(codec, slave, data); 1828 struct slave_init_arg *arg = _arg;
1855 if (offset > 0) 1829 int _tlv[4];
1856 put_kctl_with_value(slave, offset); 1830 const int *tlv = NULL;
1831 int step;
1832 int val;
1833
1834 if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
1835 if (kctl->tlv.c != snd_hda_mixer_amp_tlv) {
1836 codec_err(arg->codec,
1837 "Unexpected TLV callback for slave %s:%d\n",
1838 kctl->id.name, kctl->id.index);
1839 return 0; /* ignore */
1840 }
1841 get_ctl_amp_tlv(kctl, _tlv);
1842 tlv = _tlv;
1843 } else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
1844 tlv = kctl->tlv.p;
1845
1846 if (!tlv || tlv[0] != SNDRV_CTL_TLVT_DB_SCALE)
1847 return 0;
1848
1849 step = tlv[3];
1850 step &= ~TLV_DB_SCALE_MUTE;
1851 if (!step)
1852 return 0;
1853 if (arg->step && arg->step != step) {
1854 codec_err(arg->codec,
1855 "Mismatching dB step for vmaster slave (%d!=%d)\n",
1856 arg->step, step);
1857 return 0;
1858 }
1859
1860 arg->step = step;
1861 val = -tlv[2] / step;
1862 if (val > 0) {
1863 put_kctl_with_value(kctl, val);
1864 return val;
1865 }
1866
1857 return 0; 1867 return 0;
1858} 1868}
1859 1869
1860/* unmute the slave */ 1870/* unmute the slave via snd_ctl_apply_vmaster_slaves() */
1861static int init_slave_unmute(struct hda_codec *codec, 1871static int init_slave_unmute(struct snd_kcontrol *slave, void *_arg)
1862 void *data, struct snd_kcontrol *slave)
1863{ 1872{
1864 return put_kctl_with_value(slave, 1); 1873 return put_kctl_with_value(slave, 1);
1865} 1874}
@@ -1919,9 +1928,13 @@ int __snd_hda_add_vmaster(struct hda_codec *codec, char *name,
1919 /* init with master mute & zero volume */ 1928 /* init with master mute & zero volume */
1920 put_kctl_with_value(kctl, 0); 1929 put_kctl_with_value(kctl, 0);
1921 if (init_slave_vol) { 1930 if (init_slave_vol) {
1922 int step = 0; 1931 struct slave_init_arg arg = {
1923 map_slaves(codec, slaves, suffix, 1932 .codec = codec,
1924 tlv ? init_slave_0dB : init_slave_unmute, &step); 1933 .step = 0,
1934 };
1935 snd_ctl_apply_vmaster_slaves(kctl,
1936 tlv ? init_slave_0dB : init_slave_unmute,
1937 &arg);
1925 } 1938 }
1926 1939
1927 if (ctl_ret) 1940 if (ctl_ret)
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 0fb6b1b79261..d8409d9ae55b 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -469,10 +469,12 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
469 469
470 err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0); 470 err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0);
471 if (err) 471 if (err)
472 return err; 472 goto err_kill_urb;
473 473
474 if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) 474 if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) {
475 return -ENODEV; 475 err = -ENODEV;
476 goto err_kill_urb;
477 }
476 478
477 usb_string(usb_dev, usb_dev->descriptor.iManufacturer, 479 usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
478 cdev->vendor_name, CAIAQ_USB_STR_LEN); 480 cdev->vendor_name, CAIAQ_USB_STR_LEN);
@@ -507,6 +509,10 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
507 509
508 setup_card(cdev); 510 setup_card(cdev);
509 return 0; 511 return 0;
512
513 err_kill_urb:
514 usb_kill_urb(&cdev->ep1_in_urb);
515 return err;
510} 516}
511 517
512static int snd_probe(struct usb_interface *intf, 518static int snd_probe(struct usb_interface *intf,
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 0ff5a7d2e19f..c8f723c3a033 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -779,9 +779,10 @@ int line6_probe(struct usb_interface *interface,
779 return 0; 779 return 0;
780 780
781 error: 781 error:
782 if (line6->disconnect) 782 /* we can call disconnect callback here because no close-sync is
783 line6->disconnect(line6); 783 * needed yet at this point
784 snd_card_free(card); 784 */
785 line6_disconnect(interface);
785 return ret; 786 return ret;
786} 787}
787EXPORT_SYMBOL_GPL(line6_probe); 788EXPORT_SYMBOL_GPL(line6_probe);
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 956f847a96e4..451007c27743 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -301,7 +301,8 @@ static void podhd_disconnect(struct usb_line6 *line6)
301 301
302 intf = usb_ifnum_to_if(line6->usbdev, 302 intf = usb_ifnum_to_if(line6->usbdev,
303 pod->line6.properties->ctrl_if); 303 pod->line6.properties->ctrl_if);
304 usb_driver_release_interface(&podhd_driver, intf); 304 if (intf)
305 usb_driver_release_interface(&podhd_driver, intf);
305 } 306 }
306} 307}
307 308
@@ -317,6 +318,9 @@ static int podhd_init(struct usb_line6 *line6,
317 318
318 line6->disconnect = podhd_disconnect; 319 line6->disconnect = podhd_disconnect;
319 320
321 init_timer(&pod->startup_timer);
322 INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
323
320 if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) { 324 if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
321 /* claim the data interface */ 325 /* claim the data interface */
322 intf = usb_ifnum_to_if(line6->usbdev, 326 intf = usb_ifnum_to_if(line6->usbdev,
@@ -358,8 +362,6 @@ static int podhd_init(struct usb_line6 *line6,
358 } 362 }
359 363
360 /* init device and delay registering */ 364 /* init device and delay registering */
361 init_timer(&pod->startup_timer);
362 INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
363 podhd_startup(pod); 365 podhd_startup(pod);
364 return 0; 366 return 0;
365} 367}
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 9732edf77f86..91bc8f18791e 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -2234,6 +2234,9 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
2234 2234
2235static void snd_usb_mixer_free(struct usb_mixer_interface *mixer) 2235static void snd_usb_mixer_free(struct usb_mixer_interface *mixer)
2236{ 2236{
2237 /* kill pending URBs */
2238 snd_usb_mixer_disconnect(mixer);
2239
2237 kfree(mixer->id_elems); 2240 kfree(mixer->id_elems);
2238 if (mixer->urb) { 2241 if (mixer->urb) {
2239 kfree(mixer->urb->transfer_buffer); 2242 kfree(mixer->urb->transfer_buffer);
@@ -2584,8 +2587,13 @@ _error:
2584 2587
2585void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer) 2588void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer)
2586{ 2589{
2587 usb_kill_urb(mixer->urb); 2590 if (mixer->disconnected)
2588 usb_kill_urb(mixer->rc_urb); 2591 return;
2592 if (mixer->urb)
2593 usb_kill_urb(mixer->urb);
2594 if (mixer->rc_urb)
2595 usb_kill_urb(mixer->rc_urb);
2596 mixer->disconnected = true;
2589} 2597}
2590 2598
2591#ifdef CONFIG_PM 2599#ifdef CONFIG_PM
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index 2b4b067646ab..545d99b09706 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -22,6 +22,8 @@ struct usb_mixer_interface {
22 struct urb *rc_urb; 22 struct urb *rc_urb;
23 struct usb_ctrlrequest *rc_setup_packet; 23 struct usb_ctrlrequest *rc_setup_packet;
24 u8 rc_buffer[6]; 24 u8 rc_buffer[6];
25
26 bool disconnected;
25}; 27};
26 28
27#define MAX_CHANNELS 16 /* max logical channels */ 29#define MAX_CHANNELS 16 /* max logical channels */
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index b8cb57aeec77..4f5f18f22974 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1138,6 +1138,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
1138 case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */ 1138 case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
1139 case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ 1139 case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
1140 case USB_ID(0x047F, 0xC022): /* Plantronics C310 */ 1140 case USB_ID(0x047F, 0xC022): /* Plantronics C310 */
1141 case USB_ID(0x047F, 0xC02F): /* Plantronics P610 */
1141 case USB_ID(0x047F, 0xC036): /* Plantronics C520-M */ 1142 case USB_ID(0x047F, 0xC036): /* Plantronics C520-M */
1142 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ 1143 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
1143 case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */ 1144 case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
@@ -1353,6 +1354,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1353 case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */ 1354 case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
1354 case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */ 1355 case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
1355 case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */ 1356 case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
1357 case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
1356 if (fp->altsetting == 2) 1358 if (fp->altsetting == 2)
1357 return SNDRV_PCM_FMTBIT_DSD_U32_BE; 1359 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
1358 break; 1360 break;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index fa93033dc521..850a5497dcc3 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -366,7 +366,7 @@ union bpf_attr {
366 * jump into another BPF program 366 * jump into another BPF program
367 * @ctx: context pointer passed to next program 367 * @ctx: context pointer passed to next program
368 * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY 368 * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
369 * @index: index inside array that selects specific program to run 369 * @index: 32-bit index inside array that selects specific program to run
370 * Return: 0 on success or negative error 370 * Return: 0 on success or negative error
371 * 371 *
372 * int bpf_clone_redirect(skb, ifindex, flags) 372 * int bpf_clone_redirect(skb, ifindex, flags)
@@ -623,9 +623,10 @@ union bpf_attr {
623 * @flags: reserved for future use 623 * @flags: reserved for future use
624 * Return: 0 on success or negative error code 624 * Return: 0 on success or negative error code
625 * 625 *
626 * int bpf_sk_redirect_map(map, key, flags) 626 * int bpf_sk_redirect_map(skb, map, key, flags)
627 * Redirect skb to a sock in map using key as a lookup key for the 627 * Redirect skb to a sock in map using key as a lookup key for the
628 * sock in map. 628 * sock in map.
629 * @skb: pointer to skb
629 * @map: pointer to sockmap 630 * @map: pointer to sockmap
630 * @key: key to lookup sock in map 631 * @key: key to lookup sock in map
631 * @flags: reserved for future use 632 * @flags: reserved for future use
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 3d4c3b5e1868..0c977b6e0f8b 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -586,7 +586,7 @@ static void print_sample_brstack(struct perf_sample *sample,
586 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt); 586 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
587 } 587 }
588 588
589 printf("0x%"PRIx64, from); 589 printf(" 0x%"PRIx64, from);
590 if (PRINT_FIELD(DSO)) { 590 if (PRINT_FIELD(DSO)) {
591 printf("("); 591 printf("(");
592 map__fprintf_dsoname(alf.map, stdout); 592 map__fprintf_dsoname(alf.map, stdout);
@@ -681,7 +681,7 @@ static void print_sample_brstackoff(struct perf_sample *sample,
681 if (alt.map && !alt.map->dso->adjust_symbols) 681 if (alt.map && !alt.map->dso->adjust_symbols)
682 to = map__map_ip(alt.map, to); 682 to = map__map_ip(alt.map, to);
683 683
684 printf("0x%"PRIx64, from); 684 printf(" 0x%"PRIx64, from);
685 if (PRINT_FIELD(DSO)) { 685 if (PRINT_FIELD(DSO)) {
686 printf("("); 686 printf("(");
687 map__fprintf_dsoname(alf.map, stdout); 687 map__fprintf_dsoname(alf.map, stdout);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index be09d77cade0..a971caf3759d 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -685,6 +685,8 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
685{ 685{
686 struct symbol *sym = node->sym; 686 struct symbol *sym = node->sym;
687 u64 left, right; 687 u64 left, right;
688 struct dso *left_dso = NULL;
689 struct dso *right_dso = NULL;
688 690
689 if (callchain_param.key == CCKEY_SRCLINE) { 691 if (callchain_param.key == CCKEY_SRCLINE) {
690 enum match_result match = match_chain_srcline(node, cnode); 692 enum match_result match = match_chain_srcline(node, cnode);
@@ -696,12 +698,14 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
696 if (cnode->ms.sym && sym && callchain_param.key == CCKEY_FUNCTION) { 698 if (cnode->ms.sym && sym && callchain_param.key == CCKEY_FUNCTION) {
697 left = cnode->ms.sym->start; 699 left = cnode->ms.sym->start;
698 right = sym->start; 700 right = sym->start;
701 left_dso = cnode->ms.map->dso;
702 right_dso = node->map->dso;
699 } else { 703 } else {
700 left = cnode->ip; 704 left = cnode->ip;
701 right = node->ip; 705 right = node->ip;
702 } 706 }
703 707
704 if (left == right) { 708 if (left == right && left_dso == right_dso) {
705 if (node->branch) { 709 if (node->branch) {
706 cnode->branch_count++; 710 cnode->branch_count++;
707 711
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index f6257fb4f08c..39b15968eab1 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -309,10 +309,11 @@ static char *get_config_name(struct list_head *head_terms)
309static struct perf_evsel * 309static struct perf_evsel *
310__add_event(struct list_head *list, int *idx, 310__add_event(struct list_head *list, int *idx,
311 struct perf_event_attr *attr, 311 struct perf_event_attr *attr,
312 char *name, struct cpu_map *cpus, 312 char *name, struct perf_pmu *pmu,
313 struct list_head *config_terms, bool auto_merge_stats) 313 struct list_head *config_terms, bool auto_merge_stats)
314{ 314{
315 struct perf_evsel *evsel; 315 struct perf_evsel *evsel;
316 struct cpu_map *cpus = pmu ? pmu->cpus : NULL;
316 317
317 event_attr_init(attr); 318 event_attr_init(attr);
318 319
@@ -323,7 +324,7 @@ __add_event(struct list_head *list, int *idx,
323 (*idx)++; 324 (*idx)++;
324 evsel->cpus = cpu_map__get(cpus); 325 evsel->cpus = cpu_map__get(cpus);
325 evsel->own_cpus = cpu_map__get(cpus); 326 evsel->own_cpus = cpu_map__get(cpus);
326 evsel->system_wide = !!cpus; 327 evsel->system_wide = pmu ? pmu->is_uncore : false;
327 evsel->auto_merge_stats = auto_merge_stats; 328 evsel->auto_merge_stats = auto_merge_stats;
328 329
329 if (name) 330 if (name)
@@ -1233,7 +1234,7 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state,
1233 1234
1234 if (!head_config) { 1235 if (!head_config) {
1235 attr.type = pmu->type; 1236 attr.type = pmu->type;
1236 evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu->cpus, NULL, auto_merge_stats); 1237 evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats);
1237 return evsel ? 0 : -ENOMEM; 1238 return evsel ? 0 : -ENOMEM;
1238 } 1239 }
1239 1240
@@ -1254,7 +1255,7 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state,
1254 return -EINVAL; 1255 return -EINVAL;
1255 1256
1256 evsel = __add_event(list, &parse_state->idx, &attr, 1257 evsel = __add_event(list, &parse_state->idx, &attr,
1257 get_config_name(head_config), pmu->cpus, 1258 get_config_name(head_config), pmu,
1258 &config_terms, auto_merge_stats); 1259 &config_terms, auto_merge_stats);
1259 if (evsel) { 1260 if (evsel) {
1260 evsel->unit = info.unit; 1261 evsel->unit = info.unit;
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index ac16a9db1fb5..1c4d7b4e4fb5 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -470,17 +470,36 @@ static void pmu_read_sysfs(void)
470 closedir(dir); 470 closedir(dir);
471} 471}
472 472
473static struct cpu_map *__pmu_cpumask(const char *path)
474{
475 FILE *file;
476 struct cpu_map *cpus;
477
478 file = fopen(path, "r");
479 if (!file)
480 return NULL;
481
482 cpus = cpu_map__read(file);
483 fclose(file);
484 return cpus;
485}
486
487/*
488 * Uncore PMUs have a "cpumask" file under sysfs. CPU PMUs (e.g. on arm/arm64)
489 * may have a "cpus" file.
490 */
491#define CPUS_TEMPLATE_UNCORE "%s/bus/event_source/devices/%s/cpumask"
492#define CPUS_TEMPLATE_CPU "%s/bus/event_source/devices/%s/cpus"
493
473static struct cpu_map *pmu_cpumask(const char *name) 494static struct cpu_map *pmu_cpumask(const char *name)
474{ 495{
475 struct stat st;
476 char path[PATH_MAX]; 496 char path[PATH_MAX];
477 FILE *file;
478 struct cpu_map *cpus; 497 struct cpu_map *cpus;
479 const char *sysfs = sysfs__mountpoint(); 498 const char *sysfs = sysfs__mountpoint();
480 const char *templates[] = { 499 const char *templates[] = {
481 "%s/bus/event_source/devices/%s/cpumask", 500 CPUS_TEMPLATE_UNCORE,
482 "%s/bus/event_source/devices/%s/cpus", 501 CPUS_TEMPLATE_CPU,
483 NULL 502 NULL
484 }; 503 };
485 const char **template; 504 const char **template;
486 505
@@ -489,20 +508,25 @@ static struct cpu_map *pmu_cpumask(const char *name)
489 508
490 for (template = templates; *template; template++) { 509 for (template = templates; *template; template++) {
491 snprintf(path, PATH_MAX, *template, sysfs, name); 510 snprintf(path, PATH_MAX, *template, sysfs, name);
492 if (stat(path, &st) == 0) 511 cpus = __pmu_cpumask(path);
493 break; 512 if (cpus)
513 return cpus;
494 } 514 }
495 515
496 if (!*template) 516 return NULL;
497 return NULL; 517}
498 518
499 file = fopen(path, "r"); 519static bool pmu_is_uncore(const char *name)
500 if (!file) 520{
501 return NULL; 521 char path[PATH_MAX];
522 struct cpu_map *cpus;
523 const char *sysfs = sysfs__mountpoint();
502 524
503 cpus = cpu_map__read(file); 525 snprintf(path, PATH_MAX, CPUS_TEMPLATE_UNCORE, sysfs, name);
504 fclose(file); 526 cpus = __pmu_cpumask(path);
505 return cpus; 527 cpu_map__put(cpus);
528
529 return !!cpus;
506} 530}
507 531
508/* 532/*
@@ -617,6 +641,8 @@ static struct perf_pmu *pmu_lookup(const char *name)
617 641
618 pmu->cpus = pmu_cpumask(name); 642 pmu->cpus = pmu_cpumask(name);
619 643
644 pmu->is_uncore = pmu_is_uncore(name);
645
620 INIT_LIST_HEAD(&pmu->format); 646 INIT_LIST_HEAD(&pmu->format);
621 INIT_LIST_HEAD(&pmu->aliases); 647 INIT_LIST_HEAD(&pmu->aliases);
622 list_splice(&format, &pmu->format); 648 list_splice(&format, &pmu->format);
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 389e9729331f..fe0de0502ce2 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -22,6 +22,7 @@ struct perf_pmu {
22 char *name; 22 char *name;
23 __u32 type; 23 __u32 type;
24 bool selectable; 24 bool selectable;
25 bool is_uncore;
25 struct perf_event_attr *default_config; 26 struct perf_event_attr *default_config;
26 struct cpu_map *cpus; 27 struct cpu_map *cpus;
27 struct list_head format; /* HEAD struct perf_pmu_format -> list */ 28 struct list_head format; /* HEAD struct perf_pmu_format -> list */
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 0dafba2c1e7d..bd9c6b31a504 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -92,7 +92,6 @@ unsigned int do_ring_perf_limit_reasons;
92unsigned int crystal_hz; 92unsigned int crystal_hz;
93unsigned long long tsc_hz; 93unsigned long long tsc_hz;
94int base_cpu; 94int base_cpu;
95int do_migrate;
96double discover_bclk(unsigned int family, unsigned int model); 95double discover_bclk(unsigned int family, unsigned int model);
97unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */ 96unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
98 /* IA32_HWP_REQUEST, IA32_HWP_STATUS */ 97 /* IA32_HWP_REQUEST, IA32_HWP_STATUS */
@@ -303,9 +302,6 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
303 302
304int cpu_migrate(int cpu) 303int cpu_migrate(int cpu)
305{ 304{
306 if (!do_migrate)
307 return 0;
308
309 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); 305 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
310 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set); 306 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
311 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) 307 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
@@ -5007,7 +5003,6 @@ void cmdline(int argc, char **argv)
5007 {"hide", required_argument, 0, 'H'}, // meh, -h taken by --help 5003 {"hide", required_argument, 0, 'H'}, // meh, -h taken by --help
5008 {"Joules", no_argument, 0, 'J'}, 5004 {"Joules", no_argument, 0, 'J'},
5009 {"list", no_argument, 0, 'l'}, 5005 {"list", no_argument, 0, 'l'},
5010 {"migrate", no_argument, 0, 'm'},
5011 {"out", required_argument, 0, 'o'}, 5006 {"out", required_argument, 0, 'o'},
5012 {"quiet", no_argument, 0, 'q'}, 5007 {"quiet", no_argument, 0, 'q'},
5013 {"show", required_argument, 0, 's'}, 5008 {"show", required_argument, 0, 's'},
@@ -5019,7 +5014,7 @@ void cmdline(int argc, char **argv)
5019 5014
5020 progname = argv[0]; 5015 progname = argv[0];
5021 5016
5022 while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:Jmo:qST:v", 5017 while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:qST:v",
5023 long_options, &option_index)) != -1) { 5018 long_options, &option_index)) != -1) {
5024 switch (opt) { 5019 switch (opt) {
5025 case 'a': 5020 case 'a':
@@ -5062,9 +5057,6 @@ void cmdline(int argc, char **argv)
5062 list_header_only++; 5057 list_header_only++;
5063 quiet++; 5058 quiet++;
5064 break; 5059 break;
5065 case 'm':
5066 do_migrate = 1;
5067 break;
5068 case 'o': 5060 case 'o':
5069 outf = fopen_or_die(optarg, "w"); 5061 outf = fopen_or_die(optarg, "w");
5070 break; 5062 break;
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index 609514f74482..abfa4c5c8527 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -70,7 +70,7 @@ static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
70static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval, 70static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval,
71 int optlen) = 71 int optlen) =
72 (void *) BPF_FUNC_getsockopt; 72 (void *) BPF_FUNC_getsockopt;
73static int (*bpf_sk_redirect_map)(void *map, int key, int flags) = 73static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
74 (void *) BPF_FUNC_sk_redirect_map; 74 (void *) BPF_FUNC_sk_redirect_map;
75static int (*bpf_sock_map_update)(void *map, void *key, void *value, 75static int (*bpf_sock_map_update)(void *map, void *key, void *value,
76 unsigned long long flags) = 76 unsigned long long flags) =
diff --git a/tools/testing/selftests/bpf/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/sockmap_verdict_prog.c
index 9b99bd10807d..2cd2d552938b 100644
--- a/tools/testing/selftests/bpf/sockmap_verdict_prog.c
+++ b/tools/testing/selftests/bpf/sockmap_verdict_prog.c
@@ -61,8 +61,8 @@ int bpf_prog2(struct __sk_buff *skb)
61 bpf_printk("verdict: data[0] = redir(%u:%u)\n", map, sk); 61 bpf_printk("verdict: data[0] = redir(%u:%u)\n", map, sk);
62 62
63 if (!map) 63 if (!map)
64 return bpf_sk_redirect_map(&sock_map_rx, sk, 0); 64 return bpf_sk_redirect_map(skb, &sock_map_rx, sk, 0);
65 return bpf_sk_redirect_map(&sock_map_tx, sk, 0); 65 return bpf_sk_redirect_map(skb, &sock_map_tx, sk, 0);
66} 66}
67 67
68char _license[] SEC("license") = "GPL"; 68char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 896f23cfe918..057da0cba517 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -466,7 +466,7 @@ static void test_sockmap(int tasks, void *data)
466 int one = 1, map_fd_rx, map_fd_tx, map_fd_break, s, sc, rc; 466 int one = 1, map_fd_rx, map_fd_tx, map_fd_break, s, sc, rc;
467 struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break; 467 struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break;
468 int ports[] = {50200, 50201, 50202, 50204}; 468 int ports[] = {50200, 50201, 50202, 50204};
469 int err, i, fd, sfd[6] = {0xdeadbeef}; 469 int err, i, fd, udp, sfd[6] = {0xdeadbeef};
470 u8 buf[20] = {0x0, 0x5, 0x3, 0x2, 0x1, 0x0}; 470 u8 buf[20] = {0x0, 0x5, 0x3, 0x2, 0x1, 0x0};
471 int parse_prog, verdict_prog; 471 int parse_prog, verdict_prog;
472 struct sockaddr_in addr; 472 struct sockaddr_in addr;
@@ -548,6 +548,16 @@ static void test_sockmap(int tasks, void *data)
548 goto out_sockmap; 548 goto out_sockmap;
549 } 549 }
550 550
551 /* Test update with unsupported UDP socket */
552 udp = socket(AF_INET, SOCK_DGRAM, 0);
553 i = 0;
554 err = bpf_map_update_elem(fd, &i, &udp, BPF_ANY);
555 if (!err) {
556 printf("Failed socket SOCK_DGRAM allowed '%i:%i'\n",
557 i, udp);
558 goto out_sockmap;
559 }
560
551 /* Test update without programs */ 561 /* Test update without programs */
552 for (i = 0; i < 6; i++) { 562 for (i = 0; i < 6; i++) {
553 err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY); 563 err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index cc91d0159f43..1b93941bdfea 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1130,15 +1130,27 @@ static struct bpf_test tests[] = {
1130 .errstr = "invalid bpf_context access", 1130 .errstr = "invalid bpf_context access",
1131 }, 1131 },
1132 { 1132 {
1133 "check skb->mark is writeable by SK_SKB", 1133 "invalid access of skb->mark for SK_SKB",
1134 .insns = {
1135 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1136 offsetof(struct __sk_buff, mark)),
1137 BPF_EXIT_INSN(),
1138 },
1139 .result = REJECT,
1140 .prog_type = BPF_PROG_TYPE_SK_SKB,
1141 .errstr = "invalid bpf_context access",
1142 },
1143 {
1144 "check skb->mark is not writeable by SK_SKB",
1134 .insns = { 1145 .insns = {
1135 BPF_MOV64_IMM(BPF_REG_0, 0), 1146 BPF_MOV64_IMM(BPF_REG_0, 0),
1136 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1147 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1137 offsetof(struct __sk_buff, mark)), 1148 offsetof(struct __sk_buff, mark)),
1138 BPF_EXIT_INSN(), 1149 BPF_EXIT_INSN(),
1139 }, 1150 },
1140 .result = ACCEPT, 1151 .result = REJECT,
1141 .prog_type = BPF_PROG_TYPE_SK_SKB, 1152 .prog_type = BPF_PROG_TYPE_SK_SKB,
1153 .errstr = "invalid bpf_context access",
1142 }, 1154 },
1143 { 1155 {
1144 "check skb->tc_index is writeable by SK_SKB", 1156 "check skb->tc_index is writeable by SK_SKB",
@@ -6893,6 +6905,351 @@ static struct bpf_test tests[] = {
6893 .prog_type = BPF_PROG_TYPE_XDP, 6905 .prog_type = BPF_PROG_TYPE_XDP,
6894 }, 6906 },
6895 { 6907 {
6908 "arithmetic ops make PTR_TO_CTX unusable",
6909 .insns = {
6910 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6911 offsetof(struct __sk_buff, data) -
6912 offsetof(struct __sk_buff, mark)),
6913 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6914 offsetof(struct __sk_buff, mark)),
6915 BPF_EXIT_INSN(),
6916 },
6917 .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
6918 .result = REJECT,
6919 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6920 },
6921 {
6922 "XDP pkt read, pkt_end mangling, bad access 1",
6923 .insns = {
6924 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6925 offsetof(struct xdp_md, data)),
6926 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6927 offsetof(struct xdp_md, data_end)),
6928 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6929 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6930 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
6931 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6932 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6933 BPF_MOV64_IMM(BPF_REG_0, 0),
6934 BPF_EXIT_INSN(),
6935 },
6936 .errstr = "R1 offset is outside of the packet",
6937 .result = REJECT,
6938 .prog_type = BPF_PROG_TYPE_XDP,
6939 },
6940 {
6941 "XDP pkt read, pkt_end mangling, bad access 2",
6942 .insns = {
6943 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6944 offsetof(struct xdp_md, data)),
6945 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6946 offsetof(struct xdp_md, data_end)),
6947 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6948 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6949 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
6950 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6951 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6952 BPF_MOV64_IMM(BPF_REG_0, 0),
6953 BPF_EXIT_INSN(),
6954 },
6955 .errstr = "R1 offset is outside of the packet",
6956 .result = REJECT,
6957 .prog_type = BPF_PROG_TYPE_XDP,
6958 },
6959 {
6960 "XDP pkt read, pkt_data' > pkt_end, good access",
6961 .insns = {
6962 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6963 offsetof(struct xdp_md, data)),
6964 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6965 offsetof(struct xdp_md, data_end)),
6966 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6968 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6969 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6970 BPF_MOV64_IMM(BPF_REG_0, 0),
6971 BPF_EXIT_INSN(),
6972 },
6973 .result = ACCEPT,
6974 .prog_type = BPF_PROG_TYPE_XDP,
6975 },
6976 {
6977 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
6978 .insns = {
6979 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6980 offsetof(struct xdp_md, data)),
6981 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6982 offsetof(struct xdp_md, data_end)),
6983 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6984 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6985 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6986 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
6987 BPF_MOV64_IMM(BPF_REG_0, 0),
6988 BPF_EXIT_INSN(),
6989 },
6990 .errstr = "R1 offset is outside of the packet",
6991 .result = REJECT,
6992 .prog_type = BPF_PROG_TYPE_XDP,
6993 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6994 },
6995 {
6996 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
6997 .insns = {
6998 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6999 offsetof(struct xdp_md, data)),
7000 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7001 offsetof(struct xdp_md, data_end)),
7002 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7003 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7004 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
7005 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7006 BPF_MOV64_IMM(BPF_REG_0, 0),
7007 BPF_EXIT_INSN(),
7008 },
7009 .errstr = "R1 offset is outside of the packet",
7010 .result = REJECT,
7011 .prog_type = BPF_PROG_TYPE_XDP,
7012 },
7013 {
7014 "XDP pkt read, pkt_end > pkt_data', good access",
7015 .insns = {
7016 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7017 offsetof(struct xdp_md, data)),
7018 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7019 offsetof(struct xdp_md, data_end)),
7020 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7021 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7022 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7023 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7024 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7025 BPF_MOV64_IMM(BPF_REG_0, 0),
7026 BPF_EXIT_INSN(),
7027 },
7028 .result = ACCEPT,
7029 .prog_type = BPF_PROG_TYPE_XDP,
7030 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7031 },
7032 {
7033 "XDP pkt read, pkt_end > pkt_data', bad access 1",
7034 .insns = {
7035 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7036 offsetof(struct xdp_md, data)),
7037 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7038 offsetof(struct xdp_md, data_end)),
7039 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7040 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7041 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7042 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7043 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7044 BPF_MOV64_IMM(BPF_REG_0, 0),
7045 BPF_EXIT_INSN(),
7046 },
7047 .errstr = "R1 offset is outside of the packet",
7048 .result = REJECT,
7049 .prog_type = BPF_PROG_TYPE_XDP,
7050 },
7051 {
7052 "XDP pkt read, pkt_end > pkt_data', bad access 2",
7053 .insns = {
7054 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7055 offsetof(struct xdp_md, data)),
7056 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7057 offsetof(struct xdp_md, data_end)),
7058 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7059 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7060 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7061 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7062 BPF_MOV64_IMM(BPF_REG_0, 0),
7063 BPF_EXIT_INSN(),
7064 },
7065 .errstr = "R1 offset is outside of the packet",
7066 .result = REJECT,
7067 .prog_type = BPF_PROG_TYPE_XDP,
7068 },
7069 {
7070 "XDP pkt read, pkt_data' < pkt_end, good access",
7071 .insns = {
7072 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7073 offsetof(struct xdp_md, data)),
7074 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7075 offsetof(struct xdp_md, data_end)),
7076 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7077 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7078 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7079 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7080 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7081 BPF_MOV64_IMM(BPF_REG_0, 0),
7082 BPF_EXIT_INSN(),
7083 },
7084 .result = ACCEPT,
7085 .prog_type = BPF_PROG_TYPE_XDP,
7086 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7087 },
7088 {
7089 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
7090 .insns = {
7091 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7092 offsetof(struct xdp_md, data)),
7093 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7094 offsetof(struct xdp_md, data_end)),
7095 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7096 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7097 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7098 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7099 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7100 BPF_MOV64_IMM(BPF_REG_0, 0),
7101 BPF_EXIT_INSN(),
7102 },
7103 .errstr = "R1 offset is outside of the packet",
7104 .result = REJECT,
7105 .prog_type = BPF_PROG_TYPE_XDP,
7106 },
7107 {
7108 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
7109 .insns = {
7110 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7111 offsetof(struct xdp_md, data)),
7112 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7113 offsetof(struct xdp_md, data_end)),
7114 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7116 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7117 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7118 BPF_MOV64_IMM(BPF_REG_0, 0),
7119 BPF_EXIT_INSN(),
7120 },
7121 .errstr = "R1 offset is outside of the packet",
7122 .result = REJECT,
7123 .prog_type = BPF_PROG_TYPE_XDP,
7124 },
7125 {
7126 "XDP pkt read, pkt_end < pkt_data', good access",
7127 .insns = {
7128 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7129 offsetof(struct xdp_md, data)),
7130 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7131 offsetof(struct xdp_md, data_end)),
7132 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7133 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7134 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
7135 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7136 BPF_MOV64_IMM(BPF_REG_0, 0),
7137 BPF_EXIT_INSN(),
7138 },
7139 .result = ACCEPT,
7140 .prog_type = BPF_PROG_TYPE_XDP,
7141 },
7142 {
7143 "XDP pkt read, pkt_end < pkt_data', bad access 1",
7144 .insns = {
7145 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7146 offsetof(struct xdp_md, data)),
7147 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7148 offsetof(struct xdp_md, data_end)),
7149 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7150 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7151 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
7152 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7153 BPF_MOV64_IMM(BPF_REG_0, 0),
7154 BPF_EXIT_INSN(),
7155 },
7156 .errstr = "R1 offset is outside of the packet",
7157 .result = REJECT,
7158 .prog_type = BPF_PROG_TYPE_XDP,
7159 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7160 },
7161 {
7162 "XDP pkt read, pkt_end < pkt_data', bad access 2",
7163 .insns = {
7164 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7165 offsetof(struct xdp_md, data)),
7166 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7167 offsetof(struct xdp_md, data_end)),
7168 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7169 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7170 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
7171 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7172 BPF_MOV64_IMM(BPF_REG_0, 0),
7173 BPF_EXIT_INSN(),
7174 },
7175 .errstr = "R1 offset is outside of the packet",
7176 .result = REJECT,
7177 .prog_type = BPF_PROG_TYPE_XDP,
7178 },
7179 {
7180 "XDP pkt read, pkt_data' >= pkt_end, good access",
7181 .insns = {
7182 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7183 offsetof(struct xdp_md, data)),
7184 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7185 offsetof(struct xdp_md, data_end)),
7186 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7187 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7188 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
7189 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7190 BPF_MOV64_IMM(BPF_REG_0, 0),
7191 BPF_EXIT_INSN(),
7192 },
7193 .result = ACCEPT,
7194 .prog_type = BPF_PROG_TYPE_XDP,
7195 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7196 },
7197 {
7198 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
7199 .insns = {
7200 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7201 offsetof(struct xdp_md, data)),
7202 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7203 offsetof(struct xdp_md, data_end)),
7204 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7205 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7206 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
7207 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7208 BPF_MOV64_IMM(BPF_REG_0, 0),
7209 BPF_EXIT_INSN(),
7210 },
7211 .errstr = "R1 offset is outside of the packet",
7212 .result = REJECT,
7213 .prog_type = BPF_PROG_TYPE_XDP,
7214 },
7215 {
7216 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
7217 .insns = {
7218 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7219 offsetof(struct xdp_md, data)),
7220 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7221 offsetof(struct xdp_md, data_end)),
7222 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7223 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7224 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
7225 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7226 BPF_MOV64_IMM(BPF_REG_0, 0),
7227 BPF_EXIT_INSN(),
7228 },
7229 .errstr = "R1 offset is outside of the packet",
7230 .result = REJECT,
7231 .prog_type = BPF_PROG_TYPE_XDP,
7232 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7233 },
7234 {
7235 "XDP pkt read, pkt_end >= pkt_data', good access",
7236 .insns = {
7237 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7238 offsetof(struct xdp_md, data)),
7239 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7240 offsetof(struct xdp_md, data_end)),
7241 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7242 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7243 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7244 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7245 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7246 BPF_MOV64_IMM(BPF_REG_0, 0),
7247 BPF_EXIT_INSN(),
7248 },
7249 .result = ACCEPT,
7250 .prog_type = BPF_PROG_TYPE_XDP,
7251 },
7252 {
6896 "bpf_exit with invalid return code. test1", 7253 "bpf_exit with invalid return code. test1",
6897 .insns = { 7254 .insns = {
6898 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 7255 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
@@ -6964,6 +7321,155 @@ static struct bpf_test tests[] = {
6964 .result = REJECT, 7321 .result = REJECT,
6965 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, 7322 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
6966 }, 7323 },
7324 {
7325 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
7326 .insns = {
7327 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7328 offsetof(struct xdp_md, data)),
7329 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7330 offsetof(struct xdp_md, data_end)),
7331 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7332 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7333 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7334 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7335 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7336 BPF_MOV64_IMM(BPF_REG_0, 0),
7337 BPF_EXIT_INSN(),
7338 },
7339 .errstr = "R1 offset is outside of the packet",
7340 .result = REJECT,
7341 .prog_type = BPF_PROG_TYPE_XDP,
7342 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7343 },
7344 {
7345 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
7346 .insns = {
7347 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7348 offsetof(struct xdp_md, data)),
7349 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7350 offsetof(struct xdp_md, data_end)),
7351 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7352 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7353 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7354 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7355 BPF_MOV64_IMM(BPF_REG_0, 0),
7356 BPF_EXIT_INSN(),
7357 },
7358 .errstr = "R1 offset is outside of the packet",
7359 .result = REJECT,
7360 .prog_type = BPF_PROG_TYPE_XDP,
7361 },
7362 {
7363 "XDP pkt read, pkt_data' <= pkt_end, good access",
7364 .insns = {
7365 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7366 offsetof(struct xdp_md, data)),
7367 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7368 offsetof(struct xdp_md, data_end)),
7369 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7370 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7371 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7372 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7373 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7374 BPF_MOV64_IMM(BPF_REG_0, 0),
7375 BPF_EXIT_INSN(),
7376 },
7377 .result = ACCEPT,
7378 .prog_type = BPF_PROG_TYPE_XDP,
7379 },
7380 {
7381 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
7382 .insns = {
7383 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7384 offsetof(struct xdp_md, data)),
7385 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7386 offsetof(struct xdp_md, data_end)),
7387 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7389 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7390 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7391 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7392 BPF_MOV64_IMM(BPF_REG_0, 0),
7393 BPF_EXIT_INSN(),
7394 },
7395 .errstr = "R1 offset is outside of the packet",
7396 .result = REJECT,
7397 .prog_type = BPF_PROG_TYPE_XDP,
7398 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7399 },
7400 {
7401 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
7402 .insns = {
7403 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7404 offsetof(struct xdp_md, data)),
7405 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7406 offsetof(struct xdp_md, data_end)),
7407 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7408 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7409 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7410 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7411 BPF_MOV64_IMM(BPF_REG_0, 0),
7412 BPF_EXIT_INSN(),
7413 },
7414 .errstr = "R1 offset is outside of the packet",
7415 .result = REJECT,
7416 .prog_type = BPF_PROG_TYPE_XDP,
7417 },
7418 {
7419 "XDP pkt read, pkt_end <= pkt_data', good access",
7420 .insns = {
7421 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7422 offsetof(struct xdp_md, data)),
7423 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7424 offsetof(struct xdp_md, data_end)),
7425 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7426 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7427 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7428 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7429 BPF_MOV64_IMM(BPF_REG_0, 0),
7430 BPF_EXIT_INSN(),
7431 },
7432 .result = ACCEPT,
7433 .prog_type = BPF_PROG_TYPE_XDP,
7434 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7435 },
7436 {
7437 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
7438 .insns = {
7439 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7440 offsetof(struct xdp_md, data)),
7441 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7442 offsetof(struct xdp_md, data_end)),
7443 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7444 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7445 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7446 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7447 BPF_MOV64_IMM(BPF_REG_0, 0),
7448 BPF_EXIT_INSN(),
7449 },
7450 .errstr = "R1 offset is outside of the packet",
7451 .result = REJECT,
7452 .prog_type = BPF_PROG_TYPE_XDP,
7453 },
7454 {
7455 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
7456 .insns = {
7457 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7458 offsetof(struct xdp_md, data)),
7459 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7460 offsetof(struct xdp_md, data_end)),
7461 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7462 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7463 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
7464 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7465 BPF_MOV64_IMM(BPF_REG_0, 0),
7466 BPF_EXIT_INSN(),
7467 },
7468 .errstr = "R1 offset is outside of the packet",
7469 .result = REJECT,
7470 .prog_type = BPF_PROG_TYPE_XDP,
7471 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7472 },
6967}; 7473};
6968 7474
6969static int probe_filter_length(const struct bpf_insn *fp) 7475static int probe_filter_length(const struct bpf_insn *fp)
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile
index 0f5e347b068d..152823b6cb21 100644
--- a/tools/testing/selftests/mqueue/Makefile
+++ b/tools/testing/selftests/mqueue/Makefile
@@ -5,8 +5,8 @@ TEST_GEN_PROGS := mq_open_tests mq_perf_tests
5include ../lib.mk 5include ../lib.mk
6 6
7override define RUN_TESTS 7override define RUN_TESTS
8 $(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]" 8 @$(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]"
9 $(OUTPUT)//mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]" 9 @$(OUTPUT)/mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]"
10endef 10endef
11 11
12override define EMIT_TESTS 12override define EMIT_TESTS
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index a2c53a3d223d..de2f9ec8a87f 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -397,7 +397,7 @@ static void retry_copy_page(int ufd, struct uffdio_copy *uffdio_copy,
397 } 397 }
398} 398}
399 399
400static int copy_page(int ufd, unsigned long offset) 400static int __copy_page(int ufd, unsigned long offset, bool retry)
401{ 401{
402 struct uffdio_copy uffdio_copy; 402 struct uffdio_copy uffdio_copy;
403 403
@@ -418,7 +418,7 @@ static int copy_page(int ufd, unsigned long offset)
418 fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n", 418 fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n",
419 uffdio_copy.copy), exit(1); 419 uffdio_copy.copy), exit(1);
420 } else { 420 } else {
421 if (test_uffdio_copy_eexist) { 421 if (test_uffdio_copy_eexist && retry) {
422 test_uffdio_copy_eexist = false; 422 test_uffdio_copy_eexist = false;
423 retry_copy_page(ufd, &uffdio_copy, offset); 423 retry_copy_page(ufd, &uffdio_copy, offset);
424 } 424 }
@@ -427,6 +427,16 @@ static int copy_page(int ufd, unsigned long offset)
427 return 0; 427 return 0;
428} 428}
429 429
430static int copy_page_retry(int ufd, unsigned long offset)
431{
432 return __copy_page(ufd, offset, true);
433}
434
435static int copy_page(int ufd, unsigned long offset)
436{
437 return __copy_page(ufd, offset, false);
438}
439
430static void *uffd_poll_thread(void *arg) 440static void *uffd_poll_thread(void *arg)
431{ 441{
432 unsigned long cpu = (unsigned long) arg; 442 unsigned long cpu = (unsigned long) arg;
@@ -544,7 +554,7 @@ static void *background_thread(void *arg)
544 for (page_nr = cpu * nr_pages_per_cpu; 554 for (page_nr = cpu * nr_pages_per_cpu;
545 page_nr < (cpu+1) * nr_pages_per_cpu; 555 page_nr < (cpu+1) * nr_pages_per_cpu;
546 page_nr++) 556 page_nr++)
547 copy_page(uffd, page_nr * page_size); 557 copy_page_retry(uffd, page_nr * page_size);
548 558
549 return NULL; 559 return NULL;
550} 560}
@@ -779,7 +789,7 @@ static void retry_uffdio_zeropage(int ufd,
779 } 789 }
780} 790}
781 791
782static int uffdio_zeropage(int ufd, unsigned long offset) 792static int __uffdio_zeropage(int ufd, unsigned long offset, bool retry)
783{ 793{
784 struct uffdio_zeropage uffdio_zeropage; 794 struct uffdio_zeropage uffdio_zeropage;
785 int ret; 795 int ret;
@@ -814,7 +824,7 @@ static int uffdio_zeropage(int ufd, unsigned long offset)
814 fprintf(stderr, "UFFDIO_ZEROPAGE unexpected %Ld\n", 824 fprintf(stderr, "UFFDIO_ZEROPAGE unexpected %Ld\n",
815 uffdio_zeropage.zeropage), exit(1); 825 uffdio_zeropage.zeropage), exit(1);
816 } else { 826 } else {
817 if (test_uffdio_zeropage_eexist) { 827 if (test_uffdio_zeropage_eexist && retry) {
818 test_uffdio_zeropage_eexist = false; 828 test_uffdio_zeropage_eexist = false;
819 retry_uffdio_zeropage(ufd, &uffdio_zeropage, 829 retry_uffdio_zeropage(ufd, &uffdio_zeropage,
820 offset); 830 offset);
@@ -830,6 +840,11 @@ static int uffdio_zeropage(int ufd, unsigned long offset)
830 return 0; 840 return 0;
831} 841}
832 842
843static int uffdio_zeropage(int ufd, unsigned long offset)
844{
845 return __uffdio_zeropage(ufd, offset, false);
846}
847
833/* exercise UFFDIO_ZEROPAGE */ 848/* exercise UFFDIO_ZEROPAGE */
834static int userfaultfd_zeropage_test(void) 849static int userfaultfd_zeropage_test(void)
835{ 850{
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 97f187e2663f..0a74a20ca32b 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -20,7 +20,7 @@ BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
20BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32)) 20BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32))
21BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64)) 21BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
22 22
23CFLAGS := -O2 -g -std=gnu99 -pthread -Wall 23CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie
24 24
25UNAME_M := $(shell uname -m) 25UNAME_M := $(shell uname -m)
26CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32) 26CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)