aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2019-02-08 18:00:17 -0500
committerDavid S. Miller <davem@davemloft.net>2019-02-08 18:00:17 -0500
commita655fe9f194842693258f43b5382855db1c2f654 (patch)
treea6e523c42378f43881c421530941dcbb529461ab
parent7499a288bf1a4a49be9d72beb0a5c7b9aa6ffec9 (diff)
parent27b4ad621e887ce8e5eb508a0103f13d30f6b38a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
An ipvlan bug fix in 'net' conflicted with the abstraction away of the IPV6 specific support in 'net-next'. Similarly, a bug fix for mlx5 in 'net' conflicted with the flow action conversion in 'net-next'. Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--Documentation/ABI/stable/sysfs-driver-mlxreg-io6
-rw-r--r--Documentation/devicetree/bindings/Makefile6
-rw-r--r--Documentation/devicetree/bindings/serio/olpc,ap-sp.txt4
-rw-r--r--Documentation/sysctl/fs.txt26
-rw-r--r--Documentation/x86/resctrl_ui.txt2
-rw-r--r--MAINTAINERS30
-rw-r--r--Makefile2
-rw-r--r--arch/arm/mach-cns3xxx/pcie.c4
-rw-r--r--arch/arm64/kernel/hibernate.c4
-rw-r--r--arch/arm64/kernel/hyp-stub.S2
-rw-r--r--arch/arm64/kernel/kaslr.c1
-rw-r--r--arch/arm64/kernel/probes/kprobes.c6
-rw-r--r--arch/arm64/mm/flush.c6
-rw-r--r--arch/c6x/include/asm/Kbuild1
-rw-r--r--arch/c6x/include/uapi/asm/Kbuild1
-rw-r--r--arch/h8300/include/asm/Kbuild1
-rw-r--r--arch/h8300/include/uapi/asm/Kbuild1
-rw-r--r--arch/hexagon/include/asm/Kbuild1
-rw-r--r--arch/hexagon/include/uapi/asm/Kbuild1
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/m68k/include/uapi/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/uapi/asm/Kbuild1
-rw-r--r--arch/openrisc/include/asm/Kbuild1
-rw-r--r--arch/openrisc/include/uapi/asm/Kbuild1
-rw-r--r--arch/riscv/Kconfig2
-rw-r--r--arch/riscv/configs/defconfig8
-rw-r--r--arch/riscv/include/asm/page.h2
-rw-r--r--arch/riscv/include/asm/processor.h2
-rw-r--r--arch/riscv/kernel/asm-offsets.c1
-rw-r--r--arch/riscv/kernel/entry.S18
-rw-r--r--arch/riscv/kernel/setup.c2
-rw-r--r--arch/riscv/kernel/smpboot.c6
-rw-r--r--arch/riscv/mm/init.c3
-rw-r--r--arch/unicore32/include/asm/Kbuild1
-rw-r--r--arch/unicore32/include/uapi/asm/Kbuild1
-rw-r--r--arch/x86/Kconfig6
-rw-r--r--arch/x86/boot/compressed/head_64.S8
-rw-r--r--arch/x86/boot/compressed/pgtable.h2
-rw-r--r--arch/x86/include/asm/intel-family.h3
-rw-r--r--arch/x86/include/asm/page_64_types.h4
-rw-r--r--arch/x86/include/asm/resctrl_sched.h4
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/bugs.c2
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c2
-rw-r--r--arch/x86/kernel/cpu/resctrl/Makefile4
-rw-r--r--arch/x86/kernel/kexec-bzimage64.c3
-rw-r--r--arch/x86/kvm/vmx/nested.c1
-rw-r--r--arch/x86/kvm/vmx/vmx.c3
-rw-r--r--arch/x86/kvm/x86.c7
-rw-r--r--arch/x86/lib/iomem.c33
-rw-r--r--arch/x86/mm/fault.c2
-rw-r--r--arch/xtensa/Kconfig4
-rw-r--r--arch/xtensa/boot/dts/Makefile6
-rw-r--r--arch/xtensa/configs/audio_kc705_defconfig2
-rw-r--r--arch/xtensa/configs/cadence_csp_defconfig2
-rw-r--r--arch/xtensa/configs/generic_kc705_defconfig2
-rw-r--r--arch/xtensa/configs/nommu_kc705_defconfig2
-rw-r--r--arch/xtensa/configs/smp_lx200_defconfig3
-rw-r--r--arch/xtensa/kernel/head.S5
-rw-r--r--arch/xtensa/kernel/smp.c41
-rw-r--r--arch/xtensa/kernel/time.c2
-rw-r--r--block/blk-core.c6
-rw-r--r--block/blk-flush.c2
-rw-r--r--block/blk-mq-debugfs.c3
-rw-r--r--drivers/android/binder.c37
-rw-r--r--drivers/android/binder_internal.h9
-rw-r--r--drivers/android/binderfs.c14
-rw-r--r--drivers/base/cacheinfo.c6
-rw-r--r--drivers/base/power/runtime.c10
-rw-r--r--drivers/clk/clk.c14
-rw-r--r--drivers/clk/imx/clk-frac-pll.c5
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c4
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c14
-rw-r--r--drivers/clk/ti/divider.c11
-rw-r--r--drivers/cpuidle/poll_state.c2
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c10
-rw-r--r--drivers/firmware/efi/arm-runtime.c5
-rw-r--r--drivers/fpga/stratix10-soc.c5
-rw-r--r--drivers/gpio/gpio-altera-a10sr.c4
-rw-r--r--drivers/gpio/gpio-eic-sprd.c14
-rw-r--r--drivers/gpio/gpio-pcf857x.c26
-rw-r--r--drivers/gpio/gpio-vf610.c5
-rw-r--r--drivers/gpio/gpiolib.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c1
-rw-r--r--drivers/gpu/drm/drm_modes.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c50
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c27
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c5
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c11
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.h11
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c81
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c6
-rw-r--r--drivers/hid/hid-debug.c120
-rw-r--r--drivers/i3c/master.c2
-rw-r--r--drivers/i3c/master/dw-i3c-master.c18
-rw-r--r--drivers/ide/ide-atapi.c9
-rw-r--r--drivers/ide/ide-io.c61
-rw-r--r--drivers/ide/ide-park.c2
-rw-r--r--drivers/ide/ide-probe.c23
-rw-r--r--drivers/iio/adc/axp288_adc.c76
-rw-r--r--drivers/iio/adc/ti-ads8688.c3
-rw-r--r--drivers/iio/chemical/atlas-ph-sensor.c7
-rw-r--r--drivers/infiniband/core/core_priv.h1
-rw-r--r--drivers/infiniband/core/device.c13
-rw-r--r--drivers/infiniband/core/umem_odp.c3
-rw-r--r--drivers/infiniband/core/uverbs_main.c25
-rw-r--r--drivers/infiniband/core/uverbs_std_types_device.c8
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c2
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c10
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c6
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c3
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c10
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c16
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c1
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c3
-rw-r--r--drivers/input/serio/olpc_apsp.c14
-rw-r--r--drivers/iommu/amd_iommu.c19
-rw-r--r--drivers/iommu/intel-iommu.c2
-rw-r--r--drivers/iommu/mtk_iommu_v1.c4
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c40
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c6
-rw-r--r--drivers/isdn/mISDN/timerdev.c2
-rw-r--r--drivers/md/dm-rq.c2
-rw-r--r--drivers/md/dm.c8
-rw-r--r--drivers/md/raid5-cache.c33
-rw-r--r--drivers/md/raid5.c8
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/misc/mei/client.c5
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/pci-me.c2
-rw-r--r--drivers/misc/mic/vop/vop_main.c73
-rw-r--r--drivers/mmc/host/bcm2835.c2
-rw-r--r--drivers/mmc/host/mtk-sd.c2
-rw-r--r--drivers/net/dsa/b53/b53_srab.c3
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c21
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c25
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c8
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c2
-rw-r--r--drivers/net/ethernet/cavium/Kconfig1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/marvell/skge.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c17
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c15
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h5
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c13
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c3
-rw-r--r--drivers/net/ethernet/smsc/epic100.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c17
-rw-r--r--drivers/net/ethernet/sun/cassini.c2
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/fddi/defxx.c2
-rw-r--r--drivers/net/geneve.c10
-rw-r--r--drivers/net/ieee802154/mcr20a.c6
-rw-r--r--drivers/net/ipvlan/ipvlan_l3s.c4
-rw-r--r--drivers/net/phy/dp83640.c13
-rw-r--r--drivers/net/phy/marvell.c16
-rw-r--r--drivers/net/tun.c3
-rw-r--r--drivers/net/virtio_net.c171
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c40
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c15
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c11
-rw-r--r--drivers/pci/controller/dwc/pcie-armada8k.c16
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c8
-rw-r--r--drivers/pinctrl/mediatek/Kconfig3
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c7
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c44
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h2
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/s390/net/qeth_core.h3
-rw-r--r--drivers/s390/net/qeth_core_main.c31
-rw-r--r--drivers/s390/net/qeth_l2_main.c8
-rw-r--r--drivers/s390/net/qeth_l3_main.c3
-rw-r--r--drivers/s390/scsi/zfcp_aux.c1
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c2
-rw-r--r--drivers/scsi/53c700.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c4
-rw-r--r--drivers/scsi/libfc/fc_lport.c6
-rw-r--r--drivers/scsi/scsi_debug.c41
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c2
-rw-r--r--drivers/staging/speakup/spk_ttyio.c6
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c3
-rw-r--r--drivers/tty/serial/8250/8250_pci.c9
-rw-r--r--drivers/tty/serial/earlycon-riscv-sbi.c13
-rw-r--r--drivers/tty/serial/serial_core.c6
-rw-r--r--drivers/tty/serial/sh-sci.c9
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c4
-rw-r--r--drivers/usb/dwc3/gadget.c2
-rw-r--r--drivers/usb/gadget/udc/net2272.c2
-rw-r--r--drivers/usb/musb/musb_gadget.c13
-rw-r--r--drivers/usb/musb/musbhsdma.c21
-rw-r--r--drivers/usb/phy/Kconfig2
-rw-r--r--drivers/usb/phy/phy-am335x.c5
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c3
-rw-r--r--drivers/virtio/virtio_ring.c15
-rw-r--r--fs/autofs/expire.c3
-rw-r--r--fs/autofs/inode.c4
-rw-r--r--fs/btrfs/ctree.c78
-rw-r--r--fs/btrfs/super.c3
-rw-r--r--fs/btrfs/transaction.c24
-rw-r--r--fs/btrfs/volumes.c4
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/file.c11
-rw-r--r--fs/cifs/smb2ops.c4
-rw-r--r--fs/cifs/smb2pdu.c54
-rw-r--r--fs/cifs/smb2pdu.h19
-rw-r--r--fs/dcache.c38
-rw-r--r--fs/debugfs/inode.c36
-rw-r--r--fs/drop_caches.c8
-rw-r--r--fs/fuse/dev.c4
-rw-r--r--fs/fuse/file.c2
-rw-r--r--fs/fuse/inode.c2
-rw-r--r--fs/gfs2/rgrp.c2
-rw-r--r--fs/iomap.c37
-rw-r--r--fs/nfs/super.c5
-rw-r--r--fs/nfs/write.c9
-rw-r--r--fs/nfsd/vfs.c6
-rw-r--r--fs/proc/generic.c4
-rw-r--r--fs/proc/internal.h1
-rw-r--r--fs/proc/proc_net.c20
-rw-r--r--fs/xfs/scrub/repair.c11
-rw-r--r--fs/xfs/xfs_aops.c2
-rw-r--r--fs/xfs/xfs_buf.c19
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h1
-rw-r--r--include/linux/cpu.h2
-rw-r--r--include/linux/dcache.h7
-rw-r--r--include/linux/filter.h21
-rw-r--r--include/linux/fs.h9
-rw-r--r--include/linux/hid-debug.h9
-rw-r--r--include/linux/ide.h2
-rw-r--r--include/linux/memory_hotplug.h18
-rw-r--r--include/linux/netdevice.h8
-rw-r--r--include/linux/pm_runtime.h2
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/sched/coredump.h1
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/net/l3mdev.h3
-rw-r--r--include/net/netfilter/nf_tables.h17
-rw-r--r--include/rdma/ib_verbs.h24
-rw-r--r--include/sound/compress_driver.h6
-rw-r--r--include/sound/hda_codec.h1
-rw-r--r--include/uapi/linux/virtio_config.h6
-rw-r--r--include/uapi/linux/virtio_ring.h10
-rw-r--r--include/uapi/rdma/hns-abi.h5
-rw-r--r--init/Kconfig13
-rw-r--r--kernel/bpf/btf.c3
-rw-r--r--kernel/bpf/cgroup.c2
-rw-r--r--kernel/bpf/hashtab.c4
-rw-r--r--kernel/bpf/percpu_freelist.c41
-rw-r--r--kernel/bpf/percpu_freelist.h4
-rw-r--r--kernel/bpf/syscall.c12
-rw-r--r--kernel/cpu.c38
-rw-r--r--kernel/events/core.c14
-rw-r--r--kernel/exit.c12
-rw-r--r--kernel/relay.c4
-rw-r--r--kernel/sched/fair.c1
-rw-r--r--kernel/sched/psi.c21
-rw-r--r--kernel/smp.c2
-rw-r--r--kernel/trace/bpf_trace.c14
-rw-r--r--kernel/trace/trace_uprobe.c9
-rw-r--r--kernel/workqueue.c23
-rw-r--r--kernel/workqueue_internal.h6
-rw-r--r--lib/test_kmod.c2
-rw-r--r--lib/test_rhashtable.c23
-rw-r--r--mm/hugetlb.c3
-rw-r--r--mm/kasan/Makefile1
-rw-r--r--mm/memory-failure.c3
-rw-r--r--mm/memory_hotplug.c62
-rw-r--r--mm/migrate.c12
-rw-r--r--mm/oom_kill.c12
-rw-r--r--net/batman-adv/bat_v_elp.c3
-rw-r--r--net/batman-adv/hard-interface.c5
-rw-r--r--net/batman-adv/soft-interface.c2
-rw-r--r--net/core/filter.c2
-rw-r--r--net/core/skmsg.c3
-rw-r--r--net/dccp/ccid.h4
-rw-r--r--net/dsa/master.c4
-rw-r--r--net/dsa/slave.c17
-rw-r--r--net/ipv4/ip_gre.c7
-rw-r--r--net/ipv6/ip6_gre.c7
-rw-r--r--net/ipv6/netfilter.c4
-rw-r--r--net/ipv6/seg6_iptunnel.c2
-rw-r--r--net/ipv6/sit.c3
-rw-r--r--net/l2tp/l2tp_core.c9
-rw-r--r--net/l2tp/l2tp_core.h20
-rw-r--r--net/l2tp/l2tp_ip.c3
-rw-r--r--net/l2tp/l2tp_ip6.c3
-rw-r--r--net/mac80211/tx.c12
-rw-r--r--net/netfilter/nf_conntrack_core.c16
-rw-r--r--net/netfilter/nf_tables_api.c85
-rw-r--r--net/netfilter/nft_compat.c62
-rw-r--r--net/netfilter/nft_dynset.c18
-rw-r--r--net/netfilter/nft_immediate.c6
-rw-r--r--net/netfilter/nft_lookup.c18
-rw-r--r--net/netfilter/nft_objref.c18
-rw-r--r--net/rds/bind.c6
-rw-r--r--net/rxrpc/recvmsg.c3
-rw-r--r--net/sched/cls_flower.c6
-rw-r--r--net/sctp/socket.c4
-rw-r--r--net/sctp/stream.c20
-rw-r--r--net/smc/af_smc.c11
-rw-r--r--net/smc/smc_cdc.c21
-rw-r--r--net/smc/smc_cdc.h34
-rw-r--r--net/smc/smc_clc.c2
-rw-r--r--net/smc/smc_close.c9
-rw-r--r--net/smc/smc_core.c6
-rw-r--r--net/smc/smc_core.h20
-rw-r--r--net/smc/smc_ib.c6
-rw-r--r--net/smc/smc_llc.c3
-rw-r--r--net/smc/smc_pnet.c2
-rw-r--r--net/smc/smc_tx.c64
-rw-r--r--net/smc/smc_wr.c46
-rw-r--r--net/smc/smc_wr.h1
-rw-r--r--net/socket.c82
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c105
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c9
-rw-r--r--net/vmw_vsock/virtio_transport.c29
-rw-r--r--net/wireless/ap.c2
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/sme.c2
-rw-r--r--samples/mei/mei-amt-version.c2
-rw-r--r--security/apparmor/domain.c5
-rw-r--r--security/apparmor/lsm.c2
-rw-r--r--sound/core/pcm_lib.c9
-rw-r--r--sound/pci/hda/hda_bind.c3
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_ca0132.c4
-rw-r--r--sound/pci/hda/patch_realtek.c138
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--tools/bpf/bpftool/common.c6
-rw-r--r--tools/bpf/bpftool/map.c33
-rw-r--r--tools/bpf/bpftool/prog.c5
-rw-r--r--tools/iio/iio_generic_buffer.c2
-rw-r--r--tools/perf/builtin-script.c9
-rw-r--r--tools/perf/ui/browsers/annotate.c16
-rw-r--r--tools/perf/util/cpumap.c11
-rw-r--r--tools/perf/util/ordered-events.c6
-rw-r--r--tools/perf/util/setup.py2
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/bpf/bpf_util.h30
-rw-r--r--tools/testing/selftests/bpf/test_btf.c9
-rwxr-xr-xtools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh13
-rw-r--r--tools/testing/selftests/filesystems/binderfs/.gitignore1
-rw-r--r--tools/testing/selftests/filesystems/binderfs/Makefile6
-rw-r--r--tools/testing/selftests/filesystems/binderfs/binderfs_test.c275
-rw-r--r--tools/testing/selftests/filesystems/binderfs/config3
-rw-r--r--tools/testing/selftests/ir/Makefile2
-rw-r--r--tools/testing/selftests/net/Makefile2
-rw-r--r--tools/testing/selftests/netfilter/Makefile2
-rw-r--r--tools/testing/selftests/netfilter/config2
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat.sh762
-rw-r--r--tools/testing/selftests/proc/.gitignore1
-rw-r--r--tools/testing/selftests/proc/Makefile1
-rw-r--r--tools/testing/selftests/proc/setns-dcache.c129
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c72
-rw-r--r--tools/testing/selftests/timers/Makefile2
-rw-r--r--virt/kvm/kvm_main.c3
399 files changed, 4109 insertions, 1563 deletions
diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
index 9b642669cb16..169fe08a649b 100644
--- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io
+++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
@@ -24,7 +24,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
24 cpld3_version 24 cpld3_version
25 25
26Date: November 2018 26Date: November 2018
27KernelVersion: 4.21 27KernelVersion: 5.0
28Contact: Vadim Pasternak <vadimpmellanox.com> 28Contact: Vadim Pasternak <vadimpmellanox.com>
29Description: These files show with which CPLD versions have been burned 29Description: These files show with which CPLD versions have been burned
30 on LED board. 30 on LED board.
@@ -35,7 +35,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
35 jtag_enable 35 jtag_enable
36 36
37Date: November 2018 37Date: November 2018
38KernelVersion: 4.21 38KernelVersion: 5.0
39Contact: Vadim Pasternak <vadimpmellanox.com> 39Contact: Vadim Pasternak <vadimpmellanox.com>
40Description: These files enable and disable the access to the JTAG domain. 40Description: These files enable and disable the access to the JTAG domain.
41 By default access to the JTAG domain is disabled. 41 By default access to the JTAG domain is disabled.
@@ -105,7 +105,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
105 reset_voltmon_upgrade_fail 105 reset_voltmon_upgrade_fail
106 106
107Date: November 2018 107Date: November 2018
108KernelVersion: 4.21 108KernelVersion: 5.0
109Contact: Vadim Pasternak <vadimpmellanox.com> 109Contact: Vadim Pasternak <vadimpmellanox.com>
110Description: These files show the system reset cause, as following: ComEx 110Description: These files show the system reset cause, as following: ComEx
111 power fail, reset from ComEx, system platform reset, reset 111 power fail, reset from ComEx, system platform reset, reset
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
index 6e5cef0ed6fb..50daa0b3b032 100644
--- a/Documentation/devicetree/bindings/Makefile
+++ b/Documentation/devicetree/bindings/Makefile
@@ -17,7 +17,11 @@ extra-y += $(DT_TMP_SCHEMA)
17quiet_cmd_mk_schema = SCHEMA $@ 17quiet_cmd_mk_schema = SCHEMA $@
18 cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(filter-out FORCE, $^) 18 cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(filter-out FORCE, $^)
19 19
20DT_DOCS = $(shell cd $(srctree)/$(src) && find * -name '*.yaml') 20DT_DOCS = $(shell \
21 cd $(srctree)/$(src) && \
22 find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \
23 )
24
21DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS)) 25DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
22 26
23extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES)) 27extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
diff --git a/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt b/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt
index 36603419d6f8..0e72183f52bc 100644
--- a/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt
+++ b/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt
@@ -4,14 +4,10 @@ Required properties:
4- compatible : "olpc,ap-sp" 4- compatible : "olpc,ap-sp"
5- reg : base address and length of SoC's WTM registers 5- reg : base address and length of SoC's WTM registers
6- interrupts : SP-AP interrupt 6- interrupts : SP-AP interrupt
7- clocks : phandle + clock-specifier for the clock that drives the WTM
8- clock-names: should be "sp"
9 7
10Example: 8Example:
11 ap-sp@d4290000 { 9 ap-sp@d4290000 {
12 compatible = "olpc,ap-sp"; 10 compatible = "olpc,ap-sp";
13 reg = <0xd4290000 0x1000>; 11 reg = <0xd4290000 0x1000>;
14 interrupts = <40>; 12 interrupts = <40>;
15 clocks = <&soc_clocks MMP2_CLK_SP>;
16 clock-names = "sp";
17 } 13 }
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 819caf8ca05f..58649bd4fcfc 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -56,26 +56,32 @@ of any kernel data structures.
56 56
57dentry-state: 57dentry-state:
58 58
59From linux/fs/dentry.c: 59From linux/include/linux/dcache.h:
60-------------------------------------------------------------- 60--------------------------------------------------------------
61struct { 61struct dentry_stat_t dentry_stat {
62 int nr_dentry; 62 int nr_dentry;
63 int nr_unused; 63 int nr_unused;
64 int age_limit; /* age in seconds */ 64 int age_limit; /* age in seconds */
65 int want_pages; /* pages requested by system */ 65 int want_pages; /* pages requested by system */
66 int dummy[2]; 66 int nr_negative; /* # of unused negative dentries */
67} dentry_stat = {0, 0, 45, 0,}; 67 int dummy; /* Reserved for future use */
68-------------------------------------------------------------- 68};
69 69--------------------------------------------------------------
70Dentries are dynamically allocated and deallocated, and 70
71nr_dentry seems to be 0 all the time. Hence it's safe to 71Dentries are dynamically allocated and deallocated.
72assume that only nr_unused, age_limit and want_pages are 72
73used. Nr_unused seems to be exactly what its name says. 73nr_dentry shows the total number of dentries allocated (active
74+ unused). nr_unused shows the number of dentries that are not
75actively used, but are saved in the LRU list for future reuse.
76
74Age_limit is the age in seconds after which dcache entries 77Age_limit is the age in seconds after which dcache entries
75can be reclaimed when memory is short and want_pages is 78can be reclaimed when memory is short and want_pages is
76nonzero when shrink_dcache_pages() has been called and the 79nonzero when shrink_dcache_pages() has been called and the
77dcache isn't pruned yet. 80dcache isn't pruned yet.
78 81
82nr_negative shows the number of unused dentries that are also
83negative dentries which do not mapped to actual files.
84
79============================================================== 85==============================================================
80 86
81dquot-max & dquot-nr: 87dquot-max & dquot-nr:
diff --git a/Documentation/x86/resctrl_ui.txt b/Documentation/x86/resctrl_ui.txt
index e8e8d14d3c4e..c1f95b59e14d 100644
--- a/Documentation/x86/resctrl_ui.txt
+++ b/Documentation/x86/resctrl_ui.txt
@@ -9,7 +9,7 @@ Fenghua Yu <fenghua.yu@intel.com>
9Tony Luck <tony.luck@intel.com> 9Tony Luck <tony.luck@intel.com>
10Vikas Shivappa <vikas.shivappa@intel.com> 10Vikas Shivappa <vikas.shivappa@intel.com>
11 11
12This feature is enabled by the CONFIG_X86_RESCTRL and the x86 /proc/cpuinfo 12This feature is enabled by the CONFIG_X86_CPU_RESCTRL and the x86 /proc/cpuinfo
13flag bits: 13flag bits:
14RDT (Resource Director Technology) Allocation - "rdt_a" 14RDT (Resource Director Technology) Allocation - "rdt_a"
15CAT (Cache Allocation Technology) - "cat_l3", "cat_l2" 15CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
diff --git a/MAINTAINERS b/MAINTAINERS
index b4491132b9ce..604bca2fc05d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2848,6 +2848,9 @@ F: include/uapi/linux/if_bonding.h
2848BPF (Safe dynamic programs and tools) 2848BPF (Safe dynamic programs and tools)
2849M: Alexei Starovoitov <ast@kernel.org> 2849M: Alexei Starovoitov <ast@kernel.org>
2850M: Daniel Borkmann <daniel@iogearbox.net> 2850M: Daniel Borkmann <daniel@iogearbox.net>
2851R: Martin KaFai Lau <kafai@fb.com>
2852R: Song Liu <songliubraving@fb.com>
2853R: Yonghong Song <yhs@fb.com>
2851L: netdev@vger.kernel.org 2854L: netdev@vger.kernel.org
2852L: linux-kernel@vger.kernel.org 2855L: linux-kernel@vger.kernel.org
2853T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git 2856T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
@@ -2873,6 +2876,8 @@ F: samples/bpf/
2873F: tools/bpf/ 2876F: tools/bpf/
2874F: tools/lib/bpf/ 2877F: tools/lib/bpf/
2875F: tools/testing/selftests/bpf/ 2878F: tools/testing/selftests/bpf/
2879K: bpf
2880N: bpf
2876 2881
2877BPF JIT for ARM 2882BPF JIT for ARM
2878M: Shubham Bansal <illusionist.neo@gmail.com> 2883M: Shubham Bansal <illusionist.neo@gmail.com>
@@ -12890,6 +12895,13 @@ F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
12890F: drivers/net/dsa/realtek-smi* 12895F: drivers/net/dsa/realtek-smi*
12891F: drivers/net/dsa/rtl83* 12896F: drivers/net/dsa/rtl83*
12892 12897
12898REDPINE WIRELESS DRIVER
12899M: Amitkumar Karwar <amitkarwar@gmail.com>
12900M: Siva Rebbagondla <siva8118@gmail.com>
12901L: linux-wireless@vger.kernel.org
12902S: Maintained
12903F: drivers/net/wireless/rsi/
12904
12893REGISTER MAP ABSTRACTION 12905REGISTER MAP ABSTRACTION
12894M: Mark Brown <broonie@kernel.org> 12906M: Mark Brown <broonie@kernel.org>
12895L: linux-kernel@vger.kernel.org 12907L: linux-kernel@vger.kernel.org
@@ -13718,6 +13730,15 @@ L: netdev@vger.kernel.org
13718S: Supported 13730S: Supported
13719F: drivers/net/ethernet/sfc/ 13731F: drivers/net/ethernet/sfc/
13720 13732
13733SFF/SFP/SFP+ MODULE SUPPORT
13734M: Russell King <linux@armlinux.org.uk>
13735L: netdev@vger.kernel.org
13736S: Maintained
13737F: drivers/net/phy/phylink.c
13738F: drivers/net/phy/sfp*
13739F: include/linux/phylink.h
13740F: include/linux/sfp.h
13741
13721SGI GRU DRIVER 13742SGI GRU DRIVER
13722M: Dimitri Sivanich <sivanich@sgi.com> 13743M: Dimitri Sivanich <sivanich@sgi.com>
13723S: Maintained 13744S: Maintained
@@ -16663,6 +16684,15 @@ S: Maintained
16663F: drivers/platform/x86/ 16684F: drivers/platform/x86/
16664F: drivers/platform/olpc/ 16685F: drivers/platform/olpc/
16665 16686
16687X86 PLATFORM DRIVERS - ARCH
16688R: Darren Hart <dvhart@infradead.org>
16689R: Andy Shevchenko <andy@infradead.org>
16690L: platform-driver-x86@vger.kernel.org
16691L: x86@kernel.org
16692T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
16693S: Maintained
16694F: arch/x86/platform
16695
16666X86 VDSO 16696X86 VDSO
16667M: Andy Lutomirski <luto@kernel.org> 16697M: Andy Lutomirski <luto@kernel.org>
16668L: linux-kernel@vger.kernel.org 16698L: linux-kernel@vger.kernel.org
diff --git a/Makefile b/Makefile
index 141653226f3c..3142e67d03f1 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 5 2VERSION = 5
3PATCHLEVEL = 0 3PATCHLEVEL = 0
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc4 5EXTRAVERSION = -rc5
6NAME = Shy Crocodile 6NAME = Shy Crocodile
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 318394ed5c7a..95a11d5b3587 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
83 } else /* remote PCI bus */ 83 } else /* remote PCI bus */
84 base = cnspci->cfg1_regs + ((busno & 0xf) << 20); 84 base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
85 85
86 return base + (where & 0xffc) + (devfn << 12); 86 return base + where + (devfn << 12);
87} 87}
88 88
89static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn, 89static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
@@ -93,7 +93,7 @@ static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
93 u32 mask = (0x1ull << (size * 8)) - 1; 93 u32 mask = (0x1ull << (size * 8)) - 1;
94 int shift = (where % 4) * 8; 94 int shift = (where % 4) * 8;
95 95
96 ret = pci_generic_config_read32(bus, devfn, where, size, val); 96 ret = pci_generic_config_read(bus, devfn, where, size, val);
97 97
98 if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn && 98 if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn &&
99 (where & 0xffc) == PCI_CLASS_REVISION) 99 (where & 0xffc) == PCI_CLASS_REVISION)
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 29cdc99688f3..9859e1178e6b 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -299,8 +299,10 @@ int swsusp_arch_suspend(void)
299 dcache_clean_range(__idmap_text_start, __idmap_text_end); 299 dcache_clean_range(__idmap_text_start, __idmap_text_end);
300 300
301 /* Clean kvm setup code to PoC? */ 301 /* Clean kvm setup code to PoC? */
302 if (el2_reset_needed()) 302 if (el2_reset_needed()) {
303 dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end); 303 dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
304 dcache_clean_range(__hyp_text_start, __hyp_text_end);
305 }
304 306
305 /* make the crash dump kernel image protected again */ 307 /* make the crash dump kernel image protected again */
306 crash_post_resume(); 308 crash_post_resume();
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index e1261fbaa374..17f325ba831e 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -28,6 +28,8 @@
28#include <asm/virt.h> 28#include <asm/virt.h>
29 29
30 .text 30 .text
31 .pushsection .hyp.text, "ax"
32
31 .align 11 33 .align 11
32 34
33ENTRY(__hyp_stub_vectors) 35ENTRY(__hyp_stub_vectors)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index ba6b41790fcd..b09b6f75f759 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
88 * we end up running with module randomization disabled. 88 * we end up running with module randomization disabled.
89 */ 89 */
90 module_alloc_base = (u64)_etext - MODULES_VSIZE; 90 module_alloc_base = (u64)_etext - MODULES_VSIZE;
91 __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
91 92
92 /* 93 /*
93 * Try to map the FDT early. If this fails, we simply bail, 94 * Try to map the FDT early. If this fails, we simply bail,
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2a5b338b2542..f17afb99890c 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -478,13 +478,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
478 addr < (unsigned long)__entry_text_end) || 478 addr < (unsigned long)__entry_text_end) ||
479 (addr >= (unsigned long)__idmap_text_start && 479 (addr >= (unsigned long)__idmap_text_start &&
480 addr < (unsigned long)__idmap_text_end) || 480 addr < (unsigned long)__idmap_text_end) ||
481 (addr >= (unsigned long)__hyp_text_start &&
482 addr < (unsigned long)__hyp_text_end) ||
481 !!search_exception_tables(addr)) 483 !!search_exception_tables(addr))
482 return true; 484 return true;
483 485
484 if (!is_kernel_in_hyp_mode()) { 486 if (!is_kernel_in_hyp_mode()) {
485 if ((addr >= (unsigned long)__hyp_text_start && 487 if ((addr >= (unsigned long)__hyp_idmap_text_start &&
486 addr < (unsigned long)__hyp_text_end) ||
487 (addr >= (unsigned long)__hyp_idmap_text_start &&
488 addr < (unsigned long)__hyp_idmap_text_end)) 488 addr < (unsigned long)__hyp_idmap_text_end))
489 return true; 489 return true;
490 } 490 }
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 30695a868107..5c9073bace83 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len)
33 __clean_dcache_area_pou(kaddr, len); 33 __clean_dcache_area_pou(kaddr, len);
34 __flush_icache_all(); 34 __flush_icache_all();
35 } else { 35 } else {
36 flush_icache_range(addr, addr + len); 36 /*
37 * Don't issue kick_all_cpus_sync() after I-cache invalidation
38 * for user mappings.
39 */
40 __flush_icache_range(addr, addr + len);
37 } 41 }
38} 42}
39 43
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 33a2c94fed0d..63b4a1705182 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -30,6 +30,7 @@ generic-y += pgalloc.h
30generic-y += preempt.h 30generic-y += preempt.h
31generic-y += segment.h 31generic-y += segment.h
32generic-y += serial.h 32generic-y += serial.h
33generic-y += shmparam.h
33generic-y += tlbflush.h 34generic-y += tlbflush.h
34generic-y += topology.h 35generic-y += topology.h
35generic-y += trace_clock.h 36generic-y += trace_clock.h
diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild
index 6c6f6301012e..0febf1a07c30 100644
--- a/arch/c6x/include/uapi/asm/Kbuild
+++ b/arch/c6x/include/uapi/asm/Kbuild
@@ -1,5 +1,4 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3generic-y += kvm_para.h 3generic-y += kvm_para.h
4generic-y += shmparam.h
5generic-y += ucontext.h 4generic-y += ucontext.h
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index cd400d353d18..961c1dc064e1 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -40,6 +40,7 @@ generic-y += preempt.h
40generic-y += scatterlist.h 40generic-y += scatterlist.h
41generic-y += sections.h 41generic-y += sections.h
42generic-y += serial.h 42generic-y += serial.h
43generic-y += shmparam.h
43generic-y += sizes.h 44generic-y += sizes.h
44generic-y += spinlock.h 45generic-y += spinlock.h
45generic-y += timex.h 46generic-y += timex.h
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild
index 6c6f6301012e..0febf1a07c30 100644
--- a/arch/h8300/include/uapi/asm/Kbuild
+++ b/arch/h8300/include/uapi/asm/Kbuild
@@ -1,5 +1,4 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3generic-y += kvm_para.h 3generic-y += kvm_para.h
4generic-y += shmparam.h
5generic-y += ucontext.h 4generic-y += ucontext.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 47c4da3d64a4..b25fd42aa0f4 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -30,6 +30,7 @@ generic-y += rwsem.h
30generic-y += sections.h 30generic-y += sections.h
31generic-y += segment.h 31generic-y += segment.h
32generic-y += serial.h 32generic-y += serial.h
33generic-y += shmparam.h
33generic-y += sizes.h 34generic-y += sizes.h
34generic-y += topology.h 35generic-y += topology.h
35generic-y += trace_clock.h 36generic-y += trace_clock.h
diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild
index 61d955c1747a..c1b06dcf6cf8 100644
--- a/arch/hexagon/include/uapi/asm/Kbuild
+++ b/arch/hexagon/include/uapi/asm/Kbuild
@@ -1,4 +1,3 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3generic-y += shmparam.h
4generic-y += ucontext.h 3generic-y += ucontext.h
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 9f1dd26903e3..95f8f631c4df 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -20,6 +20,7 @@ generic-y += mm-arch-hooks.h
20generic-y += percpu.h 20generic-y += percpu.h
21generic-y += preempt.h 21generic-y += preempt.h
22generic-y += sections.h 22generic-y += sections.h
23generic-y += shmparam.h
23generic-y += spinlock.h 24generic-y += spinlock.h
24generic-y += topology.h 25generic-y += topology.h
25generic-y += trace_clock.h 26generic-y += trace_clock.h
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild
index b8b3525271fa..960bf1e4be53 100644
--- a/arch/m68k/include/uapi/asm/Kbuild
+++ b/arch/m68k/include/uapi/asm/Kbuild
@@ -2,4 +2,3 @@ include include/uapi/asm-generic/Kbuild.asm
2 2
3generated-y += unistd_32.h 3generated-y += unistd_32.h
4generic-y += kvm_para.h 4generic-y += kvm_para.h
5generic-y += shmparam.h
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 9c7d1d25bf3d..791cc8d54d0a 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -26,6 +26,7 @@ generic-y += parport.h
26generic-y += percpu.h 26generic-y += percpu.h
27generic-y += preempt.h 27generic-y += preempt.h
28generic-y += serial.h 28generic-y += serial.h
29generic-y += shmparam.h
29generic-y += syscalls.h 30generic-y += syscalls.h
30generic-y += topology.h 31generic-y += topology.h
31generic-y += trace_clock.h 32generic-y += trace_clock.h
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild
index 28823e3db825..97823ec46e97 100644
--- a/arch/microblaze/include/uapi/asm/Kbuild
+++ b/arch/microblaze/include/uapi/asm/Kbuild
@@ -2,5 +2,4 @@ include include/uapi/asm-generic/Kbuild.asm
2 2
3generated-y += unistd_32.h 3generated-y += unistd_32.h
4generic-y += kvm_para.h 4generic-y += kvm_para.h
5generic-y += shmparam.h
6generic-y += ucontext.h 5generic-y += ucontext.h
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index eb87cd8327c8..1f04844b6b82 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -34,6 +34,7 @@ generic-y += qrwlock_types.h
34generic-y += qrwlock.h 34generic-y += qrwlock.h
35generic-y += sections.h 35generic-y += sections.h
36generic-y += segment.h 36generic-y += segment.h
37generic-y += shmparam.h
37generic-y += string.h 38generic-y += string.h
38generic-y += switch_to.h 39generic-y += switch_to.h
39generic-y += topology.h 40generic-y += topology.h
diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild
index 6c6f6301012e..0febf1a07c30 100644
--- a/arch/openrisc/include/uapi/asm/Kbuild
+++ b/arch/openrisc/include/uapi/asm/Kbuild
@@ -1,5 +1,4 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3generic-y += kvm_para.h 3generic-y += kvm_para.h
4generic-y += shmparam.h
5generic-y += ucontext.h 4generic-y += ucontext.h
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index e64c657060bb..bd149905a5b5 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -104,7 +104,7 @@ choice
104 prompt "Base ISA" 104 prompt "Base ISA"
105 default ARCH_RV64I 105 default ARCH_RV64I
106 help 106 help
107 This selects the base ISA that this kernel will traget and must match 107 This selects the base ISA that this kernel will target and must match
108 the target platform. 108 the target platform.
109 109
110config ARCH_RV32I 110config ARCH_RV32I
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index f399659d3b8d..2fd3461e50ab 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -13,8 +13,6 @@ CONFIG_BLK_DEV_INITRD=y
13CONFIG_EXPERT=y 13CONFIG_EXPERT=y
14CONFIG_BPF_SYSCALL=y 14CONFIG_BPF_SYSCALL=y
15CONFIG_SMP=y 15CONFIG_SMP=y
16CONFIG_PCI=y
17CONFIG_PCIE_XILINX=y
18CONFIG_MODULES=y 16CONFIG_MODULES=y
19CONFIG_MODULE_UNLOAD=y 17CONFIG_MODULE_UNLOAD=y
20CONFIG_NET=y 18CONFIG_NET=y
@@ -28,6 +26,10 @@ CONFIG_IP_PNP_DHCP=y
28CONFIG_IP_PNP_BOOTP=y 26CONFIG_IP_PNP_BOOTP=y
29CONFIG_IP_PNP_RARP=y 27CONFIG_IP_PNP_RARP=y
30CONFIG_NETLINK_DIAG=y 28CONFIG_NETLINK_DIAG=y
29CONFIG_PCI=y
30CONFIG_PCIEPORTBUS=y
31CONFIG_PCI_HOST_GENERIC=y
32CONFIG_PCIE_XILINX=y
31CONFIG_DEVTMPFS=y 33CONFIG_DEVTMPFS=y
32CONFIG_BLK_DEV_LOOP=y 34CONFIG_BLK_DEV_LOOP=y
33CONFIG_VIRTIO_BLK=y 35CONFIG_VIRTIO_BLK=y
@@ -63,7 +65,6 @@ CONFIG_USB_STORAGE=y
63CONFIG_USB_UAS=y 65CONFIG_USB_UAS=y
64CONFIG_VIRTIO_MMIO=y 66CONFIG_VIRTIO_MMIO=y
65CONFIG_SIFIVE_PLIC=y 67CONFIG_SIFIVE_PLIC=y
66CONFIG_RAS=y
67CONFIG_EXT4_FS=y 68CONFIG_EXT4_FS=y
68CONFIG_EXT4_FS_POSIX_ACL=y 69CONFIG_EXT4_FS_POSIX_ACL=y
69CONFIG_AUTOFS4_FS=y 70CONFIG_AUTOFS4_FS=y
@@ -77,5 +78,6 @@ CONFIG_NFS_V4_1=y
77CONFIG_NFS_V4_2=y 78CONFIG_NFS_V4_2=y
78CONFIG_ROOT_NFS=y 79CONFIG_ROOT_NFS=y
79CONFIG_CRYPTO_USER_API_HASH=y 80CONFIG_CRYPTO_USER_API_HASH=y
81CONFIG_CRYPTO_DEV_VIRTIO=y
80CONFIG_PRINTK_TIME=y 82CONFIG_PRINTK_TIME=y
81# CONFIG_RCU_TRACE is not set 83# CONFIG_RCU_TRACE is not set
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 06cfbb3aacbb..2a546a52f02a 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -80,7 +80,7 @@ typedef struct page *pgtable_t;
80#define __pgd(x) ((pgd_t) { (x) }) 80#define __pgd(x) ((pgd_t) { (x) })
81#define __pgprot(x) ((pgprot_t) { (x) }) 81#define __pgprot(x) ((pgprot_t) { (x) })
82 82
83#ifdef CONFIG_64BITS 83#ifdef CONFIG_64BIT
84#define PTE_FMT "%016lx" 84#define PTE_FMT "%016lx"
85#else 85#else
86#define PTE_FMT "%08lx" 86#define PTE_FMT "%08lx"
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 0531f49af5c3..ce70bceb8872 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -22,7 +22,7 @@
22 * This decides where the kernel will search for a free chunk of vm 22 * This decides where the kernel will search for a free chunk of vm
23 * space during mmap's. 23 * space during mmap's.
24 */ 24 */
25#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1) 25#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
26 26
27#define STACK_TOP TASK_SIZE 27#define STACK_TOP TASK_SIZE
28#define STACK_TOP_MAX STACK_TOP 28#define STACK_TOP_MAX STACK_TOP
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index 6a92a2fe198e..dac98348c6a3 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -39,6 +39,7 @@ void asm_offsets(void)
39 OFFSET(TASK_STACK, task_struct, stack); 39 OFFSET(TASK_STACK, task_struct, stack);
40 OFFSET(TASK_TI, task_struct, thread_info); 40 OFFSET(TASK_TI, task_struct, thread_info);
41 OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags); 41 OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
42 OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
42 OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp); 43 OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
43 OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp); 44 OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
44 OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu); 45 OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 355166f57205..fd9b57c8b4ce 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -144,6 +144,10 @@ _save_context:
144 REG_L x2, PT_SP(sp) 144 REG_L x2, PT_SP(sp)
145 .endm 145 .endm
146 146
147#if !IS_ENABLED(CONFIG_PREEMPT)
148.set resume_kernel, restore_all
149#endif
150
147ENTRY(handle_exception) 151ENTRY(handle_exception)
148 SAVE_ALL 152 SAVE_ALL
149 153
@@ -228,7 +232,7 @@ ret_from_exception:
228 REG_L s0, PT_SSTATUS(sp) 232 REG_L s0, PT_SSTATUS(sp)
229 csrc sstatus, SR_SIE 233 csrc sstatus, SR_SIE
230 andi s0, s0, SR_SPP 234 andi s0, s0, SR_SPP
231 bnez s0, restore_all 235 bnez s0, resume_kernel
232 236
233resume_userspace: 237resume_userspace:
234 /* Interrupts must be disabled here so flags are checked atomically */ 238 /* Interrupts must be disabled here so flags are checked atomically */
@@ -250,6 +254,18 @@ restore_all:
250 RESTORE_ALL 254 RESTORE_ALL
251 sret 255 sret
252 256
257#if IS_ENABLED(CONFIG_PREEMPT)
258resume_kernel:
259 REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
260 bnez s0, restore_all
261need_resched:
262 REG_L s0, TASK_TI_FLAGS(tp)
263 andi s0, s0, _TIF_NEED_RESCHED
264 beqz s0, restore_all
265 call preempt_schedule_irq
266 j need_resched
267#endif
268
253work_pending: 269work_pending:
254 /* Enter slow path for supplementary processing */ 270 /* Enter slow path for supplementary processing */
255 la ra, ret_from_exception 271 la ra, ret_from_exception
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 6e079e94b638..77564310235f 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -181,7 +181,7 @@ static void __init setup_bootmem(void)
181 BUG_ON(mem_size == 0); 181 BUG_ON(mem_size == 0);
182 182
183 set_max_mapnr(PFN_DOWN(mem_size)); 183 set_max_mapnr(PFN_DOWN(mem_size));
184 max_low_pfn = memblock_end_of_DRAM(); 184 max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
185 185
186#ifdef CONFIG_BLK_DEV_INITRD 186#ifdef CONFIG_BLK_DEV_INITRD
187 setup_initrd(); 187 setup_initrd();
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index fc185ecabb0a..18cda0e8cf94 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -57,15 +57,12 @@ void __init setup_smp(void)
57 57
58 while ((dn = of_find_node_by_type(dn, "cpu"))) { 58 while ((dn = of_find_node_by_type(dn, "cpu"))) {
59 hart = riscv_of_processor_hartid(dn); 59 hart = riscv_of_processor_hartid(dn);
60 if (hart < 0) { 60 if (hart < 0)
61 of_node_put(dn);
62 continue; 61 continue;
63 }
64 62
65 if (hart == cpuid_to_hartid_map(0)) { 63 if (hart == cpuid_to_hartid_map(0)) {
66 BUG_ON(found_boot_cpu); 64 BUG_ON(found_boot_cpu);
67 found_boot_cpu = 1; 65 found_boot_cpu = 1;
68 of_node_put(dn);
69 continue; 66 continue;
70 } 67 }
71 68
@@ -73,7 +70,6 @@ void __init setup_smp(void)
73 set_cpu_possible(cpuid, true); 70 set_cpu_possible(cpuid, true);
74 set_cpu_present(cpuid, true); 71 set_cpu_present(cpuid, true);
75 cpuid++; 72 cpuid++;
76 of_node_put(dn);
77 } 73 }
78 74
79 BUG_ON(!found_boot_cpu); 75 BUG_ON(!found_boot_cpu);
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 1d9bfaff60bc..658ebf645f42 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -28,7 +28,8 @@ static void __init zone_sizes_init(void)
28 unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; 28 unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
29 29
30#ifdef CONFIG_ZONE_DMA32 30#ifdef CONFIG_ZONE_DMA32
31 max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn)); 31 max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
32 (unsigned long) PFN_PHYS(max_low_pfn)));
32#endif 33#endif
33 max_zone_pfns[ZONE_NORMAL] = max_low_pfn; 34 max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
34 35
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 1372553dc0a9..1d1544b6ca74 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -28,6 +28,7 @@ generic-y += preempt.h
28generic-y += sections.h 28generic-y += sections.h
29generic-y += segment.h 29generic-y += segment.h
30generic-y += serial.h 30generic-y += serial.h
31generic-y += shmparam.h
31generic-y += sizes.h 32generic-y += sizes.h
32generic-y += syscalls.h 33generic-y += syscalls.h
33generic-y += topology.h 34generic-y += topology.h
diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild
index 6c6f6301012e..0febf1a07c30 100644
--- a/arch/unicore32/include/uapi/asm/Kbuild
+++ b/arch/unicore32/include/uapi/asm/Kbuild
@@ -1,5 +1,4 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3generic-y += kvm_para.h 3generic-y += kvm_para.h
4generic-y += shmparam.h
5generic-y += ucontext.h 4generic-y += ucontext.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 26387c7bf305..68261430fe6e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -446,12 +446,12 @@ config RETPOLINE
446 branches. Requires a compiler with -mindirect-branch=thunk-extern 446 branches. Requires a compiler with -mindirect-branch=thunk-extern
447 support for full protection. The kernel may run slower. 447 support for full protection. The kernel may run slower.
448 448
449config X86_RESCTRL 449config X86_CPU_RESCTRL
450 bool "Resource Control support" 450 bool "x86 CPU resource control support"
451 depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) 451 depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
452 select KERNFS 452 select KERNFS
453 help 453 help
454 Enable Resource Control support. 454 Enable x86 CPU resource control support.
455 455
456 Provide support for the allocation and monitoring of system resources 456 Provide support for the allocation and monitoring of system resources
457 usage by the CPU. 457 usage by the CPU.
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 64037895b085..f105ae8651c9 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -600,6 +600,14 @@ ENTRY(trampoline_32bit_src)
600 leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax 600 leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
601 movl %eax, %cr3 601 movl %eax, %cr3
6023: 6023:
603 /* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */
604 pushl %ecx
605 movl $MSR_EFER, %ecx
606 rdmsr
607 btsl $_EFER_LME, %eax
608 wrmsr
609 popl %ecx
610
603 /* Enable PAE and LA57 (if required) paging modes */ 611 /* Enable PAE and LA57 (if required) paging modes */
604 movl $X86_CR4_PAE, %eax 612 movl $X86_CR4_PAE, %eax
605 cmpl $0, %edx 613 cmpl $0, %edx
diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
index 91f75638f6e6..6ff7e81b5628 100644
--- a/arch/x86/boot/compressed/pgtable.h
+++ b/arch/x86/boot/compressed/pgtable.h
@@ -6,7 +6,7 @@
6#define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0 6#define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0
7 7
8#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE 8#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
9#define TRAMPOLINE_32BIT_CODE_SIZE 0x60 9#define TRAMPOLINE_32BIT_CODE_SIZE 0x70
10 10
11#define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE 11#define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE
12 12
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 0dd6b0f4000e..d9a9993af882 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -6,7 +6,7 @@
6 * "Big Core" Processors (Branded as Core, Xeon, etc...) 6 * "Big Core" Processors (Branded as Core, Xeon, etc...)
7 * 7 *
8 * The "_X" parts are generally the EP and EX Xeons, or the 8 * The "_X" parts are generally the EP and EX Xeons, or the
9 * "Extreme" ones, like Broadwell-E. 9 * "Extreme" ones, like Broadwell-E, or Atom microserver.
10 * 10 *
11 * While adding a new CPUID for a new microarchitecture, add a new 11 * While adding a new CPUID for a new microarchitecture, add a new
12 * group to keep logically sorted out in chronological order. Within 12 * group to keep logically sorted out in chronological order. Within
@@ -71,6 +71,7 @@
71#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ 71#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
72#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */ 72#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
73#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ 73#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
74#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
74 75
75/* Xeon Phi */ 76/* Xeon Phi */
76 77
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8f657286d599..0ce558a8150d 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -7,7 +7,11 @@
7#endif 7#endif
8 8
9#ifdef CONFIG_KASAN 9#ifdef CONFIG_KASAN
10#ifdef CONFIG_KASAN_EXTRA
11#define KASAN_STACK_ORDER 2
12#else
10#define KASAN_STACK_ORDER 1 13#define KASAN_STACK_ORDER 1
14#endif
11#else 15#else
12#define KASAN_STACK_ORDER 0 16#define KASAN_STACK_ORDER 0
13#endif 17#endif
diff --git a/arch/x86/include/asm/resctrl_sched.h b/arch/x86/include/asm/resctrl_sched.h
index 40ebddde6ac2..f6b7fe2833cc 100644
--- a/arch/x86/include/asm/resctrl_sched.h
+++ b/arch/x86/include/asm/resctrl_sched.h
@@ -2,7 +2,7 @@
2#ifndef _ASM_X86_RESCTRL_SCHED_H 2#ifndef _ASM_X86_RESCTRL_SCHED_H
3#define _ASM_X86_RESCTRL_SCHED_H 3#define _ASM_X86_RESCTRL_SCHED_H
4 4
5#ifdef CONFIG_X86_RESCTRL 5#ifdef CONFIG_X86_CPU_RESCTRL
6 6
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/jump_label.h> 8#include <linux/jump_label.h>
@@ -88,6 +88,6 @@ static inline void resctrl_sched_in(void)
88 88
89static inline void resctrl_sched_in(void) {} 89static inline void resctrl_sched_in(void) {}
90 90
91#endif /* CONFIG_X86_RESCTRL */ 91#endif /* CONFIG_X86_CPU_RESCTRL */
92 92
93#endif /* _ASM_X86_RESCTRL_SCHED_H */ 93#endif /* _ASM_X86_RESCTRL_SCHED_H */
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index b6fa0869f7aa..cfd24f9f7614 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -39,7 +39,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
39obj-$(CONFIG_X86_MCE) += mce/ 39obj-$(CONFIG_X86_MCE) += mce/
40obj-$(CONFIG_MTRR) += mtrr/ 40obj-$(CONFIG_MTRR) += mtrr/
41obj-$(CONFIG_MICROCODE) += microcode/ 41obj-$(CONFIG_MICROCODE) += microcode/
42obj-$(CONFIG_X86_RESCTRL) += resctrl/ 42obj-$(CONFIG_X86_CPU_RESCTRL) += resctrl/
43 43
44obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o 44obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
45 45
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 1de0f4170178..01874d54f4fd 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -71,7 +71,7 @@ void __init check_bugs(void)
71 * identify_boot_cpu() initialized SMT support information, let the 71 * identify_boot_cpu() initialized SMT support information, let the
72 * core code know. 72 * core code know.
73 */ 73 */
74 cpu_smt_check_topology_early(); 74 cpu_smt_check_topology();
75 75
76 if (!IS_ENABLED(CONFIG_SMP)) { 76 if (!IS_ENABLED(CONFIG_SMP)) {
77 pr_info("CPU: "); 77 pr_info("CPU: ");
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 51adde0a0f1a..e1f3ba19ba54 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -855,7 +855,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
855 if (!p) { 855 if (!p) {
856 return ret; 856 return ret;
857 } else { 857 } else {
858 if (boot_cpu_data.microcode == p->patch_id) 858 if (boot_cpu_data.microcode >= p->patch_id)
859 return ret; 859 return ret;
860 860
861 ret = UCODE_NEW; 861 ret = UCODE_NEW;
diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile
index 1cabe6fd8e11..4a06c37b9cf1 100644
--- a/arch/x86/kernel/cpu/resctrl/Makefile
+++ b/arch/x86/kernel/cpu/resctrl/Makefile
@@ -1,4 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_X86_RESCTRL) += core.o rdtgroup.o monitor.o 2obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o
3obj-$(CONFIG_X86_RESCTRL) += ctrlmondata.o pseudo_lock.o 3obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o pseudo_lock.o
4CFLAGS_pseudo_lock.o = -I$(src) 4CFLAGS_pseudo_lock.o = -I$(src)
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 0d5efa34f359..53917a3ebf94 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
167 struct efi_info *current_ei = &boot_params.efi_info; 167 struct efi_info *current_ei = &boot_params.efi_info;
168 struct efi_info *ei = &params->efi_info; 168 struct efi_info *ei = &params->efi_info;
169 169
170 if (!efi_enabled(EFI_RUNTIME_SERVICES))
171 return 0;
172
170 if (!current_ei->efi_memmap_size) 173 if (!current_ei->efi_memmap_size)
171 return 0; 174 return 0;
172 175
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 8ff20523661b..d8ea4ebd79e7 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -211,6 +211,7 @@ static void free_nested(struct kvm_vcpu *vcpu)
211 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) 211 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
212 return; 212 return;
213 213
214 hrtimer_cancel(&vmx->nested.preemption_timer);
214 vmx->nested.vmxon = false; 215 vmx->nested.vmxon = false;
215 vmx->nested.smm.vmxon = false; 216 vmx->nested.smm.vmxon = false;
216 free_vpid(vmx->nested.vpid02); 217 free_vpid(vmx->nested.vpid02);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4341175339f3..95d618045001 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -26,6 +26,7 @@
26#include <linux/mod_devicetable.h> 26#include <linux/mod_devicetable.h>
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/sched/smt.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
30#include <linux/tboot.h> 31#include <linux/tboot.h>
31#include <linux/trace_events.h> 32#include <linux/trace_events.h>
@@ -6823,7 +6824,7 @@ static int vmx_vm_init(struct kvm *kvm)
6823 * Warn upon starting the first VM in a potentially 6824 * Warn upon starting the first VM in a potentially
6824 * insecure environment. 6825 * insecure environment.
6825 */ 6826 */
6826 if (cpu_smt_control == CPU_SMT_ENABLED) 6827 if (sched_smt_active())
6827 pr_warn_once(L1TF_MSG_SMT); 6828 pr_warn_once(L1TF_MSG_SMT);
6828 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) 6829 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
6829 pr_warn_once(L1TF_MSG_L1D); 6830 pr_warn_once(L1TF_MSG_L1D);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3d27206f6c01..e67ecf25e690 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5116,6 +5116,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
5116{ 5116{
5117 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 5117 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
5118 5118
5119 /*
5120 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
5121 * is returned, but our callers are not ready for that and they blindly
5122 * call kvm_inject_page_fault. Ensure that they at least do not leak
5123 * uninitialized kernel stack memory into cr2 and error code.
5124 */
5125 memset(exception, 0, sizeof(*exception));
5119 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 5126 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
5120 exception); 5127 exception);
5121} 5128}
diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c
index 66894675f3c8..df50451d94ef 100644
--- a/arch/x86/lib/iomem.c
+++ b/arch/x86/lib/iomem.c
@@ -2,8 +2,11 @@
2#include <linux/module.h> 2#include <linux/module.h>
3#include <linux/io.h> 3#include <linux/io.h>
4 4
5#define movs(type,to,from) \
6 asm volatile("movs" type:"=&D" (to), "=&S" (from):"0" (to), "1" (from):"memory")
7
5/* Originally from i386/string.h */ 8/* Originally from i386/string.h */
6static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n) 9static __always_inline void rep_movs(void *to, const void *from, size_t n)
7{ 10{
8 unsigned long d0, d1, d2; 11 unsigned long d0, d1, d2;
9 asm volatile("rep ; movsl\n\t" 12 asm volatile("rep ; movsl\n\t"
@@ -21,13 +24,37 @@ static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n)
21 24
22void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) 25void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
23{ 26{
24 __iomem_memcpy(to, (const void *)from, n); 27 if (unlikely(!n))
28 return;
29
30 /* Align any unaligned source IO */
31 if (unlikely(1 & (unsigned long)from)) {
32 movs("b", to, from);
33 n--;
34 }
35 if (n > 1 && unlikely(2 & (unsigned long)from)) {
36 movs("w", to, from);
37 n-=2;
38 }
39 rep_movs(to, (const void *)from, n);
25} 40}
26EXPORT_SYMBOL(memcpy_fromio); 41EXPORT_SYMBOL(memcpy_fromio);
27 42
28void memcpy_toio(volatile void __iomem *to, const void *from, size_t n) 43void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
29{ 44{
30 __iomem_memcpy((void *)to, (const void *) from, n); 45 if (unlikely(!n))
46 return;
47
48 /* Align any unaligned destination IO */
49 if (unlikely(1 & (unsigned long)to)) {
50 movs("b", to, from);
51 n--;
52 }
53 if (n > 1 && unlikely(2 & (unsigned long)to)) {
54 movs("w", to, from);
55 n-=2;
56 }
57 rep_movs((void *)to, (const void *) from, n);
31} 58}
32EXPORT_SYMBOL(memcpy_toio); 59EXPORT_SYMBOL(memcpy_toio);
33 60
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2ff25ad33233..9d5c75f02295 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -595,7 +595,7 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
595 return; 595 return;
596 } 596 }
597 597
598 addr = desc.base0 | (desc.base1 << 16) | (desc.base2 << 24); 598 addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
599#ifdef CONFIG_X86_64 599#ifdef CONFIG_X86_64
600 addr |= ((u64)desc.base3 << 32); 600 addr |= ((u64)desc.base3 << 32);
601#endif 601#endif
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 20a0756f27ef..ce91682770cb 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -164,7 +164,7 @@ config XTENSA_FAKE_NMI
164 If unsure, say N. 164 If unsure, say N.
165 165
166config XTENSA_UNALIGNED_USER 166config XTENSA_UNALIGNED_USER
167 bool "Unaligned memory access in use space" 167 bool "Unaligned memory access in user space"
168 help 168 help
169 The Xtensa architecture currently does not handle unaligned 169 The Xtensa architecture currently does not handle unaligned
170 memory accesses in hardware but through an exception handler. 170 memory accesses in hardware but through an exception handler.
@@ -451,7 +451,7 @@ config USE_OF
451 help 451 help
452 Include support for flattened device tree machine descriptions. 452 Include support for flattened device tree machine descriptions.
453 453
454config BUILTIN_DTB 454config BUILTIN_DTB_SOURCE
455 string "DTB to build into the kernel image" 455 string "DTB to build into the kernel image"
456 depends on OF 456 depends on OF
457 457
diff --git a/arch/xtensa/boot/dts/Makefile b/arch/xtensa/boot/dts/Makefile
index f8052ba5aea8..0b8d00cdae7c 100644
--- a/arch/xtensa/boot/dts/Makefile
+++ b/arch/xtensa/boot/dts/Makefile
@@ -7,9 +7,9 @@
7# 7#
8# 8#
9 9
10BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o 10BUILTIN_DTB_SOURCE := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
11ifneq ($(CONFIG_BUILTIN_DTB),"") 11ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"")
12obj-$(CONFIG_OF) += $(BUILTIN_DTB) 12obj-$(CONFIG_OF) += $(BUILTIN_DTB_SOURCE)
13endif 13endif
14 14
15# for CONFIG_OF_ALL_DTBS test 15# for CONFIG_OF_ALL_DTBS test
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig
index 2bf964df37ba..f378e56f9ce6 100644
--- a/arch/xtensa/configs/audio_kc705_defconfig
+++ b/arch/xtensa/configs/audio_kc705_defconfig
@@ -34,7 +34,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
34CONFIG_CMDLINE_BOOL=y 34CONFIG_CMDLINE_BOOL=y
35CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" 35CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
36CONFIG_USE_OF=y 36CONFIG_USE_OF=y
37CONFIG_BUILTIN_DTB="kc705" 37CONFIG_BUILTIN_DTB_SOURCE="kc705"
38# CONFIG_COMPACTION is not set 38# CONFIG_COMPACTION is not set
39# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 39# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
40CONFIG_PM=y 40CONFIG_PM=y
diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig
index 3221b7053fa3..62f32a902568 100644
--- a/arch/xtensa/configs/cadence_csp_defconfig
+++ b/arch/xtensa/configs/cadence_csp_defconfig
@@ -38,7 +38,7 @@ CONFIG_HIGHMEM=y
38# CONFIG_PCI is not set 38# CONFIG_PCI is not set
39CONFIG_XTENSA_PLATFORM_XTFPGA=y 39CONFIG_XTENSA_PLATFORM_XTFPGA=y
40CONFIG_USE_OF=y 40CONFIG_USE_OF=y
41CONFIG_BUILTIN_DTB="csp" 41CONFIG_BUILTIN_DTB_SOURCE="csp"
42# CONFIG_COMPACTION is not set 42# CONFIG_COMPACTION is not set
43CONFIG_XTFPGA_LCD=y 43CONFIG_XTFPGA_LCD=y
44# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 44# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
index 985fa8546e4e..8bebe07f1060 100644
--- a/arch/xtensa/configs/generic_kc705_defconfig
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -33,7 +33,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
33CONFIG_CMDLINE_BOOL=y 33CONFIG_CMDLINE_BOOL=y
34CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" 34CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
35CONFIG_USE_OF=y 35CONFIG_USE_OF=y
36CONFIG_BUILTIN_DTB="kc705" 36CONFIG_BUILTIN_DTB_SOURCE="kc705"
37# CONFIG_COMPACTION is not set 37# CONFIG_COMPACTION is not set
38# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 38# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
39CONFIG_NET=y 39CONFIG_NET=y
diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig
index f3fc4f970ca8..933ab2adf434 100644
--- a/arch/xtensa/configs/nommu_kc705_defconfig
+++ b/arch/xtensa/configs/nommu_kc705_defconfig
@@ -39,7 +39,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
39CONFIG_CMDLINE_BOOL=y 39CONFIG_CMDLINE_BOOL=y
40CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000" 40CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000"
41CONFIG_USE_OF=y 41CONFIG_USE_OF=y
42CONFIG_BUILTIN_DTB="kc705_nommu" 42CONFIG_BUILTIN_DTB_SOURCE="kc705_nommu"
43CONFIG_BINFMT_FLAT=y 43CONFIG_BINFMT_FLAT=y
44CONFIG_NET=y 44CONFIG_NET=y
45CONFIG_PACKET=y 45CONFIG_PACKET=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 11fed6c06a7c..e29c5b179a5b 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -33,11 +33,12 @@ CONFIG_SMP=y
33CONFIG_HOTPLUG_CPU=y 33CONFIG_HOTPLUG_CPU=y
34# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set 34# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
35# CONFIG_PCI is not set 35# CONFIG_PCI is not set
36CONFIG_VECTORS_OFFSET=0x00002000
36CONFIG_XTENSA_PLATFORM_XTFPGA=y 37CONFIG_XTENSA_PLATFORM_XTFPGA=y
37CONFIG_CMDLINE_BOOL=y 38CONFIG_CMDLINE_BOOL=y
38CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0" 39CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
39CONFIG_USE_OF=y 40CONFIG_USE_OF=y
40CONFIG_BUILTIN_DTB="lx200mx" 41CONFIG_BUILTIN_DTB_SOURCE="lx200mx"
41# CONFIG_COMPACTION is not set 42# CONFIG_COMPACTION is not set
42# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 43# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
43CONFIG_NET=y 44CONFIG_NET=y
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index da08e75100ab..7f009719304e 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -276,12 +276,13 @@ should_never_return:
276 276
277 movi a2, cpu_start_ccount 277 movi a2, cpu_start_ccount
2781: 2781:
279 memw
279 l32i a3, a2, 0 280 l32i a3, a2, 0
280 beqi a3, 0, 1b 281 beqi a3, 0, 1b
281 movi a3, 0 282 movi a3, 0
282 s32i a3, a2, 0 283 s32i a3, a2, 0
283 memw
2841: 2841:
285 memw
285 l32i a3, a2, 0 286 l32i a3, a2, 0
286 beqi a3, 0, 1b 287 beqi a3, 0, 1b
287 wsr a3, ccount 288 wsr a3, ccount
@@ -317,11 +318,13 @@ ENTRY(cpu_restart)
317 rsr a0, prid 318 rsr a0, prid
318 neg a2, a0 319 neg a2, a0
319 movi a3, cpu_start_id 320 movi a3, cpu_start_id
321 memw
320 s32i a2, a3, 0 322 s32i a2, a3, 0
321#if XCHAL_DCACHE_IS_WRITEBACK 323#if XCHAL_DCACHE_IS_WRITEBACK
322 dhwbi a3, 0 324 dhwbi a3, 0
323#endif 325#endif
3241: 3261:
327 memw
325 l32i a2, a3, 0 328 l32i a2, a3, 0
326 dhi a3, 0 329 dhi a3, 0
327 bne a2, a0, 1b 330 bne a2, a0, 1b
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 932d64689bac..be1f280c322c 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -83,7 +83,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
83{ 83{
84 unsigned i; 84 unsigned i;
85 85
86 for (i = 0; i < max_cpus; ++i) 86 for_each_possible_cpu(i)
87 set_cpu_present(i, true); 87 set_cpu_present(i, true);
88} 88}
89 89
@@ -96,6 +96,11 @@ void __init smp_init_cpus(void)
96 pr_info("%s: Core Count = %d\n", __func__, ncpus); 96 pr_info("%s: Core Count = %d\n", __func__, ncpus);
97 pr_info("%s: Core Id = %d\n", __func__, core_id); 97 pr_info("%s: Core Id = %d\n", __func__, core_id);
98 98
99 if (ncpus > NR_CPUS) {
100 ncpus = NR_CPUS;
101 pr_info("%s: limiting core count by %d\n", __func__, ncpus);
102 }
103
99 for (i = 0; i < ncpus; ++i) 104 for (i = 0; i < ncpus; ++i)
100 set_cpu_possible(i, true); 105 set_cpu_possible(i, true);
101} 106}
@@ -195,9 +200,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
195 int i; 200 int i;
196 201
197#ifdef CONFIG_HOTPLUG_CPU 202#ifdef CONFIG_HOTPLUG_CPU
198 cpu_start_id = cpu; 203 WRITE_ONCE(cpu_start_id, cpu);
199 system_flush_invalidate_dcache_range( 204 /* Pairs with the third memw in the cpu_restart */
200 (unsigned long)&cpu_start_id, sizeof(cpu_start_id)); 205 mb();
206 system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
207 sizeof(cpu_start_id));
201#endif 208#endif
202 smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); 209 smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
203 210
@@ -206,18 +213,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
206 ccount = get_ccount(); 213 ccount = get_ccount();
207 while (!ccount); 214 while (!ccount);
208 215
209 cpu_start_ccount = ccount; 216 WRITE_ONCE(cpu_start_ccount, ccount);
210 217
211 while (time_before(jiffies, timeout)) { 218 do {
219 /*
220 * Pairs with the first two memws in the
221 * .Lboot_secondary.
222 */
212 mb(); 223 mb();
213 if (!cpu_start_ccount) 224 ccount = READ_ONCE(cpu_start_ccount);
214 break; 225 } while (ccount && time_before(jiffies, timeout));
215 }
216 226
217 if (cpu_start_ccount) { 227 if (ccount) {
218 smp_call_function_single(0, mx_cpu_stop, 228 smp_call_function_single(0, mx_cpu_stop,
219 (void *)cpu, 1); 229 (void *)cpu, 1);
220 cpu_start_ccount = 0; 230 WRITE_ONCE(cpu_start_ccount, 0);
221 return -EIO; 231 return -EIO;
222 } 232 }
223 } 233 }
@@ -237,6 +247,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
237 pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n", 247 pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
238 __func__, cpu, idle, start_info.stack); 248 __func__, cpu, idle, start_info.stack);
239 249
250 init_completion(&cpu_running);
240 ret = boot_secondary(cpu, idle); 251 ret = boot_secondary(cpu, idle);
241 if (ret == 0) { 252 if (ret == 0) {
242 wait_for_completion_timeout(&cpu_running, 253 wait_for_completion_timeout(&cpu_running,
@@ -298,8 +309,10 @@ void __cpu_die(unsigned int cpu)
298 unsigned long timeout = jiffies + msecs_to_jiffies(1000); 309 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
299 while (time_before(jiffies, timeout)) { 310 while (time_before(jiffies, timeout)) {
300 system_invalidate_dcache_range((unsigned long)&cpu_start_id, 311 system_invalidate_dcache_range((unsigned long)&cpu_start_id,
301 sizeof(cpu_start_id)); 312 sizeof(cpu_start_id));
302 if (cpu_start_id == -cpu) { 313 /* Pairs with the second memw in the cpu_restart */
314 mb();
315 if (READ_ONCE(cpu_start_id) == -cpu) {
303 platform_cpu_kill(cpu); 316 platform_cpu_kill(cpu);
304 return; 317 return;
305 } 318 }
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index fd524a54d2ab..378186b5eb40 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -89,7 +89,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt)
89 container_of(evt, struct ccount_timer, evt); 89 container_of(evt, struct ccount_timer, evt);
90 90
91 if (timer->irq_enabled) { 91 if (timer->irq_enabled) {
92 disable_irq(evt->irq); 92 disable_irq_nosync(evt->irq);
93 timer->irq_enabled = 0; 93 timer->irq_enabled = 0;
94 } 94 }
95 return 0; 95 return 0;
diff --git a/block/blk-core.c b/block/blk-core.c
index 3c5f61ceeb67..6b78ec56a4f2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -462,6 +462,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
462 kblockd_schedule_work(&q->timeout_work); 462 kblockd_schedule_work(&q->timeout_work);
463} 463}
464 464
465static void blk_timeout_work(struct work_struct *work)
466{
467}
468
465/** 469/**
466 * blk_alloc_queue_node - allocate a request queue 470 * blk_alloc_queue_node - allocate a request queue
467 * @gfp_mask: memory allocation flags 471 * @gfp_mask: memory allocation flags
@@ -505,7 +509,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
505 timer_setup(&q->backing_dev_info->laptop_mode_wb_timer, 509 timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
506 laptop_mode_timer_fn, 0); 510 laptop_mode_timer_fn, 0);
507 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0); 511 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
508 INIT_WORK(&q->timeout_work, NULL); 512 INIT_WORK(&q->timeout_work, blk_timeout_work);
509 INIT_LIST_HEAD(&q->icq_list); 513 INIT_LIST_HEAD(&q->icq_list);
510#ifdef CONFIG_BLK_CGROUP 514#ifdef CONFIG_BLK_CGROUP
511 INIT_LIST_HEAD(&q->blkg_list); 515 INIT_LIST_HEAD(&q->blkg_list);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index a3fc7191c694..6e0f2d97fc6d 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -335,7 +335,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
335 blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error); 335 blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
336 spin_unlock_irqrestore(&fq->mq_flush_lock, flags); 336 spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
337 337
338 blk_mq_run_hw_queue(hctx, true); 338 blk_mq_sched_restart(hctx);
339} 339}
340 340
341/** 341/**
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index f8120832ca7b..7921573aebbc 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -839,6 +839,9 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
839static bool debugfs_create_files(struct dentry *parent, void *data, 839static bool debugfs_create_files(struct dentry *parent, void *data,
840 const struct blk_mq_debugfs_attr *attr) 840 const struct blk_mq_debugfs_attr *attr)
841{ 841{
842 if (IS_ERR_OR_NULL(parent))
843 return false;
844
842 d_inode(parent)->i_private = data; 845 d_inode(parent)->i_private = data;
843 846
844 for (; attr->name; attr++) { 847 for (; attr->name; attr++) {
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index cdfc87629efb..4d2b2ad1ee0e 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -5854,9 +5854,10 @@ static int __init init_binder_device(const char *name)
5854static int __init binder_init(void) 5854static int __init binder_init(void)
5855{ 5855{
5856 int ret; 5856 int ret;
5857 char *device_name, *device_names, *device_tmp; 5857 char *device_name, *device_tmp;
5858 struct binder_device *device; 5858 struct binder_device *device;
5859 struct hlist_node *tmp; 5859 struct hlist_node *tmp;
5860 char *device_names = NULL;
5860 5861
5861 ret = binder_alloc_shrinker_init(); 5862 ret = binder_alloc_shrinker_init();
5862 if (ret) 5863 if (ret)
@@ -5898,23 +5899,29 @@ static int __init binder_init(void)
5898 &transaction_log_fops); 5899 &transaction_log_fops);
5899 } 5900 }
5900 5901
5901 /* 5902 if (strcmp(binder_devices_param, "") != 0) {
5902 * Copy the module_parameter string, because we don't want to 5903 /*
5903 * tokenize it in-place. 5904 * Copy the module_parameter string, because we don't want to
5904 */ 5905 * tokenize it in-place.
5905 device_names = kstrdup(binder_devices_param, GFP_KERNEL); 5906 */
5906 if (!device_names) { 5907 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
5907 ret = -ENOMEM; 5908 if (!device_names) {
5908 goto err_alloc_device_names_failed; 5909 ret = -ENOMEM;
5909 } 5910 goto err_alloc_device_names_failed;
5911 }
5910 5912
5911 device_tmp = device_names; 5913 device_tmp = device_names;
5912 while ((device_name = strsep(&device_tmp, ","))) { 5914 while ((device_name = strsep(&device_tmp, ","))) {
5913 ret = init_binder_device(device_name); 5915 ret = init_binder_device(device_name);
5914 if (ret) 5916 if (ret)
5915 goto err_init_binder_device_failed; 5917 goto err_init_binder_device_failed;
5918 }
5916 } 5919 }
5917 5920
5921 ret = init_binderfs();
5922 if (ret)
5923 goto err_init_binder_device_failed;
5924
5918 return ret; 5925 return ret;
5919 5926
5920err_init_binder_device_failed: 5927err_init_binder_device_failed:
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 7fb97f503ef2..045b3e42d98b 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -46,4 +46,13 @@ static inline bool is_binderfs_device(const struct inode *inode)
46} 46}
47#endif 47#endif
48 48
49#ifdef CONFIG_ANDROID_BINDERFS
50extern int __init init_binderfs(void);
51#else
52static inline int __init init_binderfs(void)
53{
54 return 0;
55}
56#endif
57
49#endif /* _LINUX_BINDER_INTERNAL_H */ 58#endif /* _LINUX_BINDER_INTERNAL_H */
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 6a2185eb66c5..e773f45d19d9 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -395,6 +395,11 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
395 struct inode *inode = NULL; 395 struct inode *inode = NULL;
396 struct dentry *root = sb->s_root; 396 struct dentry *root = sb->s_root;
397 struct binderfs_info *info = sb->s_fs_info; 397 struct binderfs_info *info = sb->s_fs_info;
398#if defined(CONFIG_IPC_NS)
399 bool use_reserve = (info->ipc_ns == &init_ipc_ns);
400#else
401 bool use_reserve = true;
402#endif
398 403
399 device = kzalloc(sizeof(*device), GFP_KERNEL); 404 device = kzalloc(sizeof(*device), GFP_KERNEL);
400 if (!device) 405 if (!device)
@@ -413,7 +418,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
413 418
414 /* Reserve a new minor number for the new device. */ 419 /* Reserve a new minor number for the new device. */
415 mutex_lock(&binderfs_minors_mutex); 420 mutex_lock(&binderfs_minors_mutex);
416 minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); 421 minor = ida_alloc_max(&binderfs_minors,
422 use_reserve ? BINDERFS_MAX_MINOR :
423 BINDERFS_MAX_MINOR_CAPPED,
424 GFP_KERNEL);
417 mutex_unlock(&binderfs_minors_mutex); 425 mutex_unlock(&binderfs_minors_mutex);
418 if (minor < 0) { 426 if (minor < 0) {
419 ret = minor; 427 ret = minor;
@@ -542,7 +550,7 @@ static struct file_system_type binder_fs_type = {
542 .fs_flags = FS_USERNS_MOUNT, 550 .fs_flags = FS_USERNS_MOUNT,
543}; 551};
544 552
545static int __init init_binderfs(void) 553int __init init_binderfs(void)
546{ 554{
547 int ret; 555 int ret;
548 556
@@ -560,5 +568,3 @@ static int __init init_binderfs(void)
560 568
561 return ret; 569 return ret;
562} 570}
563
564device_initcall(init_binderfs);
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index cf78fa6d470d..a7359535caf5 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
79 ct_idx = get_cacheinfo_idx(this_leaf->type); 79 ct_idx = get_cacheinfo_idx(this_leaf->type);
80 propname = cache_type_info[ct_idx].size_prop; 80 propname = cache_type_info[ct_idx].size_prop;
81 81
82 if (of_property_read_u32(np, propname, &this_leaf->size)) 82 of_property_read_u32(np, propname, &this_leaf->size);
83 this_leaf->size = 0;
84} 83}
85 84
86/* not cache_line_size() because that's a macro in include/linux/cache.h */ 85/* not cache_line_size() because that's a macro in include/linux/cache.h */
@@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
114 ct_idx = get_cacheinfo_idx(this_leaf->type); 113 ct_idx = get_cacheinfo_idx(this_leaf->type);
115 propname = cache_type_info[ct_idx].nr_sets_prop; 114 propname = cache_type_info[ct_idx].nr_sets_prop;
116 115
117 if (of_property_read_u32(np, propname, &this_leaf->number_of_sets)) 116 of_property_read_u32(np, propname, &this_leaf->number_of_sets);
118 this_leaf->number_of_sets = 0;
119} 117}
120 118
121static void cache_associativity(struct cacheinfo *this_leaf) 119static void cache_associativity(struct cacheinfo *this_leaf)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 457be03b744d..0ea2139c50d8 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -130,7 +130,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev)
130{ 130{
131 int autosuspend_delay; 131 int autosuspend_delay;
132 u64 last_busy, expires = 0; 132 u64 last_busy, expires = 0;
133 u64 now = ktime_to_ns(ktime_get()); 133 u64 now = ktime_get_mono_fast_ns();
134 134
135 if (!dev->power.use_autosuspend) 135 if (!dev->power.use_autosuspend)
136 goto out; 136 goto out;
@@ -909,7 +909,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
909 * If 'expires' is after the current time, we've been called 909 * If 'expires' is after the current time, we've been called
910 * too early. 910 * too early.
911 */ 911 */
912 if (expires > 0 && expires < ktime_to_ns(ktime_get())) { 912 if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
913 dev->power.timer_expires = 0; 913 dev->power.timer_expires = 0;
914 rpm_suspend(dev, dev->power.timer_autosuspends ? 914 rpm_suspend(dev, dev->power.timer_autosuspends ?
915 (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); 915 (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
@@ -928,7 +928,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
928int pm_schedule_suspend(struct device *dev, unsigned int delay) 928int pm_schedule_suspend(struct device *dev, unsigned int delay)
929{ 929{
930 unsigned long flags; 930 unsigned long flags;
931 ktime_t expires; 931 u64 expires;
932 int retval; 932 int retval;
933 933
934 spin_lock_irqsave(&dev->power.lock, flags); 934 spin_lock_irqsave(&dev->power.lock, flags);
@@ -945,8 +945,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
945 /* Other scheduled or pending requests need to be canceled. */ 945 /* Other scheduled or pending requests need to be canceled. */
946 pm_runtime_cancel_pending(dev); 946 pm_runtime_cancel_pending(dev);
947 947
948 expires = ktime_add(ktime_get(), ms_to_ktime(delay)); 948 expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
949 dev->power.timer_expires = ktime_to_ns(expires); 949 dev->power.timer_expires = expires;
950 dev->power.timer_autosuspends = 0; 950 dev->power.timer_autosuspends = 0;
951 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); 951 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
952 952
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 6ccdbedb02f3..d2477a5058ac 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1513,9 +1513,19 @@ static int clk_fetch_parent_index(struct clk_core *core,
1513 if (!parent) 1513 if (!parent)
1514 return -EINVAL; 1514 return -EINVAL;
1515 1515
1516 for (i = 0; i < core->num_parents; i++) 1516 for (i = 0; i < core->num_parents; i++) {
1517 if (clk_core_get_parent_by_index(core, i) == parent) 1517 if (core->parents[i] == parent)
1518 return i;
1519
1520 if (core->parents[i])
1521 continue;
1522
1523 /* Fallback to comparing globally unique names */
1524 if (!strcmp(parent->name, core->parent_names[i])) {
1525 core->parents[i] = parent;
1518 return i; 1526 return i;
1527 }
1528 }
1519 1529
1520 return -EINVAL; 1530 return -EINVAL;
1521} 1531}
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
index 0026c3969b1e..76b9eb15604e 100644
--- a/drivers/clk/imx/clk-frac-pll.c
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -155,13 +155,14 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
155{ 155{
156 struct clk_frac_pll *pll = to_clk_frac_pll(hw); 156 struct clk_frac_pll *pll = to_clk_frac_pll(hw);
157 u32 val, divfi, divff; 157 u32 val, divfi, divff;
158 u64 temp64 = parent_rate; 158 u64 temp64;
159 int ret; 159 int ret;
160 160
161 parent_rate *= 8; 161 parent_rate *= 8;
162 rate *= 2; 162 rate *= 2;
163 divfi = rate / parent_rate; 163 divfi = rate / parent_rate;
164 temp64 *= rate - divfi; 164 temp64 = parent_rate * divfi;
165 temp64 = rate - temp64;
165 temp64 *= PLL_FRAC_DENOM; 166 temp64 *= PLL_FRAC_DENOM;
166 do_div(temp64, parent_rate); 167 do_div(temp64, parent_rate);
167 divff = temp64; 168 divff = temp64;
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 61fefc046ec5..d083b860f083 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -53,7 +53,6 @@
53#define APMU_DISP1 0x110 53#define APMU_DISP1 0x110
54#define APMU_CCIC0 0x50 54#define APMU_CCIC0 0x50
55#define APMU_CCIC1 0xf4 55#define APMU_CCIC1 0xf4
56#define APMU_SP 0x68
57#define MPMU_UART_PLL 0x14 56#define MPMU_UART_PLL 0x14
58 57
59struct mmp2_clk_unit { 58struct mmp2_clk_unit {
@@ -210,8 +209,6 @@ static struct mmp_clk_mix_config ccic1_mix_config = {
210 .reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32), 209 .reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32),
211}; 210};
212 211
213static DEFINE_SPINLOCK(sp_lock);
214
215static struct mmp_param_mux_clk apmu_mux_clks[] = { 212static struct mmp_param_mux_clk apmu_mux_clks[] = {
216 {MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock}, 213 {MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock},
217 {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock}, 214 {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock},
@@ -242,7 +239,6 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
242 {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock}, 239 {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock},
243 {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock}, 240 {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock},
244 {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock}, 241 {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock},
245 {MMP2_CLK_SP, "sp_clk", NULL, CLK_SET_RATE_PARENT, APMU_SP, 0x1b, 0x1b, 0x0, 0, &sp_lock},
246}; 242};
247 243
248static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit) 244static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index c782e62dd98b..58fa5c247af1 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -115,8 +115,8 @@ static const char * const gcc_parent_names_6[] = {
115 "core_bi_pll_test_se", 115 "core_bi_pll_test_se",
116}; 116};
117 117
118static const char * const gcc_parent_names_7[] = { 118static const char * const gcc_parent_names_7_ao[] = {
119 "bi_tcxo", 119 "bi_tcxo_ao",
120 "gpll0", 120 "gpll0",
121 "gpll0_out_even", 121 "gpll0_out_even",
122 "core_bi_pll_test_se", 122 "core_bi_pll_test_se",
@@ -128,6 +128,12 @@ static const char * const gcc_parent_names_8[] = {
128 "core_bi_pll_test_se", 128 "core_bi_pll_test_se",
129}; 129};
130 130
131static const char * const gcc_parent_names_8_ao[] = {
132 "bi_tcxo_ao",
133 "gpll0",
134 "core_bi_pll_test_se",
135};
136
131static const struct parent_map gcc_parent_map_10[] = { 137static const struct parent_map gcc_parent_map_10[] = {
132 { P_BI_TCXO, 0 }, 138 { P_BI_TCXO, 0 },
133 { P_GPLL0_OUT_MAIN, 1 }, 139 { P_GPLL0_OUT_MAIN, 1 },
@@ -210,7 +216,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
210 .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, 216 .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
211 .clkr.hw.init = &(struct clk_init_data){ 217 .clkr.hw.init = &(struct clk_init_data){
212 .name = "gcc_cpuss_ahb_clk_src", 218 .name = "gcc_cpuss_ahb_clk_src",
213 .parent_names = gcc_parent_names_7, 219 .parent_names = gcc_parent_names_7_ao,
214 .num_parents = 4, 220 .num_parents = 4,
215 .ops = &clk_rcg2_ops, 221 .ops = &clk_rcg2_ops,
216 }, 222 },
@@ -229,7 +235,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
229 .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src, 235 .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
230 .clkr.hw.init = &(struct clk_init_data){ 236 .clkr.hw.init = &(struct clk_init_data){
231 .name = "gcc_cpuss_rbcpr_clk_src", 237 .name = "gcc_cpuss_rbcpr_clk_src",
232 .parent_names = gcc_parent_names_8, 238 .parent_names = gcc_parent_names_8_ao,
233 .num_parents = 3, 239 .num_parents = 3,
234 .ops = &clk_rcg2_ops, 240 .ops = &clk_rcg2_ops,
235 }, 241 },
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 8d77090ad94a..0241450f3eb3 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -403,8 +403,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
403 num_dividers = i; 403 num_dividers = i;
404 404
405 tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL); 405 tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
406 if (!tmp) 406 if (!tmp) {
407 *table = ERR_PTR(-ENOMEM);
407 return -ENOMEM; 408 return -ENOMEM;
409 }
408 410
409 valid_div = 0; 411 valid_div = 0;
410 *width = 0; 412 *width = 0;
@@ -439,6 +441,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
439{ 441{
440 struct clk_omap_divider *div; 442 struct clk_omap_divider *div;
441 struct clk_omap_reg *reg; 443 struct clk_omap_reg *reg;
444 int ret;
442 445
443 if (!setup) 446 if (!setup)
444 return NULL; 447 return NULL;
@@ -458,6 +461,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
458 div->flags |= CLK_DIVIDER_POWER_OF_TWO; 461 div->flags |= CLK_DIVIDER_POWER_OF_TWO;
459 462
460 div->table = _get_div_table_from_setup(setup, &div->width); 463 div->table = _get_div_table_from_setup(setup, &div->width);
464 if (IS_ERR(div->table)) {
465 ret = PTR_ERR(div->table);
466 kfree(div);
467 return ERR_PTR(ret);
468 }
469
461 470
462 div->shift = setup->bit_shift; 471 div->shift = setup->bit_shift;
463 div->latch = -EINVAL; 472 div->latch = -EINVAL;
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index b17d153e724f..23a1b27579a5 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -21,7 +21,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
21 local_irq_enable(); 21 local_irq_enable();
22 if (!current_set_polling_and_test()) { 22 if (!current_set_polling_and_test()) {
23 unsigned int loop_count = 0; 23 unsigned int loop_count = 0;
24 u64 limit = TICK_USEC; 24 u64 limit = TICK_NSEC;
25 int i; 25 int i;
26 26
27 for (i = 1; i < drv->state_count; i++) { 27 for (i = 1; i < drv->state_count; i++) {
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index fe070d75c842..4c97478d44bd 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -537,6 +537,8 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
537 struct nitrox_device *ndev = cmdq->ndev; 537 struct nitrox_device *ndev = cmdq->ndev;
538 struct nitrox_softreq *sr; 538 struct nitrox_softreq *sr;
539 int req_completed = 0, err = 0, budget; 539 int req_completed = 0, err = 0, budget;
540 completion_t callback;
541 void *cb_arg;
540 542
541 /* check all pending requests */ 543 /* check all pending requests */
542 budget = atomic_read(&cmdq->pending_count); 544 budget = atomic_read(&cmdq->pending_count);
@@ -564,13 +566,13 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
564 smp_mb__after_atomic(); 566 smp_mb__after_atomic();
565 /* remove from response list */ 567 /* remove from response list */
566 response_list_del(sr, cmdq); 568 response_list_del(sr, cmdq);
567
568 /* ORH error code */ 569 /* ORH error code */
569 err = READ_ONCE(*sr->resp.orh) & 0xff; 570 err = READ_ONCE(*sr->resp.orh) & 0xff;
570 571 callback = sr->callback;
571 if (sr->callback) 572 cb_arg = sr->cb_arg;
572 sr->callback(sr->cb_arg, err);
573 softreq_destroy(sr); 573 softreq_destroy(sr);
574 if (callback)
575 callback(cb_arg, err);
574 576
575 req_completed++; 577 req_completed++;
576 } 578 }
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 23ea1ed409d1..352bd2473162 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -37,8 +37,9 @@ extern u64 efi_system_table;
37static struct ptdump_info efi_ptdump_info = { 37static struct ptdump_info efi_ptdump_info = {
38 .mm = &efi_mm, 38 .mm = &efi_mm,
39 .markers = (struct addr_marker[]){ 39 .markers = (struct addr_marker[]){
40 { 0, "UEFI runtime start" }, 40 { 0, "UEFI runtime start" },
41 { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" } 41 { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" },
42 { -1, NULL }
42 }, 43 },
43 .base_addr = 0, 44 .base_addr = 0,
44}; 45};
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index a1a09e04fab8..13851b3d1c56 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -508,14 +508,11 @@ static int __init s10_init(void)
508 return -ENODEV; 508 return -ENODEV;
509 509
510 np = of_find_matching_node(fw_np, s10_of_match); 510 np = of_find_matching_node(fw_np, s10_of_match);
511 if (!np) { 511 if (!np)
512 of_node_put(fw_np);
513 return -ENODEV; 512 return -ENODEV;
514 }
515 513
516 of_node_put(np); 514 of_node_put(np);
517 ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL); 515 ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL);
518 of_node_put(fw_np);
519 if (ret) 516 if (ret)
520 return ret; 517 return ret;
521 518
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 6b11f1314248..7f9e0304b510 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -66,8 +66,10 @@ static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
66static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc, 66static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
67 unsigned int nr, int value) 67 unsigned int nr, int value)
68{ 68{
69 if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) 69 if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) {
70 altr_a10sr_gpio_set(gc, nr, value);
70 return 0; 71 return 0;
72 }
71 return -EINVAL; 73 return -EINVAL;
72} 74}
73 75
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index e0d6a0a7bc69..e41223c05f6e 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -180,7 +180,18 @@ static void sprd_eic_free(struct gpio_chip *chip, unsigned int offset)
180 180
181static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset) 181static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset)
182{ 182{
183 return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA); 183 struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
184
185 switch (sprd_eic->type) {
186 case SPRD_EIC_DEBOUNCE:
187 return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
188 case SPRD_EIC_ASYNC:
189 return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA);
190 case SPRD_EIC_SYNC:
191 return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA);
192 default:
193 return -ENOTSUPP;
194 }
184} 195}
185 196
186static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset) 197static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -368,6 +379,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
368 irq_set_handler_locked(data, handle_edge_irq); 379 irq_set_handler_locked(data, handle_edge_irq);
369 break; 380 break;
370 case IRQ_TYPE_EDGE_BOTH: 381 case IRQ_TYPE_EDGE_BOTH:
382 sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
371 sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1); 383 sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
372 irq_set_handler_locked(data, handle_edge_irq); 384 irq_set_handler_locked(data, handle_edge_irq);
373 break; 385 break;
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index adf72dda25a2..68a35b65925a 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -84,6 +84,7 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table);
84 */ 84 */
85struct pcf857x { 85struct pcf857x {
86 struct gpio_chip chip; 86 struct gpio_chip chip;
87 struct irq_chip irqchip;
87 struct i2c_client *client; 88 struct i2c_client *client;
88 struct mutex lock; /* protect 'out' */ 89 struct mutex lock; /* protect 'out' */
89 unsigned out; /* software latch */ 90 unsigned out; /* software latch */
@@ -252,18 +253,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data)
252 mutex_unlock(&gpio->lock); 253 mutex_unlock(&gpio->lock);
253} 254}
254 255
255static struct irq_chip pcf857x_irq_chip = {
256 .name = "pcf857x",
257 .irq_enable = pcf857x_irq_enable,
258 .irq_disable = pcf857x_irq_disable,
259 .irq_ack = noop,
260 .irq_mask = noop,
261 .irq_unmask = noop,
262 .irq_set_wake = pcf857x_irq_set_wake,
263 .irq_bus_lock = pcf857x_irq_bus_lock,
264 .irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
265};
266
267/*-------------------------------------------------------------------------*/ 256/*-------------------------------------------------------------------------*/
268 257
269static int pcf857x_probe(struct i2c_client *client, 258static int pcf857x_probe(struct i2c_client *client,
@@ -376,8 +365,17 @@ static int pcf857x_probe(struct i2c_client *client,
376 365
377 /* Enable irqchip if we have an interrupt */ 366 /* Enable irqchip if we have an interrupt */
378 if (client->irq) { 367 if (client->irq) {
368 gpio->irqchip.name = "pcf857x",
369 gpio->irqchip.irq_enable = pcf857x_irq_enable,
370 gpio->irqchip.irq_disable = pcf857x_irq_disable,
371 gpio->irqchip.irq_ack = noop,
372 gpio->irqchip.irq_mask = noop,
373 gpio->irqchip.irq_unmask = noop,
374 gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake,
375 gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock,
376 gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
379 status = gpiochip_irqchip_add_nested(&gpio->chip, 377 status = gpiochip_irqchip_add_nested(&gpio->chip,
380 &pcf857x_irq_chip, 378 &gpio->irqchip,
381 0, handle_level_irq, 379 0, handle_level_irq,
382 IRQ_TYPE_NONE); 380 IRQ_TYPE_NONE);
383 if (status) { 381 if (status) {
@@ -392,7 +390,7 @@ static int pcf857x_probe(struct i2c_client *client,
392 if (status) 390 if (status)
393 goto fail; 391 goto fail;
394 392
395 gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip, 393 gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip,
396 client->irq); 394 client->irq);
397 gpio->irq_parent = client->irq; 395 gpio->irq_parent = client->irq;
398 } 396 }
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 1b79ebcfce3e..541fa6ac399d 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -253,6 +253,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
253 struct vf610_gpio_port *port; 253 struct vf610_gpio_port *port;
254 struct resource *iores; 254 struct resource *iores;
255 struct gpio_chip *gc; 255 struct gpio_chip *gc;
256 int i;
256 int ret; 257 int ret;
257 258
258 port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL); 259 port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
@@ -319,6 +320,10 @@ static int vf610_gpio_probe(struct platform_device *pdev)
319 if (ret < 0) 320 if (ret < 0)
320 return ret; 321 return ret;
321 322
323 /* Mask all GPIO interrupts */
324 for (i = 0; i < gc->ngpio; i++)
325 vf610_gpio_writel(0, port->base + PORT_PCR(i));
326
322 /* Clear the interrupt status register for all GPIO's */ 327 /* Clear the interrupt status register for all GPIO's */
323 vf610_gpio_writel(~0, port->base + PORT_ISFR); 328 vf610_gpio_writel(~0, port->base + PORT_ISFR);
324 329
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 1651d7f0a303..d1adfdf50fb3 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -828,7 +828,14 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
828 /* Do not leak kernel stack to userspace */ 828 /* Do not leak kernel stack to userspace */
829 memset(&ge, 0, sizeof(ge)); 829 memset(&ge, 0, sizeof(ge));
830 830
831 ge.timestamp = le->timestamp; 831 /*
832 * We may be running from a nested threaded interrupt in which case
833 * we didn't get the timestamp from lineevent_irq_handler().
834 */
835 if (!le->timestamp)
836 ge.timestamp = ktime_get_real_ns();
837 else
838 ge.timestamp = le->timestamp;
832 839
833 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE 840 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
834 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { 841 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 6896dec97fc7..0ed41a9d2d77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1686,7 +1686,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
1686 effective_mode &= ~S_IWUSR; 1686 effective_mode &= ~S_IWUSR;
1687 1687
1688 if ((adev->flags & AMD_IS_APU) && 1688 if ((adev->flags & AMD_IS_APU) &&
1689 (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || 1689 (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
1690 attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
1690 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| 1691 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
1691 attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) 1692 attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
1692 return 0; 1693 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 71913a18d142..a38e0fb4a6fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -38,6 +38,7 @@
38#include "amdgpu_gem.h" 38#include "amdgpu_gem.h"
39#include <drm/amdgpu_drm.h> 39#include <drm/amdgpu_drm.h>
40#include <linux/dma-buf.h> 40#include <linux/dma-buf.h>
41#include <linux/dma-fence-array.h>
41 42
42/** 43/**
43 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table 44 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
@@ -187,6 +188,48 @@ error:
187 return ERR_PTR(ret); 188 return ERR_PTR(ret);
188} 189}
189 190
191static int
192__reservation_object_make_exclusive(struct reservation_object *obj)
193{
194 struct dma_fence **fences;
195 unsigned int count;
196 int r;
197
198 if (!reservation_object_get_list(obj)) /* no shared fences to convert */
199 return 0;
200
201 r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
202 if (r)
203 return r;
204
205 if (count == 0) {
206 /* Now that was unexpected. */
207 } else if (count == 1) {
208 reservation_object_add_excl_fence(obj, fences[0]);
209 dma_fence_put(fences[0]);
210 kfree(fences);
211 } else {
212 struct dma_fence_array *array;
213
214 array = dma_fence_array_create(count, fences,
215 dma_fence_context_alloc(1), 0,
216 false);
217 if (!array)
218 goto err_fences_put;
219
220 reservation_object_add_excl_fence(obj, &array->base);
221 dma_fence_put(&array->base);
222 }
223
224 return 0;
225
226err_fences_put:
227 while (count--)
228 dma_fence_put(fences[count]);
229 kfree(fences);
230 return -ENOMEM;
231}
232
190/** 233/**
191 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation 234 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
192 * @dma_buf: Shared DMA buffer 235 * @dma_buf: Shared DMA buffer
@@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
218 261
219 if (attach->dev->driver != adev->dev->driver) { 262 if (attach->dev->driver != adev->dev->driver) {
220 /* 263 /*
221 * Wait for all shared fences to complete before we switch to future 264 * We only create shared fences for internal use, but importers
222 * use of exclusive fence on this prime shared bo. 265 * of the dmabuf rely on exclusive fences for implicitly
266 * tracking write hazards. As any of the current fences may
267 * correspond to a write, we need to convert all existing
268 * fences on the reservation object into a single exclusive
269 * fence.
223 */ 270 */
224 r = reservation_object_wait_timeout_rcu(bo->tbo.resv, 271 r = __reservation_object_make_exclusive(bo->tbo.resv);
225 true, false, 272 if (r)
226 MAX_SCHEDULE_TIMEOUT);
227 if (unlikely(r < 0)) {
228 DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
229 goto error_unreserve; 273 goto error_unreserve;
230 }
231 } 274 }
232 275
233 /* pin buffer into GTT */ 276 /* pin buffer into GTT */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d2ea5ce2cefb..7c108e687683 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -3363,14 +3363,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
3363 struct amdgpu_task_info *task_info) 3363 struct amdgpu_task_info *task_info)
3364{ 3364{
3365 struct amdgpu_vm *vm; 3365 struct amdgpu_vm *vm;
3366 unsigned long flags;
3366 3367
3367 spin_lock(&adev->vm_manager.pasid_lock); 3368 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3368 3369
3369 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3370 vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3370 if (vm) 3371 if (vm)
3371 *task_info = vm->task_info; 3372 *task_info = vm->task_info;
3372 3373
3373 spin_unlock(&adev->vm_manager.pasid_lock); 3374 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3374} 3375}
3375 3376
3376/** 3377/**
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 4cd31a276dcd..186db182f924 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -93,7 +93,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
93static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, 93static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
94 bool enable) 94 bool enable)
95{ 95{
96 u32 tmp = 0;
96 97
98 if (enable) {
99 tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
100 REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
101 REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);
102
103 WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
104 lower_32_bits(adev->doorbell.base));
105 WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
106 upper_32_bits(adev->doorbell.base));
107 }
108
109 WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
97} 110}
98 111
99static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev, 112static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8849b74078d6..9b639974c70c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -729,11 +729,13 @@ static int soc15_common_early_init(void *handle)
729 case CHIP_RAVEN: 729 case CHIP_RAVEN:
730 adev->asic_funcs = &soc15_asic_funcs; 730 adev->asic_funcs = &soc15_asic_funcs;
731 if (adev->rev_id >= 0x8) 731 if (adev->rev_id >= 0x8)
732 adev->external_rev_id = adev->rev_id + 0x81; 732 adev->external_rev_id = adev->rev_id + 0x79;
733 else if (adev->pdev->device == 0x15d8) 733 else if (adev->pdev->device == 0x15d8)
734 adev->external_rev_id = adev->rev_id + 0x41; 734 adev->external_rev_id = adev->rev_id + 0x41;
735 else if (adev->rev_id == 1)
736 adev->external_rev_id = adev->rev_id + 0x20;
735 else 737 else
736 adev->external_rev_id = 0x1; 738 adev->external_rev_id = adev->rev_id + 0x01;
737 739
738 if (adev->rev_id >= 0x8) { 740 if (adev->rev_id >= 0x8) {
739 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | 741 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 5d85ff341385..2e7c44955f43 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -863,7 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
863 return 0; 863 return 0;
864} 864}
865 865
866#if CONFIG_X86_64 866#ifdef CONFIG_X86_64
867static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, 867static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
868 uint32_t *num_entries, 868 uint32_t *num_entries,
869 struct crat_subtype_iolink *sub_type_hdr) 869 struct crat_subtype_iolink *sub_type_hdr)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f4fa40c387d3..0b392bfca284 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4082,7 +4082,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
4082 } 4082 }
4083 4083
4084 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 4084 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4085 connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 4085 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4086 connector_type == DRM_MODE_CONNECTOR_eDP) {
4086 drm_connector_attach_vrr_capable_property( 4087 drm_connector_attach_vrr_capable_property(
4087 &aconnector->base); 4088 &aconnector->base);
4088 } 4089 }
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index afd287f08bc9..19801bdba0d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -591,7 +591,15 @@ static void dce11_pplib_apply_display_requirements(
591 dc, 591 dc,
592 context->bw.dce.sclk_khz); 592 context->bw.dce.sclk_khz);
593 593
594 pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz; 594 /*
595 * As workaround for >4x4K lightup set dcfclock to min_engine_clock value.
596 * This is not required for less than 5 displays,
597 * thus don't request decfclk in dc to avoid impact
598 * on power saving.
599 *
600 */
601 pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)?
602 pp_display_cfg->min_engine_clock_khz : 0;
595 603
596 pp_display_cfg->min_engine_clock_deep_sleep_khz 604 pp_display_cfg->min_engine_clock_deep_sleep_khz
597 = context->bw.dce.sclk_deep_sleep_khz; 605 = context->bw.dce.sclk_deep_sleep_khz;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index f95c5f50eb0f..5273de3c5b98 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1033,6 +1033,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
1033 break; 1033 break;
1034 case amd_pp_dpp_clock: 1034 case amd_pp_dpp_clock:
1035 pclk_vol_table = pinfo->vdd_dep_on_dppclk; 1035 pclk_vol_table = pinfo->vdd_dep_on_dppclk;
1036 break;
1036 default: 1037 default:
1037 return -EINVAL; 1038 return -EINVAL;
1038 } 1039 }
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 24a750436559..f91e02c87fd8 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
758 if (mode->hsync) 758 if (mode->hsync)
759 return mode->hsync; 759 return mode->hsync;
760 760
761 if (mode->htotal < 0) 761 if (mode->htotal <= 0)
762 return 0; 762 return 0;
763 763
764 calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ 764 calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f3e1d6a0b7dd..4079050f9d6c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1086,7 +1086,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
1086 return DDI_CLK_SEL_TBT_810; 1086 return DDI_CLK_SEL_TBT_810;
1087 default: 1087 default:
1088 MISSING_CASE(clock); 1088 MISSING_CASE(clock);
1089 break; 1089 return DDI_CLK_SEL_NONE;
1090 } 1090 }
1091 case DPLL_ID_ICL_MGPLL1: 1091 case DPLL_ID_ICL_MGPLL1:
1092 case DPLL_ID_ICL_MGPLL2: 1092 case DPLL_ID_ICL_MGPLL2:
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3da9c0f9e948..248128126422 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15415,16 +15415,45 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
15415 } 15415 }
15416} 15416}
15417 15417
15418static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
15419{
15420 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
15421
15422 /*
15423 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
15424 * the hardware when a high res displays plugged in. DPLL P
15425 * divider is zero, and the pipe timings are bonkers. We'll
15426 * try to disable everything in that case.
15427 *
15428 * FIXME would be nice to be able to sanitize this state
15429 * without several WARNs, but for now let's take the easy
15430 * road.
15431 */
15432 return IS_GEN6(dev_priv) &&
15433 crtc_state->base.active &&
15434 crtc_state->shared_dpll &&
15435 crtc_state->port_clock == 0;
15436}
15437
15418static void intel_sanitize_encoder(struct intel_encoder *encoder) 15438static void intel_sanitize_encoder(struct intel_encoder *encoder)
15419{ 15439{
15420 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 15440 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
15421 struct intel_connector *connector; 15441 struct intel_connector *connector;
15442 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
15443 struct intel_crtc_state *crtc_state = crtc ?
15444 to_intel_crtc_state(crtc->base.state) : NULL;
15422 15445
15423 /* We need to check both for a crtc link (meaning that the 15446 /* We need to check both for a crtc link (meaning that the
15424 * encoder is active and trying to read from a pipe) and the 15447 * encoder is active and trying to read from a pipe) and the
15425 * pipe itself being active. */ 15448 * pipe itself being active. */
15426 bool has_active_crtc = encoder->base.crtc && 15449 bool has_active_crtc = crtc_state &&
15427 to_intel_crtc(encoder->base.crtc)->active; 15450 crtc_state->base.active;
15451
15452 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
15453 DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
15454 pipe_name(crtc->pipe));
15455 has_active_crtc = false;
15456 }
15428 15457
15429 connector = intel_encoder_find_connector(encoder); 15458 connector = intel_encoder_find_connector(encoder);
15430 if (connector && !has_active_crtc) { 15459 if (connector && !has_active_crtc) {
@@ -15435,16 +15464,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
15435 /* Connector is active, but has no active pipe. This is 15464 /* Connector is active, but has no active pipe. This is
15436 * fallout from our resume register restoring. Disable 15465 * fallout from our resume register restoring. Disable
15437 * the encoder manually again. */ 15466 * the encoder manually again. */
15438 if (encoder->base.crtc) { 15467 if (crtc_state) {
15439 struct drm_crtc_state *crtc_state = encoder->base.crtc->state; 15468 struct drm_encoder *best_encoder;
15440 15469
15441 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 15470 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15442 encoder->base.base.id, 15471 encoder->base.base.id,
15443 encoder->base.name); 15472 encoder->base.name);
15473
15474 /* avoid oopsing in case the hooks consult best_encoder */
15475 best_encoder = connector->base.state->best_encoder;
15476 connector->base.state->best_encoder = &encoder->base;
15477
15444 if (encoder->disable) 15478 if (encoder->disable)
15445 encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); 15479 encoder->disable(encoder, crtc_state,
15480 connector->base.state);
15446 if (encoder->post_disable) 15481 if (encoder->post_disable)
15447 encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); 15482 encoder->post_disable(encoder, crtc_state,
15483 connector->base.state);
15484
15485 connector->base.state->best_encoder = best_encoder;
15448 } 15486 }
15449 encoder->base.crtc = NULL; 15487 encoder->base.crtc = NULL;
15450 15488
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index d2e003d8f3db..5170a0f5fe7b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -494,7 +494,7 @@ skl_program_plane(struct intel_plane *plane,
494 494
495 keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); 495 keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
496 496
497 keymsk = key->channel_mask & 0x3ffffff; 497 keymsk = key->channel_mask & 0x7ffffff;
498 if (alpha < 0xff) 498 if (alpha < 0xff)
499 keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; 499 keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
500 500
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 00a9c2ab9e6c..64fb788b6647 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1406,7 +1406,7 @@ static void dsi_pll_disable(struct dss_pll *pll)
1406 1406
1407static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) 1407static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
1408{ 1408{
1409 struct dsi_data *dsi = p; 1409 struct dsi_data *dsi = s->private;
1410 struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; 1410 struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
1411 enum dss_clk_source dispc_clk_src, dsi_clk_src; 1411 enum dss_clk_source dispc_clk_src, dsi_clk_src;
1412 int dsi_module = dsi->module_id; 1412 int dsi_module = dsi->module_id;
@@ -1467,7 +1467,7 @@ static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
1467#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 1467#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
1468static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) 1468static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
1469{ 1469{
1470 struct dsi_data *dsi = p; 1470 struct dsi_data *dsi = s->private;
1471 unsigned long flags; 1471 unsigned long flags;
1472 struct dsi_irq_stats stats; 1472 struct dsi_irq_stats stats;
1473 1473
@@ -1558,7 +1558,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
1558 1558
1559static int dsi_dump_dsi_regs(struct seq_file *s, void *p) 1559static int dsi_dump_dsi_regs(struct seq_file *s, void *p)
1560{ 1560{
1561 struct dsi_data *dsi = p; 1561 struct dsi_data *dsi = s->private;
1562 1562
1563 if (dsi_runtime_get(dsi)) 1563 if (dsi_runtime_get(dsi))
1564 return 0; 1564 return 0;
@@ -4751,6 +4751,17 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
4751 dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH; 4751 dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
4752 dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; 4752 dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
4753 dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; 4753 dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
4754 /*
4755 * HACK: These flags should be handled through the omap_dss_device bus
4756 * flags, but this will only be possible when the DSI encoder will be
4757 * converted to the omapdrm-managed encoder model.
4758 */
4759 dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
4760 dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
4761 dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
4762 dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
4763 dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
4764 dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
4754 4765
4755 dss_mgr_set_timings(&dsi->output, &dsi->vm); 4766 dss_mgr_set_timings(&dsi->output, &dsi->vm);
4756 4767
@@ -5083,15 +5094,15 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
5083 5094
5084 snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1); 5095 snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1);
5085 dsi->debugfs.regs = dss_debugfs_create_file(dss, name, 5096 dsi->debugfs.regs = dss_debugfs_create_file(dss, name,
5086 dsi_dump_dsi_regs, &dsi); 5097 dsi_dump_dsi_regs, dsi);
5087#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 5098#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
5088 snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1); 5099 snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1);
5089 dsi->debugfs.irqs = dss_debugfs_create_file(dss, name, 5100 dsi->debugfs.irqs = dss_debugfs_create_file(dss, name,
5090 dsi_dump_dsi_irqs, &dsi); 5101 dsi_dump_dsi_irqs, dsi);
5091#endif 5102#endif
5092 snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1); 5103 snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1);
5093 dsi->debugfs.clks = dss_debugfs_create_file(dss, name, 5104 dsi->debugfs.clks = dss_debugfs_create_file(dss, name,
5094 dsi_dump_dsi_clocks, &dsi); 5105 dsi_dump_dsi_clocks, dsi);
5095 5106
5096 return 0; 5107 return 0;
5097} 5108}
@@ -5104,8 +5115,6 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data)
5104 dss_debugfs_remove_file(dsi->debugfs.irqs); 5115 dss_debugfs_remove_file(dsi->debugfs.irqs);
5105 dss_debugfs_remove_file(dsi->debugfs.regs); 5116 dss_debugfs_remove_file(dsi->debugfs.regs);
5106 5117
5107 of_platform_depopulate(dev);
5108
5109 WARN_ON(dsi->scp_clk_refcount > 0); 5118 WARN_ON(dsi->scp_clk_refcount > 0);
5110 5119
5111 dss_pll_unregister(&dsi->pll); 5120 dss_pll_unregister(&dsi->pll);
@@ -5457,6 +5466,8 @@ static int dsi_remove(struct platform_device *pdev)
5457 5466
5458 dsi_uninit_output(dsi); 5467 dsi_uninit_output(dsi);
5459 5468
5469 of_platform_depopulate(&pdev->dev);
5470
5460 pm_runtime_disable(&pdev->dev); 5471 pm_runtime_disable(&pdev->dev);
5461 5472
5462 if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) { 5473 if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index d587779a80b4..a97294ac96d5 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
5676 u16 data_offset, size; 5676 u16 data_offset, size;
5677 u8 frev, crev; 5677 u8 frev, crev;
5678 struct ci_power_info *pi; 5678 struct ci_power_info *pi;
5679 enum pci_bus_speed speed_cap; 5679 enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
5680 struct pci_dev *root = rdev->pdev->bus->self; 5680 struct pci_dev *root = rdev->pdev->bus->self;
5681 int ret; 5681 int ret;
5682 5682
@@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
5685 return -ENOMEM; 5685 return -ENOMEM;
5686 rdev->pm.dpm.priv = pi; 5686 rdev->pm.dpm.priv = pi;
5687 5687
5688 speed_cap = pcie_get_speed_cap(root); 5688 if (!pci_is_root_bus(rdev->pdev->bus))
5689 speed_cap = pcie_get_speed_cap(root);
5689 if (speed_cap == PCI_SPEED_UNKNOWN) { 5690 if (speed_cap == PCI_SPEED_UNKNOWN) {
5690 pi->sys_pcie_mask = 0; 5691 pi->sys_pcie_mask = 0;
5691 } else { 5692 } else {
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 8fb60b3af015..0a785ef0ab66 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
6899 struct ni_power_info *ni_pi; 6899 struct ni_power_info *ni_pi;
6900 struct si_power_info *si_pi; 6900 struct si_power_info *si_pi;
6901 struct atom_clock_dividers dividers; 6901 struct atom_clock_dividers dividers;
6902 enum pci_bus_speed speed_cap; 6902 enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
6903 struct pci_dev *root = rdev->pdev->bus->self; 6903 struct pci_dev *root = rdev->pdev->bus->self;
6904 int ret; 6904 int ret;
6905 6905
@@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
6911 eg_pi = &ni_pi->eg; 6911 eg_pi = &ni_pi->eg;
6912 pi = &eg_pi->rv7xx; 6912 pi = &eg_pi->rv7xx;
6913 6913
6914 speed_cap = pcie_get_speed_cap(root); 6914 if (!pci_is_root_bus(rdev->pdev->bus))
6915 speed_cap = pcie_get_speed_cap(root);
6915 if (speed_cap == PCI_SPEED_UNKNOWN) { 6916 if (speed_cap == PCI_SPEED_UNKNOWN) {
6916 si_pi->sys_pcie_mask = 0; 6917 si_pi->sys_pcie_mask = 0;
6917 } else { 6918 } else {
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 37f93022a106..c0351abf83a3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -1,17 +1,8 @@
1//SPDX-License-Identifier: GPL-2.0+ 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd 3 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
4 * Author: 4 * Author:
5 * Sandy Huang <hjc@rock-chips.com> 5 * Sandy Huang <hjc@rock-chips.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */ 6 */
16 7
17#include <drm/drmP.h> 8#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h
index 38b52e63b2b0..27b9635124bc 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h
@@ -1,17 +1,8 @@
1//SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd 3 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
4 * Author: 4 * Author:
5 * Sandy Huang <hjc@rock-chips.com> 5 * Sandy Huang <hjc@rock-chips.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */ 6 */
16 7
17#ifdef CONFIG_ROCKCHIP_RGB 8#ifdef CONFIG_ROCKCHIP_RGB
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 0420f5c978b9..cf45d0f940f9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -761,6 +761,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
761 return PTR_ERR(tcon->sclk0); 761 return PTR_ERR(tcon->sclk0);
762 } 762 }
763 } 763 }
764 clk_prepare_enable(tcon->sclk0);
764 765
765 if (tcon->quirks->has_channel_1) { 766 if (tcon->quirks->has_channel_1) {
766 tcon->sclk1 = devm_clk_get(dev, "tcon-ch1"); 767 tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
@@ -775,6 +776,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
775 776
776static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon) 777static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
777{ 778{
779 clk_disable_unprepare(tcon->sclk0);
778 clk_disable_unprepare(tcon->clk); 780 clk_disable_unprepare(tcon->clk);
779} 781}
780 782
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 25afb1d594e3..7ef5dcb06104 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -26,6 +26,7 @@
26 **************************************************************************/ 26 **************************************************************************/
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/console.h> 28#include <linux/console.h>
29#include <linux/dma-mapping.h>
29 30
30#include <drm/drmP.h> 31#include <drm/drmP.h>
31#include "vmwgfx_drv.h" 32#include "vmwgfx_drv.h"
@@ -34,7 +35,6 @@
34#include <drm/ttm/ttm_placement.h> 35#include <drm/ttm/ttm_placement.h>
35#include <drm/ttm/ttm_bo_driver.h> 36#include <drm/ttm/ttm_bo_driver.h>
36#include <drm/ttm/ttm_module.h> 37#include <drm/ttm/ttm_module.h>
37#include <linux/intel-iommu.h>
38 38
39#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" 39#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
40#define VMWGFX_CHIP_SVGAII 0 40#define VMWGFX_CHIP_SVGAII 0
@@ -546,6 +546,21 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
546} 546}
547 547
548/** 548/**
549 * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
550 * taking place.
551 * @dev: Pointer to the struct drm_device.
552 *
553 * Return: true if iommu present, false otherwise.
554 */
555static bool vmw_assume_iommu(struct drm_device *dev)
556{
557 const struct dma_map_ops *ops = get_dma_ops(dev->dev);
558
559 return !dma_is_direct(ops) && ops &&
560 ops->map_page != dma_direct_map_page;
561}
562
563/**
549 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this 564 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
550 * system. 565 * system.
551 * 566 *
@@ -565,55 +580,27 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
565 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", 580 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
566 [vmw_dma_map_populate] = "Keeping DMA mappings.", 581 [vmw_dma_map_populate] = "Keeping DMA mappings.",
567 [vmw_dma_map_bind] = "Giving up DMA mappings early."}; 582 [vmw_dma_map_bind] = "Giving up DMA mappings early."};
568#ifdef CONFIG_X86
569 const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
570 583
571#ifdef CONFIG_INTEL_IOMMU 584 if (vmw_force_coherent)
572 if (intel_iommu_enabled) { 585 dev_priv->map_mode = vmw_dma_alloc_coherent;
586 else if (vmw_assume_iommu(dev_priv->dev))
573 dev_priv->map_mode = vmw_dma_map_populate; 587 dev_priv->map_mode = vmw_dma_map_populate;
574 goto out_fixup; 588 else if (!vmw_force_iommu)
575 }
576#endif
577
578 if (!(vmw_force_iommu || vmw_force_coherent)) {
579 dev_priv->map_mode = vmw_dma_phys; 589 dev_priv->map_mode = vmw_dma_phys;
580 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); 590 else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
581 return 0;
582 }
583
584 dev_priv->map_mode = vmw_dma_map_populate;
585
586 if (dma_ops && dma_ops->sync_single_for_cpu)
587 dev_priv->map_mode = vmw_dma_alloc_coherent; 591 dev_priv->map_mode = vmw_dma_alloc_coherent;
588#ifdef CONFIG_SWIOTLB 592 else
589 if (swiotlb_nr_tbl() == 0)
590 dev_priv->map_mode = vmw_dma_map_populate; 593 dev_priv->map_mode = vmw_dma_map_populate;
591#endif
592 594
593#ifdef CONFIG_INTEL_IOMMU 595 if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
594out_fixup:
595#endif
596 if (dev_priv->map_mode == vmw_dma_map_populate &&
597 vmw_restrict_iommu)
598 dev_priv->map_mode = vmw_dma_map_bind; 596 dev_priv->map_mode = vmw_dma_map_bind;
599 597
600 if (vmw_force_coherent) 598 /* No TTM coherent page pool? FIXME: Ask TTM instead! */
601 dev_priv->map_mode = vmw_dma_alloc_coherent; 599 if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
602 600 (dev_priv->map_mode == vmw_dma_alloc_coherent))
603#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
604 /*
605 * No coherent page pool
606 */
607 if (dev_priv->map_mode == vmw_dma_alloc_coherent)
608 return -EINVAL; 601 return -EINVAL;
609#endif
610
611#else /* CONFIG_X86 */
612 dev_priv->map_mode = vmw_dma_map_populate;
613#endif /* CONFIG_X86 */
614 602
615 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); 603 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
616
617 return 0; 604 return 0;
618} 605}
619 606
@@ -625,24 +612,20 @@ out_fixup:
625 * With 32-bit we can only handle 32 bit PFNs. Optionally set that 612 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
626 * restriction also for 64-bit systems. 613 * restriction also for 64-bit systems.
627 */ 614 */
628#ifdef CONFIG_INTEL_IOMMU
629static int vmw_dma_masks(struct vmw_private *dev_priv) 615static int vmw_dma_masks(struct vmw_private *dev_priv)
630{ 616{
631 struct drm_device *dev = dev_priv->dev; 617 struct drm_device *dev = dev_priv->dev;
618 int ret = 0;
632 619
633 if (intel_iommu_enabled && 620 ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
621 if (dev_priv->map_mode != vmw_dma_phys &&
634 (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { 622 (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
635 DRM_INFO("Restricting DMA addresses to 44 bits.\n"); 623 DRM_INFO("Restricting DMA addresses to 44 bits.\n");
636 return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); 624 return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
637 } 625 }
638 return 0; 626
639} 627 return ret;
640#else
641static int vmw_dma_masks(struct vmw_private *dev_priv)
642{
643 return 0;
644} 628}
645#endif
646 629
647static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 630static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
648{ 631{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index f2d13a72c05d..88b8178d4687 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3570 *p_fence = NULL; 3570 *p_fence = NULL;
3571 } 3571 }
3572 3572
3573 return 0; 3573 return ret;
3574} 3574}
3575 3575
3576/** 3576/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b351fb5214d3..ed2f67822f45 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1646,7 +1646,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
1646 struct drm_connector_state *conn_state; 1646 struct drm_connector_state *conn_state;
1647 struct vmw_connector_state *vmw_conn_state; 1647 struct vmw_connector_state *vmw_conn_state;
1648 1648
1649 if (!du->pref_active) { 1649 if (!du->pref_active && new_crtc_state->enable) {
1650 ret = -EINVAL; 1650 ret = -EINVAL;
1651 goto clean; 1651 goto clean;
1652 } 1652 }
@@ -2554,8 +2554,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2554 user_fence_rep) 2554 user_fence_rep)
2555{ 2555{
2556 struct vmw_fence_obj *fence = NULL; 2556 struct vmw_fence_obj *fence = NULL;
2557 uint32_t handle; 2557 uint32_t handle = 0;
2558 int ret; 2558 int ret = 0;
2559 2559
2560 if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) || 2560 if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2561 out_fence) 2561 out_fence)
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index c530476edba6..ac9fda1b5a72 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -30,6 +30,7 @@
30 30
31#include <linux/debugfs.h> 31#include <linux/debugfs.h>
32#include <linux/seq_file.h> 32#include <linux/seq_file.h>
33#include <linux/kfifo.h>
33#include <linux/sched/signal.h> 34#include <linux/sched/signal.h>
34#include <linux/export.h> 35#include <linux/export.h>
35#include <linux/slab.h> 36#include <linux/slab.h>
@@ -661,17 +662,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device);
661/* enqueue string to 'events' ring buffer */ 662/* enqueue string to 'events' ring buffer */
662void hid_debug_event(struct hid_device *hdev, char *buf) 663void hid_debug_event(struct hid_device *hdev, char *buf)
663{ 664{
664 unsigned i;
665 struct hid_debug_list *list; 665 struct hid_debug_list *list;
666 unsigned long flags; 666 unsigned long flags;
667 667
668 spin_lock_irqsave(&hdev->debug_list_lock, flags); 668 spin_lock_irqsave(&hdev->debug_list_lock, flags);
669 list_for_each_entry(list, &hdev->debug_list, node) { 669 list_for_each_entry(list, &hdev->debug_list, node)
670 for (i = 0; buf[i]; i++) 670 kfifo_in(&list->hid_debug_fifo, buf, strlen(buf));
671 list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
672 buf[i];
673 list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
674 }
675 spin_unlock_irqrestore(&hdev->debug_list_lock, flags); 671 spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
676 672
677 wake_up_interruptible(&hdev->debug_wait); 673 wake_up_interruptible(&hdev->debug_wait);
@@ -722,8 +718,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu
722 hid_debug_event(hdev, buf); 718 hid_debug_event(hdev, buf);
723 719
724 kfree(buf); 720 kfree(buf);
725 wake_up_interruptible(&hdev->debug_wait); 721 wake_up_interruptible(&hdev->debug_wait);
726
727} 722}
728EXPORT_SYMBOL_GPL(hid_dump_input); 723EXPORT_SYMBOL_GPL(hid_dump_input);
729 724
@@ -1083,8 +1078,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
1083 goto out; 1078 goto out;
1084 } 1079 }
1085 1080
1086 if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) { 1081 err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL);
1087 err = -ENOMEM; 1082 if (err) {
1088 kfree(list); 1083 kfree(list);
1089 goto out; 1084 goto out;
1090 } 1085 }
@@ -1104,77 +1099,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
1104 size_t count, loff_t *ppos) 1099 size_t count, loff_t *ppos)
1105{ 1100{
1106 struct hid_debug_list *list = file->private_data; 1101 struct hid_debug_list *list = file->private_data;
1107 int ret = 0, len; 1102 int ret = 0, copied;
1108 DECLARE_WAITQUEUE(wait, current); 1103 DECLARE_WAITQUEUE(wait, current);
1109 1104
1110 mutex_lock(&list->read_mutex); 1105 mutex_lock(&list->read_mutex);
1111 while (ret == 0) { 1106 if (kfifo_is_empty(&list->hid_debug_fifo)) {
1112 if (list->head == list->tail) { 1107 add_wait_queue(&list->hdev->debug_wait, &wait);
1113 add_wait_queue(&list->hdev->debug_wait, &wait); 1108 set_current_state(TASK_INTERRUPTIBLE);
1114 set_current_state(TASK_INTERRUPTIBLE); 1109
1115 1110 while (kfifo_is_empty(&list->hid_debug_fifo)) {
1116 while (list->head == list->tail) { 1111 if (file->f_flags & O_NONBLOCK) {
1117 if (file->f_flags & O_NONBLOCK) { 1112 ret = -EAGAIN;
1118 ret = -EAGAIN; 1113 break;
1119 break; 1114 }
1120 }
1121 if (signal_pending(current)) {
1122 ret = -ERESTARTSYS;
1123 break;
1124 }
1125 1115
1126 if (!list->hdev || !list->hdev->debug) { 1116 if (signal_pending(current)) {
1127 ret = -EIO; 1117 ret = -ERESTARTSYS;
1128 set_current_state(TASK_RUNNING); 1118 break;
1129 goto out; 1119 }
1130 }
1131 1120
1132 /* allow O_NONBLOCK from other threads */ 1121 /* if list->hdev is NULL we cannot remove_wait_queue().
1133 mutex_unlock(&list->read_mutex); 1122 * if list->hdev->debug is 0 then hid_debug_unregister()
1134 schedule(); 1123 * was already called and list->hdev is being destroyed.
1135 mutex_lock(&list->read_mutex); 1124 * if we add remove_wait_queue() here we can hit a race.
1136 set_current_state(TASK_INTERRUPTIBLE); 1125 */
1126 if (!list->hdev || !list->hdev->debug) {
1127 ret = -EIO;
1128 set_current_state(TASK_RUNNING);
1129 goto out;
1137 } 1130 }
1138 1131
1139 set_current_state(TASK_RUNNING); 1132 /* allow O_NONBLOCK from other threads */
1140 remove_wait_queue(&list->hdev->debug_wait, &wait); 1133 mutex_unlock(&list->read_mutex);
1134 schedule();
1135 mutex_lock(&list->read_mutex);
1136 set_current_state(TASK_INTERRUPTIBLE);
1141 } 1137 }
1142 1138
1143 if (ret) 1139 __set_current_state(TASK_RUNNING);
1144 goto out; 1140 remove_wait_queue(&list->hdev->debug_wait, &wait);
1145 1141
1146 /* pass the ringbuffer contents to userspace */ 1142 if (ret)
1147copy_rest:
1148 if (list->tail == list->head)
1149 goto out; 1143 goto out;
1150 if (list->tail > list->head) {
1151 len = list->tail - list->head;
1152 if (len > count)
1153 len = count;
1154
1155 if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
1156 ret = -EFAULT;
1157 goto out;
1158 }
1159 ret += len;
1160 list->head += len;
1161 } else {
1162 len = HID_DEBUG_BUFSIZE - list->head;
1163 if (len > count)
1164 len = count;
1165
1166 if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
1167 ret = -EFAULT;
1168 goto out;
1169 }
1170 list->head = 0;
1171 ret += len;
1172 count -= len;
1173 if (count > 0)
1174 goto copy_rest;
1175 }
1176
1177 } 1144 }
1145
1146 /* pass the fifo content to userspace, locking is not needed with only
1147 * one concurrent reader and one concurrent writer
1148 */
1149 ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied);
1150 if (ret)
1151 goto out;
1152 ret = copied;
1178out: 1153out:
1179 mutex_unlock(&list->read_mutex); 1154 mutex_unlock(&list->read_mutex);
1180 return ret; 1155 return ret;
@@ -1185,7 +1160,7 @@ static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait)
1185 struct hid_debug_list *list = file->private_data; 1160 struct hid_debug_list *list = file->private_data;
1186 1161
1187 poll_wait(file, &list->hdev->debug_wait, wait); 1162 poll_wait(file, &list->hdev->debug_wait, wait);
1188 if (list->head != list->tail) 1163 if (!kfifo_is_empty(&list->hid_debug_fifo))
1189 return EPOLLIN | EPOLLRDNORM; 1164 return EPOLLIN | EPOLLRDNORM;
1190 if (!list->hdev->debug) 1165 if (!list->hdev->debug)
1191 return EPOLLERR | EPOLLHUP; 1166 return EPOLLERR | EPOLLHUP;
@@ -1200,7 +1175,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
1200 spin_lock_irqsave(&list->hdev->debug_list_lock, flags); 1175 spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
1201 list_del(&list->node); 1176 list_del(&list->node);
1202 spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); 1177 spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
1203 kfree(list->hid_debug_buf); 1178 kfifo_free(&list->hid_debug_fifo);
1204 kfree(list); 1179 kfree(list);
1205 1180
1206 return 0; 1181 return 0;
@@ -1246,4 +1221,3 @@ void hid_debug_exit(void)
1246{ 1221{
1247 debugfs_remove_recursive(hid_debug_root); 1222 debugfs_remove_recursive(hid_debug_root);
1248} 1223}
1249
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index c39f89d2deba..2dc628d4f1ae 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1828,7 +1828,7 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
1828 1828
1829 ret = i3c_master_retrieve_dev_info(newdev); 1829 ret = i3c_master_retrieve_dev_info(newdev);
1830 if (ret) 1830 if (ret)
1831 goto err_free_dev; 1831 goto err_detach_dev;
1832 1832
1833 olddev = i3c_master_search_i3c_dev_duplicate(newdev); 1833 olddev = i3c_master_search_i3c_dev_duplicate(newdev);
1834 if (olddev) { 1834 if (olddev) {
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index f8c00b94817f..bb03079fbade 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -419,12 +419,9 @@ static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master,
419 spin_unlock_irqrestore(&master->xferqueue.lock, flags); 419 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
420} 420}
421 421
422static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master, 422static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master,
423 struct dw_i3c_xfer *xfer) 423 struct dw_i3c_xfer *xfer)
424{ 424{
425 unsigned long flags;
426
427 spin_lock_irqsave(&master->xferqueue.lock, flags);
428 if (master->xferqueue.cur == xfer) { 425 if (master->xferqueue.cur == xfer) {
429 u32 status; 426 u32 status;
430 427
@@ -439,6 +436,15 @@ static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
439 } else { 436 } else {
440 list_del_init(&xfer->node); 437 list_del_init(&xfer->node);
441 } 438 }
439}
440
441static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
442 struct dw_i3c_xfer *xfer)
443{
444 unsigned long flags;
445
446 spin_lock_irqsave(&master->xferqueue.lock, flags);
447 dw_i3c_master_dequeue_xfer_locked(master, xfer);
442 spin_unlock_irqrestore(&master->xferqueue.lock, flags); 448 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
443} 449}
444 450
@@ -494,7 +500,7 @@ static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
494 complete(&xfer->comp); 500 complete(&xfer->comp);
495 501
496 if (ret < 0) { 502 if (ret < 0) {
497 dw_i3c_master_dequeue_xfer(master, xfer); 503 dw_i3c_master_dequeue_xfer_locked(master, xfer);
498 writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME, 504 writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
499 master->regs + DEVICE_CTRL); 505 master->regs + DEVICE_CTRL);
500 } 506 }
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index da58020a144e..33a28cde126c 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -235,21 +235,28 @@ EXPORT_SYMBOL_GPL(ide_prep_sense);
235 235
236int ide_queue_sense_rq(ide_drive_t *drive, void *special) 236int ide_queue_sense_rq(ide_drive_t *drive, void *special)
237{ 237{
238 struct request *sense_rq = drive->sense_rq; 238 ide_hwif_t *hwif = drive->hwif;
239 struct request *sense_rq;
240 unsigned long flags;
241
242 spin_lock_irqsave(&hwif->lock, flags);
239 243
240 /* deferred failure from ide_prep_sense() */ 244 /* deferred failure from ide_prep_sense() */
241 if (!drive->sense_rq_armed) { 245 if (!drive->sense_rq_armed) {
242 printk(KERN_WARNING PFX "%s: error queuing a sense request\n", 246 printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
243 drive->name); 247 drive->name);
248 spin_unlock_irqrestore(&hwif->lock, flags);
244 return -ENOMEM; 249 return -ENOMEM;
245 } 250 }
246 251
252 sense_rq = drive->sense_rq;
247 ide_req(sense_rq)->special = special; 253 ide_req(sense_rq)->special = special;
248 drive->sense_rq_armed = false; 254 drive->sense_rq_armed = false;
249 255
250 drive->hwif->rq = NULL; 256 drive->hwif->rq = NULL;
251 257
252 ide_insert_request_head(drive, sense_rq); 258 ide_insert_request_head(drive, sense_rq);
259 spin_unlock_irqrestore(&hwif->lock, flags);
253 return 0; 260 return 0;
254} 261}
255EXPORT_SYMBOL_GPL(ide_queue_sense_rq); 262EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 8445b484ae69..b137f27a34d5 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -68,8 +68,10 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
68 } 68 }
69 69
70 if (!blk_update_request(rq, error, nr_bytes)) { 70 if (!blk_update_request(rq, error, nr_bytes)) {
71 if (rq == drive->sense_rq) 71 if (rq == drive->sense_rq) {
72 drive->sense_rq = NULL; 72 drive->sense_rq = NULL;
73 drive->sense_rq_active = false;
74 }
73 75
74 __blk_mq_end_request(rq, error); 76 __blk_mq_end_request(rq, error);
75 return 0; 77 return 0;
@@ -451,16 +453,11 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
451 blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3); 453 blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
452} 454}
453 455
454/* 456blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
455 * Issue a new request to a device. 457 bool local_requeue)
456 */
457blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
458 const struct blk_mq_queue_data *bd)
459{ 458{
460 ide_drive_t *drive = hctx->queue->queuedata; 459 ide_hwif_t *hwif = drive->hwif;
461 ide_hwif_t *hwif = drive->hwif;
462 struct ide_host *host = hwif->host; 460 struct ide_host *host = hwif->host;
463 struct request *rq = bd->rq;
464 ide_startstop_t startstop; 461 ide_startstop_t startstop;
465 462
466 if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) { 463 if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
@@ -474,8 +471,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
474 if (ide_lock_host(host, hwif)) 471 if (ide_lock_host(host, hwif))
475 return BLK_STS_DEV_RESOURCE; 472 return BLK_STS_DEV_RESOURCE;
476 473
477 blk_mq_start_request(rq);
478
479 spin_lock_irq(&hwif->lock); 474 spin_lock_irq(&hwif->lock);
480 475
481 if (!ide_lock_port(hwif)) { 476 if (!ide_lock_port(hwif)) {
@@ -511,18 +506,6 @@ repeat:
511 drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED); 506 drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
512 507
513 /* 508 /*
514 * we know that the queue isn't empty, but this can happen
515 * if ->prep_rq() decides to kill a request
516 */
517 if (!rq) {
518 rq = bd->rq;
519 if (!rq) {
520 ide_unlock_port(hwif);
521 goto out;
522 }
523 }
524
525 /*
526 * Sanity: don't accept a request that isn't a PM request 509 * Sanity: don't accept a request that isn't a PM request
527 * if we are currently power managed. This is very important as 510 * if we are currently power managed. This is very important as
528 * blk_stop_queue() doesn't prevent the blk_fetch_request() 511 * blk_stop_queue() doesn't prevent the blk_fetch_request()
@@ -560,9 +543,12 @@ repeat:
560 } 543 }
561 } else { 544 } else {
562plug_device: 545plug_device:
546 if (local_requeue)
547 list_add(&rq->queuelist, &drive->rq_list);
563 spin_unlock_irq(&hwif->lock); 548 spin_unlock_irq(&hwif->lock);
564 ide_unlock_host(host); 549 ide_unlock_host(host);
565 ide_requeue_and_plug(drive, rq); 550 if (!local_requeue)
551 ide_requeue_and_plug(drive, rq);
566 return BLK_STS_OK; 552 return BLK_STS_OK;
567 } 553 }
568 554
@@ -573,6 +559,26 @@ out:
573 return BLK_STS_OK; 559 return BLK_STS_OK;
574} 560}
575 561
562/*
563 * Issue a new request to a device.
564 */
565blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
566 const struct blk_mq_queue_data *bd)
567{
568 ide_drive_t *drive = hctx->queue->queuedata;
569 ide_hwif_t *hwif = drive->hwif;
570
571 spin_lock_irq(&hwif->lock);
572 if (drive->sense_rq_active) {
573 spin_unlock_irq(&hwif->lock);
574 return BLK_STS_DEV_RESOURCE;
575 }
576 spin_unlock_irq(&hwif->lock);
577
578 blk_mq_start_request(bd->rq);
579 return ide_issue_rq(drive, bd->rq, false);
580}
581
576static int drive_is_ready(ide_drive_t *drive) 582static int drive_is_ready(ide_drive_t *drive)
577{ 583{
578 ide_hwif_t *hwif = drive->hwif; 584 ide_hwif_t *hwif = drive->hwif;
@@ -893,13 +899,8 @@ EXPORT_SYMBOL_GPL(ide_pad_transfer);
893 899
894void ide_insert_request_head(ide_drive_t *drive, struct request *rq) 900void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
895{ 901{
896 ide_hwif_t *hwif = drive->hwif; 902 drive->sense_rq_active = true;
897 unsigned long flags;
898
899 spin_lock_irqsave(&hwif->lock, flags);
900 list_add_tail(&rq->queuelist, &drive->rq_list); 903 list_add_tail(&rq->queuelist, &drive->rq_list);
901 spin_unlock_irqrestore(&hwif->lock, flags);
902
903 kblockd_schedule_work(&drive->rq_work); 904 kblockd_schedule_work(&drive->rq_work);
904} 905}
905EXPORT_SYMBOL_GPL(ide_insert_request_head); 906EXPORT_SYMBOL_GPL(ide_insert_request_head);
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 102aa3bc3e7f..8af7af6001eb 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -54,7 +54,9 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
54 scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS; 54 scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
55 scsi_req(rq)->cmd_len = 1; 55 scsi_req(rq)->cmd_len = 1;
56 ide_req(rq)->type = ATA_PRIV_MISC; 56 ide_req(rq)->type = ATA_PRIV_MISC;
57 spin_lock_irq(&hwif->lock);
57 ide_insert_request_head(drive, rq); 58 ide_insert_request_head(drive, rq);
59 spin_unlock_irq(&hwif->lock);
58 60
59out: 61out:
60 return; 62 return;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 63627be0811a..5aeaca24a28f 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1159,18 +1159,27 @@ static void drive_rq_insert_work(struct work_struct *work)
1159 ide_drive_t *drive = container_of(work, ide_drive_t, rq_work); 1159 ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
1160 ide_hwif_t *hwif = drive->hwif; 1160 ide_hwif_t *hwif = drive->hwif;
1161 struct request *rq; 1161 struct request *rq;
1162 blk_status_t ret;
1162 LIST_HEAD(list); 1163 LIST_HEAD(list);
1163 1164
1164 spin_lock_irq(&hwif->lock); 1165 blk_mq_quiesce_queue(drive->queue);
1165 if (!list_empty(&drive->rq_list))
1166 list_splice_init(&drive->rq_list, &list);
1167 spin_unlock_irq(&hwif->lock);
1168 1166
1169 while (!list_empty(&list)) { 1167 ret = BLK_STS_OK;
1170 rq = list_first_entry(&list, struct request, queuelist); 1168 spin_lock_irq(&hwif->lock);
1169 while (!list_empty(&drive->rq_list)) {
1170 rq = list_first_entry(&drive->rq_list, struct request, queuelist);
1171 list_del_init(&rq->queuelist); 1171 list_del_init(&rq->queuelist);
1172 blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL); 1172
1173 spin_unlock_irq(&hwif->lock);
1174 ret = ide_issue_rq(drive, rq, true);
1175 spin_lock_irq(&hwif->lock);
1173 } 1176 }
1177 spin_unlock_irq(&hwif->lock);
1178
1179 blk_mq_unquiesce_queue(drive->queue);
1180
1181 if (ret != BLK_STS_OK)
1182 kblockd_schedule_work(&drive->rq_work);
1174} 1183}
1175 1184
1176static const u8 ide_hwif_to_major[] = 1185static const u8 ide_hwif_to_major[] =
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 031d568b4972..4e339cfd0c54 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -27,9 +27,18 @@
27#include <linux/iio/machine.h> 27#include <linux/iio/machine.h>
28#include <linux/iio/driver.h> 28#include <linux/iio/driver.h>
29 29
30#define AXP288_ADC_EN_MASK 0xF1 30/*
31#define AXP288_ADC_TS_PIN_GPADC 0xF2 31 * This mask enables all ADCs except for the battery temp-sensor (TS), that is
32#define AXP288_ADC_TS_PIN_ON 0xF3 32 * left as-is to avoid breaking charging on devices without a temp-sensor.
33 */
34#define AXP288_ADC_EN_MASK 0xF0
35#define AXP288_ADC_TS_ENABLE 0x01
36
37#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
38#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
39#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
40#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
41#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
33 42
34enum axp288_adc_id { 43enum axp288_adc_id {
35 AXP288_ADC_TS, 44 AXP288_ADC_TS,
@@ -44,6 +53,7 @@ enum axp288_adc_id {
44struct axp288_adc_info { 53struct axp288_adc_info {
45 int irq; 54 int irq;
46 struct regmap *regmap; 55 struct regmap *regmap;
56 bool ts_enabled;
47}; 57};
48 58
49static const struct iio_chan_spec axp288_adc_channels[] = { 59static const struct iio_chan_spec axp288_adc_channels[] = {
@@ -115,21 +125,33 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
115 return IIO_VAL_INT; 125 return IIO_VAL_INT;
116} 126}
117 127
118static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode, 128/*
119 unsigned long address) 129 * The current-source used for the battery temp-sensor (TS) is shared
130 * with the GPADC. For proper fuel-gauge and charger operation the TS
131 * current-source needs to be permanently on. But to read the GPADC we
132 * need to temporary switch the TS current-source to ondemand, so that
133 * the GPADC can use it, otherwise we will always read an all 0 value.
134 */
135static int axp288_adc_set_ts(struct axp288_adc_info *info,
136 unsigned int mode, unsigned long address)
120{ 137{
121 int ret; 138 int ret;
122 139
123 /* channels other than GPADC do not need to switch TS pin */ 140 /* No need to switch the current-source if the TS pin is disabled */
141 if (!info->ts_enabled)
142 return 0;
143
144 /* Channels other than GPADC do not need the current source */
124 if (address != AXP288_GP_ADC_H) 145 if (address != AXP288_GP_ADC_H)
125 return 0; 146 return 0;
126 147
127 ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode); 148 ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
149 AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode);
128 if (ret) 150 if (ret)
129 return ret; 151 return ret;
130 152
131 /* When switching to the GPADC pin give things some time to settle */ 153 /* When switching to the GPADC pin give things some time to settle */
132 if (mode == AXP288_ADC_TS_PIN_GPADC) 154 if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND)
133 usleep_range(6000, 10000); 155 usleep_range(6000, 10000);
134 156
135 return 0; 157 return 0;
@@ -145,14 +167,14 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
145 mutex_lock(&indio_dev->mlock); 167 mutex_lock(&indio_dev->mlock);
146 switch (mask) { 168 switch (mask) {
147 case IIO_CHAN_INFO_RAW: 169 case IIO_CHAN_INFO_RAW:
148 if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC, 170 if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND,
149 chan->address)) { 171 chan->address)) {
150 dev_err(&indio_dev->dev, "GPADC mode\n"); 172 dev_err(&indio_dev->dev, "GPADC mode\n");
151 ret = -EINVAL; 173 ret = -EINVAL;
152 break; 174 break;
153 } 175 }
154 ret = axp288_adc_read_channel(val, chan->address, info->regmap); 176 ret = axp288_adc_read_channel(val, chan->address, info->regmap);
155 if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON, 177 if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON,
156 chan->address)) 178 chan->address))
157 dev_err(&indio_dev->dev, "TS pin restore\n"); 179 dev_err(&indio_dev->dev, "TS pin restore\n");
158 break; 180 break;
@@ -164,13 +186,35 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
164 return ret; 186 return ret;
165} 187}
166 188
167static int axp288_adc_set_state(struct regmap *regmap) 189static int axp288_adc_initialize(struct axp288_adc_info *info)
168{ 190{
169 /* ADC should be always enabled for internal FG to function */ 191 int ret, adc_enable_val;
170 if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON)) 192
171 return -EIO; 193 /*
194 * Determine if the TS pin is enabled and set the TS current-source
195 * accordingly.
196 */
197 ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val);
198 if (ret)
199 return ret;
200
201 if (adc_enable_val & AXP288_ADC_TS_ENABLE) {
202 info->ts_enabled = true;
203 ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
204 AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
205 AXP288_ADC_TS_CURRENT_ON);
206 } else {
207 info->ts_enabled = false;
208 ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
209 AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
210 AXP288_ADC_TS_CURRENT_OFF);
211 }
212 if (ret)
213 return ret;
172 214
173 return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); 215 /* Turn on the ADC for all channels except TS, leave TS as is */
216 return regmap_update_bits(info->regmap, AXP20X_ADC_EN1,
217 AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK);
174} 218}
175 219
176static const struct iio_info axp288_adc_iio_info = { 220static const struct iio_info axp288_adc_iio_info = {
@@ -200,7 +244,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
200 * Set ADC to enabled state at all time, including system suspend. 244 * Set ADC to enabled state at all time, including system suspend.
201 * otherwise internal fuel gauge functionality may be affected. 245 * otherwise internal fuel gauge functionality may be affected.
202 */ 246 */
203 ret = axp288_adc_set_state(axp20x->regmap); 247 ret = axp288_adc_initialize(info);
204 if (ret) { 248 if (ret) {
205 dev_err(&pdev->dev, "unable to enable ADC device\n"); 249 dev_err(&pdev->dev, "unable to enable ADC device\n");
206 return ret; 250 return ret;
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 184d686ebd99..8b4568edd5cb 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -41,6 +41,7 @@
41 41
42#define ADS8688_VREF_MV 4096 42#define ADS8688_VREF_MV 4096
43#define ADS8688_REALBITS 16 43#define ADS8688_REALBITS 16
44#define ADS8688_MAX_CHANNELS 8
44 45
45/* 46/*
46 * enum ads8688_range - ADS8688 reference voltage range 47 * enum ads8688_range - ADS8688 reference voltage range
@@ -385,7 +386,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
385{ 386{
386 struct iio_poll_func *pf = p; 387 struct iio_poll_func *pf = p;
387 struct iio_dev *indio_dev = pf->indio_dev; 388 struct iio_dev *indio_dev = pf->indio_dev;
388 u16 buffer[8]; 389 u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
389 int i, j = 0; 390 int i, j = 0;
390 391
391 for (i = 0; i < indio_dev->masklength; i++) { 392 for (i = 0; i < indio_dev->masklength; i++) {
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index a406ad31b096..3a20cb5d9bff 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -444,9 +444,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
444 case IIO_CHAN_INFO_SCALE: 444 case IIO_CHAN_INFO_SCALE:
445 switch (chan->type) { 445 switch (chan->type) {
446 case IIO_TEMP: 446 case IIO_TEMP:
447 *val = 1; /* 0.01 */ 447 *val = 10;
448 *val2 = 100; 448 return IIO_VAL_INT;
449 break;
450 case IIO_PH: 449 case IIO_PH:
451 *val = 1; /* 0.001 */ 450 *val = 1; /* 0.001 */
452 *val2 = 1000; 451 *val2 = 1000;
@@ -477,7 +476,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev,
477 int val, int val2, long mask) 476 int val, int val2, long mask)
478{ 477{
479 struct atlas_data *data = iio_priv(indio_dev); 478 struct atlas_data *data = iio_priv(indio_dev);
480 __be32 reg = cpu_to_be32(val); 479 __be32 reg = cpu_to_be32(val / 10);
481 480
482 if (val2 != 0 || val < 0 || val > 20000) 481 if (val2 != 0 || val < 0 || val > 20000)
483 return -EINVAL; 482 return -EINVAL;
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 3cd830d52967..616734313f0c 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -267,7 +267,6 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
267#endif 267#endif
268 268
269struct ib_device *ib_device_get_by_index(u32 ifindex); 269struct ib_device *ib_device_get_by_index(u32 ifindex);
270void ib_device_put(struct ib_device *device);
271/* RDMA device netlink */ 270/* RDMA device netlink */
272void nldev_init(void); 271void nldev_init(void);
273void nldev_exit(void); 272void nldev_exit(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 8872453e26c0..238ec42778ef 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -156,19 +156,26 @@ struct ib_device *ib_device_get_by_index(u32 index)
156 down_read(&lists_rwsem); 156 down_read(&lists_rwsem);
157 device = __ib_device_get_by_index(index); 157 device = __ib_device_get_by_index(index);
158 if (device) { 158 if (device) {
159 /* Do not return a device if unregistration has started. */ 159 if (!ib_device_try_get(device))
160 if (!refcount_inc_not_zero(&device->refcount))
161 device = NULL; 160 device = NULL;
162 } 161 }
163 up_read(&lists_rwsem); 162 up_read(&lists_rwsem);
164 return device; 163 return device;
165} 164}
166 165
166/**
167 * ib_device_put - Release IB device reference
168 * @device: device whose reference to be released
169 *
170 * ib_device_put() releases reference to the IB device to allow it to be
171 * unregistered and eventually free.
172 */
167void ib_device_put(struct ib_device *device) 173void ib_device_put(struct ib_device *device)
168{ 174{
169 if (refcount_dec_and_test(&device->refcount)) 175 if (refcount_dec_and_test(&device->refcount))
170 complete(&device->unreg_completion); 176 complete(&device->unreg_completion);
171} 177}
178EXPORT_SYMBOL(ib_device_put);
172 179
173static struct ib_device *__ib_device_get_by_name(const char *name) 180static struct ib_device *__ib_device_get_by_name(const char *name)
174{ 181{
@@ -303,7 +310,6 @@ struct ib_device *ib_alloc_device(size_t size)
303 rwlock_init(&device->client_data_lock); 310 rwlock_init(&device->client_data_lock);
304 INIT_LIST_HEAD(&device->client_data_list); 311 INIT_LIST_HEAD(&device->client_data_list);
305 INIT_LIST_HEAD(&device->port_list); 312 INIT_LIST_HEAD(&device->port_list);
306 refcount_set(&device->refcount, 1);
307 init_completion(&device->unreg_completion); 313 init_completion(&device->unreg_completion);
308 314
309 return device; 315 return device;
@@ -620,6 +626,7 @@ int ib_register_device(struct ib_device *device, const char *name,
620 goto cg_cleanup; 626 goto cg_cleanup;
621 } 627 }
622 628
629 refcount_set(&device->refcount, 1);
623 device->reg_state = IB_DEV_REGISTERED; 630 device->reg_state = IB_DEV_REGISTERED;
624 631
625 list_for_each_entry(client, &client_list, list) 632 list_for_each_entry(client, &client_list, list)
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index a4ec43093cb3..acb882f279cb 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -352,6 +352,8 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
352 umem->writable = 1; 352 umem->writable = 1;
353 umem->is_odp = 1; 353 umem->is_odp = 1;
354 odp_data->per_mm = per_mm; 354 odp_data->per_mm = per_mm;
355 umem->owning_mm = per_mm->mm;
356 mmgrab(umem->owning_mm);
355 357
356 mutex_init(&odp_data->umem_mutex); 358 mutex_init(&odp_data->umem_mutex);
357 init_completion(&odp_data->notifier_completion); 359 init_completion(&odp_data->notifier_completion);
@@ -384,6 +386,7 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
384out_page_list: 386out_page_list:
385 vfree(odp_data->page_list); 387 vfree(odp_data->page_list);
386out_odp_data: 388out_odp_data:
389 mmdrop(umem->owning_mm);
387 kfree(odp_data); 390 kfree(odp_data);
388 return ERR_PTR(ret); 391 return ERR_PTR(ret);
389} 392}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 2890a77339e1..5f366838b7ff 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -204,6 +204,9 @@ void ib_uverbs_release_file(struct kref *ref)
204 if (atomic_dec_and_test(&file->device->refcount)) 204 if (atomic_dec_and_test(&file->device->refcount))
205 ib_uverbs_comp_dev(file->device); 205 ib_uverbs_comp_dev(file->device);
206 206
207 if (file->async_file)
208 kref_put(&file->async_file->ref,
209 ib_uverbs_release_async_event_file);
207 put_device(&file->device->dev); 210 put_device(&file->device->dev);
208 kfree(file); 211 kfree(file);
209} 212}
@@ -964,11 +967,19 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
964 967
965 /* Get an arbitrary mm pointer that hasn't been cleaned yet */ 968 /* Get an arbitrary mm pointer that hasn't been cleaned yet */
966 mutex_lock(&ufile->umap_lock); 969 mutex_lock(&ufile->umap_lock);
967 if (!list_empty(&ufile->umaps)) { 970 while (!list_empty(&ufile->umaps)) {
968 mm = list_first_entry(&ufile->umaps, 971 int ret;
969 struct rdma_umap_priv, list) 972
970 ->vma->vm_mm; 973 priv = list_first_entry(&ufile->umaps,
971 mmget(mm); 974 struct rdma_umap_priv, list);
975 mm = priv->vma->vm_mm;
976 ret = mmget_not_zero(mm);
977 if (!ret) {
978 list_del_init(&priv->list);
979 mm = NULL;
980 continue;
981 }
982 break;
972 } 983 }
973 mutex_unlock(&ufile->umap_lock); 984 mutex_unlock(&ufile->umap_lock);
974 if (!mm) 985 if (!mm)
@@ -1096,10 +1107,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
1096 list_del_init(&file->list); 1107 list_del_init(&file->list);
1097 mutex_unlock(&file->device->lists_mutex); 1108 mutex_unlock(&file->device->lists_mutex);
1098 1109
1099 if (file->async_file)
1100 kref_put(&file->async_file->ref,
1101 ib_uverbs_release_async_event_file);
1102
1103 kref_put(&file->ref, ib_uverbs_release_file); 1110 kref_put(&file->ref, ib_uverbs_release_file);
1104 1111
1105 return 0; 1112 return 0;
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index 5030ec480370..2a3f2f01028d 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -168,12 +168,18 @@ void copy_port_attr_to_resp(struct ib_port_attr *attr,
168static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)( 168static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)(
169 struct uverbs_attr_bundle *attrs) 169 struct uverbs_attr_bundle *attrs)
170{ 170{
171 struct ib_device *ib_dev = attrs->ufile->device->ib_dev; 171 struct ib_device *ib_dev;
172 struct ib_port_attr attr = {}; 172 struct ib_port_attr attr = {};
173 struct ib_uverbs_query_port_resp_ex resp = {}; 173 struct ib_uverbs_query_port_resp_ex resp = {};
174 struct ib_ucontext *ucontext;
174 int ret; 175 int ret;
175 u8 port_num; 176 u8 port_num;
176 177
178 ucontext = ib_uverbs_get_ucontext(attrs);
179 if (IS_ERR(ucontext))
180 return PTR_ERR(ucontext);
181 ib_dev = ucontext->device;
182
177 /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */ 183 /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */
178 if (!ib_dev->ops.query_port) 184 if (!ib_dev->ops.query_port)
179 return -EOPNOTSUPP; 185 return -EOPNOTSUPP;
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index c22ebc774a6a..f9a7e9d29c8b 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
488 vmf = 1; 488 vmf = 1;
489 break; 489 break;
490 case STATUS: 490 case STATUS:
491 if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) { 491 if (flags & VM_WRITE) {
492 ret = -EPERM; 492 ret = -EPERM;
493 goto done; 493 goto done;
494 } 494 }
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 88242fe95eaa..bf96067876c9 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -987,7 +987,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
987 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { 987 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
988 wc.ex.imm_data = packet->ohdr->u.ud.imm_data; 988 wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
989 wc.wc_flags = IB_WC_WITH_IMM; 989 wc.wc_flags = IB_WC_WITH_IMM;
990 tlen -= sizeof(u32);
991 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { 990 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
992 wc.ex.imm_data = 0; 991 wc.ex.imm_data = 0;
993 wc.wc_flags = 0; 992 wc.wc_flags = 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 960b1946c365..12deacf442cf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -210,6 +210,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
210 struct ib_udata *udata) 210 struct ib_udata *udata)
211{ 211{
212 struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); 212 struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
213 struct hns_roce_ib_create_srq_resp resp = {};
213 struct hns_roce_srq *srq; 214 struct hns_roce_srq *srq;
214 int srq_desc_size; 215 int srq_desc_size;
215 int srq_buf_size; 216 int srq_buf_size;
@@ -378,16 +379,21 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
378 379
379 srq->event = hns_roce_ib_srq_event; 380 srq->event = hns_roce_ib_srq_event;
380 srq->ibsrq.ext.xrc.srq_num = srq->srqn; 381 srq->ibsrq.ext.xrc.srq_num = srq->srqn;
382 resp.srqn = srq->srqn;
381 383
382 if (udata) { 384 if (udata) {
383 if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) { 385 if (ib_copy_to_udata(udata, &resp,
386 min(udata->outlen, sizeof(resp)))) {
384 ret = -EFAULT; 387 ret = -EFAULT;
385 goto err_wrid; 388 goto err_srqc_alloc;
386 } 389 }
387 } 390 }
388 391
389 return &srq->ibsrq; 392 return &srq->ibsrq;
390 393
394err_srqc_alloc:
395 hns_roce_srq_free(hr_dev, srq);
396
391err_wrid: 397err_wrid:
392 kvfree(srq->wrid); 398 kvfree(srq->wrid);
393 399
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 25439da8976c..936ee1314bcd 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1411,7 +1411,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
1411 1411
1412 sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr); 1412 sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
1413 if (sqp->tx_ring[wire_tx_ix].ah) 1413 if (sqp->tx_ring[wire_tx_ix].ah)
1414 rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0); 1414 mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0);
1415 sqp->tx_ring[wire_tx_ix].ah = ah; 1415 sqp->tx_ring[wire_tx_ix].ah = ah;
1416 ib_dma_sync_single_for_cpu(&dev->ib_dev, 1416 ib_dma_sync_single_for_cpu(&dev->ib_dev,
1417 sqp->tx_ring[wire_tx_ix].buf.map, 1417 sqp->tx_ring[wire_tx_ix].buf.map,
@@ -1902,7 +1902,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
1902 if (wc.status == IB_WC_SUCCESS) { 1902 if (wc.status == IB_WC_SUCCESS) {
1903 switch (wc.opcode) { 1903 switch (wc.opcode) {
1904 case IB_WC_SEND: 1904 case IB_WC_SEND:
1905 rdma_destroy_ah(sqp->tx_ring[wc.wr_id & 1905 mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1906 (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); 1906 (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
1907 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah 1907 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1908 = NULL; 1908 = NULL;
@@ -1931,7 +1931,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
1931 " status = %d, wrid = 0x%llx\n", 1931 " status = %d, wrid = 0x%llx\n",
1932 ctx->slave, wc.status, wc.wr_id); 1932 ctx->slave, wc.status, wc.wr_id);
1933 if (!MLX4_TUN_IS_RECV(wc.wr_id)) { 1933 if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1934 rdma_destroy_ah(sqp->tx_ring[wc.wr_id & 1934 mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1935 (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); 1935 (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
1936 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah 1936 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1937 = NULL; 1937 = NULL;
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index e8a1e4498e3f..798591a18484 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -630,8 +630,7 @@ const struct uapi_definition mlx5_ib_flow_defs[] = {
630 UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)), 630 UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)),
631 UAPI_DEF_CHAIN_OBJ_TREE( 631 UAPI_DEF_CHAIN_OBJ_TREE(
632 UVERBS_OBJECT_FLOW, 632 UVERBS_OBJECT_FLOW,
633 &mlx5_ib_fs, 633 &mlx5_ib_fs),
634 UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)),
635 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, 634 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
636 &mlx5_ib_flow_actions), 635 &mlx5_ib_flow_actions),
637 {}, 636 {},
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 01e0f6200631..4ee32964e1dd 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1595,10 +1595,12 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
1595 struct prefetch_mr_work *w = 1595 struct prefetch_mr_work *w =
1596 container_of(work, struct prefetch_mr_work, work); 1596 container_of(work, struct prefetch_mr_work, work);
1597 1597
1598 if (w->dev->ib_dev.reg_state == IB_DEV_REGISTERED) 1598 if (ib_device_try_get(&w->dev->ib_dev)) {
1599 mlx5_ib_prefetch_sg_list(w->dev, w->pf_flags, w->sg_list, 1599 mlx5_ib_prefetch_sg_list(w->dev, w->pf_flags, w->sg_list,
1600 w->num_sge); 1600 w->num_sge);
1601 1601 ib_device_put(&w->dev->ib_dev);
1602 }
1603 put_device(&w->dev->ib_dev.dev);
1602 kfree(w); 1604 kfree(w);
1603} 1605}
1604 1606
@@ -1617,15 +1619,13 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
1617 return mlx5_ib_prefetch_sg_list(dev, pf_flags, sg_list, 1619 return mlx5_ib_prefetch_sg_list(dev, pf_flags, sg_list,
1618 num_sge); 1620 num_sge);
1619 1621
1620 if (dev->ib_dev.reg_state != IB_DEV_REGISTERED)
1621 return -ENODEV;
1622
1623 work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL); 1622 work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL);
1624 if (!work) 1623 if (!work)
1625 return -ENOMEM; 1624 return -ENOMEM;
1626 1625
1627 memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge)); 1626 memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge));
1628 1627
1628 get_device(&dev->ib_dev.dev);
1629 work->dev = dev; 1629 work->dev = dev;
1630 work->pf_flags = pf_flags; 1630 work->pf_flags = pf_flags;
1631 work->num_sge = num_sge; 1631 work->num_sge = num_sge;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index dd2ae640bc84..7db778d96ef5 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1912,14 +1912,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1912 } 1912 }
1913 1913
1914 if (!check_flags_mask(ucmd.flags, 1914 if (!check_flags_mask(ucmd.flags,
1915 MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
1916 MLX5_QP_FLAG_BFREG_INDEX |
1917 MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE |
1918 MLX5_QP_FLAG_SCATTER_CQE |
1915 MLX5_QP_FLAG_SIGNATURE | 1919 MLX5_QP_FLAG_SIGNATURE |
1916 MLX5_QP_FLAG_SCATTER_CQE | 1920 MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
1917 MLX5_QP_FLAG_TUNNEL_OFFLOADS | 1921 MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
1918 MLX5_QP_FLAG_BFREG_INDEX | 1922 MLX5_QP_FLAG_TUNNEL_OFFLOADS |
1919 MLX5_QP_FLAG_TYPE_DCT | 1923 MLX5_QP_FLAG_TYPE_DCI |
1920 MLX5_QP_FLAG_TYPE_DCI | 1924 MLX5_QP_FLAG_TYPE_DCT))
1921 MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
1922 MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE))
1923 return -EINVAL; 1925 return -EINVAL;
1924 1926
1925 err = get_qp_user_index(to_mucontext(pd->uobject->context), 1927 err = get_qp_user_index(to_mucontext(pd->uobject->context),
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 868da0ece7ba..445ea19a2ec8 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -512,7 +512,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
512 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { 512 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
513 wc.ex.imm_data = ohdr->u.ud.imm_data; 513 wc.ex.imm_data = ohdr->u.ud.imm_data;
514 wc.wc_flags = IB_WC_WITH_IMM; 514 wc.wc_flags = IB_WC_WITH_IMM;
515 tlen -= sizeof(u32);
516 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { 515 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
517 wc.ex.imm_data = 0; 516 wc.ex.imm_data = 0;
518 wc.wc_flags = 0; 517 wc.wc_flags = 0;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index a1bd8cfc2c25..c6cc3e4ab71d 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -2910,6 +2910,8 @@ send:
2910 goto op_err; 2910 goto op_err;
2911 if (!ret) 2911 if (!ret)
2912 goto rnr_nak; 2912 goto rnr_nak;
2913 if (wqe->length > qp->r_len)
2914 goto inv_err;
2913 break; 2915 break;
2914 2916
2915 case IB_WR_RDMA_WRITE_WITH_IMM: 2917 case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -3078,7 +3080,10 @@ op_err:
3078 goto err; 3080 goto err;
3079 3081
3080inv_err: 3082inv_err:
3081 send_status = IB_WC_REM_INV_REQ_ERR; 3083 send_status =
3084 sqp->ibqp.qp_type == IB_QPT_RC ?
3085 IB_WC_REM_INV_REQ_ERR :
3086 IB_WC_SUCCESS;
3082 wc.status = IB_WC_LOC_QP_OP_ERR; 3087 wc.status = IB_WC_LOC_QP_OP_ERR;
3083 goto err; 3088 goto err;
3084 3089
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 1da119d901a9..73e808c1e6ad 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -248,7 +248,6 @@ struct ipoib_cm_tx {
248 struct list_head list; 248 struct list_head list;
249 struct net_device *dev; 249 struct net_device *dev;
250 struct ipoib_neigh *neigh; 250 struct ipoib_neigh *neigh;
251 struct ipoib_path *path;
252 struct ipoib_tx_buf *tx_ring; 251 struct ipoib_tx_buf *tx_ring;
253 unsigned int tx_head; 252 unsigned int tx_head;
254 unsigned int tx_tail; 253 unsigned int tx_tail;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 0428e01e8f69..aa9dcfc36cd3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1312,7 +1312,6 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
1312 1312
1313 neigh->cm = tx; 1313 neigh->cm = tx;
1314 tx->neigh = neigh; 1314 tx->neigh = neigh;
1315 tx->path = path;
1316 tx->dev = dev; 1315 tx->dev = dev;
1317 list_add(&tx->list, &priv->cm.start_list); 1316 list_add(&tx->list, &priv->cm.start_list);
1318 set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); 1317 set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
@@ -1371,7 +1370,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
1371 neigh->daddr + QPN_AND_OPTIONS_OFFSET); 1370 neigh->daddr + QPN_AND_OPTIONS_OFFSET);
1372 goto free_neigh; 1371 goto free_neigh;
1373 } 1372 }
1374 memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec)); 1373 memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
1375 1374
1376 spin_unlock_irqrestore(&priv->lock, flags); 1375 spin_unlock_irqrestore(&priv->lock, flags);
1377 netif_tx_unlock_bh(dev); 1376 netif_tx_unlock_bh(dev);
diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c
index bae08226e3d9..a7cfab3db9ee 100644
--- a/drivers/input/serio/olpc_apsp.c
+++ b/drivers/input/serio/olpc_apsp.c
@@ -23,7 +23,6 @@
23#include <linux/of.h> 23#include <linux/of.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/clk.h>
27 26
28/* 27/*
29 * The OLPC XO-1.75 and XO-4 laptops do not have a hardware PS/2 controller. 28 * The OLPC XO-1.75 and XO-4 laptops do not have a hardware PS/2 controller.
@@ -75,7 +74,6 @@ struct olpc_apsp {
75 struct serio *kbio; 74 struct serio *kbio;
76 struct serio *padio; 75 struct serio *padio;
77 void __iomem *base; 76 void __iomem *base;
78 struct clk *clk;
79 int open_count; 77 int open_count;
80 int irq; 78 int irq;
81}; 79};
@@ -148,17 +146,11 @@ static int olpc_apsp_open(struct serio *port)
148 struct olpc_apsp *priv = port->port_data; 146 struct olpc_apsp *priv = port->port_data;
149 unsigned int tmp; 147 unsigned int tmp;
150 unsigned long l; 148 unsigned long l;
151 int error;
152 149
153 if (priv->open_count++ == 0) { 150 if (priv->open_count++ == 0) {
154 error = clk_prepare_enable(priv->clk);
155 if (error)
156 return error;
157
158 l = readl(priv->base + COMMAND_FIFO_STATUS); 151 l = readl(priv->base + COMMAND_FIFO_STATUS);
159 if (!(l & CMD_STS_MASK)) { 152 if (!(l & CMD_STS_MASK)) {
160 dev_err(priv->dev, "SP cannot accept commands.\n"); 153 dev_err(priv->dev, "SP cannot accept commands.\n");
161 clk_disable_unprepare(priv->clk);
162 return -EIO; 154 return -EIO;
163 } 155 }
164 156
@@ -179,8 +171,6 @@ static void olpc_apsp_close(struct serio *port)
179 /* Disable interrupt 0 */ 171 /* Disable interrupt 0 */
180 tmp = readl(priv->base + PJ_INTERRUPT_MASK); 172 tmp = readl(priv->base + PJ_INTERRUPT_MASK);
181 writel(tmp | INT_0, priv->base + PJ_INTERRUPT_MASK); 173 writel(tmp | INT_0, priv->base + PJ_INTERRUPT_MASK);
182
183 clk_disable_unprepare(priv->clk);
184 } 174 }
185} 175}
186 176
@@ -208,10 +198,6 @@ static int olpc_apsp_probe(struct platform_device *pdev)
208 if (priv->irq < 0) 198 if (priv->irq < 0)
209 return priv->irq; 199 return priv->irq;
210 200
211 priv->clk = devm_clk_get(&pdev->dev, "sp");
212 if (IS_ERR(priv->clk))
213 return PTR_ERR(priv->clk);
214
215 /* KEYBOARD */ 201 /* KEYBOARD */
216 kb_serio = kzalloc(sizeof(struct serio), GFP_KERNEL); 202 kb_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
217 if (!kb_serio) 203 if (!kb_serio)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 87ba23a75b38..2a7b78bb98b4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1991,16 +1991,13 @@ static void do_attach(struct iommu_dev_data *dev_data,
1991 1991
1992static void do_detach(struct iommu_dev_data *dev_data) 1992static void do_detach(struct iommu_dev_data *dev_data)
1993{ 1993{
1994 struct protection_domain *domain = dev_data->domain;
1994 struct amd_iommu *iommu; 1995 struct amd_iommu *iommu;
1995 u16 alias; 1996 u16 alias;
1996 1997
1997 iommu = amd_iommu_rlookup_table[dev_data->devid]; 1998 iommu = amd_iommu_rlookup_table[dev_data->devid];
1998 alias = dev_data->alias; 1999 alias = dev_data->alias;
1999 2000
2000 /* decrease reference counters */
2001 dev_data->domain->dev_iommu[iommu->index] -= 1;
2002 dev_data->domain->dev_cnt -= 1;
2003
2004 /* Update data structures */ 2001 /* Update data structures */
2005 dev_data->domain = NULL; 2002 dev_data->domain = NULL;
2006 list_del(&dev_data->list); 2003 list_del(&dev_data->list);
@@ -2010,6 +2007,16 @@ static void do_detach(struct iommu_dev_data *dev_data)
2010 2007
2011 /* Flush the DTE entry */ 2008 /* Flush the DTE entry */
2012 device_flush_dte(dev_data); 2009 device_flush_dte(dev_data);
2010
2011 /* Flush IOTLB */
2012 domain_flush_tlb_pde(domain);
2013
2014 /* Wait for the flushes to finish */
2015 domain_flush_complete(domain);
2016
2017 /* decrease reference counters - needs to happen after the flushes */
2018 domain->dev_iommu[iommu->index] -= 1;
2019 domain->dev_cnt -= 1;
2013} 2020}
2014 2021
2015/* 2022/*
@@ -2617,13 +2624,13 @@ out_unmap:
2617 bus_addr = address + s->dma_address + (j << PAGE_SHIFT); 2624 bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
2618 iommu_unmap_page(domain, bus_addr, PAGE_SIZE); 2625 iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
2619 2626
2620 if (--mapped_pages) 2627 if (--mapped_pages == 0)
2621 goto out_free_iova; 2628 goto out_free_iova;
2622 } 2629 }
2623 } 2630 }
2624 2631
2625out_free_iova: 2632out_free_iova:
2626 free_iova_fast(&dma_dom->iovad, address, npages); 2633 free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
2627 2634
2628out_err: 2635out_err:
2629 return 0; 2636 return 0;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2bd9ac285c0d..1457f931218e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -5294,7 +5294,7 @@ static void intel_iommu_put_resv_regions(struct device *dev,
5294 struct iommu_resv_region *entry, *next; 5294 struct iommu_resv_region *entry, *next;
5295 5295
5296 list_for_each_entry_safe(entry, next, head, list) { 5296 list_for_each_entry_safe(entry, next, head, list) {
5297 if (entry->type == IOMMU_RESV_RESERVED) 5297 if (entry->type == IOMMU_RESV_MSI)
5298 kfree(entry); 5298 kfree(entry);
5299 } 5299 }
5300} 5300}
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 730f7dabcf37..7e0df67bd3e9 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -441,6 +441,10 @@ static int mtk_iommu_add_device(struct device *dev)
441 iommu_spec.args_count = count; 441 iommu_spec.args_count = count;
442 442
443 mtk_iommu_create_mapping(dev, &iommu_spec); 443 mtk_iommu_create_mapping(dev, &iommu_spec);
444
445 /* dev->iommu_fwspec might have changed */
446 fwspec = dev_iommu_fwspec_get(dev);
447
444 of_node_put(iommu_spec.np); 448 of_node_put(iommu_spec.np);
445 } 449 }
446 450
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index 5385f5768345..27933338f7b3 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -71,14 +71,17 @@ static void xtensa_mx_irq_mask(struct irq_data *d)
71 unsigned int mask = 1u << d->hwirq; 71 unsigned int mask = 1u << d->hwirq;
72 72
73 if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | 73 if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
74 XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { 74 XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
75 set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) - 75 unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);
76 HW_IRQ_MX_BASE), MIENG); 76
77 } else { 77 if (ext_irq >= HW_IRQ_MX_BASE) {
78 mask = __this_cpu_read(cached_irq_mask) & ~mask; 78 set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENG);
79 __this_cpu_write(cached_irq_mask, mask); 79 return;
80 xtensa_set_sr(mask, intenable); 80 }
81 } 81 }
82 mask = __this_cpu_read(cached_irq_mask) & ~mask;
83 __this_cpu_write(cached_irq_mask, mask);
84 xtensa_set_sr(mask, intenable);
82} 85}
83 86
84static void xtensa_mx_irq_unmask(struct irq_data *d) 87static void xtensa_mx_irq_unmask(struct irq_data *d)
@@ -86,14 +89,17 @@ static void xtensa_mx_irq_unmask(struct irq_data *d)
86 unsigned int mask = 1u << d->hwirq; 89 unsigned int mask = 1u << d->hwirq;
87 90
88 if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | 91 if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
89 XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { 92 XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
90 set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) - 93 unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);
91 HW_IRQ_MX_BASE), MIENGSET); 94
92 } else { 95 if (ext_irq >= HW_IRQ_MX_BASE) {
93 mask |= __this_cpu_read(cached_irq_mask); 96 set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENGSET);
94 __this_cpu_write(cached_irq_mask, mask); 97 return;
95 xtensa_set_sr(mask, intenable); 98 }
96 } 99 }
100 mask |= __this_cpu_read(cached_irq_mask);
101 __this_cpu_write(cached_irq_mask, mask);
102 xtensa_set_sr(mask, intenable);
97} 103}
98 104
99static void xtensa_mx_irq_enable(struct irq_data *d) 105static void xtensa_mx_irq_enable(struct irq_data *d)
@@ -113,7 +119,11 @@ static void xtensa_mx_irq_ack(struct irq_data *d)
113 119
114static int xtensa_mx_irq_retrigger(struct irq_data *d) 120static int xtensa_mx_irq_retrigger(struct irq_data *d)
115{ 121{
116 xtensa_set_sr(1 << d->hwirq, intset); 122 unsigned int mask = 1u << d->hwirq;
123
124 if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
125 return 0;
126 xtensa_set_sr(mask, intset);
117 return 1; 127 return 1;
118} 128}
119 129
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index c200234dd2c9..ab12328be5ee 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -70,7 +70,11 @@ static void xtensa_irq_ack(struct irq_data *d)
70 70
71static int xtensa_irq_retrigger(struct irq_data *d) 71static int xtensa_irq_retrigger(struct irq_data *d)
72{ 72{
73 xtensa_set_sr(1 << d->hwirq, intset); 73 unsigned int mask = 1u << d->hwirq;
74
75 if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
76 return 0;
77 xtensa_set_sr(mask, intset);
74 return 1; 78 return 1;
75} 79}
76 80
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index 211ed6cffd10..578978711887 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t)
170 spin_lock_irqsave(&timer->dev->lock, flags); 170 spin_lock_irqsave(&timer->dev->lock, flags);
171 if (timer->id >= 0) 171 if (timer->id >= 0)
172 list_move_tail(&timer->list, &timer->dev->expired); 172 list_move_tail(&timer->list, &timer->dev->expired);
173 spin_unlock_irqrestore(&timer->dev->lock, flags);
174 wake_up_interruptible(&timer->dev->wait); 173 wake_up_interruptible(&timer->dev->wait);
174 spin_unlock_irqrestore(&timer->dev->lock, flags);
175} 175}
176 176
177static int 177static int
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 4eb5f8c56535..a20531e5f3b4 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -131,7 +131,7 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
131static void rq_completed(struct mapped_device *md) 131static void rq_completed(struct mapped_device *md)
132{ 132{
133 /* nudge anyone waiting on suspend queue */ 133 /* nudge anyone waiting on suspend queue */
134 if (unlikely(waitqueue_active(&md->wait))) 134 if (unlikely(wq_has_sleeper(&md->wait)))
135 wake_up(&md->wait); 135 wake_up(&md->wait);
136 136
137 /* 137 /*
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2b53c3841b53..515e6af9bed2 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -699,7 +699,7 @@ static void end_io_acct(struct dm_io *io)
699 true, duration, &io->stats_aux); 699 true, duration, &io->stats_aux);
700 700
701 /* nudge anyone waiting on suspend queue */ 701 /* nudge anyone waiting on suspend queue */
702 if (unlikely(waitqueue_active(&md->wait))) 702 if (unlikely(wq_has_sleeper(&md->wait)))
703 wake_up(&md->wait); 703 wake_up(&md->wait);
704} 704}
705 705
@@ -1336,7 +1336,11 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
1336 return r; 1336 return r;
1337 } 1337 }
1338 1338
1339 bio_trim(clone, sector - clone->bi_iter.bi_sector, len); 1339 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1340 clone->bi_iter.bi_size = to_bytes(len);
1341
1342 if (bio_integrity(bio))
1343 bio_integrity_trim(clone);
1340 1344
1341 return 0; 1345 return 0;
1342} 1346}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index ec3a5ef7fee0..cbbe6b6535be 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1935,12 +1935,14 @@ out:
1935} 1935}
1936 1936
1937static struct stripe_head * 1937static struct stripe_head *
1938r5c_recovery_alloc_stripe(struct r5conf *conf, 1938r5c_recovery_alloc_stripe(
1939 sector_t stripe_sect) 1939 struct r5conf *conf,
1940 sector_t stripe_sect,
1941 int noblock)
1940{ 1942{
1941 struct stripe_head *sh; 1943 struct stripe_head *sh;
1942 1944
1943 sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0); 1945 sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
1944 if (!sh) 1946 if (!sh)
1945 return NULL; /* no more stripe available */ 1947 return NULL; /* no more stripe available */
1946 1948
@@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
2150 stripe_sect); 2152 stripe_sect);
2151 2153
2152 if (!sh) { 2154 if (!sh) {
2153 sh = r5c_recovery_alloc_stripe(conf, stripe_sect); 2155 sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
2154 /* 2156 /*
2155 * cannot get stripe from raid5_get_active_stripe 2157 * cannot get stripe from raid5_get_active_stripe
2156 * try replay some stripes 2158 * try replay some stripes
@@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
2159 r5c_recovery_replay_stripes( 2161 r5c_recovery_replay_stripes(
2160 cached_stripe_list, ctx); 2162 cached_stripe_list, ctx);
2161 sh = r5c_recovery_alloc_stripe( 2163 sh = r5c_recovery_alloc_stripe(
2162 conf, stripe_sect); 2164 conf, stripe_sect, 1);
2163 } 2165 }
2164 if (!sh) { 2166 if (!sh) {
2167 int new_size = conf->min_nr_stripes * 2;
2165 pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n", 2168 pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
2166 mdname(mddev), 2169 mdname(mddev),
2167 conf->min_nr_stripes * 2); 2170 new_size);
2168 raid5_set_cache_size(mddev, 2171 ret = raid5_set_cache_size(mddev, new_size);
2169 conf->min_nr_stripes * 2); 2172 if (conf->min_nr_stripes <= new_size / 2) {
2170 sh = r5c_recovery_alloc_stripe(conf, 2173 pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
2171 stripe_sect); 2174 mdname(mddev),
2175 ret,
2176 new_size,
2177 conf->min_nr_stripes,
2178 conf->max_nr_stripes);
2179 return -ENOMEM;
2180 }
2181 sh = r5c_recovery_alloc_stripe(
2182 conf, stripe_sect, 0);
2172 } 2183 }
2173 if (!sh) { 2184 if (!sh) {
2174 pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n", 2185 pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
2175 mdname(mddev)); 2186 mdname(mddev));
2176 return -ENOMEM; 2187 return -ENOMEM;
2177 } 2188 }
2178 list_add_tail(&sh->lru, cached_stripe_list); 2189 list_add_tail(&sh->lru, cached_stripe_list);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4990f0319f6c..cecea901ab8c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6369,6 +6369,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
6369int 6369int
6370raid5_set_cache_size(struct mddev *mddev, int size) 6370raid5_set_cache_size(struct mddev *mddev, int size)
6371{ 6371{
6372 int result = 0;
6372 struct r5conf *conf = mddev->private; 6373 struct r5conf *conf = mddev->private;
6373 6374
6374 if (size <= 16 || size > 32768) 6375 if (size <= 16 || size > 32768)
@@ -6385,11 +6386,14 @@ raid5_set_cache_size(struct mddev *mddev, int size)
6385 6386
6386 mutex_lock(&conf->cache_size_mutex); 6387 mutex_lock(&conf->cache_size_mutex);
6387 while (size > conf->max_nr_stripes) 6388 while (size > conf->max_nr_stripes)
6388 if (!grow_one_stripe(conf, GFP_KERNEL)) 6389 if (!grow_one_stripe(conf, GFP_KERNEL)) {
6390 conf->min_nr_stripes = conf->max_nr_stripes;
6391 result = -ENOMEM;
6389 break; 6392 break;
6393 }
6390 mutex_unlock(&conf->cache_size_mutex); 6394 mutex_unlock(&conf->cache_size_mutex);
6391 6395
6392 return 0; 6396 return result;
6393} 6397}
6394EXPORT_SYMBOL(raid5_set_cache_size); 6398EXPORT_SYMBOL(raid5_set_cache_size);
6395 6399
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f461460a2aeb..76f9909cf396 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1419,7 +1419,7 @@ config MFD_TPS65217
1419 1419
1420config MFD_TPS68470 1420config MFD_TPS68470
1421 bool "TI TPS68470 Power Management / LED chips" 1421 bool "TI TPS68470 Power Management / LED chips"
1422 depends on ACPI && I2C=y 1422 depends on ACPI && PCI && I2C=y
1423 select MFD_CORE 1423 select MFD_CORE
1424 select REGMAP_I2C 1424 select REGMAP_I2C
1425 select I2C_DESIGNWARE_PLATFORM 1425 select I2C_DESIGNWARE_PLATFORM
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1fc8ea0f519b..ca4c9cc218a2 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -401,8 +401,11 @@ static void mei_io_list_flush_cl(struct list_head *head,
401 struct mei_cl_cb *cb, *next; 401 struct mei_cl_cb *cb, *next;
402 402
403 list_for_each_entry_safe(cb, next, head, list) { 403 list_for_each_entry_safe(cb, next, head, list) {
404 if (cl == cb->cl) 404 if (cl == cb->cl) {
405 list_del_init(&cb->list); 405 list_del_init(&cb->list);
406 if (cb->fop_type == MEI_FOP_READ)
407 mei_io_cb_free(cb);
408 }
406 } 409 }
407} 410}
408 411
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 23739a60517f..bb1ee9834a02 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -139,6 +139,8 @@
139#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ 139#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
140#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ 140#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
141 141
142#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
143
142/* 144/*
143 * MEI HW Section 145 * MEI HW Section
144 */ 146 */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index e89497f858ae..3ab946ad3257 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -105,6 +105,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
105 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, 105 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
106 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, 106 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
107 107
108 {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
109
108 /* required last entry */ 110 /* required last entry */
109 {0, } 111 {0, }
110}; 112};
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index 2bfa3a903bf9..744757f541be 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -47,7 +47,8 @@
47 * @dc: Virtio device control 47 * @dc: Virtio device control
48 * @vpdev: VOP device which is the parent for this virtio device 48 * @vpdev: VOP device which is the parent for this virtio device
49 * @vr: Buffer for accessing the VRING 49 * @vr: Buffer for accessing the VRING
50 * @used: Buffer for used 50 * @used_virt: Virtual address of used ring
51 * @used: DMA address of used ring
51 * @used_size: Size of the used buffer 52 * @used_size: Size of the used buffer
52 * @reset_done: Track whether VOP reset is complete 53 * @reset_done: Track whether VOP reset is complete
53 * @virtio_cookie: Cookie returned upon requesting a interrupt 54 * @virtio_cookie: Cookie returned upon requesting a interrupt
@@ -61,6 +62,7 @@ struct _vop_vdev {
61 struct mic_device_ctrl __iomem *dc; 62 struct mic_device_ctrl __iomem *dc;
62 struct vop_device *vpdev; 63 struct vop_device *vpdev;
63 void __iomem *vr[VOP_MAX_VRINGS]; 64 void __iomem *vr[VOP_MAX_VRINGS];
65 void *used_virt[VOP_MAX_VRINGS];
64 dma_addr_t used[VOP_MAX_VRINGS]; 66 dma_addr_t used[VOP_MAX_VRINGS];
65 int used_size[VOP_MAX_VRINGS]; 67 int used_size[VOP_MAX_VRINGS];
66 struct completion reset_done; 68 struct completion reset_done;
@@ -260,12 +262,12 @@ static bool vop_notify(struct virtqueue *vq)
260static void vop_del_vq(struct virtqueue *vq, int n) 262static void vop_del_vq(struct virtqueue *vq, int n)
261{ 263{
262 struct _vop_vdev *vdev = to_vopvdev(vq->vdev); 264 struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
263 struct vring *vr = (struct vring *)(vq + 1);
264 struct vop_device *vpdev = vdev->vpdev; 265 struct vop_device *vpdev = vdev->vpdev;
265 266
266 dma_unmap_single(&vpdev->dev, vdev->used[n], 267 dma_unmap_single(&vpdev->dev, vdev->used[n],
267 vdev->used_size[n], DMA_BIDIRECTIONAL); 268 vdev->used_size[n], DMA_BIDIRECTIONAL);
268 free_pages((unsigned long)vr->used, get_order(vdev->used_size[n])); 269 free_pages((unsigned long)vdev->used_virt[n],
270 get_order(vdev->used_size[n]));
269 vring_del_virtqueue(vq); 271 vring_del_virtqueue(vq);
270 vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]); 272 vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
271 vdev->vr[n] = NULL; 273 vdev->vr[n] = NULL;
@@ -283,6 +285,26 @@ static void vop_del_vqs(struct virtio_device *dev)
283 vop_del_vq(vq, idx++); 285 vop_del_vq(vq, idx++);
284} 286}
285 287
288static struct virtqueue *vop_new_virtqueue(unsigned int index,
289 unsigned int num,
290 struct virtio_device *vdev,
291 bool context,
292 void *pages,
293 bool (*notify)(struct virtqueue *vq),
294 void (*callback)(struct virtqueue *vq),
295 const char *name,
296 void *used)
297{
298 bool weak_barriers = false;
299 struct vring vring;
300
301 vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN);
302 vring.used = used;
303
304 return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
305 notify, callback, name);
306}
307
286/* 308/*
287 * This routine will assign vring's allocated in host/io memory. Code in 309 * This routine will assign vring's allocated in host/io memory. Code in
288 * virtio_ring.c however continues to access this io memory as if it were local 310 * virtio_ring.c however continues to access this io memory as if it were local
@@ -302,7 +324,6 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
302 struct _mic_vring_info __iomem *info; 324 struct _mic_vring_info __iomem *info;
303 void *used; 325 void *used;
304 int vr_size, _vr_size, err, magic; 326 int vr_size, _vr_size, err, magic;
305 struct vring *vr;
306 u8 type = ioread8(&vdev->desc->type); 327 u8 type = ioread8(&vdev->desc->type);
307 328
308 if (index >= ioread8(&vdev->desc->num_vq)) 329 if (index >= ioread8(&vdev->desc->num_vq))
@@ -322,17 +343,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
322 return ERR_PTR(-ENOMEM); 343 return ERR_PTR(-ENOMEM);
323 vdev->vr[index] = va; 344 vdev->vr[index] = va;
324 memset_io(va, 0x0, _vr_size); 345 memset_io(va, 0x0, _vr_size);
325 vq = vring_new_virtqueue( 346
326 index,
327 le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN,
328 dev,
329 false,
330 ctx,
331 (void __force *)va, vop_notify, callback, name);
332 if (!vq) {
333 err = -ENOMEM;
334 goto unmap;
335 }
336 info = va + _vr_size; 347 info = va + _vr_size;
337 magic = ioread32(&info->magic); 348 magic = ioread32(&info->magic);
338 349
@@ -341,18 +352,27 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
341 goto unmap; 352 goto unmap;
342 } 353 }
343 354
344 /* Allocate and reassign used ring now */
345 vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + 355 vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
346 sizeof(struct vring_used_elem) * 356 sizeof(struct vring_used_elem) *
347 le16_to_cpu(config.num)); 357 le16_to_cpu(config.num));
348 used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 358 used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
349 get_order(vdev->used_size[index])); 359 get_order(vdev->used_size[index]));
360 vdev->used_virt[index] = used;
350 if (!used) { 361 if (!used) {
351 err = -ENOMEM; 362 err = -ENOMEM;
352 dev_err(_vop_dev(vdev), "%s %d err %d\n", 363 dev_err(_vop_dev(vdev), "%s %d err %d\n",
353 __func__, __LINE__, err); 364 __func__, __LINE__, err);
354 goto del_vq; 365 goto unmap;
366 }
367
368 vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx,
369 (void __force *)va, vop_notify, callback,
370 name, used);
371 if (!vq) {
372 err = -ENOMEM;
373 goto free_used;
355 } 374 }
375
356 vdev->used[index] = dma_map_single(&vpdev->dev, used, 376 vdev->used[index] = dma_map_single(&vpdev->dev, used,
357 vdev->used_size[index], 377 vdev->used_size[index],
358 DMA_BIDIRECTIONAL); 378 DMA_BIDIRECTIONAL);
@@ -360,26 +380,17 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
360 err = -ENOMEM; 380 err = -ENOMEM;
361 dev_err(_vop_dev(vdev), "%s %d err %d\n", 381 dev_err(_vop_dev(vdev), "%s %d err %d\n",
362 __func__, __LINE__, err); 382 __func__, __LINE__, err);
363 goto free_used; 383 goto del_vq;
364 } 384 }
365 writeq(vdev->used[index], &vqconfig->used_address); 385 writeq(vdev->used[index], &vqconfig->used_address);
366 /*
367 * To reassign the used ring here we are directly accessing
368 * struct vring_virtqueue which is a private data structure
369 * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
370 * vring_new_virtqueue() would ensure that
371 * (&vq->vring == (struct vring *) (&vq->vq + 1));
372 */
373 vr = (struct vring *)(vq + 1);
374 vr->used = used;
375 386
376 vq->priv = vdev; 387 vq->priv = vdev;
377 return vq; 388 return vq;
389del_vq:
390 vring_del_virtqueue(vq);
378free_used: 391free_used:
379 free_pages((unsigned long)used, 392 free_pages((unsigned long)used,
380 get_order(vdev->used_size[index])); 393 get_order(vdev->used_size[index]));
381del_vq:
382 vring_del_virtqueue(vq);
383unmap: 394unmap:
384 vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]); 395 vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
385 return ERR_PTR(err); 396 return ERR_PTR(err);
@@ -581,6 +592,8 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
581 int ret = -1; 592 int ret = -1;
582 593
583 if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) { 594 if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
595 struct device *dev = get_device(&vdev->vdev.dev);
596
584 dev_dbg(&vpdev->dev, 597 dev_dbg(&vpdev->dev,
585 "%s %d config_change %d type %d vdev %p\n", 598 "%s %d config_change %d type %d vdev %p\n",
586 __func__, __LINE__, 599 __func__, __LINE__,
@@ -592,7 +605,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
592 iowrite8(-1, &dc->h2c_vdev_db); 605 iowrite8(-1, &dc->h2c_vdev_db);
593 if (status & VIRTIO_CONFIG_S_DRIVER_OK) 606 if (status & VIRTIO_CONFIG_S_DRIVER_OK)
594 wait_for_completion(&vdev->reset_done); 607 wait_for_completion(&vdev->reset_done);
595 put_device(&vdev->vdev.dev); 608 put_device(dev);
596 iowrite8(1, &dc->guest_ack); 609 iowrite8(1, &dc->guest_ack);
597 dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n", 610 dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
598 __func__, __LINE__, ioread8(&dc->guest_ack)); 611 __func__, __LINE__, ioread8(&dc->guest_ack));
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 50293529d6de..c9e7aa50bb0a 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1431,6 +1431,8 @@ static int bcm2835_probe(struct platform_device *pdev)
1431 1431
1432err: 1432err:
1433 dev_dbg(dev, "%s -> err %d\n", __func__, ret); 1433 dev_dbg(dev, "%s -> err %d\n", __func__, ret);
1434 if (host->dma_chan_rxtx)
1435 dma_release_channel(host->dma_chan_rxtx);
1434 mmc_free_host(mmc); 1436 mmc_free_host(mmc);
1435 1437
1436 return ret; 1438 return ret;
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 8afeaf81ae66..833ef0590af8 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -846,7 +846,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
846 846
847 if (timing == MMC_TIMING_MMC_HS400 && 847 if (timing == MMC_TIMING_MMC_HS400 &&
848 host->dev_comp->hs400_tune) 848 host->dev_comp->hs400_tune)
849 sdr_set_field(host->base + PAD_CMD_TUNE, 849 sdr_set_field(host->base + tune_reg,
850 MSDC_PAD_TUNE_CMDRRDLY, 850 MSDC_PAD_TUNE_CMDRRDLY,
851 host->hs400_cmd_int_delay); 851 host->hs400_cmd_int_delay);
852 dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock, 852 dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock,
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 90f514252987..d9c56a779c08 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -511,9 +511,6 @@ static void b53_srab_prepare_irq(struct platform_device *pdev)
511 /* Clear all pending interrupts */ 511 /* Clear all pending interrupts */
512 writel(0xffffffff, priv->regs + B53_SRAB_INTR); 512 writel(0xffffffff, priv->regs + B53_SRAB_INTR);
513 513
514 if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID)
515 return;
516
517 for (i = 0; i < B53_N_PORTS; i++) { 514 for (i = 0; i < B53_N_PORTS; i++) {
518 port = &priv->port_intrs[i]; 515 port = &priv->port_intrs[i];
519 516
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 5200e4bdce93..ea243840ee0f 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -314,6 +314,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
314{ 314{
315 struct mv88e6xxx_chip *chip = dev_id; 315 struct mv88e6xxx_chip *chip = dev_id;
316 struct mv88e6xxx_atu_entry entry; 316 struct mv88e6xxx_atu_entry entry;
317 int spid;
317 int err; 318 int err;
318 u16 val; 319 u16 val;
319 320
@@ -336,6 +337,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
336 if (err) 337 if (err)
337 goto out; 338 goto out;
338 339
340 spid = entry.state;
341
339 if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) { 342 if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
340 dev_err_ratelimited(chip->dev, 343 dev_err_ratelimited(chip->dev,
341 "ATU age out violation for %pM\n", 344 "ATU age out violation for %pM\n",
@@ -344,23 +347,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
344 347
345 if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { 348 if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
346 dev_err_ratelimited(chip->dev, 349 dev_err_ratelimited(chip->dev,
347 "ATU member violation for %pM portvec %x\n", 350 "ATU member violation for %pM portvec %x spid %d\n",
348 entry.mac, entry.portvec); 351 entry.mac, entry.portvec, spid);
349 chip->ports[entry.portvec].atu_member_violation++; 352 chip->ports[spid].atu_member_violation++;
350 } 353 }
351 354
352 if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { 355 if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
353 dev_err_ratelimited(chip->dev, 356 dev_err_ratelimited(chip->dev,
354 "ATU miss violation for %pM portvec %x\n", 357 "ATU miss violation for %pM portvec %x spid %d\n",
355 entry.mac, entry.portvec); 358 entry.mac, entry.portvec, spid);
356 chip->ports[entry.portvec].atu_miss_violation++; 359 chip->ports[spid].atu_miss_violation++;
357 } 360 }
358 361
359 if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) { 362 if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
360 dev_err_ratelimited(chip->dev, 363 dev_err_ratelimited(chip->dev,
361 "ATU full violation for %pM portvec %x\n", 364 "ATU full violation for %pM portvec %x spid %d\n",
362 entry.mac, entry.portvec); 365 entry.mac, entry.portvec, spid);
363 chip->ports[entry.portvec].atu_full_violation++; 366 chip->ports[spid].atu_full_violation++;
364 } 367 }
365 mutex_unlock(&chip->reg_lock); 368 mutex_unlock(&chip->reg_lock);
366 369
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index f9521d0274b7..28c9b0bdf2f6 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -520,7 +520,6 @@ static void bcm_sysport_get_wol(struct net_device *dev,
520 struct ethtool_wolinfo *wol) 520 struct ethtool_wolinfo *wol)
521{ 521{
522 struct bcm_sysport_priv *priv = netdev_priv(dev); 522 struct bcm_sysport_priv *priv = netdev_priv(dev);
523 u32 reg;
524 523
525 wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; 524 wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
526 wol->wolopts = priv->wolopts; 525 wol->wolopts = priv->wolopts;
@@ -528,11 +527,7 @@ static void bcm_sysport_get_wol(struct net_device *dev,
528 if (!(priv->wolopts & WAKE_MAGICSECURE)) 527 if (!(priv->wolopts & WAKE_MAGICSECURE))
529 return; 528 return;
530 529
531 /* Return the programmed SecureOn password */ 530 memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
532 reg = umac_readl(priv, UMAC_PSW_MS);
533 put_unaligned_be16(reg, &wol->sopass[0]);
534 reg = umac_readl(priv, UMAC_PSW_LS);
535 put_unaligned_be32(reg, &wol->sopass[2]);
536} 531}
537 532
538static int bcm_sysport_set_wol(struct net_device *dev, 533static int bcm_sysport_set_wol(struct net_device *dev,
@@ -548,13 +543,8 @@ static int bcm_sysport_set_wol(struct net_device *dev,
548 if (wol->wolopts & ~supported) 543 if (wol->wolopts & ~supported)
549 return -EINVAL; 544 return -EINVAL;
550 545
551 /* Program the SecureOn password */ 546 if (wol->wolopts & WAKE_MAGICSECURE)
552 if (wol->wolopts & WAKE_MAGICSECURE) { 547 memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
553 umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
554 UMAC_PSW_MS);
555 umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
556 UMAC_PSW_LS);
557 }
558 548
559 /* Flag the device and relevant IRQ as wakeup capable */ 549 /* Flag the device and relevant IRQ as wakeup capable */
560 if (wol->wolopts) { 550 if (wol->wolopts) {
@@ -2649,13 +2639,18 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
2649 unsigned int index, i = 0; 2639 unsigned int index, i = 0;
2650 u32 reg; 2640 u32 reg;
2651 2641
2652 /* Password has already been programmed */
2653 reg = umac_readl(priv, UMAC_MPD_CTRL); 2642 reg = umac_readl(priv, UMAC_MPD_CTRL);
2654 if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) 2643 if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
2655 reg |= MPD_EN; 2644 reg |= MPD_EN;
2656 reg &= ~PSW_EN; 2645 reg &= ~PSW_EN;
2657 if (priv->wolopts & WAKE_MAGICSECURE) 2646 if (priv->wolopts & WAKE_MAGICSECURE) {
2647 /* Program the SecureOn password */
2648 umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
2649 UMAC_PSW_MS);
2650 umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
2651 UMAC_PSW_LS);
2658 reg |= PSW_EN; 2652 reg |= PSW_EN;
2653 }
2659 umac_writel(priv, reg, UMAC_MPD_CTRL); 2654 umac_writel(priv, reg, UMAC_MPD_CTRL);
2660 2655
2661 if (priv->wolopts & WAKE_FILTER) { 2656 if (priv->wolopts & WAKE_FILTER) {
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 0887e6356649..0b192fea9c5d 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -12,6 +12,7 @@
12#define __BCM_SYSPORT_H 12#define __BCM_SYSPORT_H
13 13
14#include <linux/bitmap.h> 14#include <linux/bitmap.h>
15#include <linux/ethtool.h>
15#include <linux/if_vlan.h> 16#include <linux/if_vlan.h>
16#include <linux/net_dim.h> 17#include <linux/net_dim.h>
17 18
@@ -778,6 +779,7 @@ struct bcm_sysport_priv {
778 unsigned int crc_fwd:1; 779 unsigned int crc_fwd:1;
779 u16 rev; 780 u16 rev;
780 u32 wolopts; 781 u32 wolopts;
782 u8 sopass[SOPASS_MAX];
781 unsigned int wol_irq_disabled:1; 783 unsigned int wol_irq_disabled:1;
782 784
783 /* MIB related fields */ 785 /* MIB related fields */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1c2987c3d708..92d73453a318 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4973,12 +4973,18 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
4973 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4973 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4974 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4974 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4975 u32 map_idx = ring->map_idx; 4975 u32 map_idx = ring->map_idx;
4976 unsigned int vector;
4976 4977
4978 vector = bp->irq_tbl[map_idx].vector;
4979 disable_irq_nosync(vector);
4977 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 4980 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
4978 if (rc) 4981 if (rc) {
4982 enable_irq(vector);
4979 goto err_out; 4983 goto err_out;
4984 }
4980 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 4985 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
4981 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 4986 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4987 enable_irq(vector);
4982 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 4988 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
4983 4989
4984 if (!i) { 4990 if (!i) {
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 5db9f4158e62..134ae2862efa 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -1288,7 +1288,7 @@ static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
1288 * for transmits, we just free buffers. 1288 * for transmits, we just free buffers.
1289 */ 1289 */
1290 1290
1291 dev_kfree_skb_irq(sb); 1291 dev_consume_skb_irq(sb);
1292 1292
1293 /* 1293 /*
1294 * .. and advance to the next buffer. 1294 * .. and advance to the next buffer.
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 5f03199a3acf..05f4a3b21e29 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -54,7 +54,6 @@ config CAVIUM_PTP
54 tristate "Cavium PTP coprocessor as PTP clock" 54 tristate "Cavium PTP coprocessor as PTP clock"
55 depends on 64BIT && PCI 55 depends on 64BIT && PCI
56 imply PTP_1588_CLOCK 56 imply PTP_1588_CLOCK
57 default y
58 ---help--- 57 ---help---
59 This driver adds support for the Precision Time Protocol Clocks and 58 This driver adds support for the Precision Time Protocol Clocks and
60 Timestamping coprocessor (PTP) found on Cavium processors. 59 Timestamping coprocessor (PTP) found on Cavium processors.
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 60641e202534..9a7f70db20c7 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1434,7 +1434,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1434 * csum is correct or is zero. 1434 * csum is correct or is zero.
1435 */ 1435 */
1436 if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && 1436 if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
1437 tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) { 1437 tcp_udp_csum_ok && outer_csum_ok &&
1438 (ipv4_csum_ok || ipv6)) {
1438 skb->ip_summed = CHECKSUM_UNNECESSARY; 1439 skb->ip_summed = CHECKSUM_UNNECESSARY;
1439 skb->csum_level = encap; 1440 skb->csum_level = encap;
1440 } 1441 }
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 13430f75496c..f1a2da15dd0a 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -585,7 +585,7 @@ static void de_tx (struct de_private *de)
585 netif_dbg(de, tx_done, de->dev, 585 netif_dbg(de, tx_done, de->dev,
586 "tx done, slot %d\n", tx_tail); 586 "tx done, slot %d\n", tx_tail);
587 } 587 }
588 dev_kfree_skb_irq(skb); 588 dev_consume_skb_irq(skb);
589 } 589 }
590 590
591next: 591next:
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index b90bab72efdb..c1968b3ecec8 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -369,7 +369,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
369 dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, 369 dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
370 DMA_TO_DEVICE); 370 DMA_TO_DEVICE);
371 371
372 dev_kfree_skb_irq(skb); 372 dev_consume_skb_irq(skb);
373 } 373 }
374 spin_unlock(&priv->lock); 374 spin_unlock(&priv->lock);
375 375
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index c3d539e209ed..eb3e65e8868f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1879,6 +1879,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
1879 u16 i, j; 1879 u16 i, j;
1880 u8 __iomem *bd; 1880 u8 __iomem *bd;
1881 1881
1882 netdev_reset_queue(ugeth->ndev);
1883
1882 ug_info = ugeth->ug_info; 1884 ug_info = ugeth->ug_info;
1883 uf_info = &ug_info->uf_info; 1885 uf_info = &ug_info->uf_info;
1884 1886
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 04fd1f135011..654ac534b10e 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
152 memset(p, 0, regs->len); 152 memset(p, 0, regs->len);
153 memcpy_fromio(p, io, B3_RAM_ADDR); 153 memcpy_fromio(p, io, B3_RAM_ADDR);
154 154
155 memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, 155 if (regs->len > B3_RI_WTO_R1) {
156 regs->len - B3_RI_WTO_R1); 156 memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
157 regs->len - B3_RI_WTO_R1);
158 }
157} 159}
158 160
159/* Wake on Lan only supported on Yukon chips with rev 1 or above */ 161/* Wake on Lan only supported on Yukon chips with rev 1 or above */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 9e71f4d41b82..bdcc5e79328d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -256,6 +256,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
256 e->m_neigh.family = n->ops->family; 256 e->m_neigh.family = n->ops->family;
257 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); 257 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
258 e->out_dev = out_dev; 258 e->out_dev = out_dev;
259 e->route_dev = route_dev;
259 260
260 /* It's important to add the neigh to the hash table before checking 261 /* It's important to add the neigh to the hash table before checking
261 * the neigh validity state. So if we'll get a notification, in case the 262 * the neigh validity state. So if we'll get a notification, in case the
@@ -369,6 +370,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
369 e->m_neigh.family = n->ops->family; 370 e->m_neigh.family = n->ops->family;
370 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); 371 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
371 e->out_dev = out_dev; 372 e->out_dev = out_dev;
373 e->route_dev = route_dev;
372 374
373 /* It's importent to add the neigh to the hash table before checking 375 /* It's importent to add the neigh to the hash table before checking
374 * the neigh validity state. So if we'll get a notification, in case the 376 * the neigh validity state. So if we'll get a notification, in case the
@@ -602,16 +604,18 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
602 struct mlx5_flow_spec *spec, 604 struct mlx5_flow_spec *spec,
603 struct tc_cls_flower_offload *f, 605 struct tc_cls_flower_offload *f,
604 void *headers_c, 606 void *headers_c,
605 void *headers_v) 607 void *headers_v, u8 *match_level)
606{ 608{
607 int tunnel_type; 609 int tunnel_type;
608 int err = 0; 610 int err = 0;
609 611
610 tunnel_type = mlx5e_tc_tun_get_type(filter_dev); 612 tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
611 if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { 613 if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
614 *match_level = MLX5_MATCH_L4;
612 err = mlx5e_tc_tun_parse_vxlan(priv, spec, f, 615 err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
613 headers_c, headers_v); 616 headers_c, headers_v);
614 } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) { 617 } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
618 *match_level = MLX5_MATCH_L3;
615 err = mlx5e_tc_tun_parse_gretap(priv, spec, f, 619 err = mlx5e_tc_tun_parse_gretap(priv, spec, f,
616 headers_c, headers_v); 620 headers_c, headers_v);
617 } else { 621 } else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 706ce7bf15e7..b63f15de899d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -39,6 +39,6 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
39 struct mlx5_flow_spec *spec, 39 struct mlx5_flow_spec *spec,
40 struct tc_cls_flower_offload *f, 40 struct tc_cls_flower_offload *f,
41 void *headers_c, 41 void *headers_c,
42 void *headers_v); 42 void *headers_v, u8 *match_level);
43 43
44#endif //__MLX5_EN_TC_TUNNEL_H__ 44#endif //__MLX5_EN_TC_TUNNEL_H__
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 0b1988b330f3..7b5829406d95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -579,6 +579,10 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
579 if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { 579 if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
580 ether_addr_copy(e->h_dest, ha); 580 ether_addr_copy(e->h_dest, ha);
581 ether_addr_copy(eth->h_dest, ha); 581 ether_addr_copy(eth->h_dest, ha);
582 /* Update the encap source mac, in case that we delete
583 * the flows when encap source mac changed.
584 */
585 ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
582 586
583 mlx5e_tc_encap_flows_add(priv, e); 587 mlx5e_tc_encap_flows_add(priv, e);
584 } 588 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index edd722824697..36eafc877e6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -148,6 +148,7 @@ struct mlx5e_encap_entry {
148 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ 148 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
149 149
150 struct net_device *out_dev; 150 struct net_device *out_dev;
151 struct net_device *route_dev;
151 int tunnel_type; 152 int tunnel_type;
152 int tunnel_hlen; 153 int tunnel_hlen;
153 int reformat_type; 154 int reformat_type;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 85c5dd7fc2c7..e9437757cad7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -127,6 +127,7 @@ struct mlx5e_tc_flow_parse_attr {
127 struct net_device *filter_dev; 127 struct net_device *filter_dev;
128 struct mlx5_flow_spec spec; 128 struct mlx5_flow_spec spec;
129 int num_mod_hdr_actions; 129 int num_mod_hdr_actions;
130 int max_mod_hdr_actions;
130 void *mod_hdr_actions; 131 void *mod_hdr_actions;
131 int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; 132 int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
132}; 133};
@@ -1301,7 +1302,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
1301static int parse_tunnel_attr(struct mlx5e_priv *priv, 1302static int parse_tunnel_attr(struct mlx5e_priv *priv,
1302 struct mlx5_flow_spec *spec, 1303 struct mlx5_flow_spec *spec,
1303 struct tc_cls_flower_offload *f, 1304 struct tc_cls_flower_offload *f,
1304 struct net_device *filter_dev) 1305 struct net_device *filter_dev, u8 *match_level)
1305{ 1306{
1306 struct netlink_ext_ack *extack = f->common.extack; 1307 struct netlink_ext_ack *extack = f->common.extack;
1307 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1308 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1313,7 +1314,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
1313 int err; 1314 int err;
1314 1315
1315 err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, 1316 err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
1316 headers_c, headers_v); 1317 headers_c, headers_v, match_level);
1317 if (err) { 1318 if (err) {
1318 NL_SET_ERR_MSG_MOD(extack, 1319 NL_SET_ERR_MSG_MOD(extack,
1319 "failed to parse tunnel attributes"); 1320 "failed to parse tunnel attributes");
@@ -1413,7 +1414,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1413 struct mlx5_flow_spec *spec, 1414 struct mlx5_flow_spec *spec,
1414 struct tc_cls_flower_offload *f, 1415 struct tc_cls_flower_offload *f,
1415 struct net_device *filter_dev, 1416 struct net_device *filter_dev,
1416 u8 *match_level) 1417 u8 *match_level, u8 *tunnel_match_level)
1417{ 1418{
1418 struct netlink_ext_ack *extack = f->common.extack; 1419 struct netlink_ext_ack *extack = f->common.extack;
1419 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1420 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1464,7 +1465,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1464 switch (match.key->addr_type) { 1465 switch (match.key->addr_type) {
1465 case FLOW_DISSECTOR_KEY_IPV4_ADDRS: 1466 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1466 case FLOW_DISSECTOR_KEY_IPV6_ADDRS: 1467 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1467 if (parse_tunnel_attr(priv, spec, f, filter_dev)) 1468 if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
1468 return -EOPNOTSUPP; 1469 return -EOPNOTSUPP;
1469 break; 1470 break;
1470 default: 1471 default:
@@ -1767,11 +1768,11 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
1767 struct mlx5_core_dev *dev = priv->mdev; 1768 struct mlx5_core_dev *dev = priv->mdev;
1768 struct mlx5_eswitch *esw = dev->priv.eswitch; 1769 struct mlx5_eswitch *esw = dev->priv.eswitch;
1769 struct mlx5e_rep_priv *rpriv = priv->ppriv; 1770 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1771 u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
1770 struct mlx5_eswitch_rep *rep; 1772 struct mlx5_eswitch_rep *rep;
1771 u8 match_level;
1772 int err; 1773 int err;
1773 1774
1774 err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level); 1775 err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level);
1775 1776
1776 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { 1777 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
1777 rep = rpriv->rep; 1778 rep = rpriv->rep;
@@ -1787,10 +1788,12 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
1787 } 1788 }
1788 } 1789 }
1789 1790
1790 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) 1791 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
1791 flow->esw_attr->match_level = match_level; 1792 flow->esw_attr->match_level = match_level;
1792 else 1793 flow->esw_attr->tunnel_match_level = tunnel_match_level;
1794 } else {
1793 flow->nic_attr->match_level = match_level; 1795 flow->nic_attr->match_level = match_level;
1796 }
1794 1797
1795 return err; 1798 return err;
1796} 1799}
@@ -1880,9 +1883,9 @@ static struct mlx5_fields fields[] = {
1880 OFFLOAD(UDP_DPORT, 2, udp.dest, 0), 1883 OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
1881}; 1884};
1882 1885
1883/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at 1886/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
1884 * max from the SW pedit action. On success, it says how many HW actions were 1887 * max from the SW pedit action. On success, attr->num_mod_hdr_actions
1885 * actually parsed. 1888 * says how many HW actions were actually parsed.
1886 */ 1889 */
1887static int offload_pedit_fields(struct pedit_headers_action *hdrs, 1890static int offload_pedit_fields(struct pedit_headers_action *hdrs,
1888 struct mlx5e_tc_flow_parse_attr *parse_attr, 1891 struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -1905,9 +1908,11 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
1905 add_vals = &hdrs[1].vals; 1908 add_vals = &hdrs[1].vals;
1906 1909
1907 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); 1910 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1908 action = parse_attr->mod_hdr_actions; 1911 action = parse_attr->mod_hdr_actions +
1909 max_actions = parse_attr->num_mod_hdr_actions; 1912 parse_attr->num_mod_hdr_actions * action_size;
1910 nactions = 0; 1913
1914 max_actions = parse_attr->max_mod_hdr_actions;
1915 nactions = parse_attr->num_mod_hdr_actions;
1911 1916
1912 for (i = 0; i < ARRAY_SIZE(fields); i++) { 1917 for (i = 0; i < ARRAY_SIZE(fields); i++) {
1913 f = &fields[i]; 1918 f = &fields[i];
@@ -2020,7 +2025,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
2020 if (!parse_attr->mod_hdr_actions) 2025 if (!parse_attr->mod_hdr_actions)
2021 return -ENOMEM; 2026 return -ENOMEM;
2022 2027
2023 parse_attr->num_mod_hdr_actions = max_actions; 2028 parse_attr->max_mod_hdr_actions = max_actions;
2024 return 0; 2029 return 0;
2025} 2030}
2026 2031
@@ -2069,9 +2074,11 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
2069 int err; 2074 int err;
2070 u8 cmd; 2075 u8 cmd;
2071 2076
2072 err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr); 2077 if (!parse_attr->mod_hdr_actions) {
2073 if (err) 2078 err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr);
2074 goto out_err; 2079 if (err)
2080 goto out_err;
2081 }
2075 2082
2076 err = offload_pedit_fields(hdrs, parse_attr, extack); 2083 err = offload_pedit_fields(hdrs, parse_attr, extack);
2077 if (err < 0) 2084 if (err < 0)
@@ -2129,6 +2136,7 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
2129 2136
2130static bool modify_header_match_supported(struct mlx5_flow_spec *spec, 2137static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2131 struct flow_action *flow_action, 2138 struct flow_action *flow_action,
2139 u32 actions,
2132 struct netlink_ext_ack *extack) 2140 struct netlink_ext_ack *extack)
2133{ 2141{
2134 const struct flow_action_entry *act; 2142 const struct flow_action_entry *act;
@@ -2138,7 +2146,11 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2138 u16 ethertype; 2146 u16 ethertype;
2139 int i; 2147 int i;
2140 2148
2141 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); 2149 if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
2150 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
2151 else
2152 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
2153
2142 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); 2154 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
2143 2155
2144 /* for non-IP we only re-write MACs, so we're okay */ 2156 /* for non-IP we only re-write MACs, so we're okay */
@@ -2191,7 +2203,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
2191 2203
2192 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 2204 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
2193 return modify_header_match_supported(&parse_attr->spec, 2205 return modify_header_match_supported(&parse_attr->spec,
2194 flow_action, 2206 flow_action, actions,
2195 extack); 2207 extack);
2196 2208
2197 return true; 2209 return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 189211295e48..c1334a8ac8f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -387,8 +387,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
387 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); 387 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
388 contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); 388 contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
389 if (unlikely(contig_wqebbs_room < num_wqebbs)) { 389 if (unlikely(contig_wqebbs_room < num_wqebbs)) {
390#ifdef CONFIG_MLX5_EN_IPSEC
391 struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
392#endif
390 mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); 393 mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
391 mlx5e_sq_fetch_wqe(sq, &wqe, &pi); 394 mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
395#ifdef CONFIG_MLX5_EN_IPSEC
396 wqe->eth = cur_eth;
397#endif
392 } 398 }
393 399
394 /* fill wqe */ 400 /* fill wqe */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 9c89eea9b2c3..748ff178a1d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -312,6 +312,7 @@ struct mlx5_esw_flow_attr {
312 } dests[MLX5_MAX_FLOW_FWD_VPORTS]; 312 } dests[MLX5_MAX_FLOW_FWD_VPORTS];
313 u32 mod_hdr_id; 313 u32 mod_hdr_id;
314 u8 match_level; 314 u8 match_level;
315 u8 tunnel_match_level;
315 struct mlx5_fc *counter; 316 struct mlx5_fc *counter;
316 u32 chain; 317 u32 chain;
317 u16 prio; 318 u16 prio;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 53065b6ae593..d4e6fe5b9300 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -160,14 +160,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
160 MLX5_SET_TO_ONES(fte_match_set_misc, misc, 160 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
161 source_eswitch_owner_vhca_id); 161 source_eswitch_owner_vhca_id);
162 162
163 if (attr->match_level == MLX5_MATCH_NONE) 163 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
164 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; 164 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
165 else 165 if (attr->tunnel_match_level != MLX5_MATCH_NONE)
166 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | 166 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
167 MLX5_MATCH_MISC_PARAMETERS; 167 if (attr->match_level != MLX5_MATCH_NONE)
168 168 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
169 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) 169 } else if (attr->match_level != MLX5_MATCH_NONE) {
170 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; 170 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
171 }
171 172
172 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 173 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
173 flow_act.modify_id = attr->mod_hdr_id; 174 flow_act.modify_id = attr->mod_hdr_id;
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 3b0955d34716..2d21c94f19c2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -53,7 +53,7 @@
53extern const struct qed_common_ops qed_common_ops_pass; 53extern const struct qed_common_ops qed_common_ops_pass;
54 54
55#define QED_MAJOR_VERSION 8 55#define QED_MAJOR_VERSION 8
56#define QED_MINOR_VERSION 33 56#define QED_MINOR_VERSION 37
57#define QED_REVISION_VERSION 0 57#define QED_REVISION_VERSION 0
58#define QED_ENGINEERING_VERSION 20 58#define QED_ENGINEERING_VERSION 20
59 59
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index e68ca83ae915..58be1c4c6668 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -2216,7 +2216,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
2216 u16 num_queues = 0; 2216 u16 num_queues = 0;
2217 2217
2218 /* Since the feature controls only queue-zones, 2218 /* Since the feature controls only queue-zones,
2219 * make sure we have the contexts [rx, tx, xdp] to 2219 * make sure we have the contexts [rx, xdp, tcs] to
2220 * match. 2220 * match.
2221 */ 2221 */
2222 for_each_hwfn(cdev, i) { 2222 for_each_hwfn(cdev, i) {
@@ -2226,7 +2226,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
2226 u16 cids; 2226 u16 cids;
2227 2227
2228 cids = hwfn->pf_params.eth_pf_params.num_cons; 2228 cids = hwfn->pf_params.eth_pf_params.num_cons;
2229 num_queues += min_t(u16, l2_queues, cids / 3); 2229 cids /= (2 + info->num_tc);
2230 num_queues += min_t(u16, l2_queues, cids);
2230 } 2231 }
2231 2232
2232 /* queues might theoretically be >256, but interrupts' 2233 /* queues might theoretically be >256, but interrupts'
@@ -2870,7 +2871,8 @@ static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
2870 p_hwfn = p_cid->p_owner; 2871 p_hwfn = p_cid->p_owner;
2871 rc = qed_get_queue_coalesce(p_hwfn, coal, handle); 2872 rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
2872 if (rc) 2873 if (rc)
2873 DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); 2874 DP_VERBOSE(cdev, QED_MSG_DEBUG,
2875 "Unable to read queue coalescing\n");
2874 2876
2875 return rc; 2877 return rc;
2876} 2878}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 4179c9013fc6..96ab77ae6af5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -382,6 +382,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn);
382 * @param p_hwfn 382 * @param p_hwfn
383 */ 383 */
384void qed_consq_free(struct qed_hwfn *p_hwfn); 384void qed_consq_free(struct qed_hwfn *p_hwfn);
385int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
385 386
386/** 387/**
387 * @file 388 * @file
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 888274fa208b..5a495fda9e9d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -604,6 +604,9 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
604 604
605 p_ent->ramrod.pf_update.update_mf_vlan_flag = true; 605 p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
606 p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan); 606 p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);
607 if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
608 p_ent->ramrod.pf_update.mf_vlan |=
609 cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
607 610
608 return qed_spq_post(p_hwfn, p_ent, NULL); 611 return qed_spq_post(p_hwfn, p_ent, NULL);
609} 612}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 3e0f7c46bb1b..79b311b86f66 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -397,6 +397,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
397 397
398 qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); 398 qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
399 399
400 /* Attempt to post pending requests */
401 spin_lock_bh(&p_hwfn->p_spq->lock);
402 rc = qed_spq_pend_post(p_hwfn);
403 spin_unlock_bh(&p_hwfn->p_spq->lock);
404
400 return rc; 405 return rc;
401} 406}
402 407
@@ -767,7 +772,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
767 return 0; 772 return 0;
768} 773}
769 774
770static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) 775int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
771{ 776{
772 struct qed_spq *p_spq = p_hwfn->p_spq; 777 struct qed_spq *p_spq = p_hwfn->p_spq;
773 struct qed_spq_entry *p_ent = NULL; 778 struct qed_spq_entry *p_ent = NULL;
@@ -927,7 +932,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
927 struct qed_spq_entry *p_ent = NULL; 932 struct qed_spq_entry *p_ent = NULL;
928 struct qed_spq_entry *tmp; 933 struct qed_spq_entry *tmp;
929 struct qed_spq_entry *found = NULL; 934 struct qed_spq_entry *found = NULL;
930 int rc;
931 935
932 if (!p_hwfn) 936 if (!p_hwfn)
933 return -EINVAL; 937 return -EINVAL;
@@ -985,12 +989,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
985 */ 989 */
986 qed_spq_return_entry(p_hwfn, found); 990 qed_spq_return_entry(p_hwfn, found);
987 991
988 /* Attempt to post pending requests */ 992 return 0;
989 spin_lock_bh(&p_spq->lock);
990 rc = qed_spq_pend_post(p_hwfn);
991 spin_unlock_bh(&p_spq->lock);
992
993 return rc;
994} 993}
995 994
996int qed_consq_alloc(struct qed_hwfn *p_hwfn) 995int qed_consq_alloc(struct qed_hwfn *p_hwfn)
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 843416404aeb..63a78162cfaf 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -56,7 +56,7 @@
56#include <net/tc_act/tc_gact.h> 56#include <net/tc_act/tc_gact.h>
57 57
58#define QEDE_MAJOR_VERSION 8 58#define QEDE_MAJOR_VERSION 8
59#define QEDE_MINOR_VERSION 33 59#define QEDE_MINOR_VERSION 37
60#define QEDE_REVISION_VERSION 0 60#define QEDE_REVISION_VERSION 0
61#define QEDE_ENGINEERING_VERSION 20 61#define QEDE_ENGINEERING_VERSION 20
62#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ 62#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
@@ -497,6 +497,9 @@ struct qede_reload_args {
497 497
498/* Datapath functions definition */ 498/* Datapath functions definition */
499netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); 499netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
500u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
501 struct net_device *sb_dev,
502 select_queue_fallback_t fallback);
500netdev_features_t qede_features_check(struct sk_buff *skb, 503netdev_features_t qede_features_check(struct sk_buff *skb,
501 struct net_device *dev, 504 struct net_device *dev,
502 netdev_features_t features); 505 netdev_features_t features);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index bdf816fe5a16..31b046e24565 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1695 return NETDEV_TX_OK; 1695 return NETDEV_TX_OK;
1696} 1696}
1697 1697
1698u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
1699 struct net_device *sb_dev,
1700 select_queue_fallback_t fallback)
1701{
1702 struct qede_dev *edev = netdev_priv(dev);
1703 int total_txq;
1704
1705 total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
1706
1707 return QEDE_TSS_COUNT(edev) ?
1708 fallback(dev, skb, NULL) % total_txq : 0;
1709}
1710
1698/* 8B udp header + 8B base tunnel header + 32B option length */ 1711/* 8B udp header + 8B base tunnel header + 32B option length */
1699#define QEDE_MAX_TUN_HDR_LEN 48 1712#define QEDE_MAX_TUN_HDR_LEN 48
1700 1713
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 6b4d96635238..02a97c659e29 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -621,6 +621,7 @@ static const struct net_device_ops qede_netdev_ops = {
621 .ndo_open = qede_open, 621 .ndo_open = qede_open,
622 .ndo_stop = qede_close, 622 .ndo_stop = qede_close,
623 .ndo_start_xmit = qede_start_xmit, 623 .ndo_start_xmit = qede_start_xmit,
624 .ndo_select_queue = qede_select_queue,
624 .ndo_set_rx_mode = qede_set_rx_mode, 625 .ndo_set_rx_mode = qede_set_rx_mode,
625 .ndo_set_mac_address = qede_set_mac_addr, 626 .ndo_set_mac_address = qede_set_mac_addr,
626 .ndo_validate_addr = eth_validate_addr, 627 .ndo_validate_addr = eth_validate_addr,
@@ -656,6 +657,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
656 .ndo_open = qede_open, 657 .ndo_open = qede_open,
657 .ndo_stop = qede_close, 658 .ndo_stop = qede_close,
658 .ndo_start_xmit = qede_start_xmit, 659 .ndo_start_xmit = qede_start_xmit,
660 .ndo_select_queue = qede_select_queue,
659 .ndo_set_rx_mode = qede_set_rx_mode, 661 .ndo_set_rx_mode = qede_set_rx_mode,
660 .ndo_set_mac_address = qede_set_mac_addr, 662 .ndo_set_mac_address = qede_set_mac_addr,
661 .ndo_validate_addr = eth_validate_addr, 663 .ndo_validate_addr = eth_validate_addr,
@@ -674,6 +676,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
674 .ndo_open = qede_open, 676 .ndo_open = qede_open,
675 .ndo_stop = qede_close, 677 .ndo_stop = qede_close,
676 .ndo_start_xmit = qede_start_xmit, 678 .ndo_start_xmit = qede_start_xmit,
679 .ndo_select_queue = qede_select_queue,
677 .ndo_set_rx_mode = qede_set_rx_mode, 680 .ndo_set_rx_mode = qede_set_rx_mode,
678 .ndo_set_mac_address = qede_set_mac_addr, 681 .ndo_set_mac_address = qede_set_mac_addr,
679 .ndo_validate_addr = eth_validate_addr, 682 .ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 15c62c160953..be47d864f8b9 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1037,7 +1037,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
1037 skb = ep->tx_skbuff[entry]; 1037 skb = ep->tx_skbuff[entry];
1038 pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, 1038 pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
1039 skb->len, PCI_DMA_TODEVICE); 1039 skb->len, PCI_DMA_TODEVICE);
1040 dev_kfree_skb_irq(skb); 1040 dev_consume_skb_irq(skb);
1041 ep->tx_skbuff[entry] = NULL; 1041 ep->tx_skbuff[entry] = NULL;
1042 } 1042 }
1043 1043
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index d1f61c25d82b..5d85742a2be0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -721,8 +721,11 @@ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
721{ 721{
722 unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); 722 unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
723 723
724 if (!clk) 724 if (!clk) {
725 return 0; 725 clk = priv->plat->clk_ref_rate;
726 if (!clk)
727 return 0;
728 }
726 729
727 return (usec * (clk / 1000000)) / 256; 730 return (usec * (clk / 1000000)) / 256;
728} 731}
@@ -731,8 +734,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
731{ 734{
732 unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); 735 unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
733 736
734 if (!clk) 737 if (!clk) {
735 return 0; 738 clk = priv->plat->clk_ref_rate;
739 if (!clk)
740 return 0;
741 }
736 742
737 return (riwt * 256) / (clk / 1000000); 743 return (riwt * 256) / (clk / 1000000);
738} 744}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index effff171d250..f12dd59c85cf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3039,10 +3039,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3039 3039
3040 tx_q = &priv->tx_queue[queue]; 3040 tx_q = &priv->tx_queue[queue];
3041 3041
3042 if (priv->tx_path_in_lpi_mode)
3043 stmmac_disable_eee_mode(priv);
3044
3042 /* Manage oversized TCP frames for GMAC4 device */ 3045 /* Manage oversized TCP frames for GMAC4 device */
3043 if (skb_is_gso(skb) && priv->tso) { 3046 if (skb_is_gso(skb) && priv->tso) {
3044 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) 3047 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3048 /*
3049 * There is no way to determine the number of TSO
3050 * capable Queues. Let's use always the Queue 0
3051 * because if TSO is supported then at least this
3052 * one will be capable.
3053 */
3054 skb_set_queue_mapping(skb, 0);
3055
3045 return stmmac_tso_xmit(skb, dev); 3056 return stmmac_tso_xmit(skb, dev);
3057 }
3046 } 3058 }
3047 3059
3048 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { 3060 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -3057,9 +3069,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3057 return NETDEV_TX_BUSY; 3069 return NETDEV_TX_BUSY;
3058 } 3070 }
3059 3071
3060 if (priv->tx_path_in_lpi_mode)
3061 stmmac_disable_eee_mode(priv);
3062
3063 entry = tx_q->cur_tx; 3072 entry = tx_q->cur_tx;
3064 first_entry = entry; 3073 first_entry = entry;
3065 WARN_ON(tx_q->tx_skbuff[first_entry]); 3074 WARN_ON(tx_q->tx_skbuff[first_entry]);
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 7ec4eb74fe21..6fc05c106afc 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -1898,7 +1898,7 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1898 cp->net_stats[ring].tx_packets++; 1898 cp->net_stats[ring].tx_packets++;
1899 cp->net_stats[ring].tx_bytes += skb->len; 1899 cp->net_stats[ring].tx_bytes += skb->len;
1900 spin_unlock(&cp->stat_lock[ring]); 1900 spin_unlock(&cp->stat_lock[ring]);
1901 dev_kfree_skb_irq(skb); 1901 dev_consume_skb_irq(skb);
1902 } 1902 }
1903 cp->tx_old[ring] = entry; 1903 cp->tx_old[ring] = entry;
1904 1904
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 720b7ac77f3b..e9b757b03b56 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -781,7 +781,7 @@ static void bigmac_tx(struct bigmac *bp)
781 781
782 DTX(("skb(%p) ", skb)); 782 DTX(("skb(%p) ", skb));
783 bp->tx_skbs[elem] = NULL; 783 bp->tx_skbs[elem] = NULL;
784 dev_kfree_skb_irq(skb); 784 dev_consume_skb_irq(skb);
785 785
786 elem = NEXT_TX(elem); 786 elem = NEXT_TX(elem);
787 } 787 }
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index ff641cf30a4e..d007dfeba5c3 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1962,7 +1962,7 @@ static void happy_meal_tx(struct happy_meal *hp)
1962 this = &txbase[elem]; 1962 this = &txbase[elem];
1963 } 1963 }
1964 1964
1965 dev_kfree_skb_irq(skb); 1965 dev_consume_skb_irq(skb);
1966 dev->stats.tx_packets++; 1966 dev->stats.tx_packets++;
1967 } 1967 }
1968 hp->tx_old = elem; 1968 hp->tx_old = elem;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index dc966ddb6d81..b24c11187017 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1739,7 +1739,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
1739 tx_level -= db->rptr->len; /* '-' koz len is negative */ 1739 tx_level -= db->rptr->len; /* '-' koz len is negative */
1740 1740
1741 /* now should come skb pointer - free it */ 1741 /* now should come skb pointer - free it */
1742 dev_kfree_skb_irq(db->rptr->addr.skb); 1742 dev_consume_skb_irq(db->rptr->addr.skb);
1743 bdx_tx_db_inc_rptr(db); 1743 bdx_tx_db_inc_rptr(db);
1744 } 1744 }
1745 1745
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 82412691ee66..27f6cf140845 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -1740,7 +1740,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr,
1740 dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], 1740 dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
1741 le16_to_cpu(pktlen), DMA_TO_DEVICE); 1741 le16_to_cpu(pktlen), DMA_TO_DEVICE);
1742 } 1742 }
1743 dev_kfree_skb_irq(skb); 1743 dev_consume_skb_irq(skb);
1744 tdinfo->skb = NULL; 1744 tdinfo->skb = NULL;
1745} 1745}
1746 1746
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 38ac8ef41f5f..56b7791911bf 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -3512,7 +3512,7 @@ static int dfx_xmt_done(DFX_board_t *bp)
3512 bp->descr_block_virt->xmt_data[comp].long_1, 3512 bp->descr_block_virt->xmt_data[comp].long_1,
3513 p_xmt_drv_descr->p_skb->len, 3513 p_xmt_drv_descr->p_skb->len,
3514 DMA_TO_DEVICE); 3514 DMA_TO_DEVICE);
3515 dev_kfree_skb_irq(p_xmt_drv_descr->p_skb); 3515 dev_consume_skb_irq(p_xmt_drv_descr->p_skb);
3516 3516
3517 /* 3517 /*
3518 * Move to start of next packet by updating completion index 3518 * Move to start of next packet by updating completion index
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 58bbba8582b0..3377ac66a347 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1512,9 +1512,13 @@ static void geneve_link_config(struct net_device *dev,
1512 } 1512 }
1513#if IS_ENABLED(CONFIG_IPV6) 1513#if IS_ENABLED(CONFIG_IPV6)
1514 case AF_INET6: { 1514 case AF_INET6: {
1515 struct rt6_info *rt = rt6_lookup(geneve->net, 1515 struct rt6_info *rt;
1516 &info->key.u.ipv6.dst, NULL, 0, 1516
1517 NULL, 0); 1517 if (!__in6_dev_get(dev))
1518 break;
1519
1520 rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
1521 NULL, 0);
1518 1522
1519 if (rt && rt->dst.dev) 1523 if (rt && rt->dst.dev)
1520 ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN; 1524 ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 44de81e5f140..c589f5ae75bb 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -905,9 +905,9 @@ mcr20a_irq_clean_complete(void *context)
905 } 905 }
906 break; 906 break;
907 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ): 907 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
908 /* rx is starting */ 908 /* rx is starting */
909 dev_dbg(printdev(lp), "RX is starting\n"); 909 dev_dbg(printdev(lp), "RX is starting\n");
910 mcr20a_handle_rx(lp); 910 mcr20a_handle_rx(lp);
911 break; 911 break;
912 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): 912 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
913 if (lp->is_tx) { 913 if (lp->is_tx) {
diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c
index 9a2f24078a54..d17480a911a3 100644
--- a/drivers/net/ipvlan/ipvlan_l3s.c
+++ b/drivers/net/ipvlan/ipvlan_l3s.c
@@ -209,7 +209,7 @@ int ipvlan_l3s_register(struct ipvl_port *port)
209 ret = ipvlan_register_nf_hook(read_pnet(&port->pnet)); 209 ret = ipvlan_register_nf_hook(read_pnet(&port->pnet));
210 if (!ret) { 210 if (!ret) {
211 dev->l3mdev_ops = &ipvl_l3mdev_ops; 211 dev->l3mdev_ops = &ipvl_l3mdev_ops;
212 dev->priv_flags |= IFF_L3MDEV_MASTER; 212 dev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
213 } 213 }
214 214
215 return ret; 215 return ret;
@@ -221,7 +221,7 @@ void ipvlan_l3s_unregister(struct ipvl_port *port)
221 221
222 ASSERT_RTNL(); 222 ASSERT_RTNL();
223 223
224 dev->priv_flags &= ~IFF_L3MDEV_MASTER; 224 dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
225 ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); 225 ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
226 dev->l3mdev_ops = NULL; 226 dev->l3mdev_ops = NULL;
227} 227}
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 25ef483bcc24..2fe2ebaf62d1 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -885,14 +885,14 @@ static void decode_txts(struct dp83640_private *dp83640,
885 struct phy_txts *phy_txts) 885 struct phy_txts *phy_txts)
886{ 886{
887 struct skb_shared_hwtstamps shhwtstamps; 887 struct skb_shared_hwtstamps shhwtstamps;
888 struct dp83640_skb_info *skb_info;
888 struct sk_buff *skb; 889 struct sk_buff *skb;
889 u64 ns;
890 u8 overflow; 890 u8 overflow;
891 u64 ns;
891 892
892 /* We must already have the skb that triggered this. */ 893 /* We must already have the skb that triggered this. */
893 894again:
894 skb = skb_dequeue(&dp83640->tx_queue); 895 skb = skb_dequeue(&dp83640->tx_queue);
895
896 if (!skb) { 896 if (!skb) {
897 pr_debug("have timestamp but tx_queue empty\n"); 897 pr_debug("have timestamp but tx_queue empty\n");
898 return; 898 return;
@@ -907,6 +907,11 @@ static void decode_txts(struct dp83640_private *dp83640,
907 } 907 }
908 return; 908 return;
909 } 909 }
910 skb_info = (struct dp83640_skb_info *)skb->cb;
911 if (time_after(jiffies, skb_info->tmo)) {
912 kfree_skb(skb);
913 goto again;
914 }
910 915
911 ns = phy2txts(phy_txts); 916 ns = phy2txts(phy_txts);
912 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 917 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
@@ -1459,6 +1464,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
1459static void dp83640_txtstamp(struct phy_device *phydev, 1464static void dp83640_txtstamp(struct phy_device *phydev,
1460 struct sk_buff *skb, int type) 1465 struct sk_buff *skb, int type)
1461{ 1466{
1467 struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
1462 struct dp83640_private *dp83640 = phydev->priv; 1468 struct dp83640_private *dp83640 = phydev->priv;
1463 1469
1464 switch (dp83640->hwts_tx_en) { 1470 switch (dp83640->hwts_tx_en) {
@@ -1471,6 +1477,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
1471 /* fall through */ 1477 /* fall through */
1472 case HWTSTAMP_TX_ON: 1478 case HWTSTAMP_TX_ON:
1473 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1479 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1480 skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
1474 skb_queue_tail(&dp83640->tx_queue, skb); 1481 skb_queue_tail(&dp83640->tx_queue, skb);
1475 break; 1482 break;
1476 1483
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 90f44ba8aca7..3ccba37bd6dd 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -842,7 +842,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
842 842
843 /* SGMII-to-Copper mode initialization */ 843 /* SGMII-to-Copper mode initialization */
844 if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { 844 if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
845
846 /* Select page 18 */ 845 /* Select page 18 */
847 err = marvell_set_page(phydev, 18); 846 err = marvell_set_page(phydev, 18);
848 if (err < 0) 847 if (err < 0)
@@ -865,21 +864,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
865 err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); 864 err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
866 if (err < 0) 865 if (err < 0)
867 return err; 866 return err;
868
869 /* There appears to be a bug in the 88e1512 when used in
870 * SGMII to copper mode, where the AN advertisement register
871 * clears the pause bits each time a negotiation occurs.
872 * This means we can never be truely sure what was advertised,
873 * so disable Pause support.
874 */
875 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
876 phydev->supported);
877 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
878 phydev->supported);
879 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
880 phydev->advertising);
881 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
882 phydev->advertising);
883 } 867 }
884 868
885 return m88e1318_config_init(phydev); 869 return m88e1318_config_init(phydev);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 18656c4094b3..fed298c0cb39 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -866,8 +866,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
866 if (rtnl_dereference(tun->xdp_prog)) 866 if (rtnl_dereference(tun->xdp_prog))
867 sock_set_flag(&tfile->sk, SOCK_XDP); 867 sock_set_flag(&tfile->sk, SOCK_XDP);
868 868
869 tun_set_real_num_queues(tun);
870
871 /* device is allowed to go away first, so no need to hold extra 869 /* device is allowed to go away first, so no need to hold extra
872 * refcnt. 870 * refcnt.
873 */ 871 */
@@ -879,6 +877,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
879 rcu_assign_pointer(tfile->tun, tun); 877 rcu_assign_pointer(tfile->tun, tun);
880 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); 878 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
881 tun->numqueues++; 879 tun->numqueues++;
880 tun_set_real_num_queues(tun);
882out: 881out:
883 return err; 882 return err;
884} 883}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2a0edd4653e3..7eb38ea9ba56 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644);
57#define VIRTIO_XDP_TX BIT(0) 57#define VIRTIO_XDP_TX BIT(0)
58#define VIRTIO_XDP_REDIR BIT(1) 58#define VIRTIO_XDP_REDIR BIT(1)
59 59
60#define VIRTIO_XDP_FLAG BIT(0)
61
60/* RX packet size EWMA. The average packet size is used to determine the packet 62/* RX packet size EWMA. The average packet size is used to determine the packet
61 * buffer size when refilling RX rings. As the entire RX ring may be refilled 63 * buffer size when refilling RX rings. As the entire RX ring may be refilled
62 * at once, the weight is chosen so that the EWMA will be insensitive to short- 64 * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -252,6 +254,21 @@ struct padded_vnet_hdr {
252 char padding[4]; 254 char padding[4];
253}; 255};
254 256
257static bool is_xdp_frame(void *ptr)
258{
259 return (unsigned long)ptr & VIRTIO_XDP_FLAG;
260}
261
262static void *xdp_to_ptr(struct xdp_frame *ptr)
263{
264 return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
265}
266
267static struct xdp_frame *ptr_to_xdp(void *ptr)
268{
269 return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
270}
271
255/* Converting between virtqueue no. and kernel tx/rx queue no. 272/* Converting between virtqueue no. and kernel tx/rx queue no.
256 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq 273 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
257 */ 274 */
@@ -462,7 +479,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
462 479
463 sg_init_one(sq->sg, xdpf->data, xdpf->len); 480 sg_init_one(sq->sg, xdpf->data, xdpf->len);
464 481
465 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC); 482 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
483 GFP_ATOMIC);
466 if (unlikely(err)) 484 if (unlikely(err))
467 return -ENOSPC; /* Caller handle free/refcnt */ 485 return -ENOSPC; /* Caller handle free/refcnt */
468 486
@@ -482,36 +500,47 @@ static int virtnet_xdp_xmit(struct net_device *dev,
482{ 500{
483 struct virtnet_info *vi = netdev_priv(dev); 501 struct virtnet_info *vi = netdev_priv(dev);
484 struct receive_queue *rq = vi->rq; 502 struct receive_queue *rq = vi->rq;
485 struct xdp_frame *xdpf_sent;
486 struct bpf_prog *xdp_prog; 503 struct bpf_prog *xdp_prog;
487 struct send_queue *sq; 504 struct send_queue *sq;
488 unsigned int len; 505 unsigned int len;
506 int packets = 0;
507 int bytes = 0;
489 int drops = 0; 508 int drops = 0;
490 int kicks = 0; 509 int kicks = 0;
491 int ret, err; 510 int ret, err;
511 void *ptr;
492 int i; 512 int i;
493 513
494 sq = virtnet_xdp_sq(vi);
495
496 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
497 ret = -EINVAL;
498 drops = n;
499 goto out;
500 }
501
502 /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this 514 /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
503 * indicate XDP resources have been successfully allocated. 515 * indicate XDP resources have been successfully allocated.
504 */ 516 */
505 xdp_prog = rcu_dereference(rq->xdp_prog); 517 xdp_prog = rcu_dereference(rq->xdp_prog);
506 if (!xdp_prog) { 518 if (!xdp_prog)
507 ret = -ENXIO; 519 return -ENXIO;
520
521 sq = virtnet_xdp_sq(vi);
522
523 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
524 ret = -EINVAL;
508 drops = n; 525 drops = n;
509 goto out; 526 goto out;
510 } 527 }
511 528
512 /* Free up any pending old buffers before queueing new ones. */ 529 /* Free up any pending old buffers before queueing new ones. */
513 while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) 530 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
514 xdp_return_frame(xdpf_sent); 531 if (likely(is_xdp_frame(ptr))) {
532 struct xdp_frame *frame = ptr_to_xdp(ptr);
533
534 bytes += frame->len;
535 xdp_return_frame(frame);
536 } else {
537 struct sk_buff *skb = ptr;
538
539 bytes += skb->len;
540 napi_consume_skb(skb, false);
541 }
542 packets++;
543 }
515 544
516 for (i = 0; i < n; i++) { 545 for (i = 0; i < n; i++) {
517 struct xdp_frame *xdpf = frames[i]; 546 struct xdp_frame *xdpf = frames[i];
@@ -530,6 +559,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
530 } 559 }
531out: 560out:
532 u64_stats_update_begin(&sq->stats.syncp); 561 u64_stats_update_begin(&sq->stats.syncp);
562 sq->stats.bytes += bytes;
563 sq->stats.packets += packets;
533 sq->stats.xdp_tx += n; 564 sq->stats.xdp_tx += n;
534 sq->stats.xdp_tx_drops += drops; 565 sq->stats.xdp_tx_drops += drops;
535 sq->stats.kicks += kicks; 566 sq->stats.kicks += kicks;
@@ -1333,18 +1364,26 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
1333 1364
1334static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) 1365static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
1335{ 1366{
1336 struct sk_buff *skb;
1337 unsigned int len; 1367 unsigned int len;
1338 unsigned int packets = 0; 1368 unsigned int packets = 0;
1339 unsigned int bytes = 0; 1369 unsigned int bytes = 0;
1370 void *ptr;
1340 1371
1341 while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { 1372 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
1342 pr_debug("Sent skb %p\n", skb); 1373 if (likely(!is_xdp_frame(ptr))) {
1374 struct sk_buff *skb = ptr;
1343 1375
1344 bytes += skb->len; 1376 pr_debug("Sent skb %p\n", skb);
1345 packets++; 1377
1378 bytes += skb->len;
1379 napi_consume_skb(skb, in_napi);
1380 } else {
1381 struct xdp_frame *frame = ptr_to_xdp(ptr);
1346 1382
1347 napi_consume_skb(skb, in_napi); 1383 bytes += frame->len;
1384 xdp_return_frame(frame);
1385 }
1386 packets++;
1348 } 1387 }
1349 1388
1350 /* Avoid overhead when no packets have been processed 1389 /* Avoid overhead when no packets have been processed
@@ -1359,6 +1398,16 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
1359 u64_stats_update_end(&sq->stats.syncp); 1398 u64_stats_update_end(&sq->stats.syncp);
1360} 1399}
1361 1400
1401static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1402{
1403 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1404 return false;
1405 else if (q < vi->curr_queue_pairs)
1406 return true;
1407 else
1408 return false;
1409}
1410
1362static void virtnet_poll_cleantx(struct receive_queue *rq) 1411static void virtnet_poll_cleantx(struct receive_queue *rq)
1363{ 1412{
1364 struct virtnet_info *vi = rq->vq->vdev->priv; 1413 struct virtnet_info *vi = rq->vq->vdev->priv;
@@ -1366,7 +1415,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
1366 struct send_queue *sq = &vi->sq[index]; 1415 struct send_queue *sq = &vi->sq[index];
1367 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); 1416 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
1368 1417
1369 if (!sq->napi.weight) 1418 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
1370 return; 1419 return;
1371 1420
1372 if (__netif_tx_trylock(txq)) { 1421 if (__netif_tx_trylock(txq)) {
@@ -1443,8 +1492,16 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
1443{ 1492{
1444 struct send_queue *sq = container_of(napi, struct send_queue, napi); 1493 struct send_queue *sq = container_of(napi, struct send_queue, napi);
1445 struct virtnet_info *vi = sq->vq->vdev->priv; 1494 struct virtnet_info *vi = sq->vq->vdev->priv;
1446 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); 1495 unsigned int index = vq2txq(sq->vq);
1496 struct netdev_queue *txq;
1447 1497
1498 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
1499 /* We don't need to enable cb for XDP */
1500 napi_complete_done(napi, 0);
1501 return 0;
1502 }
1503
1504 txq = netdev_get_tx_queue(vi->dev, index);
1448 __netif_tx_lock(txq, raw_smp_processor_id()); 1505 __netif_tx_lock(txq, raw_smp_processor_id());
1449 free_old_xmit_skbs(sq, true); 1506 free_old_xmit_skbs(sq, true);
1450 __netif_tx_unlock(txq); 1507 __netif_tx_unlock(txq);
@@ -2396,6 +2453,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2396 return -ENOMEM; 2453 return -ENOMEM;
2397 } 2454 }
2398 2455
2456 old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
2457 if (!prog && !old_prog)
2458 return 0;
2459
2399 if (prog) { 2460 if (prog) {
2400 prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); 2461 prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
2401 if (IS_ERR(prog)) 2462 if (IS_ERR(prog))
@@ -2403,36 +2464,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2403 } 2464 }
2404 2465
2405 /* Make sure NAPI is not using any XDP TX queues for RX. */ 2466 /* Make sure NAPI is not using any XDP TX queues for RX. */
2406 if (netif_running(dev)) 2467 if (netif_running(dev)) {
2407 for (i = 0; i < vi->max_queue_pairs; i++) 2468 for (i = 0; i < vi->max_queue_pairs; i++) {
2408 napi_disable(&vi->rq[i].napi); 2469 napi_disable(&vi->rq[i].napi);
2470 virtnet_napi_tx_disable(&vi->sq[i].napi);
2471 }
2472 }
2473
2474 if (!prog) {
2475 for (i = 0; i < vi->max_queue_pairs; i++) {
2476 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2477 if (i == 0)
2478 virtnet_restore_guest_offloads(vi);
2479 }
2480 synchronize_net();
2481 }
2409 2482
2410 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
2411 err = _virtnet_set_queues(vi, curr_qp + xdp_qp); 2483 err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
2412 if (err) 2484 if (err)
2413 goto err; 2485 goto err;
2486 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
2414 vi->xdp_queue_pairs = xdp_qp; 2487 vi->xdp_queue_pairs = xdp_qp;
2415 2488
2416 for (i = 0; i < vi->max_queue_pairs; i++) { 2489 if (prog) {
2417 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 2490 for (i = 0; i < vi->max_queue_pairs; i++) {
2418 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 2491 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2419 if (i == 0) { 2492 if (i == 0 && !old_prog)
2420 if (!old_prog)
2421 virtnet_clear_guest_offloads(vi); 2493 virtnet_clear_guest_offloads(vi);
2422 if (!prog)
2423 virtnet_restore_guest_offloads(vi);
2424 } 2494 }
2495 }
2496
2497 for (i = 0; i < vi->max_queue_pairs; i++) {
2425 if (old_prog) 2498 if (old_prog)
2426 bpf_prog_put(old_prog); 2499 bpf_prog_put(old_prog);
2427 if (netif_running(dev)) 2500 if (netif_running(dev)) {
2428 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 2501 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2502 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2503 &vi->sq[i].napi);
2504 }
2429 } 2505 }
2430 2506
2431 return 0; 2507 return 0;
2432 2508
2433err: 2509err:
2434 for (i = 0; i < vi->max_queue_pairs; i++) 2510 if (!prog) {
2435 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 2511 virtnet_clear_guest_offloads(vi);
2512 for (i = 0; i < vi->max_queue_pairs; i++)
2513 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
2514 }
2515
2516 if (netif_running(dev)) {
2517 for (i = 0; i < vi->max_queue_pairs; i++) {
2518 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2519 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2520 &vi->sq[i].napi);
2521 }
2522 }
2436 if (prog) 2523 if (prog)
2437 bpf_prog_sub(prog, vi->max_queue_pairs - 1); 2524 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
2438 return err; 2525 return err;
@@ -2614,16 +2701,6 @@ static void free_receive_page_frags(struct virtnet_info *vi)
2614 put_page(vi->rq[i].alloc_frag.page); 2701 put_page(vi->rq[i].alloc_frag.page);
2615} 2702}
2616 2703
2617static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
2618{
2619 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
2620 return false;
2621 else if (q < vi->curr_queue_pairs)
2622 return true;
2623 else
2624 return false;
2625}
2626
2627static void free_unused_bufs(struct virtnet_info *vi) 2704static void free_unused_bufs(struct virtnet_info *vi)
2628{ 2705{
2629 void *buf; 2706 void *buf;
@@ -2632,10 +2709,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
2632 for (i = 0; i < vi->max_queue_pairs; i++) { 2709 for (i = 0; i < vi->max_queue_pairs; i++) {
2633 struct virtqueue *vq = vi->sq[i].vq; 2710 struct virtqueue *vq = vi->sq[i].vq;
2634 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 2711 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
2635 if (!is_xdp_raw_buffer_queue(vi, i)) 2712 if (!is_xdp_frame(buf))
2636 dev_kfree_skb(buf); 2713 dev_kfree_skb(buf);
2637 else 2714 else
2638 put_page(virt_to_head_page(buf)); 2715 xdp_return_frame(ptr_to_xdp(buf));
2639 } 2716 }
2640 } 2717 }
2641 2718
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index d5dc823f781e..fa78d2b14136 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1575,7 +1575,7 @@ try:
1575 dev->stats.tx_packets++; 1575 dev->stats.tx_packets++;
1576 dev->stats.tx_bytes += skb->len; 1576 dev->stats.tx_bytes += skb->len;
1577 } 1577 }
1578 dev_kfree_skb_irq(skb); 1578 dev_consume_skb_irq(skb);
1579 dpriv->tx_skbuff[cur] = NULL; 1579 dpriv->tx_skbuff[cur] = NULL;
1580 ++dpriv->tx_dirty; 1580 ++dpriv->tx_dirty;
1581 } else { 1581 } else {
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 66d889d54e58..a08f04c3f644 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -482,7 +482,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
482 memset(priv->tx_buffer + 482 memset(priv->tx_buffer +
483 (be32_to_cpu(bd->buf) - priv->dma_tx_addr), 483 (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
484 0, skb->len); 484 0, skb->len);
485 dev_kfree_skb_irq(skb); 485 dev_consume_skb_irq(skb);
486 486
487 priv->tx_skbuff[priv->skb_dirtytx] = NULL; 487 priv->tx_skbuff[priv->skb_dirtytx] = NULL;
488 priv->skb_dirtytx = 488 priv->skb_dirtytx =
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 399b501f3c3c..e8891f5fc83a 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -548,7 +548,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
548 { 548 {
549 .id = WCN3990_HW_1_0_DEV_VERSION, 549 .id = WCN3990_HW_1_0_DEV_VERSION,
550 .dev_id = 0, 550 .dev_id = 0,
551 .bus = ATH10K_BUS_PCI, 551 .bus = ATH10K_BUS_SNOC,
552 .name = "wcn3990 hw1.0", 552 .name = "wcn3990 hw1.0",
553 .continuous_frag_desc = true, 553 .continuous_frag_desc = true,
554 .tx_chain_mask = 0x7, 554 .tx_chain_mask = 0x7,
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 491ca3c8b43c..83d5bceea08f 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -1,6 +1,6 @@
1config IWLWIFI 1config IWLWIFI
2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " 2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
3 depends on PCI && HAS_IOMEM 3 depends on PCI && HAS_IOMEM && CFG80211
4 select FW_LOADER 4 select FW_LOADER
5 ---help--- 5 ---help---
6 Select to build the driver supporting the: 6 Select to build the driver supporting the:
@@ -47,6 +47,7 @@ if IWLWIFI
47config IWLWIFI_LEDS 47config IWLWIFI_LEDS
48 bool 48 bool
49 depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI 49 depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
50 depends on IWLMVM || IWLDVM
50 select LEDS_TRIGGERS 51 select LEDS_TRIGGERS
51 select MAC80211_LEDS 52 select MAC80211_LEDS
52 default y 53 default y
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index 497e762978cc..b2cabce1d74d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -212,24 +212,24 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
212 mt76x02_add_rate_power_offset(t, delta); 212 mt76x02_add_rate_power_offset(t, delta);
213} 213}
214 214
215void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) 215void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp)
216{ 216{
217 struct mt76x0_chan_map { 217 struct mt76x0_chan_map {
218 u8 chan; 218 u8 chan;
219 u8 offset; 219 u8 offset;
220 } chan_map[] = { 220 } chan_map[] = {
221 { 2, 0 }, { 4, 1 }, { 6, 2 }, { 8, 3 }, 221 { 2, 0 }, { 4, 2 }, { 6, 4 }, { 8, 6 },
222 { 10, 4 }, { 12, 5 }, { 14, 6 }, { 38, 0 }, 222 { 10, 8 }, { 12, 10 }, { 14, 12 }, { 38, 0 },
223 { 44, 1 }, { 48, 2 }, { 54, 3 }, { 60, 4 }, 223 { 44, 2 }, { 48, 4 }, { 54, 6 }, { 60, 8 },
224 { 64, 5 }, { 102, 6 }, { 108, 7 }, { 112, 8 }, 224 { 64, 10 }, { 102, 12 }, { 108, 14 }, { 112, 16 },
225 { 118, 9 }, { 124, 10 }, { 128, 11 }, { 134, 12 }, 225 { 118, 18 }, { 124, 20 }, { 128, 22 }, { 134, 24 },
226 { 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 }, 226 { 140, 26 }, { 151, 28 }, { 157, 30 }, { 161, 32 },
227 { 167, 17 }, { 171, 18 }, { 173, 19 }, 227 { 167, 34 }, { 171, 36 }, { 175, 38 },
228 }; 228 };
229 struct ieee80211_channel *chan = dev->mt76.chandef.chan; 229 struct ieee80211_channel *chan = dev->mt76.chandef.chan;
230 u8 offset, addr; 230 u8 offset, addr;
231 int i, idx = 0;
231 u16 data; 232 u16 data;
232 int i;
233 233
234 if (mt76x0_tssi_enabled(dev)) { 234 if (mt76x0_tssi_enabled(dev)) {
235 s8 target_power; 235 s8 target_power;
@@ -239,14 +239,14 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
239 else 239 else
240 data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER); 240 data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER);
241 target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7]; 241 target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7];
242 info[0] = target_power + mt76x0_get_delta(dev); 242 *tp = target_power + mt76x0_get_delta(dev);
243 info[1] = 0;
244 243
245 return; 244 return;
246 } 245 }
247 246
248 for (i = 0; i < ARRAY_SIZE(chan_map); i++) { 247 for (i = 0; i < ARRAY_SIZE(chan_map); i++) {
249 if (chan_map[i].chan <= chan->hw_value) { 248 if (chan->hw_value <= chan_map[i].chan) {
249 idx = (chan->hw_value == chan_map[i].chan);
250 offset = chan_map[i].offset; 250 offset = chan_map[i].offset;
251 break; 251 break;
252 } 252 }
@@ -258,13 +258,16 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
258 addr = MT_EE_TX_POWER_DELTA_BW80 + offset; 258 addr = MT_EE_TX_POWER_DELTA_BW80 + offset;
259 } else { 259 } else {
260 switch (chan->hw_value) { 260 switch (chan->hw_value) {
261 case 42:
262 offset = 2;
263 break;
261 case 58: 264 case 58:
262 offset = 8; 265 offset = 8;
263 break; 266 break;
264 case 106: 267 case 106:
265 offset = 14; 268 offset = 14;
266 break; 269 break;
267 case 112: 270 case 122:
268 offset = 20; 271 offset = 20;
269 break; 272 break;
270 case 155: 273 case 155:
@@ -277,14 +280,9 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
277 } 280 }
278 281
279 data = mt76x02_eeprom_get(dev, addr); 282 data = mt76x02_eeprom_get(dev, addr);
280 283 *tp = data >> (8 * idx);
281 info[0] = data; 284 if (*tp < 0 || *tp > 0x3f)
282 if (!info[0] || info[0] > 0x3f) 285 *tp = 5;
283 info[0] = 5;
284
285 info[1] = data >> 8;
286 if (!info[1] || info[1] > 0x3f)
287 info[1] = 5;
288} 286}
289 287
290static int mt76x0_check_eeprom(struct mt76x02_dev *dev) 288static int mt76x0_check_eeprom(struct mt76x02_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
index ee9ade9f3c8b..42b259f90b6d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -26,7 +26,7 @@ struct mt76x02_dev;
26int mt76x0_eeprom_init(struct mt76x02_dev *dev); 26int mt76x0_eeprom_init(struct mt76x02_dev *dev);
27void mt76x0_read_rx_gain(struct mt76x02_dev *dev); 27void mt76x0_read_rx_gain(struct mt76x02_dev *dev);
28void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev); 28void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev);
29void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info); 29void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp);
30 30
31static inline s8 s6_to_s8(u32 val) 31static inline s8 s6_to_s8(u32 val)
32{ 32{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 5a4c6f34267e..1117cdc15b04 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -845,17 +845,17 @@ static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev)
845void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) 845void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
846{ 846{
847 struct mt76_rate_power *t = &dev->mt76.rate_power; 847 struct mt76_rate_power *t = &dev->mt76.rate_power;
848 u8 info[2]; 848 s8 info;
849 849
850 mt76x0_get_tx_power_per_rate(dev); 850 mt76x0_get_tx_power_per_rate(dev);
851 mt76x0_get_power_info(dev, info); 851 mt76x0_get_power_info(dev, &info);
852 852
853 mt76x02_add_rate_power_offset(t, info[0]); 853 mt76x02_add_rate_power_offset(t, info);
854 mt76x02_limit_rate_power(t, dev->mt76.txpower_conf); 854 mt76x02_limit_rate_power(t, dev->mt76.txpower_conf);
855 dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); 855 dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
856 mt76x02_add_rate_power_offset(t, -info[0]); 856 mt76x02_add_rate_power_offset(t, -info);
857 857
858 mt76x02_phy_set_txpower(dev, info[0], info[1]); 858 mt76x02_phy_set_txpower(dev, info, info);
859} 859}
860 860
861void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) 861void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index bd10165d7eec..4d4b07701149 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
164 } 164 }
165 165
166 sdio_claim_host(func); 166 sdio_claim_host(func);
167 /*
168 * To guarantee that the SDIO card is power cycled, as required to make
169 * the FW programming to succeed, let's do a brute force HW reset.
170 */
171 mmc_hw_reset(card->host);
172
167 sdio_enable_func(func); 173 sdio_enable_func(func);
168 sdio_release_host(func); 174 sdio_release_host(func);
169 175
@@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
174{ 180{
175 struct sdio_func *func = dev_to_sdio_func(glue->dev); 181 struct sdio_func *func = dev_to_sdio_func(glue->dev);
176 struct mmc_card *card = func->card; 182 struct mmc_card *card = func->card;
177 int error;
178 183
179 sdio_claim_host(func); 184 sdio_claim_host(func);
180 sdio_disable_func(func); 185 sdio_disable_func(func);
181 sdio_release_host(func); 186 sdio_release_host(func);
182 187
183 /* Let runtime PM know the card is powered off */ 188 /* Let runtime PM know the card is powered off */
184 error = pm_runtime_put(&card->dev); 189 pm_runtime_put(&card->dev);
185 if (error < 0 && error != -EBUSY) {
186 dev_err(&card->dev, "%s failed: %i\n", __func__, error);
187
188 return error;
189 }
190
191 return 0; 190 return 0;
192} 191}
193 192
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 52e47dac028f..80f843030e36 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -310,6 +310,9 @@ static int imx6_pcie_attach_pd(struct device *dev)
310 imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); 310 imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
311 if (IS_ERR(imx6_pcie->pd_pcie)) 311 if (IS_ERR(imx6_pcie->pd_pcie))
312 return PTR_ERR(imx6_pcie->pd_pcie); 312 return PTR_ERR(imx6_pcie->pd_pcie);
313 /* Do nothing when power domain missing */
314 if (!imx6_pcie->pd_pcie)
315 return 0;
313 link = device_link_add(dev, imx6_pcie->pd_pcie, 316 link = device_link_add(dev, imx6_pcie->pd_pcie,
314 DL_FLAG_STATELESS | 317 DL_FLAG_STATELESS |
315 DL_FLAG_PM_RUNTIME | 318 DL_FLAG_PM_RUNTIME |
@@ -323,13 +326,13 @@ static int imx6_pcie_attach_pd(struct device *dev)
323 if (IS_ERR(imx6_pcie->pd_pcie_phy)) 326 if (IS_ERR(imx6_pcie->pd_pcie_phy))
324 return PTR_ERR(imx6_pcie->pd_pcie_phy); 327 return PTR_ERR(imx6_pcie->pd_pcie_phy);
325 328
326 device_link_add(dev, imx6_pcie->pd_pcie_phy, 329 link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
327 DL_FLAG_STATELESS | 330 DL_FLAG_STATELESS |
328 DL_FLAG_PM_RUNTIME | 331 DL_FLAG_PM_RUNTIME |
329 DL_FLAG_RPM_ACTIVE); 332 DL_FLAG_RPM_ACTIVE);
330 if (IS_ERR(link)) { 333 if (!link) {
331 dev_err(dev, "Failed to add device_link to pcie_phy pd: %ld\n", PTR_ERR(link)); 334 dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
332 return PTR_ERR(link); 335 return -EINVAL;
333 } 336 }
334 337
335 return 0; 338 return 0;
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index b171b6bc15c8..0c389a30ef5d 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -22,7 +22,6 @@
22#include <linux/resource.h> 22#include <linux/resource.h>
23#include <linux/of_pci.h> 23#include <linux/of_pci.h>
24#include <linux/of_irq.h> 24#include <linux/of_irq.h>
25#include <linux/gpio/consumer.h>
26 25
27#include "pcie-designware.h" 26#include "pcie-designware.h"
28 27
@@ -30,7 +29,6 @@ struct armada8k_pcie {
30 struct dw_pcie *pci; 29 struct dw_pcie *pci;
31 struct clk *clk; 30 struct clk *clk;
32 struct clk *clk_reg; 31 struct clk *clk_reg;
33 struct gpio_desc *reset_gpio;
34}; 32};
35 33
36#define PCIE_VENDOR_REGS_OFFSET 0x8000 34#define PCIE_VENDOR_REGS_OFFSET 0x8000
@@ -139,12 +137,6 @@ static int armada8k_pcie_host_init(struct pcie_port *pp)
139 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 137 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
140 struct armada8k_pcie *pcie = to_armada8k_pcie(pci); 138 struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
141 139
142 if (pcie->reset_gpio) {
143 /* assert and then deassert the reset signal */
144 gpiod_set_value_cansleep(pcie->reset_gpio, 1);
145 msleep(100);
146 gpiod_set_value_cansleep(pcie->reset_gpio, 0);
147 }
148 dw_pcie_setup_rc(pp); 140 dw_pcie_setup_rc(pp);
149 armada8k_pcie_establish_link(pcie); 141 armada8k_pcie_establish_link(pcie);
150 142
@@ -257,14 +249,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
257 goto fail_clkreg; 249 goto fail_clkreg;
258 } 250 }
259 251
260 /* Get reset gpio signal and hold asserted (logically high) */
261 pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset",
262 GPIOD_OUT_HIGH);
263 if (IS_ERR(pcie->reset_gpio)) {
264 ret = PTR_ERR(pcie->reset_gpio);
265 goto fail_clkreg;
266 }
267
268 platform_set_drvdata(pdev, pcie); 252 platform_set_drvdata(pdev, pcie);
269 253
270 ret = armada8k_add_pcie_port(pcie, pdev); 254 ret = armada8k_add_pcie_port(pcie, pdev);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 05044e323ea5..03ec7a5d9d0b 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1513,7 +1513,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1513 .matches = { 1513 .matches = {
1514 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1514 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1515 DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), 1515 DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
1516 DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 1516 DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1517 }, 1517 },
1518 }, 1518 },
1519 { 1519 {
@@ -1521,7 +1521,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1521 .matches = { 1521 .matches = {
1522 DMI_MATCH(DMI_SYS_VENDOR, "HP"), 1522 DMI_MATCH(DMI_SYS_VENDOR, "HP"),
1523 DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), 1523 DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
1524 DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 1524 DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1525 }, 1525 },
1526 }, 1526 },
1527 { 1527 {
@@ -1529,7 +1529,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1529 .matches = { 1529 .matches = {
1530 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1530 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1531 DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), 1531 DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
1532 DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 1532 DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1533 }, 1533 },
1534 }, 1534 },
1535 { 1535 {
@@ -1537,7 +1537,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1537 .matches = { 1537 .matches = {
1538 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1538 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1539 DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), 1539 DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
1540 DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 1540 DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1541 }, 1541 },
1542 }, 1542 },
1543 {} 1543 {}
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 1817786ab6aa..a005cbccb4f7 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -45,12 +45,14 @@ config PINCTRL_MT2701
45config PINCTRL_MT7623 45config PINCTRL_MT7623
46 bool "Mediatek MT7623 pin control with generic binding" 46 bool "Mediatek MT7623 pin control with generic binding"
47 depends on MACH_MT7623 || COMPILE_TEST 47 depends on MACH_MT7623 || COMPILE_TEST
48 depends on OF
48 default MACH_MT7623 49 default MACH_MT7623
49 select PINCTRL_MTK_MOORE 50 select PINCTRL_MTK_MOORE
50 51
51config PINCTRL_MT7629 52config PINCTRL_MT7629
52 bool "Mediatek MT7629 pin control" 53 bool "Mediatek MT7629 pin control"
53 depends on MACH_MT7629 || COMPILE_TEST 54 depends on MACH_MT7629 || COMPILE_TEST
55 depends on OF
54 default MACH_MT7629 56 default MACH_MT7629
55 select PINCTRL_MTK_MOORE 57 select PINCTRL_MTK_MOORE
56 58
@@ -92,6 +94,7 @@ config PINCTRL_MT6797
92 94
93config PINCTRL_MT7622 95config PINCTRL_MT7622
94 bool "MediaTek MT7622 pin control" 96 bool "MediaTek MT7622 pin control"
97 depends on OF
95 depends on ARM64 || COMPILE_TEST 98 depends on ARM64 || COMPILE_TEST
96 default ARM64 && ARCH_MEDIATEK 99 default ARM64 && ARCH_MEDIATEK
97 select PINCTRL_MTK_MOORE 100 select PINCTRL_MTK_MOORE
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index b03481ef99a1..98905d4a79ca 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -832,8 +832,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
832 break; 832 break;
833 833
834 case MCP_TYPE_S18: 834 case MCP_TYPE_S18:
835 one_regmap_config =
836 devm_kmemdup(dev, &mcp23x17_regmap,
837 sizeof(struct regmap_config), GFP_KERNEL);
838 if (!one_regmap_config)
839 return -ENOMEM;
835 mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, 840 mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
836 &mcp23x17_regmap); 841 one_regmap_config);
837 mcp->reg_shift = 1; 842 mcp->reg_shift = 1;
838 mcp->chip.ngpio = 16; 843 mcp->chip.ngpio = 16;
839 mcp->chip.label = "mcp23s18"; 844 mcp->chip.label = "mcp23s18";
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
index aa8b58125568..ef4268cc6227 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
@@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 };
588static const struct sunxi_pinctrl_desc h6_pinctrl_data = { 588static const struct sunxi_pinctrl_desc h6_pinctrl_data = {
589 .pins = h6_pins, 589 .pins = h6_pins,
590 .npins = ARRAY_SIZE(h6_pins), 590 .npins = ARRAY_SIZE(h6_pins),
591 .irq_banks = 3, 591 .irq_banks = 4,
592 .irq_bank_map = h6_irq_bank_map, 592 .irq_bank_map = h6_irq_bank_map,
593 .irq_read_needs_mux = true, 593 .irq_read_needs_mux = true,
594}; 594};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 5d9184d18c16..0e7fa69e93df 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -698,26 +698,24 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset)
698{ 698{
699 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); 699 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
700 unsigned short bank = offset / PINS_PER_BANK; 700 unsigned short bank = offset / PINS_PER_BANK;
701 struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; 701 unsigned short bank_offset = bank - pctl->desc->pin_base /
702 struct regulator *reg; 702 PINS_PER_BANK;
703 struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset];
704 struct regulator *reg = s_reg->regulator;
705 char supply[16];
703 int ret; 706 int ret;
704 707
705 reg = s_reg->regulator; 708 if (reg) {
706 if (!reg) {
707 char supply[16];
708
709 snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank);
710 reg = regulator_get(pctl->dev, supply);
711 if (IS_ERR(reg)) {
712 dev_err(pctl->dev, "Couldn't get bank P%c regulator\n",
713 'A' + bank);
714 return PTR_ERR(reg);
715 }
716
717 s_reg->regulator = reg;
718 refcount_set(&s_reg->refcount, 1);
719 } else {
720 refcount_inc(&s_reg->refcount); 709 refcount_inc(&s_reg->refcount);
710 return 0;
711 }
712
713 snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank);
714 reg = regulator_get(pctl->dev, supply);
715 if (IS_ERR(reg)) {
716 dev_err(pctl->dev, "Couldn't get bank P%c regulator\n",
717 'A' + bank);
718 return PTR_ERR(reg);
721 } 719 }
722 720
723 ret = regulator_enable(reg); 721 ret = regulator_enable(reg);
@@ -727,13 +725,13 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset)
727 goto out; 725 goto out;
728 } 726 }
729 727
728 s_reg->regulator = reg;
729 refcount_set(&s_reg->refcount, 1);
730
730 return 0; 731 return 0;
731 732
732out: 733out:
733 if (refcount_dec_and_test(&s_reg->refcount)) { 734 regulator_put(s_reg->regulator);
734 regulator_put(s_reg->regulator);
735 s_reg->regulator = NULL;
736 }
737 735
738 return ret; 736 return ret;
739} 737}
@@ -742,7 +740,9 @@ static int sunxi_pmx_free(struct pinctrl_dev *pctldev, unsigned offset)
742{ 740{
743 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); 741 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
744 unsigned short bank = offset / PINS_PER_BANK; 742 unsigned short bank = offset / PINS_PER_BANK;
745 struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; 743 unsigned short bank_offset = bank - pctl->desc->pin_base /
744 PINS_PER_BANK;
745 struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset];
746 746
747 if (!refcount_dec_and_test(&s_reg->refcount)) 747 if (!refcount_dec_and_test(&s_reg->refcount))
748 return 0; 748 return 0;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index e340d2a24b44..034c0317c8d6 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -136,7 +136,7 @@ struct sunxi_pinctrl {
136 struct gpio_chip *chip; 136 struct gpio_chip *chip;
137 const struct sunxi_pinctrl_desc *desc; 137 const struct sunxi_pinctrl_desc *desc;
138 struct device *dev; 138 struct device *dev;
139 struct sunxi_pinctrl_regulator regulators[12]; 139 struct sunxi_pinctrl_regulator regulators[9];
140 struct irq_domain *domain; 140 struct irq_domain *domain;
141 struct sunxi_pinctrl_function *functions; 141 struct sunxi_pinctrl_function *functions;
142 unsigned nfunctions; 142 unsigned nfunctions;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 5e2109c54c7c..b5e9db85e881 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -905,6 +905,7 @@ config TOSHIBA_WMI
905config ACPI_CMPC 905config ACPI_CMPC
906 tristate "CMPC Laptop Extras" 906 tristate "CMPC Laptop Extras"
907 depends on ACPI && INPUT 907 depends on ACPI && INPUT
908 depends on BACKLIGHT_LCD_SUPPORT
908 depends on RFKILL || RFKILL=n 909 depends on RFKILL || RFKILL=n
909 select BACKLIGHT_CLASS_DEVICE 910 select BACKLIGHT_CLASS_DEVICE
910 help 911 help
@@ -1128,6 +1129,7 @@ config INTEL_OAKTRAIL
1128config SAMSUNG_Q10 1129config SAMSUNG_Q10
1129 tristate "Samsung Q10 Extras" 1130 tristate "Samsung Q10 Extras"
1130 depends on ACPI 1131 depends on ACPI
1132 depends on BACKLIGHT_LCD_SUPPORT
1131 select BACKLIGHT_CLASS_DEVICE 1133 select BACKLIGHT_CLASS_DEVICE
1132 ---help--- 1134 ---help---
1133 This driver provides support for backlight control on Samsung Q10 1135 This driver provides support for backlight control on Samsung Q10
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index d65650ef6b41..71d27a804920 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -22,6 +22,7 @@
22#include <linux/hashtable.h> 22#include <linux/hashtable.h>
23#include <linux/ip.h> 23#include <linux/ip.h>
24#include <linux/refcount.h> 24#include <linux/refcount.h>
25#include <linux/workqueue.h>
25 26
26#include <net/ipv6.h> 27#include <net/ipv6.h>
27#include <net/if_inet6.h> 28#include <net/if_inet6.h>
@@ -784,6 +785,7 @@ struct qeth_card {
784 struct qeth_seqno seqno; 785 struct qeth_seqno seqno;
785 struct qeth_card_options options; 786 struct qeth_card_options options;
786 787
788 struct workqueue_struct *event_wq;
787 wait_queue_head_t wait_q; 789 wait_queue_head_t wait_q;
788 spinlock_t mclock; 790 spinlock_t mclock;
789 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 791 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -954,7 +956,6 @@ extern const struct attribute_group *qeth_osn_attr_groups[];
954extern const struct attribute_group qeth_device_attr_group; 956extern const struct attribute_group qeth_device_attr_group;
955extern const struct attribute_group qeth_device_blkt_group; 957extern const struct attribute_group qeth_device_blkt_group;
956extern const struct device_type qeth_generic_devtype; 958extern const struct device_type qeth_generic_devtype;
957extern struct workqueue_struct *qeth_wq;
958 959
959int qeth_card_hw_is_reachable(struct qeth_card *); 960int qeth_card_hw_is_reachable(struct qeth_card *);
960const char *qeth_get_cardname_short(struct qeth_card *); 961const char *qeth_get_cardname_short(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index dcc06e48b70b..6ef0c89370b5 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -74,8 +74,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
74static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); 74static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
75static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); 75static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
76 76
77struct workqueue_struct *qeth_wq; 77static struct workqueue_struct *qeth_wq;
78EXPORT_SYMBOL_GPL(qeth_wq);
79 78
80int qeth_card_hw_is_reachable(struct qeth_card *card) 79int qeth_card_hw_is_reachable(struct qeth_card *card)
81{ 80{
@@ -540,6 +539,7 @@ static int __qeth_issue_next_read(struct qeth_card *card)
540 QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", 539 QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
541 rc, CARD_DEVID(card)); 540 rc, CARD_DEVID(card));
542 atomic_set(&channel->irq_pending, 0); 541 atomic_set(&channel->irq_pending, 0);
542 qeth_release_buffer(channel, iob);
543 card->read_or_write_problem = 1; 543 card->read_or_write_problem = 1;
544 qeth_schedule_recovery(card); 544 qeth_schedule_recovery(card);
545 wake_up(&card->wait_q); 545 wake_up(&card->wait_q);
@@ -1101,6 +1101,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1101 rc = qeth_get_problem(card, cdev, irb); 1101 rc = qeth_get_problem(card, cdev, irb);
1102 if (rc) { 1102 if (rc) {
1103 card->read_or_write_problem = 1; 1103 card->read_or_write_problem = 1;
1104 if (iob)
1105 qeth_release_buffer(iob->channel, iob);
1104 qeth_clear_ipacmd_list(card); 1106 qeth_clear_ipacmd_list(card);
1105 qeth_schedule_recovery(card); 1107 qeth_schedule_recovery(card);
1106 goto out; 1108 goto out;
@@ -1439,6 +1441,10 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
1439 CARD_RDEV(card) = gdev->cdev[0]; 1441 CARD_RDEV(card) = gdev->cdev[0];
1440 CARD_WDEV(card) = gdev->cdev[1]; 1442 CARD_WDEV(card) = gdev->cdev[1];
1441 CARD_DDEV(card) = gdev->cdev[2]; 1443 CARD_DDEV(card) = gdev->cdev[2];
1444
1445 card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev));
1446 if (!card->event_wq)
1447 goto out_wq;
1442 if (qeth_setup_channel(&card->read, true)) 1448 if (qeth_setup_channel(&card->read, true))
1443 goto out_ip; 1449 goto out_ip;
1444 if (qeth_setup_channel(&card->write, true)) 1450 if (qeth_setup_channel(&card->write, true))
@@ -1454,6 +1460,8 @@ out_data:
1454out_channel: 1460out_channel:
1455 qeth_clean_channel(&card->read); 1461 qeth_clean_channel(&card->read);
1456out_ip: 1462out_ip:
1463 destroy_workqueue(card->event_wq);
1464out_wq:
1457 dev_set_drvdata(&gdev->dev, NULL); 1465 dev_set_drvdata(&gdev->dev, NULL);
1458 kfree(card); 1466 kfree(card);
1459out: 1467out:
@@ -1782,6 +1790,7 @@ static int qeth_idx_activate_get_answer(struct qeth_card *card,
1782 QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); 1790 QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
1783 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 1791 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
1784 atomic_set(&channel->irq_pending, 0); 1792 atomic_set(&channel->irq_pending, 0);
1793 qeth_release_buffer(channel, iob);
1785 wake_up(&card->wait_q); 1794 wake_up(&card->wait_q);
1786 return rc; 1795 return rc;
1787 } 1796 }
@@ -1851,6 +1860,7 @@ static int qeth_idx_activate_channel(struct qeth_card *card,
1851 rc); 1860 rc);
1852 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 1861 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
1853 atomic_set(&channel->irq_pending, 0); 1862 atomic_set(&channel->irq_pending, 0);
1863 qeth_release_buffer(channel, iob);
1854 wake_up(&card->wait_q); 1864 wake_up(&card->wait_q);
1855 return rc; 1865 return rc;
1856 } 1866 }
@@ -2031,6 +2041,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2031 } 2041 }
2032 reply = qeth_alloc_reply(card); 2042 reply = qeth_alloc_reply(card);
2033 if (!reply) { 2043 if (!reply) {
2044 qeth_release_buffer(channel, iob);
2034 return -ENOMEM; 2045 return -ENOMEM;
2035 } 2046 }
2036 reply->callback = reply_cb; 2047 reply->callback = reply_cb;
@@ -2362,11 +2373,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
2362 return 0; 2373 return 0;
2363} 2374}
2364 2375
2365static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q) 2376static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2366{ 2377{
2367 if (!q) 2378 if (!q)
2368 return; 2379 return;
2369 2380
2381 qeth_clear_outq_buffers(q, 1);
2370 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2382 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2371 kfree(q); 2383 kfree(q);
2372} 2384}
@@ -2440,10 +2452,8 @@ out_freeoutqbufs:
2440 card->qdio.out_qs[i]->bufs[j] = NULL; 2452 card->qdio.out_qs[i]->bufs[j] = NULL;
2441 } 2453 }
2442out_freeoutq: 2454out_freeoutq:
2443 while (i > 0) { 2455 while (i > 0)
2444 qeth_free_qdio_out_buf(card->qdio.out_qs[--i]); 2456 qeth_free_output_queue(card->qdio.out_qs[--i]);
2445 qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
2446 }
2447 kfree(card->qdio.out_qs); 2457 kfree(card->qdio.out_qs);
2448 card->qdio.out_qs = NULL; 2458 card->qdio.out_qs = NULL;
2449out_freepool: 2459out_freepool:
@@ -2476,10 +2486,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
2476 qeth_free_buffer_pool(card); 2486 qeth_free_buffer_pool(card);
2477 /* free outbound qdio_qs */ 2487 /* free outbound qdio_qs */
2478 if (card->qdio.out_qs) { 2488 if (card->qdio.out_qs) {
2479 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2489 for (i = 0; i < card->qdio.no_out_queues; i++)
2480 qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); 2490 qeth_free_output_queue(card->qdio.out_qs[i]);
2481 qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
2482 }
2483 kfree(card->qdio.out_qs); 2491 kfree(card->qdio.out_qs);
2484 card->qdio.out_qs = NULL; 2492 card->qdio.out_qs = NULL;
2485 } 2493 }
@@ -4994,6 +5002,7 @@ static void qeth_core_free_card(struct qeth_card *card)
4994 qeth_clean_channel(&card->read); 5002 qeth_clean_channel(&card->read);
4995 qeth_clean_channel(&card->write); 5003 qeth_clean_channel(&card->write);
4996 qeth_clean_channel(&card->data); 5004 qeth_clean_channel(&card->data);
5005 destroy_workqueue(card->event_wq);
4997 qeth_free_qdio_buffers(card); 5006 qeth_free_qdio_buffers(card);
4998 unregister_service_level(&card->qeth_service_level); 5007 unregister_service_level(&card->qeth_service_level);
4999 dev_set_drvdata(&card->gdev->dev, NULL); 5008 dev_set_drvdata(&card->gdev->dev, NULL);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 82f50cc30b0a..ef0b5eaf2532 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -319,6 +319,8 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
319 qeth_clear_cmd_buffers(&card->read); 319 qeth_clear_cmd_buffers(&card->read);
320 qeth_clear_cmd_buffers(&card->write); 320 qeth_clear_cmd_buffers(&card->write);
321 } 321 }
322
323 flush_workqueue(card->event_wq);
322} 324}
323 325
324static int qeth_l2_process_inbound_buffer(struct qeth_card *card, 326static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
@@ -695,6 +697,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
695 697
696 if (cgdev->state == CCWGROUP_ONLINE) 698 if (cgdev->state == CCWGROUP_ONLINE)
697 qeth_l2_set_offline(cgdev); 699 qeth_l2_set_offline(cgdev);
700
701 cancel_work_sync(&card->close_dev_work);
698 if (qeth_netdev_is_registered(card->dev)) 702 if (qeth_netdev_is_registered(card->dev))
699 unregister_netdev(card->dev); 703 unregister_netdev(card->dev);
700} 704}
@@ -1330,7 +1334,7 @@ static void qeth_bridge_state_change(struct qeth_card *card,
1330 data->card = card; 1334 data->card = card;
1331 memcpy(&data->qports, qports, 1335 memcpy(&data->qports, qports,
1332 sizeof(struct qeth_sbp_state_change) + extrasize); 1336 sizeof(struct qeth_sbp_state_change) + extrasize);
1333 queue_work(qeth_wq, &data->worker); 1337 queue_work(card->event_wq, &data->worker);
1334} 1338}
1335 1339
1336struct qeth_bridge_host_data { 1340struct qeth_bridge_host_data {
@@ -1402,7 +1406,7 @@ static void qeth_bridge_host_event(struct qeth_card *card,
1402 data->card = card; 1406 data->card = card;
1403 memcpy(&data->hostevs, hostevs, 1407 memcpy(&data->hostevs, hostevs,
1404 sizeof(struct qeth_ipacmd_addr_change) + extrasize); 1408 sizeof(struct qeth_ipacmd_addr_change) + extrasize);
1405 queue_work(qeth_wq, &data->worker); 1409 queue_work(card->event_wq, &data->worker);
1406} 1410}
1407 1411
1408/* SETBRIDGEPORT support; sending commands */ 1412/* SETBRIDGEPORT support; sending commands */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 59535ecb1487..f7d0623999ba 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1428,6 +1428,8 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
1428 qeth_clear_cmd_buffers(&card->read); 1428 qeth_clear_cmd_buffers(&card->read);
1429 qeth_clear_cmd_buffers(&card->write); 1429 qeth_clear_cmd_buffers(&card->write);
1430 } 1430 }
1431
1432 flush_workqueue(card->event_wq);
1431} 1433}
1432 1434
1433/* 1435/*
@@ -2278,6 +2280,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
2278 if (cgdev->state == CCWGROUP_ONLINE) 2280 if (cgdev->state == CCWGROUP_ONLINE)
2279 qeth_l3_set_offline(cgdev); 2281 qeth_l3_set_offline(cgdev);
2280 2282
2283 cancel_work_sync(&card->close_dev_work);
2281 if (qeth_netdev_is_registered(card->dev)) 2284 if (qeth_netdev_is_registered(card->dev))
2282 unregister_netdev(card->dev); 2285 unregister_netdev(card->dev);
2283 qeth_l3_clear_ip_htable(card, 0); 2286 qeth_l3_clear_ip_htable(card, 0);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 9cf30d124b9e..e390f8c6d5f3 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -403,7 +403,6 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
403 goto failed; 403 goto failed;
404 404
405 /* report size limit per scatter-gather segment */ 405 /* report size limit per scatter-gather segment */
406 adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
407 adapter->ccw_device->dev.dma_parms = &adapter->dma_parms; 406 adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
408 407
409 adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM; 408 adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 00acc7144bbc..f4f6a07c5222 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -428,6 +428,8 @@ static struct scsi_host_template zfcp_scsi_host_template = {
428 .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1) 428 .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
429 * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8, 429 * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
430 /* GCD, adjusted later */ 430 /* GCD, adjusted later */
431 /* report size limit per scatter-gather segment */
432 .max_segment_size = ZFCP_QDIO_SBALE_LEN,
431 .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, 433 .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
432 .shost_attrs = zfcp_sysfs_shost_attrs, 434 .shost_attrs = zfcp_sysfs_shost_attrs,
433 .sdev_attrs = zfcp_sysfs_sdev_attrs, 435 .sdev_attrs = zfcp_sysfs_sdev_attrs,
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 128d658d472a..16957d7ac414 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -295,7 +295,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
295 if(tpnt->sdev_attrs == NULL) 295 if(tpnt->sdev_attrs == NULL)
296 tpnt->sdev_attrs = NCR_700_dev_attrs; 296 tpnt->sdev_attrs = NCR_700_dev_attrs;
297 297
298 memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript, 298 memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
299 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); 299 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
300 if(memory == NULL) { 300 if(memory == NULL) {
301 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n"); 301 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 350257c13a5b..bc9f2a2365f4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -240,6 +240,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
240 return NULL; 240 return NULL;
241 } 241 }
242 242
243 cmgr->hba = hba;
243 cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list), 244 cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
244 GFP_KERNEL); 245 GFP_KERNEL);
245 if (!cmgr->free_list) { 246 if (!cmgr->free_list) {
@@ -256,7 +257,6 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
256 goto mem_err; 257 goto mem_err;
257 } 258 }
258 259
259 cmgr->hba = hba;
260 cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); 260 cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
261 261
262 for (i = 0; i < arr_sz; i++) { 262 for (i = 0; i < arr_sz; i++) {
@@ -295,7 +295,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
295 295
296 /* Allocate pool of io_bdts - one for each bnx2fc_cmd */ 296 /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
297 mem_size = num_ios * sizeof(struct io_bdt *); 297 mem_size = num_ios * sizeof(struct io_bdt *);
298 cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL); 298 cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
299 if (!cmgr->io_bdt_pool) { 299 if (!cmgr->io_bdt_pool) {
300 printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n"); 300 printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
301 goto mem_err; 301 goto mem_err;
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index be83590ed955..ff943f477d6f 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1726,14 +1726,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1726 fc_frame_payload_op(fp) != ELS_LS_ACC) { 1726 fc_frame_payload_op(fp) != ELS_LS_ACC) {
1727 FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n"); 1727 FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
1728 fc_lport_error(lport, fp); 1728 fc_lport_error(lport, fp);
1729 goto err; 1729 goto out;
1730 } 1730 }
1731 1731
1732 flp = fc_frame_payload_get(fp, sizeof(*flp)); 1732 flp = fc_frame_payload_get(fp, sizeof(*flp));
1733 if (!flp) { 1733 if (!flp) {
1734 FC_LPORT_DBG(lport, "FLOGI bad response\n"); 1734 FC_LPORT_DBG(lport, "FLOGI bad response\n");
1735 fc_lport_error(lport, fp); 1735 fc_lport_error(lport, fp);
1736 goto err; 1736 goto out;
1737 } 1737 }
1738 1738
1739 mfs = ntohs(flp->fl_csp.sp_bb_data) & 1739 mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1743,7 +1743,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1743 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " 1743 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
1744 "lport->mfs:%hu\n", mfs, lport->mfs); 1744 "lport->mfs:%hu\n", mfs, lport->mfs);
1745 fc_lport_error(lport, fp); 1745 fc_lport_error(lport, fp);
1746 goto err; 1746 goto out;
1747 } 1747 }
1748 1748
1749 if (mfs <= lport->mfs) { 1749 if (mfs <= lport->mfs) {
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 661512bec3ac..e27f4df24021 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -62,7 +62,7 @@
62 62
63/* make sure inq_product_rev string corresponds to this version */ 63/* make sure inq_product_rev string corresponds to this version */
64#define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */ 64#define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */
65static const char *sdebug_version_date = "20180128"; 65static const char *sdebug_version_date = "20190125";
66 66
67#define MY_NAME "scsi_debug" 67#define MY_NAME "scsi_debug"
68 68
@@ -735,7 +735,7 @@ static inline bool scsi_debug_lbp(void)
735 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10); 735 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
736} 736}
737 737
738static void *fake_store(unsigned long long lba) 738static void *lba2fake_store(unsigned long long lba)
739{ 739{
740 lba = do_div(lba, sdebug_store_sectors); 740 lba = do_div(lba, sdebug_store_sectors);
741 741
@@ -2514,8 +2514,8 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
2514 return ret; 2514 return ret;
2515} 2515}
2516 2516
2517/* If fake_store(lba,num) compares equal to arr(num), then copy top half of 2517/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
2518 * arr into fake_store(lba,num) and return true. If comparison fails then 2518 * arr into lba2fake_store(lba,num) and return true. If comparison fails then
2519 * return false. */ 2519 * return false. */
2520static bool comp_write_worker(u64 lba, u32 num, const u8 *arr) 2520static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2521{ 2521{
@@ -2643,7 +2643,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2643 if (sdt->app_tag == cpu_to_be16(0xffff)) 2643 if (sdt->app_tag == cpu_to_be16(0xffff))
2644 continue; 2644 continue;
2645 2645
2646 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba); 2646 ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
2647 if (ret) { 2647 if (ret) {
2648 dif_errors++; 2648 dif_errors++;
2649 return ret; 2649 return ret;
@@ -3261,10 +3261,12 @@ err_out:
3261static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, 3261static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3262 u32 ei_lba, bool unmap, bool ndob) 3262 u32 ei_lba, bool unmap, bool ndob)
3263{ 3263{
3264 int ret;
3264 unsigned long iflags; 3265 unsigned long iflags;
3265 unsigned long long i; 3266 unsigned long long i;
3266 int ret; 3267 u32 lb_size = sdebug_sector_size;
3267 u64 lba_off; 3268 u64 block, lbaa;
3269 u8 *fs1p;
3268 3270
3269 ret = check_device_access_params(scp, lba, num); 3271 ret = check_device_access_params(scp, lba, num);
3270 if (ret) 3272 if (ret)
@@ -3276,31 +3278,30 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3276 unmap_region(lba, num); 3278 unmap_region(lba, num);
3277 goto out; 3279 goto out;
3278 } 3280 }
3279 3281 lbaa = lba;
3280 lba_off = lba * sdebug_sector_size; 3282 block = do_div(lbaa, sdebug_store_sectors);
3281 /* if ndob then zero 1 logical block, else fetch 1 logical block */ 3283 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3284 fs1p = fake_storep + (block * lb_size);
3282 if (ndob) { 3285 if (ndob) {
3283 memset(fake_storep + lba_off, 0, sdebug_sector_size); 3286 memset(fs1p, 0, lb_size);
3284 ret = 0; 3287 ret = 0;
3285 } else 3288 } else
3286 ret = fetch_to_dev_buffer(scp, fake_storep + lba_off, 3289 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3287 sdebug_sector_size);
3288 3290
3289 if (-1 == ret) { 3291 if (-1 == ret) {
3290 write_unlock_irqrestore(&atomic_rw, iflags); 3292 write_unlock_irqrestore(&atomic_rw, iflags);
3291 return DID_ERROR << 16; 3293 return DID_ERROR << 16;
3292 } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size)) 3294 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3293 sdev_printk(KERN_INFO, scp->device, 3295 sdev_printk(KERN_INFO, scp->device,
3294 "%s: %s: lb size=%u, IO sent=%d bytes\n", 3296 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3295 my_name, "write same", 3297 my_name, "write same", lb_size, ret);
3296 sdebug_sector_size, ret);
3297 3298
3298 /* Copy first sector to remaining blocks */ 3299 /* Copy first sector to remaining blocks */
3299 for (i = 1 ; i < num ; i++) 3300 for (i = 1 ; i < num ; i++) {
3300 memcpy(fake_storep + ((lba + i) * sdebug_sector_size), 3301 lbaa = lba + i;
3301 fake_storep + lba_off, 3302 block = do_div(lbaa, sdebug_store_sectors);
3302 sdebug_sector_size); 3303 memmove(fake_storep + (block * lb_size), fs1p, lb_size);
3303 3304 }
3304 if (scsi_debug_lbp()) 3305 if (scsi_debug_lbp())
3305 map_region(lba, num); 3306 map_region(lba, num);
3306out: 3307out:
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 2848fa71a33d..d6248eecf123 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -170,7 +170,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
170 return -ENODEV; 170 return -ENODEV;
171 171
172 priv->last_link = 0; 172 priv->last_link = 0;
173 phy_start_aneg(phydev); 173 phy_start(phydev);
174 174
175 return 0; 175 return 0;
176no_phy: 176no_phy:
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index c92bbd05516e..005de0024dd4 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -265,7 +265,8 @@ static void spk_ttyio_send_xchar(char ch)
265 return; 265 return;
266 } 266 }
267 267
268 speakup_tty->ops->send_xchar(speakup_tty, ch); 268 if (speakup_tty->ops->send_xchar)
269 speakup_tty->ops->send_xchar(speakup_tty, ch);
269 mutex_unlock(&speakup_tty_mutex); 270 mutex_unlock(&speakup_tty_mutex);
270} 271}
271 272
@@ -277,7 +278,8 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear)
277 return; 278 return;
278 } 279 }
279 280
280 speakup_tty->ops->tiocmset(speakup_tty, set, clear); 281 if (speakup_tty->ops->tiocmset)
282 speakup_tty->ops->tiocmset(speakup_tty, set, clear);
281 mutex_unlock(&speakup_tty_mutex); 283 mutex_unlock(&speakup_tty_mutex);
282} 284}
283 285
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index e2c407656fa6..c1fdbc0b6840 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -357,6 +357,9 @@ static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p,
357 if (dmacnt == 2) { 357 if (dmacnt == 2) {
358 data->dma = devm_kzalloc(&pdev->dev, sizeof(*data->dma), 358 data->dma = devm_kzalloc(&pdev->dev, sizeof(*data->dma),
359 GFP_KERNEL); 359 GFP_KERNEL);
360 if (!data->dma)
361 return -ENOMEM;
362
360 data->dma->fn = mtk8250_dma_filter; 363 data->dma->fn = mtk8250_dma_filter;
361 data->dma->rx_size = MTK_UART_RX_SIZE; 364 data->dma->rx_size = MTK_UART_RX_SIZE;
362 data->dma->rxconf.src_maxburst = MTK_UART_RX_TRIGGER; 365 data->dma->rxconf.src_maxburst = MTK_UART_RX_TRIGGER;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index f80a300b5d68..48bd694a5fa1 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -3420,6 +3420,11 @@ static int
3420serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) 3420serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
3421{ 3421{
3422 int num_iomem, num_port, first_port = -1, i; 3422 int num_iomem, num_port, first_port = -1, i;
3423 int rc;
3424
3425 rc = serial_pci_is_class_communication(dev);
3426 if (rc)
3427 return rc;
3423 3428
3424 /* 3429 /*
3425 * Should we try to make guesses for multiport serial devices later? 3430 * Should we try to make guesses for multiport serial devices later?
@@ -3647,10 +3652,6 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
3647 3652
3648 board = &pci_boards[ent->driver_data]; 3653 board = &pci_boards[ent->driver_data];
3649 3654
3650 rc = serial_pci_is_class_communication(dev);
3651 if (rc)
3652 return rc;
3653
3654 rc = serial_pci_is_blacklisted(dev); 3655 rc = serial_pci_is_blacklisted(dev);
3655 if (rc) 3656 if (rc)
3656 return rc; 3657 return rc;
diff --git a/drivers/tty/serial/earlycon-riscv-sbi.c b/drivers/tty/serial/earlycon-riscv-sbi.c
index e1a551aae336..ce81523c3113 100644
--- a/drivers/tty/serial/earlycon-riscv-sbi.c
+++ b/drivers/tty/serial/earlycon-riscv-sbi.c
@@ -10,13 +10,16 @@
10#include <linux/serial_core.h> 10#include <linux/serial_core.h>
11#include <asm/sbi.h> 11#include <asm/sbi.h>
12 12
13static void sbi_console_write(struct console *con, 13static void sbi_putc(struct uart_port *port, int c)
14 const char *s, unsigned int n)
15{ 14{
16 int i; 15 sbi_console_putchar(c);
16}
17 17
18 for (i = 0; i < n; ++i) 18static void sbi_console_write(struct console *con,
19 sbi_console_putchar(s[i]); 19 const char *s, unsigned n)
20{
21 struct earlycon_device *dev = con->data;
22 uart_console_write(&dev->port, s, n, sbi_putc);
20} 23}
21 24
22static int __init early_sbi_setup(struct earlycon_device *device, 25static int __init early_sbi_setup(struct earlycon_device *device,
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 5c01bb6d1c24..556f50aa1b58 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -130,6 +130,9 @@ static void uart_start(struct tty_struct *tty)
130 struct uart_port *port; 130 struct uart_port *port;
131 unsigned long flags; 131 unsigned long flags;
132 132
133 if (!state)
134 return;
135
133 port = uart_port_lock(state, flags); 136 port = uart_port_lock(state, flags);
134 __uart_start(tty); 137 __uart_start(tty);
135 uart_port_unlock(port, flags); 138 uart_port_unlock(port, flags);
@@ -727,6 +730,9 @@ static void uart_unthrottle(struct tty_struct *tty)
727 upstat_t mask = UPSTAT_SYNC_FIFO; 730 upstat_t mask = UPSTAT_SYNC_FIFO;
728 struct uart_port *port; 731 struct uart_port *port;
729 732
733 if (!state)
734 return;
735
730 port = uart_port_ref(state); 736 port = uart_port_ref(state);
731 if (!port) 737 if (!port)
732 return; 738 return;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 8df0fd824520..64bbeb7d7e0c 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1921,7 +1921,7 @@ out_nomem:
1921 1921
1922static void sci_free_irq(struct sci_port *port) 1922static void sci_free_irq(struct sci_port *port)
1923{ 1923{
1924 int i; 1924 int i, j;
1925 1925
1926 /* 1926 /*
1927 * Intentionally in reverse order so we iterate over the muxed 1927 * Intentionally in reverse order so we iterate over the muxed
@@ -1937,6 +1937,13 @@ static void sci_free_irq(struct sci_port *port)
1937 if (unlikely(irq < 0)) 1937 if (unlikely(irq < 0))
1938 continue; 1938 continue;
1939 1939
1940 /* Check if already freed (irq was muxed) */
1941 for (j = 0; j < i; j++)
1942 if (port->irqs[j] == irq)
1943 j = i + 1;
1944 if (j > i)
1945 continue;
1946
1940 free_irq(port->irqs[i], port); 1947 free_irq(port->irqs[i], port);
1941 kfree(port->irqstr[i]); 1948 kfree(port->irqstr[i]);
1942 1949
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index cb7fcd7c0ad8..c1e9ea621f41 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -78,7 +78,7 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
78 for (i = 0; i < exynos->num_clks; i++) { 78 for (i = 0; i < exynos->num_clks; i++) {
79 ret = clk_prepare_enable(exynos->clks[i]); 79 ret = clk_prepare_enable(exynos->clks[i]);
80 if (ret) { 80 if (ret) {
81 while (--i > 0) 81 while (i-- > 0)
82 clk_disable_unprepare(exynos->clks[i]); 82 clk_disable_unprepare(exynos->clks[i]);
83 return ret; 83 return ret;
84 } 84 }
@@ -223,7 +223,7 @@ static int dwc3_exynos_resume(struct device *dev)
223 for (i = 0; i < exynos->num_clks; i++) { 223 for (i = 0; i < exynos->num_clks; i++) {
224 ret = clk_prepare_enable(exynos->clks[i]); 224 ret = clk_prepare_enable(exynos->clks[i]);
225 if (ret) { 225 if (ret) {
226 while (--i > 0) 226 while (i-- > 0)
227 clk_disable_unprepare(exynos->clks[i]); 227 clk_disable_unprepare(exynos->clks[i]);
228 return ret; 228 return ret;
229 } 229 }
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index bed2ff42780b..6c9b76bcc2e1 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1119,7 +1119,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
1119 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); 1119 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
1120 unsigned int rem = length % maxp; 1120 unsigned int rem = length % maxp;
1121 1121
1122 if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) { 1122 if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) {
1123 struct dwc3 *dwc = dep->dwc; 1123 struct dwc3 *dwc = dep->dwc;
1124 struct dwc3_trb *trb; 1124 struct dwc3_trb *trb;
1125 1125
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 660878a19505..b77f3126580e 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2083,7 +2083,7 @@ static irqreturn_t net2272_irq(int irq, void *_dev)
2083#if defined(PLX_PCI_RDK2) 2083#if defined(PLX_PCI_RDK2)
2084 /* see if PCI int for us by checking irqstat */ 2084 /* see if PCI int for us by checking irqstat */
2085 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT); 2085 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2086 if (!intcsr & (1 << NET2272_PCI_IRQ)) { 2086 if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
2087 spin_unlock(&dev->lock); 2087 spin_unlock(&dev->lock);
2088 return IRQ_NONE; 2088 return IRQ_NONE;
2089 } 2089 }
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index eae8b1b1b45b..ffe462a657b1 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -452,13 +452,10 @@ void musb_g_tx(struct musb *musb, u8 epnum)
452 } 452 }
453 453
454 if (request) { 454 if (request) {
455 u8 is_dma = 0;
456 bool short_packet = false;
457 455
458 trace_musb_req_tx(req); 456 trace_musb_req_tx(req);
459 457
460 if (dma && (csr & MUSB_TXCSR_DMAENAB)) { 458 if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
461 is_dma = 1;
462 csr |= MUSB_TXCSR_P_WZC_BITS; 459 csr |= MUSB_TXCSR_P_WZC_BITS;
463 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | 460 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
464 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); 461 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
@@ -476,16 +473,8 @@ void musb_g_tx(struct musb *musb, u8 epnum)
476 */ 473 */
477 if ((request->zero && request->length) 474 if ((request->zero && request->length)
478 && (request->length % musb_ep->packet_sz == 0) 475 && (request->length % musb_ep->packet_sz == 0)
479 && (request->actual == request->length)) 476 && (request->actual == request->length)) {
480 short_packet = true;
481 477
482 if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) &&
483 (is_dma && (!dma->desired_mode ||
484 (request->actual &
485 (musb_ep->packet_sz - 1)))))
486 short_packet = true;
487
488 if (short_packet) {
489 /* 478 /*
490 * On DMA completion, FIFO may not be 479 * On DMA completion, FIFO may not be
491 * available yet... 480 * available yet...
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index a688f7f87829..5fc6825745f2 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -346,12 +346,10 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
346 channel->status = MUSB_DMA_STATUS_FREE; 346 channel->status = MUSB_DMA_STATUS_FREE;
347 347
348 /* completed */ 348 /* completed */
349 if ((devctl & MUSB_DEVCTL_HM) 349 if (musb_channel->transmit &&
350 && (musb_channel->transmit) 350 (!channel->desired_mode ||
351 && ((channel->desired_mode == 0) 351 (channel->actual_len %
352 || (channel->actual_len & 352 musb_channel->max_packet_sz))) {
353 (musb_channel->max_packet_sz - 1)))
354 ) {
355 u8 epnum = musb_channel->epnum; 353 u8 epnum = musb_channel->epnum;
356 int offset = musb->io.ep_offset(epnum, 354 int offset = musb->io.ep_offset(epnum,
357 MUSB_TXCSR); 355 MUSB_TXCSR);
@@ -363,11 +361,14 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
363 */ 361 */
364 musb_ep_select(mbase, epnum); 362 musb_ep_select(mbase, epnum);
365 txcsr = musb_readw(mbase, offset); 363 txcsr = musb_readw(mbase, offset);
366 txcsr &= ~(MUSB_TXCSR_DMAENAB 364 if (channel->desired_mode == 1) {
365 txcsr &= ~(MUSB_TXCSR_DMAENAB
367 | MUSB_TXCSR_AUTOSET); 366 | MUSB_TXCSR_AUTOSET);
368 musb_writew(mbase, offset, txcsr); 367 musb_writew(mbase, offset, txcsr);
369 /* Send out the packet */ 368 /* Send out the packet */
370 txcsr &= ~MUSB_TXCSR_DMAMODE; 369 txcsr &= ~MUSB_TXCSR_DMAMODE;
370 txcsr |= MUSB_TXCSR_DMAENAB;
371 }
371 txcsr |= MUSB_TXCSR_TXPKTRDY; 372 txcsr |= MUSB_TXCSR_TXPKTRDY;
372 musb_writew(mbase, offset, txcsr); 373 musb_writew(mbase, offset, txcsr);
373 } 374 }
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index d7312eed6088..91ea3083e7ad 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -21,7 +21,7 @@ config AB8500_USB
21 21
22config FSL_USB2_OTG 22config FSL_USB2_OTG
23 bool "Freescale USB OTG Transceiver Driver" 23 bool "Freescale USB OTG Transceiver Driver"
24 depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM 24 depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM
25 depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y' 25 depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
26 select USB_PHY 26 select USB_PHY
27 help 27 help
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index 27bdb7222527..f5f0568d8533 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -61,9 +61,6 @@ static int am335x_phy_probe(struct platform_device *pdev)
61 if (ret) 61 if (ret)
62 return ret; 62 return ret;
63 63
64 ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
65 if (ret)
66 return ret;
67 am_phy->usb_phy_gen.phy.init = am335x_init; 64 am_phy->usb_phy_gen.phy.init = am335x_init;
68 am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown; 65 am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown;
69 66
@@ -82,7 +79,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
82 device_set_wakeup_enable(dev, false); 79 device_set_wakeup_enable(dev, false);
83 phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false); 80 phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false);
84 81
85 return 0; 82 return usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
86} 83}
87 84
88static int am335x_phy_remove(struct platform_device *pdev) 85static int am335x_phy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 4bc29b586698..f1c39a3c7534 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -2297,7 +2297,8 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
2297 pdo_pps_apdo_max_voltage(snk)); 2297 pdo_pps_apdo_max_voltage(snk));
2298 port->pps_data.max_curr = min_pps_apdo_current(src, snk); 2298 port->pps_data.max_curr = min_pps_apdo_current(src, snk);
2299 port->pps_data.out_volt = min(port->pps_data.max_volt, 2299 port->pps_data.out_volt = min(port->pps_data.max_volt,
2300 port->pps_data.out_volt); 2300 max(port->pps_data.min_volt,
2301 port->pps_data.out_volt));
2301 port->pps_data.op_curr = min(port->pps_data.max_curr, 2302 port->pps_data.op_curr = min(port->pps_data.max_curr,
2302 port->pps_data.op_curr); 2303 port->pps_data.op_curr);
2303 } 2304 }
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index cd7e755484e3..a0b07c331255 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -152,7 +152,12 @@ struct vring_virtqueue {
152 /* Available for packed ring */ 152 /* Available for packed ring */
153 struct { 153 struct {
154 /* Actual memory layout for this queue. */ 154 /* Actual memory layout for this queue. */
155 struct vring_packed vring; 155 struct {
156 unsigned int num;
157 struct vring_packed_desc *desc;
158 struct vring_packed_desc_event *driver;
159 struct vring_packed_desc_event *device;
160 } vring;
156 161
157 /* Driver ring wrap counter. */ 162 /* Driver ring wrap counter. */
158 bool avail_wrap_counter; 163 bool avail_wrap_counter;
@@ -1609,6 +1614,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
1609 !context; 1614 !context;
1610 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); 1615 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1611 1616
1617 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
1618 vq->weak_barriers = false;
1619
1612 vq->packed.ring_dma_addr = ring_dma_addr; 1620 vq->packed.ring_dma_addr = ring_dma_addr;
1613 vq->packed.driver_event_dma_addr = driver_event_dma_addr; 1621 vq->packed.driver_event_dma_addr = driver_event_dma_addr;
1614 vq->packed.device_event_dma_addr = device_event_dma_addr; 1622 vq->packed.device_event_dma_addr = device_event_dma_addr;
@@ -2079,6 +2087,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
2079 !context; 2087 !context;
2080 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); 2088 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
2081 2089
2090 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
2091 vq->weak_barriers = false;
2092
2082 vq->split.queue_dma_addr = 0; 2093 vq->split.queue_dma_addr = 0;
2083 vq->split.queue_size_in_bytes = 0; 2094 vq->split.queue_size_in_bytes = 0;
2084 2095
@@ -2213,6 +2224,8 @@ void vring_transport_features(struct virtio_device *vdev)
2213 break; 2224 break;
2214 case VIRTIO_F_RING_PACKED: 2225 case VIRTIO_F_RING_PACKED:
2215 break; 2226 break;
2227 case VIRTIO_F_ORDER_PLATFORM:
2228 break;
2216 default: 2229 default:
2217 /* We don't understand this bit. */ 2230 /* We don't understand this bit. */
2218 __virtio_clear_bit(vdev, i); 2231 __virtio_clear_bit(vdev, i);
diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
index d441244b79df..28d9c2b1b3bb 100644
--- a/fs/autofs/expire.c
+++ b/fs/autofs/expire.c
@@ -596,7 +596,6 @@ int autofs_expire_run(struct super_block *sb,
596 pkt.len = dentry->d_name.len; 596 pkt.len = dentry->d_name.len;
597 memcpy(pkt.name, dentry->d_name.name, pkt.len); 597 memcpy(pkt.name, dentry->d_name.name, pkt.len);
598 pkt.name[pkt.len] = '\0'; 598 pkt.name[pkt.len] = '\0';
599 dput(dentry);
600 599
601 if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire))) 600 if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)))
602 ret = -EFAULT; 601 ret = -EFAULT;
@@ -609,6 +608,8 @@ int autofs_expire_run(struct super_block *sb,
609 complete_all(&ino->expire_complete); 608 complete_all(&ino->expire_complete);
610 spin_unlock(&sbi->fs_lock); 609 spin_unlock(&sbi->fs_lock);
611 610
611 dput(dentry);
612
612 return ret; 613 return ret;
613} 614}
614 615
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 0e8ea2d9a2bb..078992eee299 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -266,8 +266,10 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
266 } 266 }
267 root_inode = autofs_get_inode(s, S_IFDIR | 0755); 267 root_inode = autofs_get_inode(s, S_IFDIR | 0755);
268 root = d_make_root(root_inode); 268 root = d_make_root(root_inode);
269 if (!root) 269 if (!root) {
270 ret = -ENOMEM;
270 goto fail_ino; 271 goto fail_ino;
272 }
271 pipe = NULL; 273 pipe = NULL;
272 274
273 root->d_fsdata = ino; 275 root->d_fsdata = ino;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index f64aad613727..5a6c39b44c84 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -968,6 +968,48 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
968 return 0; 968 return 0;
969} 969}
970 970
971static struct extent_buffer *alloc_tree_block_no_bg_flush(
972 struct btrfs_trans_handle *trans,
973 struct btrfs_root *root,
974 u64 parent_start,
975 const struct btrfs_disk_key *disk_key,
976 int level,
977 u64 hint,
978 u64 empty_size)
979{
980 struct btrfs_fs_info *fs_info = root->fs_info;
981 struct extent_buffer *ret;
982
983 /*
984 * If we are COWing a node/leaf from the extent, chunk, device or free
985 * space trees, make sure that we do not finish block group creation of
986 * pending block groups. We do this to avoid a deadlock.
987 * COWing can result in allocation of a new chunk, and flushing pending
988 * block groups (btrfs_create_pending_block_groups()) can be triggered
989 * when finishing allocation of a new chunk. Creation of a pending block
990 * group modifies the extent, chunk, device and free space trees,
991 * therefore we could deadlock with ourselves since we are holding a
992 * lock on an extent buffer that btrfs_create_pending_block_groups() may
993 * try to COW later.
994 * For similar reasons, we also need to delay flushing pending block
995 * groups when splitting a leaf or node, from one of those trees, since
996 * we are holding a write lock on it and its parent or when inserting a
997 * new root node for one of those trees.
998 */
999 if (root == fs_info->extent_root ||
1000 root == fs_info->chunk_root ||
1001 root == fs_info->dev_root ||
1002 root == fs_info->free_space_root)
1003 trans->can_flush_pending_bgs = false;
1004
1005 ret = btrfs_alloc_tree_block(trans, root, parent_start,
1006 root->root_key.objectid, disk_key, level,
1007 hint, empty_size);
1008 trans->can_flush_pending_bgs = true;
1009
1010 return ret;
1011}
1012
971/* 1013/*
972 * does the dirty work in cow of a single block. The parent block (if 1014 * does the dirty work in cow of a single block. The parent block (if
973 * supplied) is updated to point to the new cow copy. The new buffer is marked 1015 * supplied) is updated to point to the new cow copy. The new buffer is marked
@@ -1015,28 +1057,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1015 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent) 1057 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
1016 parent_start = parent->start; 1058 parent_start = parent->start;
1017 1059
1018 /* 1060 cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
1019 * If we are COWing a node/leaf from the extent, chunk, device or free 1061 level, search_start, empty_size);
1020 * space trees, make sure that we do not finish block group creation of
1021 * pending block groups. We do this to avoid a deadlock.
1022 * COWing can result in allocation of a new chunk, and flushing pending
1023 * block groups (btrfs_create_pending_block_groups()) can be triggered
1024 * when finishing allocation of a new chunk. Creation of a pending block
1025 * group modifies the extent, chunk, device and free space trees,
1026 * therefore we could deadlock with ourselves since we are holding a
1027 * lock on an extent buffer that btrfs_create_pending_block_groups() may
1028 * try to COW later.
1029 */
1030 if (root == fs_info->extent_root ||
1031 root == fs_info->chunk_root ||
1032 root == fs_info->dev_root ||
1033 root == fs_info->free_space_root)
1034 trans->can_flush_pending_bgs = false;
1035
1036 cow = btrfs_alloc_tree_block(trans, root, parent_start,
1037 root->root_key.objectid, &disk_key, level,
1038 search_start, empty_size);
1039 trans->can_flush_pending_bgs = true;
1040 if (IS_ERR(cow)) 1062 if (IS_ERR(cow))
1041 return PTR_ERR(cow); 1063 return PTR_ERR(cow);
1042 1064
@@ -3345,8 +3367,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3345 else 3367 else
3346 btrfs_node_key(lower, &lower_key, 0); 3368 btrfs_node_key(lower, &lower_key, 0);
3347 3369
3348 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3370 c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
3349 &lower_key, level, root->node->start, 0); 3371 root->node->start, 0);
3350 if (IS_ERR(c)) 3372 if (IS_ERR(c))
3351 return PTR_ERR(c); 3373 return PTR_ERR(c);
3352 3374
@@ -3475,8 +3497,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
3475 mid = (c_nritems + 1) / 2; 3497 mid = (c_nritems + 1) / 2;
3476 btrfs_node_key(c, &disk_key, mid); 3498 btrfs_node_key(c, &disk_key, mid);
3477 3499
3478 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 3500 split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
3479 &disk_key, level, c->start, 0); 3501 c->start, 0);
3480 if (IS_ERR(split)) 3502 if (IS_ERR(split))
3481 return PTR_ERR(split); 3503 return PTR_ERR(split);
3482 3504
@@ -4260,8 +4282,8 @@ again:
4260 else 4282 else
4261 btrfs_item_key(l, &disk_key, mid); 4283 btrfs_item_key(l, &disk_key, mid);
4262 4284
4263 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, 4285 right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
4264 &disk_key, 0, l->start, 0); 4286 l->start, 0);
4265 if (IS_ERR(right)) 4287 if (IS_ERR(right))
4266 return PTR_ERR(right); 4288 return PTR_ERR(right);
4267 4289
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index c5586ffd1426..0a3f122dd61f 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1621,6 +1621,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
1621 flags | SB_RDONLY, device_name, data); 1621 flags | SB_RDONLY, device_name, data);
1622 if (IS_ERR(mnt_root)) { 1622 if (IS_ERR(mnt_root)) {
1623 root = ERR_CAST(mnt_root); 1623 root = ERR_CAST(mnt_root);
1624 kfree(subvol_name);
1624 goto out; 1625 goto out;
1625 } 1626 }
1626 1627
@@ -1630,12 +1631,14 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
1630 if (error < 0) { 1631 if (error < 0) {
1631 root = ERR_PTR(error); 1632 root = ERR_PTR(error);
1632 mntput(mnt_root); 1633 mntput(mnt_root);
1634 kfree(subvol_name);
1633 goto out; 1635 goto out;
1634 } 1636 }
1635 } 1637 }
1636 } 1638 }
1637 if (IS_ERR(mnt_root)) { 1639 if (IS_ERR(mnt_root)) {
1638 root = ERR_CAST(mnt_root); 1640 root = ERR_CAST(mnt_root);
1641 kfree(subvol_name);
1639 goto out; 1642 goto out;
1640 } 1643 }
1641 1644
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 127fa1535f58..4ec2b660d014 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -850,14 +850,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
850 850
851 btrfs_trans_release_chunk_metadata(trans); 851 btrfs_trans_release_chunk_metadata(trans);
852 852
853 if (lock && should_end_transaction(trans) &&
854 READ_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
855 spin_lock(&info->trans_lock);
856 if (cur_trans->state == TRANS_STATE_RUNNING)
857 cur_trans->state = TRANS_STATE_BLOCKED;
858 spin_unlock(&info->trans_lock);
859 }
860
861 if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) { 853 if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
862 if (throttle) 854 if (throttle)
863 return btrfs_commit_transaction(trans); 855 return btrfs_commit_transaction(trans);
@@ -1879,6 +1871,21 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
1879 kmem_cache_free(btrfs_trans_handle_cachep, trans); 1871 kmem_cache_free(btrfs_trans_handle_cachep, trans);
1880} 1872}
1881 1873
1874/*
1875 * Release reserved delayed ref space of all pending block groups of the
1876 * transaction and remove them from the list
1877 */
1878static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
1879{
1880 struct btrfs_fs_info *fs_info = trans->fs_info;
1881 struct btrfs_block_group_cache *block_group, *tmp;
1882
1883 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
1884 btrfs_delayed_refs_rsv_release(fs_info, 1);
1885 list_del_init(&block_group->bg_list);
1886 }
1887}
1888
1882static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) 1889static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
1883{ 1890{
1884 /* 1891 /*
@@ -2270,6 +2277,7 @@ scrub_continue:
2270 btrfs_scrub_continue(fs_info); 2277 btrfs_scrub_continue(fs_info);
2271cleanup_transaction: 2278cleanup_transaction:
2272 btrfs_trans_release_metadata(trans); 2279 btrfs_trans_release_metadata(trans);
2280 btrfs_cleanup_pending_block_groups(trans);
2273 btrfs_trans_release_chunk_metadata(trans); 2281 btrfs_trans_release_chunk_metadata(trans);
2274 trans->block_rsv = NULL; 2282 trans->block_rsv = NULL;
2275 btrfs_warn(fs_info, "Skipping commit of aborted transaction."); 2283 btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 3e4f8f88353e..15561926ab32 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -957,11 +957,11 @@ static noinline struct btrfs_device *device_list_add(const char *path,
957 else 957 else
958 fs_devices = alloc_fs_devices(disk_super->fsid, NULL); 958 fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
959 959
960 fs_devices->fsid_change = fsid_change_in_progress;
961
962 if (IS_ERR(fs_devices)) 960 if (IS_ERR(fs_devices))
963 return ERR_CAST(fs_devices); 961 return ERR_CAST(fs_devices);
964 962
963 fs_devices->fsid_change = fsid_change_in_progress;
964
965 mutex_lock(&fs_devices->device_list_mutex); 965 mutex_lock(&fs_devices->device_list_mutex);
966 list_add(&fs_devices->fs_list, &fs_uuids); 966 list_add(&fs_devices->fs_list, &fs_uuids);
967 967
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index d1f9c2f3f575..7652551a1fc4 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -150,5 +150,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
150extern const struct export_operations cifs_export_ops; 150extern const struct export_operations cifs_export_ops;
151#endif /* CONFIG_CIFS_NFSD_EXPORT */ 151#endif /* CONFIG_CIFS_NFSD_EXPORT */
152 152
153#define CIFS_VERSION "2.16" 153#define CIFS_VERSION "2.17"
154#endif /* _CIFSFS_H */ 154#endif /* _CIFSFS_H */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 2c7689f3998d..659ce1b92c44 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2696,6 +2696,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2696 2696
2697 rc = cifs_write_allocate_pages(wdata->pages, nr_pages); 2697 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2698 if (rc) { 2698 if (rc) {
2699 kvfree(wdata->pages);
2699 kfree(wdata); 2700 kfree(wdata);
2700 add_credits_and_wake_if(server, credits, 0); 2701 add_credits_and_wake_if(server, credits, 0);
2701 break; 2702 break;
@@ -2707,6 +2708,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2707 if (rc) { 2708 if (rc) {
2708 for (i = 0; i < nr_pages; i++) 2709 for (i = 0; i < nr_pages; i++)
2709 put_page(wdata->pages[i]); 2710 put_page(wdata->pages[i]);
2711 kvfree(wdata->pages);
2710 kfree(wdata); 2712 kfree(wdata);
2711 add_credits_and_wake_if(server, credits, 0); 2713 add_credits_and_wake_if(server, credits, 0);
2712 break; 2714 break;
@@ -3386,8 +3388,12 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3386 } 3388 }
3387 3389
3388 rc = cifs_read_allocate_pages(rdata, npages); 3390 rc = cifs_read_allocate_pages(rdata, npages);
3389 if (rc) 3391 if (rc) {
3390 goto error; 3392 kvfree(rdata->pages);
3393 kfree(rdata);
3394 add_credits_and_wake_if(server, credits, 0);
3395 break;
3396 }
3391 3397
3392 rdata->tailsz = PAGE_SIZE; 3398 rdata->tailsz = PAGE_SIZE;
3393 } 3399 }
@@ -3407,7 +3413,6 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3407 if (!rdata->cfile->invalidHandle || 3413 if (!rdata->cfile->invalidHandle ||
3408 !(rc = cifs_reopen_file(rdata->cfile, true))) 3414 !(rc = cifs_reopen_file(rdata->cfile, true)))
3409 rc = server->ops->async_readv(rdata); 3415 rc = server->ops->async_readv(rdata);
3410error:
3411 if (rc) { 3416 if (rc) {
3412 add_credits_and_wake_if(server, rdata->credits, 0); 3417 add_credits_and_wake_if(server, rdata->credits, 0);
3413 kref_put(&rdata->refcount, 3418 kref_put(&rdata->refcount,
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 153238fc4fa9..6f96e2292856 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -866,7 +866,9 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
866 FILE_READ_EA, 866 FILE_READ_EA,
867 FILE_FULL_EA_INFORMATION, 867 FILE_FULL_EA_INFORMATION,
868 SMB2_O_INFO_FILE, 868 SMB2_O_INFO_FILE,
869 SMB2_MAX_EA_BUF, 869 CIFSMaxBufSize -
870 MAX_SMB2_CREATE_RESPONSE_SIZE -
871 MAX_SMB2_CLOSE_RESPONSE_SIZE,
870 &rsp_iov, &buftype, cifs_sb); 872 &rsp_iov, &buftype, cifs_sb);
871 if (rc) { 873 if (rc) {
872 /* 874 /*
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 2ff209ec4fab..77b3aaa39b35 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -3241,8 +3241,17 @@ smb2_readv_callback(struct mid_q_entry *mid)
3241 rdata->mr = NULL; 3241 rdata->mr = NULL;
3242 } 3242 }
3243#endif 3243#endif
3244 if (rdata->result) 3244 if (rdata->result && rdata->result != -ENODATA) {
3245 cifs_stats_fail_inc(tcon, SMB2_READ_HE); 3245 cifs_stats_fail_inc(tcon, SMB2_READ_HE);
3246 trace_smb3_read_err(0 /* xid */,
3247 rdata->cfile->fid.persistent_fid,
3248 tcon->tid, tcon->ses->Suid, rdata->offset,
3249 rdata->bytes, rdata->result);
3250 } else
3251 trace_smb3_read_done(0 /* xid */,
3252 rdata->cfile->fid.persistent_fid,
3253 tcon->tid, tcon->ses->Suid,
3254 rdata->offset, rdata->got_bytes);
3246 3255
3247 queue_work(cifsiod_wq, &rdata->work); 3256 queue_work(cifsiod_wq, &rdata->work);
3248 DeleteMidQEntry(mid); 3257 DeleteMidQEntry(mid);
@@ -3317,13 +3326,11 @@ smb2_async_readv(struct cifs_readdata *rdata)
3317 if (rc) { 3326 if (rc) {
3318 kref_put(&rdata->refcount, cifs_readdata_release); 3327 kref_put(&rdata->refcount, cifs_readdata_release);
3319 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); 3328 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
3320 trace_smb3_read_err(rc, 0 /* xid */, io_parms.persistent_fid, 3329 trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
3321 io_parms.tcon->tid, io_parms.tcon->ses->Suid, 3330 io_parms.tcon->tid,
3322 io_parms.offset, io_parms.length); 3331 io_parms.tcon->ses->Suid,
3323 } else 3332 io_parms.offset, io_parms.length, rc);
3324 trace_smb3_read_done(0 /* xid */, io_parms.persistent_fid, 3333 }
3325 io_parms.tcon->tid, io_parms.tcon->ses->Suid,
3326 io_parms.offset, io_parms.length);
3327 3334
3328 cifs_small_buf_release(buf); 3335 cifs_small_buf_release(buf);
3329 return rc; 3336 return rc;
@@ -3367,10 +3374,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
3367 if (rc != -ENODATA) { 3374 if (rc != -ENODATA) {
3368 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); 3375 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
3369 cifs_dbg(VFS, "Send error in read = %d\n", rc); 3376 cifs_dbg(VFS, "Send error in read = %d\n", rc);
3377 trace_smb3_read_err(xid, req->PersistentFileId,
3378 io_parms->tcon->tid, ses->Suid,
3379 io_parms->offset, io_parms->length,
3380 rc);
3370 } 3381 }
3371 trace_smb3_read_err(rc, xid, req->PersistentFileId,
3372 io_parms->tcon->tid, ses->Suid,
3373 io_parms->offset, io_parms->length);
3374 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 3382 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
3375 return rc == -ENODATA ? 0 : rc; 3383 return rc == -ENODATA ? 0 : rc;
3376 } else 3384 } else
@@ -3459,8 +3467,17 @@ smb2_writev_callback(struct mid_q_entry *mid)
3459 wdata->mr = NULL; 3467 wdata->mr = NULL;
3460 } 3468 }
3461#endif 3469#endif
3462 if (wdata->result) 3470 if (wdata->result) {
3463 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 3471 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
3472 trace_smb3_write_err(0 /* no xid */,
3473 wdata->cfile->fid.persistent_fid,
3474 tcon->tid, tcon->ses->Suid, wdata->offset,
3475 wdata->bytes, wdata->result);
3476 } else
3477 trace_smb3_write_done(0 /* no xid */,
3478 wdata->cfile->fid.persistent_fid,
3479 tcon->tid, tcon->ses->Suid,
3480 wdata->offset, wdata->bytes);
3464 3481
3465 queue_work(cifsiod_wq, &wdata->work); 3482 queue_work(cifsiod_wq, &wdata->work);
3466 DeleteMidQEntry(mid); 3483 DeleteMidQEntry(mid);
@@ -3602,10 +3619,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
3602 wdata->bytes, rc); 3619 wdata->bytes, rc);
3603 kref_put(&wdata->refcount, release); 3620 kref_put(&wdata->refcount, release);
3604 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); 3621 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
3605 } else 3622 }
3606 trace_smb3_write_done(0 /* no xid */, req->PersistentFileId,
3607 tcon->tid, tcon->ses->Suid, wdata->offset,
3608 wdata->bytes);
3609 3623
3610async_writev_out: 3624async_writev_out:
3611 cifs_small_buf_release(req); 3625 cifs_small_buf_release(req);
@@ -3831,8 +3845,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
3831 rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) { 3845 rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
3832 srch_inf->endOfSearch = true; 3846 srch_inf->endOfSearch = true;
3833 rc = 0; 3847 rc = 0;
3834 } 3848 } else
3835 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); 3849 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
3836 goto qdir_exit; 3850 goto qdir_exit;
3837 } 3851 }
3838 3852
@@ -4427,8 +4441,8 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
4427 rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov); 4441 rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
4428 cifs_small_buf_release(req); 4442 cifs_small_buf_release(req);
4429 4443
4430 please_key_low = (__u64 *)req->LeaseKey; 4444 please_key_low = (__u64 *)lease_key;
4431 please_key_high = (__u64 *)(req->LeaseKey+8); 4445 please_key_high = (__u64 *)(lease_key+8);
4432 if (rc) { 4446 if (rc) {
4433 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); 4447 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
4434 trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid, 4448 trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 7a2d0a2255e6..538e2299805f 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -84,8 +84,9 @@
84 84
85#define NUMBER_OF_SMB2_COMMANDS 0x0013 85#define NUMBER_OF_SMB2_COMMANDS 0x0013
86 86
87/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */ 87/* 52 transform hdr + 64 hdr + 88 create rsp */
88#define MAX_SMB2_HDR_SIZE 0x00b0 88#define SMB2_TRANSFORM_HEADER_SIZE 52
89#define MAX_SMB2_HDR_SIZE 204
89 90
90#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) 91#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
91#define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd) 92#define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
@@ -648,6 +649,13 @@ struct smb2_create_req {
648 __u8 Buffer[0]; 649 __u8 Buffer[0];
649} __packed; 650} __packed;
650 651
652/*
653 * Maximum size of a SMB2_CREATE response is 64 (smb2 header) +
654 * 88 (fixed part of create response) + 520 (path) + 150 (contexts) +
655 * 2 bytes of padding.
656 */
657#define MAX_SMB2_CREATE_RESPONSE_SIZE 824
658
651struct smb2_create_rsp { 659struct smb2_create_rsp {
652 struct smb2_sync_hdr sync_hdr; 660 struct smb2_sync_hdr sync_hdr;
653 __le16 StructureSize; /* Must be 89 */ 661 __le16 StructureSize; /* Must be 89 */
@@ -996,6 +1004,11 @@ struct smb2_close_req {
996 __u64 VolatileFileId; /* opaque endianness */ 1004 __u64 VolatileFileId; /* opaque endianness */
997} __packed; 1005} __packed;
998 1006
1007/*
1008 * Maximum size of a SMB2_CLOSE response is 64 (smb2 header) + 60 (data)
1009 */
1010#define MAX_SMB2_CLOSE_RESPONSE_SIZE 124
1011
999struct smb2_close_rsp { 1012struct smb2_close_rsp {
1000 struct smb2_sync_hdr sync_hdr; 1013 struct smb2_sync_hdr sync_hdr;
1001 __le16 StructureSize; /* 60 */ 1014 __le16 StructureSize; /* 60 */
@@ -1398,8 +1411,6 @@ struct smb2_file_link_info { /* encoding of request for level 11 */
1398 char FileName[0]; /* Name to be assigned to new link */ 1411 char FileName[0]; /* Name to be assigned to new link */
1399} __packed; /* level 11 Set */ 1412} __packed; /* level 11 Set */
1400 1413
1401#define SMB2_MAX_EA_BUF 65536
1402
1403struct smb2_file_full_ea_info { /* encoding of response for level 15 */ 1414struct smb2_file_full_ea_info { /* encoding of response for level 15 */
1404 __le32 next_entry_offset; 1415 __le32 next_entry_offset;
1405 __u8 flags; 1416 __u8 flags;
diff --git a/fs/dcache.c b/fs/dcache.c
index 2593153471cf..aac41adf4743 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -119,6 +119,7 @@ struct dentry_stat_t dentry_stat = {
119 119
120static DEFINE_PER_CPU(long, nr_dentry); 120static DEFINE_PER_CPU(long, nr_dentry);
121static DEFINE_PER_CPU(long, nr_dentry_unused); 121static DEFINE_PER_CPU(long, nr_dentry_unused);
122static DEFINE_PER_CPU(long, nr_dentry_negative);
122 123
123#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) 124#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
124 125
@@ -152,11 +153,22 @@ static long get_nr_dentry_unused(void)
152 return sum < 0 ? 0 : sum; 153 return sum < 0 ? 0 : sum;
153} 154}
154 155
156static long get_nr_dentry_negative(void)
157{
158 int i;
159 long sum = 0;
160
161 for_each_possible_cpu(i)
162 sum += per_cpu(nr_dentry_negative, i);
163 return sum < 0 ? 0 : sum;
164}
165
155int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer, 166int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
156 size_t *lenp, loff_t *ppos) 167 size_t *lenp, loff_t *ppos)
157{ 168{
158 dentry_stat.nr_dentry = get_nr_dentry(); 169 dentry_stat.nr_dentry = get_nr_dentry();
159 dentry_stat.nr_unused = get_nr_dentry_unused(); 170 dentry_stat.nr_unused = get_nr_dentry_unused();
171 dentry_stat.nr_negative = get_nr_dentry_negative();
160 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); 172 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
161} 173}
162#endif 174#endif
@@ -317,6 +329,8 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry)
317 flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); 329 flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
318 WRITE_ONCE(dentry->d_flags, flags); 330 WRITE_ONCE(dentry->d_flags, flags);
319 dentry->d_inode = NULL; 331 dentry->d_inode = NULL;
332 if (dentry->d_flags & DCACHE_LRU_LIST)
333 this_cpu_inc(nr_dentry_negative);
320} 334}
321 335
322static void dentry_free(struct dentry *dentry) 336static void dentry_free(struct dentry *dentry)
@@ -371,6 +385,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
371 * The per-cpu "nr_dentry_unused" counters are updated with 385 * The per-cpu "nr_dentry_unused" counters are updated with
372 * the DCACHE_LRU_LIST bit. 386 * the DCACHE_LRU_LIST bit.
373 * 387 *
388 * The per-cpu "nr_dentry_negative" counters are only updated
389 * when deleted from or added to the per-superblock LRU list, not
390 * from/to the shrink list. That is to avoid an unneeded dec/inc
391 * pair when moving from LRU to shrink list in select_collect().
392 *
374 * These helper functions make sure we always follow the 393 * These helper functions make sure we always follow the
375 * rules. d_lock must be held by the caller. 394 * rules. d_lock must be held by the caller.
376 */ 395 */
@@ -380,6 +399,8 @@ static void d_lru_add(struct dentry *dentry)
380 D_FLAG_VERIFY(dentry, 0); 399 D_FLAG_VERIFY(dentry, 0);
381 dentry->d_flags |= DCACHE_LRU_LIST; 400 dentry->d_flags |= DCACHE_LRU_LIST;
382 this_cpu_inc(nr_dentry_unused); 401 this_cpu_inc(nr_dentry_unused);
402 if (d_is_negative(dentry))
403 this_cpu_inc(nr_dentry_negative);
383 WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); 404 WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
384} 405}
385 406
@@ -388,6 +409,8 @@ static void d_lru_del(struct dentry *dentry)
388 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 409 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
389 dentry->d_flags &= ~DCACHE_LRU_LIST; 410 dentry->d_flags &= ~DCACHE_LRU_LIST;
390 this_cpu_dec(nr_dentry_unused); 411 this_cpu_dec(nr_dentry_unused);
412 if (d_is_negative(dentry))
413 this_cpu_dec(nr_dentry_negative);
391 WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); 414 WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
392} 415}
393 416
@@ -418,6 +441,8 @@ static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
418 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 441 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
419 dentry->d_flags &= ~DCACHE_LRU_LIST; 442 dentry->d_flags &= ~DCACHE_LRU_LIST;
420 this_cpu_dec(nr_dentry_unused); 443 this_cpu_dec(nr_dentry_unused);
444 if (d_is_negative(dentry))
445 this_cpu_dec(nr_dentry_negative);
421 list_lru_isolate(lru, &dentry->d_lru); 446 list_lru_isolate(lru, &dentry->d_lru);
422} 447}
423 448
@@ -426,6 +451,8 @@ static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
426{ 451{
427 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 452 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
428 dentry->d_flags |= DCACHE_SHRINK_LIST; 453 dentry->d_flags |= DCACHE_SHRINK_LIST;
454 if (d_is_negative(dentry))
455 this_cpu_dec(nr_dentry_negative);
429 list_lru_isolate_move(lru, &dentry->d_lru, list); 456 list_lru_isolate_move(lru, &dentry->d_lru, list);
430} 457}
431 458
@@ -1188,15 +1215,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1188 */ 1215 */
1189void shrink_dcache_sb(struct super_block *sb) 1216void shrink_dcache_sb(struct super_block *sb)
1190{ 1217{
1191 long freed;
1192
1193 do { 1218 do {
1194 LIST_HEAD(dispose); 1219 LIST_HEAD(dispose);
1195 1220
1196 freed = list_lru_walk(&sb->s_dentry_lru, 1221 list_lru_walk(&sb->s_dentry_lru,
1197 dentry_lru_isolate_shrink, &dispose, 1024); 1222 dentry_lru_isolate_shrink, &dispose, 1024);
1198
1199 this_cpu_sub(nr_dentry_unused, freed);
1200 shrink_dentry_list(&dispose); 1223 shrink_dentry_list(&dispose);
1201 } while (list_lru_count(&sb->s_dentry_lru) > 0); 1224 } while (list_lru_count(&sb->s_dentry_lru) > 0);
1202} 1225}
@@ -1820,6 +1843,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1820 WARN_ON(d_in_lookup(dentry)); 1843 WARN_ON(d_in_lookup(dentry));
1821 1844
1822 spin_lock(&dentry->d_lock); 1845 spin_lock(&dentry->d_lock);
1846 /*
1847 * Decrement negative dentry count if it was in the LRU list.
1848 */
1849 if (dentry->d_flags & DCACHE_LRU_LIST)
1850 this_cpu_dec(nr_dentry_negative);
1823 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); 1851 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1824 raw_write_seqcount_begin(&dentry->d_seq); 1852 raw_write_seqcount_begin(&dentry->d_seq);
1825 __d_set_inode_and_type(dentry, inode, add_flags); 1853 __d_set_inode_and_type(dentry, inode, add_flags);
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 13b01351dd1c..29c68c5d44d5 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -324,7 +324,7 @@ static struct dentry *failed_creating(struct dentry *dentry)
324 inode_unlock(d_inode(dentry->d_parent)); 324 inode_unlock(d_inode(dentry->d_parent));
325 dput(dentry); 325 dput(dentry);
326 simple_release_fs(&debugfs_mount, &debugfs_mount_count); 326 simple_release_fs(&debugfs_mount, &debugfs_mount_count);
327 return NULL; 327 return ERR_PTR(-ENOMEM);
328} 328}
329 329
330static struct dentry *end_creating(struct dentry *dentry) 330static struct dentry *end_creating(struct dentry *dentry)
@@ -347,7 +347,7 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
347 dentry = start_creating(name, parent); 347 dentry = start_creating(name, parent);
348 348
349 if (IS_ERR(dentry)) 349 if (IS_ERR(dentry))
350 return NULL; 350 return dentry;
351 351
352 inode = debugfs_get_inode(dentry->d_sb); 352 inode = debugfs_get_inode(dentry->d_sb);
353 if (unlikely(!inode)) 353 if (unlikely(!inode))
@@ -386,7 +386,8 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
386 * This function will return a pointer to a dentry if it succeeds. This 386 * This function will return a pointer to a dentry if it succeeds. This
387 * pointer must be passed to the debugfs_remove() function when the file is 387 * pointer must be passed to the debugfs_remove() function when the file is
388 * to be removed (no automatic cleanup happens if your module is unloaded, 388 * to be removed (no automatic cleanup happens if your module is unloaded,
389 * you are responsible here.) If an error occurs, %NULL will be returned. 389 * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
390 * returned.
390 * 391 *
391 * If debugfs is not enabled in the kernel, the value -%ENODEV will be 392 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
392 * returned. 393 * returned.
@@ -464,7 +465,8 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe);
464 * This function will return a pointer to a dentry if it succeeds. This 465 * This function will return a pointer to a dentry if it succeeds. This
465 * pointer must be passed to the debugfs_remove() function when the file is 466 * pointer must be passed to the debugfs_remove() function when the file is
466 * to be removed (no automatic cleanup happens if your module is unloaded, 467 * to be removed (no automatic cleanup happens if your module is unloaded,
467 * you are responsible here.) If an error occurs, %NULL will be returned. 468 * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
469 * returned.
468 * 470 *
469 * If debugfs is not enabled in the kernel, the value -%ENODEV will be 471 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
470 * returned. 472 * returned.
@@ -495,7 +497,8 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size);
495 * This function will return a pointer to a dentry if it succeeds. This 497 * This function will return a pointer to a dentry if it succeeds. This
496 * pointer must be passed to the debugfs_remove() function when the file is 498 * pointer must be passed to the debugfs_remove() function when the file is
497 * to be removed (no automatic cleanup happens if your module is unloaded, 499 * to be removed (no automatic cleanup happens if your module is unloaded,
498 * you are responsible here.) If an error occurs, %NULL will be returned. 500 * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
501 * returned.
499 * 502 *
500 * If debugfs is not enabled in the kernel, the value -%ENODEV will be 503 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
501 * returned. 504 * returned.
@@ -506,7 +509,7 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
506 struct inode *inode; 509 struct inode *inode;
507 510
508 if (IS_ERR(dentry)) 511 if (IS_ERR(dentry))
509 return NULL; 512 return dentry;
510 513
511 inode = debugfs_get_inode(dentry->d_sb); 514 inode = debugfs_get_inode(dentry->d_sb);
512 if (unlikely(!inode)) 515 if (unlikely(!inode))
@@ -545,7 +548,7 @@ struct dentry *debugfs_create_automount(const char *name,
545 struct inode *inode; 548 struct inode *inode;
546 549
547 if (IS_ERR(dentry)) 550 if (IS_ERR(dentry))
548 return NULL; 551 return dentry;
549 552
550 inode = debugfs_get_inode(dentry->d_sb); 553 inode = debugfs_get_inode(dentry->d_sb);
551 if (unlikely(!inode)) 554 if (unlikely(!inode))
@@ -581,8 +584,8 @@ EXPORT_SYMBOL(debugfs_create_automount);
581 * This function will return a pointer to a dentry if it succeeds. This 584 * This function will return a pointer to a dentry if it succeeds. This
582 * pointer must be passed to the debugfs_remove() function when the symbolic 585 * pointer must be passed to the debugfs_remove() function when the symbolic
583 * link is to be removed (no automatic cleanup happens if your module is 586 * link is to be removed (no automatic cleanup happens if your module is
584 * unloaded, you are responsible here.) If an error occurs, %NULL will be 587 * unloaded, you are responsible here.) If an error occurs, %ERR_PTR(-ERROR)
585 * returned. 588 * will be returned.
586 * 589 *
587 * If debugfs is not enabled in the kernel, the value -%ENODEV will be 590 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
588 * returned. 591 * returned.
@@ -594,12 +597,12 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
594 struct inode *inode; 597 struct inode *inode;
595 char *link = kstrdup(target, GFP_KERNEL); 598 char *link = kstrdup(target, GFP_KERNEL);
596 if (!link) 599 if (!link)
597 return NULL; 600 return ERR_PTR(-ENOMEM);
598 601
599 dentry = start_creating(name, parent); 602 dentry = start_creating(name, parent);
600 if (IS_ERR(dentry)) { 603 if (IS_ERR(dentry)) {
601 kfree(link); 604 kfree(link);
602 return NULL; 605 return dentry;
603 } 606 }
604 607
605 inode = debugfs_get_inode(dentry->d_sb); 608 inode = debugfs_get_inode(dentry->d_sb);
@@ -787,6 +790,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
787 struct dentry *dentry = NULL, *trap; 790 struct dentry *dentry = NULL, *trap;
788 struct name_snapshot old_name; 791 struct name_snapshot old_name;
789 792
793 if (IS_ERR(old_dir))
794 return old_dir;
795 if (IS_ERR(new_dir))
796 return new_dir;
797 if (IS_ERR_OR_NULL(old_dentry))
798 return old_dentry;
799
790 trap = lock_rename(new_dir, old_dir); 800 trap = lock_rename(new_dir, old_dir);
791 /* Source or destination directories don't exist? */ 801 /* Source or destination directories don't exist? */
792 if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir)) 802 if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
@@ -820,7 +830,9 @@ exit:
820 if (dentry && !IS_ERR(dentry)) 830 if (dentry && !IS_ERR(dentry))
821 dput(dentry); 831 dput(dentry);
822 unlock_rename(new_dir, old_dir); 832 unlock_rename(new_dir, old_dir);
823 return NULL; 833 if (IS_ERR(dentry))
834 return dentry;
835 return ERR_PTR(-EINVAL);
824} 836}
825EXPORT_SYMBOL_GPL(debugfs_rename); 837EXPORT_SYMBOL_GPL(debugfs_rename);
826 838
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 82377017130f..d31b6c72b476 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -21,8 +21,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
21 spin_lock(&sb->s_inode_list_lock); 21 spin_lock(&sb->s_inode_list_lock);
22 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 22 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
23 spin_lock(&inode->i_lock); 23 spin_lock(&inode->i_lock);
24 /*
25 * We must skip inodes in unusual state. We may also skip
26 * inodes without pages but we deliberately won't in case
27 * we need to reschedule to avoid softlockups.
28 */
24 if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || 29 if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
25 (inode->i_mapping->nrpages == 0)) { 30 (inode->i_mapping->nrpages == 0 && !need_resched())) {
26 spin_unlock(&inode->i_lock); 31 spin_unlock(&inode->i_lock);
27 continue; 32 continue;
28 } 33 }
@@ -30,6 +35,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
30 spin_unlock(&inode->i_lock); 35 spin_unlock(&inode->i_lock);
31 spin_unlock(&sb->s_inode_list_lock); 36 spin_unlock(&sb->s_inode_list_lock);
32 37
38 cond_resched();
33 invalidate_mapping_pages(inode->i_mapping, 0, -1); 39 invalidate_mapping_pages(inode->i_mapping, 0, -1);
34 iput(toput_inode); 40 iput(toput_inode);
35 toput_inode = inode; 41 toput_inode = inode;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index a5e516a40e7a..809c0f2f9942 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1742,7 +1742,6 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1742 req->in.h.nodeid = outarg->nodeid; 1742 req->in.h.nodeid = outarg->nodeid;
1743 req->in.numargs = 2; 1743 req->in.numargs = 2;
1744 req->in.argpages = 1; 1744 req->in.argpages = 1;
1745 req->page_descs[0].offset = offset;
1746 req->end = fuse_retrieve_end; 1745 req->end = fuse_retrieve_end;
1747 1746
1748 index = outarg->offset >> PAGE_SHIFT; 1747 index = outarg->offset >> PAGE_SHIFT;
@@ -1757,6 +1756,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1757 1756
1758 this_num = min_t(unsigned, num, PAGE_SIZE - offset); 1757 this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1759 req->pages[req->num_pages] = page; 1758 req->pages[req->num_pages] = page;
1759 req->page_descs[req->num_pages].offset = offset;
1760 req->page_descs[req->num_pages].length = this_num; 1760 req->page_descs[req->num_pages].length = this_num;
1761 req->num_pages++; 1761 req->num_pages++;
1762 1762
@@ -2077,8 +2077,10 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
2077 2077
2078 ret = fuse_dev_do_write(fud, &cs, len); 2078 ret = fuse_dev_do_write(fud, &cs, len);
2079 2079
2080 pipe_lock(pipe);
2080 for (idx = 0; idx < nbuf; idx++) 2081 for (idx = 0; idx < nbuf; idx++)
2081 pipe_buf_release(pipe, &bufs[idx]); 2082 pipe_buf_release(pipe, &bufs[idx]);
2083 pipe_unlock(pipe);
2082 2084
2083out: 2085out:
2084 kvfree(bufs); 2086 kvfree(bufs);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index ffaffe18352a..a59c16bd90ac 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1782,7 +1782,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
1782 spin_unlock(&fc->lock); 1782 spin_unlock(&fc->lock);
1783 1783
1784 dec_wb_stat(&bdi->wb, WB_WRITEBACK); 1784 dec_wb_stat(&bdi->wb, WB_WRITEBACK);
1785 dec_node_page_state(page, NR_WRITEBACK_TEMP); 1785 dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP);
1786 wb_writeout_inc(&bdi->wb); 1786 wb_writeout_inc(&bdi->wb);
1787 fuse_writepage_free(fc, new_req); 1787 fuse_writepage_free(fc, new_req);
1788 fuse_request_free(new_req); 1788 fuse_request_free(new_req);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 76baaa6be393..c2d4099429be 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -628,6 +628,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
628 get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); 628 get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
629 fc->pid_ns = get_pid_ns(task_active_pid_ns(current)); 629 fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
630 fc->user_ns = get_user_ns(user_ns); 630 fc->user_ns = get_user_ns(user_ns);
631 fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
631} 632}
632EXPORT_SYMBOL_GPL(fuse_conn_init); 633EXPORT_SYMBOL_GPL(fuse_conn_init);
633 634
@@ -1162,7 +1163,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1162 fc->user_id = d.user_id; 1163 fc->user_id = d.user_id;
1163 fc->group_id = d.group_id; 1164 fc->group_id = d.group_id;
1164 fc->max_read = max_t(unsigned, 4096, d.max_read); 1165 fc->max_read = max_t(unsigned, 4096, d.max_read);
1165 fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
1166 1166
1167 /* Used by get_root_inode() */ 1167 /* Used by get_root_inode() */
1168 sb->s_fs_info = fc; 1168 sb->s_fs_info = fc;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 831d7cb5a49c..17a8d3b43990 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1780,9 +1780,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
1780 goto next_iter; 1780 goto next_iter;
1781 } 1781 }
1782 if (ret == -E2BIG) { 1782 if (ret == -E2BIG) {
1783 n += rbm->bii - initial_bii;
1784 rbm->bii = 0; 1783 rbm->bii = 0;
1785 rbm->offset = 0; 1784 rbm->offset = 0;
1785 n += (rbm->bii - initial_bii);
1786 goto res_covered_end_of_rgrp; 1786 goto res_covered_end_of_rgrp;
1787 } 1787 }
1788 return ret; 1788 return ret;
diff --git a/fs/iomap.c b/fs/iomap.c
index a3088fae567b..897c60215dd1 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -116,6 +116,12 @@ iomap_page_create(struct inode *inode, struct page *page)
116 atomic_set(&iop->read_count, 0); 116 atomic_set(&iop->read_count, 0);
117 atomic_set(&iop->write_count, 0); 117 atomic_set(&iop->write_count, 0);
118 bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE); 118 bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
119
120 /*
121 * migrate_page_move_mapping() assumes that pages with private data have
122 * their count elevated by 1.
123 */
124 get_page(page);
119 set_page_private(page, (unsigned long)iop); 125 set_page_private(page, (unsigned long)iop);
120 SetPagePrivate(page); 126 SetPagePrivate(page);
121 return iop; 127 return iop;
@@ -132,6 +138,7 @@ iomap_page_release(struct page *page)
132 WARN_ON_ONCE(atomic_read(&iop->write_count)); 138 WARN_ON_ONCE(atomic_read(&iop->write_count));
133 ClearPagePrivate(page); 139 ClearPagePrivate(page);
134 set_page_private(page, 0); 140 set_page_private(page, 0);
141 put_page(page);
135 kfree(iop); 142 kfree(iop);
136} 143}
137 144
@@ -569,8 +576,10 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
569 576
570 if (page_has_private(page)) { 577 if (page_has_private(page)) {
571 ClearPagePrivate(page); 578 ClearPagePrivate(page);
579 get_page(newpage);
572 set_page_private(newpage, page_private(page)); 580 set_page_private(newpage, page_private(page));
573 set_page_private(page, 0); 581 set_page_private(page, 0);
582 put_page(page);
574 SetPagePrivate(newpage); 583 SetPagePrivate(newpage);
575 } 584 }
576 585
@@ -1804,6 +1813,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1804 loff_t pos = iocb->ki_pos, start = pos; 1813 loff_t pos = iocb->ki_pos, start = pos;
1805 loff_t end = iocb->ki_pos + count - 1, ret = 0; 1814 loff_t end = iocb->ki_pos + count - 1, ret = 0;
1806 unsigned int flags = IOMAP_DIRECT; 1815 unsigned int flags = IOMAP_DIRECT;
1816 bool wait_for_completion = is_sync_kiocb(iocb);
1807 struct blk_plug plug; 1817 struct blk_plug plug;
1808 struct iomap_dio *dio; 1818 struct iomap_dio *dio;
1809 1819
@@ -1823,7 +1833,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1823 dio->end_io = end_io; 1833 dio->end_io = end_io;
1824 dio->error = 0; 1834 dio->error = 0;
1825 dio->flags = 0; 1835 dio->flags = 0;
1826 dio->wait_for_completion = is_sync_kiocb(iocb);
1827 1836
1828 dio->submit.iter = iter; 1837 dio->submit.iter = iter;
1829 dio->submit.waiter = current; 1838 dio->submit.waiter = current;
@@ -1878,7 +1887,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1878 dio_warn_stale_pagecache(iocb->ki_filp); 1887 dio_warn_stale_pagecache(iocb->ki_filp);
1879 ret = 0; 1888 ret = 0;
1880 1889
1881 if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion && 1890 if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
1882 !inode->i_sb->s_dio_done_wq) { 1891 !inode->i_sb->s_dio_done_wq) {
1883 ret = sb_init_dio_done_wq(inode->i_sb); 1892 ret = sb_init_dio_done_wq(inode->i_sb);
1884 if (ret < 0) 1893 if (ret < 0)
@@ -1894,7 +1903,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1894 if (ret <= 0) { 1903 if (ret <= 0) {
1895 /* magic error code to fall back to buffered I/O */ 1904 /* magic error code to fall back to buffered I/O */
1896 if (ret == -ENOTBLK) { 1905 if (ret == -ENOTBLK) {
1897 dio->wait_for_completion = true; 1906 wait_for_completion = true;
1898 ret = 0; 1907 ret = 0;
1899 } 1908 }
1900 break; 1909 break;
@@ -1916,8 +1925,24 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1916 if (dio->flags & IOMAP_DIO_WRITE_FUA) 1925 if (dio->flags & IOMAP_DIO_WRITE_FUA)
1917 dio->flags &= ~IOMAP_DIO_NEED_SYNC; 1926 dio->flags &= ~IOMAP_DIO_NEED_SYNC;
1918 1927
1928 /*
1929 * We are about to drop our additional submission reference, which
1930 * might be the last reference to the dio. There are three three
1931 * different ways we can progress here:
1932 *
1933 * (a) If this is the last reference we will always complete and free
1934 * the dio ourselves.
1935 * (b) If this is not the last reference, and we serve an asynchronous
1936 * iocb, we must never touch the dio after the decrement, the
1937 * I/O completion handler will complete and free it.
1938 * (c) If this is not the last reference, but we serve a synchronous
1939 * iocb, the I/O completion handler will wake us up on the drop
1940 * of the final reference, and we will complete and free it here
1941 * after we got woken by the I/O completion handler.
1942 */
1943 dio->wait_for_completion = wait_for_completion;
1919 if (!atomic_dec_and_test(&dio->ref)) { 1944 if (!atomic_dec_and_test(&dio->ref)) {
1920 if (!dio->wait_for_completion) 1945 if (!wait_for_completion)
1921 return -EIOCBQUEUED; 1946 return -EIOCBQUEUED;
1922 1947
1923 for (;;) { 1948 for (;;) {
@@ -1934,9 +1959,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1934 __set_current_state(TASK_RUNNING); 1959 __set_current_state(TASK_RUNNING);
1935 } 1960 }
1936 1961
1937 ret = iomap_dio_complete(dio); 1962 return iomap_dio_complete(dio);
1938
1939 return ret;
1940 1963
1941out_free_dio: 1964out_free_dio:
1942 kfree(dio); 1965 kfree(dio);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 22ce3c8a2f46..0570391eaa16 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1895,6 +1895,11 @@ static int nfs_parse_devname(const char *dev_name,
1895 size_t len; 1895 size_t len;
1896 char *end; 1896 char *end;
1897 1897
1898 if (unlikely(!dev_name || !*dev_name)) {
1899 dfprintk(MOUNT, "NFS: device name not specified\n");
1900 return -EINVAL;
1901 }
1902
1898 /* Is the host name protected with square brakcets? */ 1903 /* Is the host name protected with square brakcets? */
1899 if (*dev_name == '[') { 1904 if (*dev_name == '[') {
1900 end = strchr(++dev_name, ']'); 1905 end = strchr(++dev_name, ']');
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5a0bbf917a32..f12cb31a41e5 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -621,11 +621,12 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
621 nfs_set_page_writeback(page); 621 nfs_set_page_writeback(page);
622 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); 622 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
623 623
624 ret = 0; 624 ret = req->wb_context->error;
625 /* If there is a fatal error that covers this write, just exit */ 625 /* If there is a fatal error that covers this write, just exit */
626 if (nfs_error_is_fatal_on_server(req->wb_context->error)) 626 if (nfs_error_is_fatal_on_server(ret))
627 goto out_launder; 627 goto out_launder;
628 628
629 ret = 0;
629 if (!nfs_pageio_add_request(pgio, req)) { 630 if (!nfs_pageio_add_request(pgio, req)) {
630 ret = pgio->pg_error; 631 ret = pgio->pg_error;
631 /* 632 /*
@@ -635,9 +636,9 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
635 nfs_context_set_write_error(req->wb_context, ret); 636 nfs_context_set_write_error(req->wb_context, ret);
636 if (nfs_error_is_fatal_on_server(ret)) 637 if (nfs_error_is_fatal_on_server(ret))
637 goto out_launder; 638 goto out_launder;
638 } 639 } else
640 ret = -EAGAIN;
639 nfs_redirty_request(req); 641 nfs_redirty_request(req);
640 ret = -EAGAIN;
641 } else 642 } else
642 nfs_add_stats(page_file_mapping(page)->host, 643 nfs_add_stats(page_file_mapping(page)->host,
643 NFSIOS_WRITEPAGES, 1); 644 NFSIOS_WRITEPAGES, 1);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 9824e32b2f23..7dc98e14655d 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -557,9 +557,11 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
557 loff_t cloned; 557 loff_t cloned;
558 558
559 cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0); 559 cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
560 if (cloned < 0)
561 return nfserrno(cloned);
560 if (count && cloned != count) 562 if (count && cloned != count)
561 cloned = -EINVAL; 563 return nfserrno(-EINVAL);
562 return nfserrno(cloned < 0 ? cloned : 0); 564 return 0;
563} 565}
564 566
565ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst, 567ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 8ae109429a88..e39bac94dead 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -256,7 +256,7 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
256 inode = proc_get_inode(dir->i_sb, de); 256 inode = proc_get_inode(dir->i_sb, de);
257 if (!inode) 257 if (!inode)
258 return ERR_PTR(-ENOMEM); 258 return ERR_PTR(-ENOMEM);
259 d_set_d_op(dentry, &proc_misc_dentry_ops); 259 d_set_d_op(dentry, de->proc_dops);
260 return d_splice_alias(inode, dentry); 260 return d_splice_alias(inode, dentry);
261 } 261 }
262 read_unlock(&proc_subdir_lock); 262 read_unlock(&proc_subdir_lock);
@@ -429,6 +429,8 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
429 INIT_LIST_HEAD(&ent->pde_openers); 429 INIT_LIST_HEAD(&ent->pde_openers);
430 proc_set_user(ent, (*parent)->uid, (*parent)->gid); 430 proc_set_user(ent, (*parent)->uid, (*parent)->gid);
431 431
432 ent->proc_dops = &proc_misc_dentry_ops;
433
432out: 434out:
433 return ent; 435 return ent;
434} 436}
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 5185d7f6a51e..95b14196f284 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -44,6 +44,7 @@ struct proc_dir_entry {
44 struct completion *pde_unload_completion; 44 struct completion *pde_unload_completion;
45 const struct inode_operations *proc_iops; 45 const struct inode_operations *proc_iops;
46 const struct file_operations *proc_fops; 46 const struct file_operations *proc_fops;
47 const struct dentry_operations *proc_dops;
47 union { 48 union {
48 const struct seq_operations *seq_ops; 49 const struct seq_operations *seq_ops;
49 int (*single_show)(struct seq_file *, void *); 50 int (*single_show)(struct seq_file *, void *);
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index d5e0fcb3439e..a7b12435519e 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -38,6 +38,22 @@ static struct net *get_proc_net(const struct inode *inode)
38 return maybe_get_net(PDE_NET(PDE(inode))); 38 return maybe_get_net(PDE_NET(PDE(inode)));
39} 39}
40 40
41static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
42{
43 return 0;
44}
45
46static const struct dentry_operations proc_net_dentry_ops = {
47 .d_revalidate = proc_net_d_revalidate,
48 .d_delete = always_delete_dentry,
49};
50
51static void pde_force_lookup(struct proc_dir_entry *pde)
52{
53 /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
54 pde->proc_dops = &proc_net_dentry_ops;
55}
56
41static int seq_open_net(struct inode *inode, struct file *file) 57static int seq_open_net(struct inode *inode, struct file *file)
42{ 58{
43 unsigned int state_size = PDE(inode)->state_size; 59 unsigned int state_size = PDE(inode)->state_size;
@@ -90,6 +106,7 @@ struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
90 p = proc_create_reg(name, mode, &parent, data); 106 p = proc_create_reg(name, mode, &parent, data);
91 if (!p) 107 if (!p)
92 return NULL; 108 return NULL;
109 pde_force_lookup(p);
93 p->proc_fops = &proc_net_seq_fops; 110 p->proc_fops = &proc_net_seq_fops;
94 p->seq_ops = ops; 111 p->seq_ops = ops;
95 p->state_size = state_size; 112 p->state_size = state_size;
@@ -133,6 +150,7 @@ struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode
133 p = proc_create_reg(name, mode, &parent, data); 150 p = proc_create_reg(name, mode, &parent, data);
134 if (!p) 151 if (!p)
135 return NULL; 152 return NULL;
153 pde_force_lookup(p);
136 p->proc_fops = &proc_net_seq_fops; 154 p->proc_fops = &proc_net_seq_fops;
137 p->seq_ops = ops; 155 p->seq_ops = ops;
138 p->state_size = state_size; 156 p->state_size = state_size;
@@ -181,6 +199,7 @@ struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
181 p = proc_create_reg(name, mode, &parent, data); 199 p = proc_create_reg(name, mode, &parent, data);
182 if (!p) 200 if (!p)
183 return NULL; 201 return NULL;
202 pde_force_lookup(p);
184 p->proc_fops = &proc_net_single_fops; 203 p->proc_fops = &proc_net_single_fops;
185 p->single_show = show; 204 p->single_show = show;
186 return proc_register(parent, p); 205 return proc_register(parent, p);
@@ -223,6 +242,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo
223 p = proc_create_reg(name, mode, &parent, data); 242 p = proc_create_reg(name, mode, &parent, data);
224 if (!p) 243 if (!p)
225 return NULL; 244 return NULL;
245 pde_force_lookup(p);
226 p->proc_fops = &proc_net_single_fops; 246 p->proc_fops = &proc_net_single_fops;
227 p->single_show = show; 247 p->single_show = show;
228 p->write = write; 248 p->write = write;
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index 1c8eecfe52b8..6acf1bfa0bfe 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -768,18 +768,23 @@ xrep_findroot_block(
768 if (!uuid_equal(&btblock->bb_u.s.bb_uuid, 768 if (!uuid_equal(&btblock->bb_u.s.bb_uuid,
769 &mp->m_sb.sb_meta_uuid)) 769 &mp->m_sb.sb_meta_uuid))
770 goto out; 770 goto out;
771 /*
772 * Read verifiers can reference b_ops, so we set the pointer
773 * here. If the verifier fails we'll reset the buffer state
774 * to what it was before we touched the buffer.
775 */
776 bp->b_ops = fab->buf_ops;
771 fab->buf_ops->verify_read(bp); 777 fab->buf_ops->verify_read(bp);
772 if (bp->b_error) { 778 if (bp->b_error) {
779 bp->b_ops = NULL;
773 bp->b_error = 0; 780 bp->b_error = 0;
774 goto out; 781 goto out;
775 } 782 }
776 783
777 /* 784 /*
778 * Some read verifiers will (re)set b_ops, so we must be 785 * Some read verifiers will (re)set b_ops, so we must be
779 * careful not to blow away any such assignment. 786 * careful not to change b_ops after running the verifier.
780 */ 787 */
781 if (!bp->b_ops)
782 bp->b_ops = fab->buf_ops;
783 } 788 }
784 789
785 /* 790 /*
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 338b9d9984e0..d9048bcea49c 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -449,6 +449,7 @@ xfs_map_blocks(
449 } 449 }
450 450
451 wpc->imap = imap; 451 wpc->imap = imap;
452 xfs_trim_extent_eof(&wpc->imap, ip);
452 trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap); 453 trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap);
453 return 0; 454 return 0;
454allocate_blocks: 455allocate_blocks:
@@ -459,6 +460,7 @@ allocate_blocks:
459 ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF || 460 ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF ||
460 imap.br_startoff + imap.br_blockcount <= cow_fsb); 461 imap.br_startoff + imap.br_blockcount <= cow_fsb);
461 wpc->imap = imap; 462 wpc->imap = imap;
463 xfs_trim_extent_eof(&wpc->imap, ip);
462 trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap); 464 trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap);
463 return 0; 465 return 0;
464} 466}
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index eedc5e0156ff..4f5f2ff3f70f 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -776,10 +776,26 @@ _xfs_buf_read(
776} 776}
777 777
778/* 778/*
779 * Set buffer ops on an unchecked buffer and validate it, if possible.
780 *
779 * If the caller passed in an ops structure and the buffer doesn't have ops 781 * If the caller passed in an ops structure and the buffer doesn't have ops
780 * assigned, set the ops and use them to verify the contents. If the contents 782 * assigned, set the ops and use them to verify the contents. If the contents
781 * cannot be verified, we'll clear XBF_DONE. We assume the buffer has no 783 * cannot be verified, we'll clear XBF_DONE. We assume the buffer has no
782 * recorded errors and is already in XBF_DONE state. 784 * recorded errors and is already in XBF_DONE state.
785 *
786 * Under normal operations, every in-core buffer must have buffer ops assigned
787 * to them when the buffer is read in from disk so that we can validate the
788 * metadata.
789 *
790 * However, there are two scenarios where one can encounter in-core buffers
791 * that don't have buffer ops. The first is during log recovery of buffers on
792 * a V4 filesystem, though these buffers are purged at the end of recovery.
793 *
794 * The other is online repair, which tries to match arbitrary metadata blocks
795 * with btree types in order to find the root. If online repair doesn't match
796 * the buffer with /any/ btree type, the buffer remains in memory in DONE state
797 * with no ops, and a subsequent read_buf call from elsewhere will not set the
798 * ops. This function helps us fix this situation.
783 */ 799 */
784int 800int
785xfs_buf_ensure_ops( 801xfs_buf_ensure_ops(
@@ -1536,8 +1552,7 @@ __xfs_buf_submit(
1536 xfs_buf_ioerror(bp, -EIO); 1552 xfs_buf_ioerror(bp, -EIO);
1537 bp->b_flags &= ~XBF_DONE; 1553 bp->b_flags &= ~XBF_DONE;
1538 xfs_buf_stale(bp); 1554 xfs_buf_stale(bp);
1539 if (bp->b_flags & XBF_ASYNC) 1555 xfs_buf_ioend(bp);
1540 xfs_buf_ioend(bp);
1541 return -EIO; 1556 return -EIO;
1542 } 1557 }
1543 1558
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index 7b24fc791146..228a5e234af0 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -71,7 +71,6 @@
71#define MMP2_CLK_CCIC1_MIX 117 71#define MMP2_CLK_CCIC1_MIX 117
72#define MMP2_CLK_CCIC1_PHY 118 72#define MMP2_CLK_CCIC1_PHY 118
73#define MMP2_CLK_CCIC1_SPHY 119 73#define MMP2_CLK_CCIC1_SPHY 119
74#define MMP2_CLK_SP 120
75 74
76#define MMP2_NR_CLKS 200 75#define MMP2_NR_CLKS 200
77#endif 76#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 218df7f4d3e1..5041357d0297 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -180,12 +180,10 @@ enum cpuhp_smt_control {
180#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) 180#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
181extern enum cpuhp_smt_control cpu_smt_control; 181extern enum cpuhp_smt_control cpu_smt_control;
182extern void cpu_smt_disable(bool force); 182extern void cpu_smt_disable(bool force);
183extern void cpu_smt_check_topology_early(void);
184extern void cpu_smt_check_topology(void); 183extern void cpu_smt_check_topology(void);
185#else 184#else
186# define cpu_smt_control (CPU_SMT_ENABLED) 185# define cpu_smt_control (CPU_SMT_ENABLED)
187static inline void cpu_smt_disable(bool force) { } 186static inline void cpu_smt_disable(bool force) { }
188static inline void cpu_smt_check_topology_early(void) { }
189static inline void cpu_smt_check_topology(void) { } 187static inline void cpu_smt_check_topology(void) { }
190#endif 188#endif
191 189
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index ef4b70f64f33..60996e64c579 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -62,9 +62,10 @@ extern const struct qstr slash_name;
62struct dentry_stat_t { 62struct dentry_stat_t {
63 long nr_dentry; 63 long nr_dentry;
64 long nr_unused; 64 long nr_unused;
65 long age_limit; /* age in seconds */ 65 long age_limit; /* age in seconds */
66 long want_pages; /* pages requested by system */ 66 long want_pages; /* pages requested by system */
67 long dummy[2]; 67 long nr_negative; /* # of unused negative dentries */
68 long dummy; /* Reserved for future use */
68}; 69};
69extern struct dentry_stat_t dentry_stat; 70extern struct dentry_stat_t dentry_stat;
70 71
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 7317376734f7..95e2d7ebdf21 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -611,8 +611,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
611 return qdisc_skb_cb(skb)->data; 611 return qdisc_skb_cb(skb)->data;
612} 612}
613 613
614static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, 614static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
615 struct sk_buff *skb) 615 struct sk_buff *skb)
616{ 616{
617 u8 *cb_data = bpf_skb_cb(skb); 617 u8 *cb_data = bpf_skb_cb(skb);
618 u8 cb_saved[BPF_SKB_CB_LEN]; 618 u8 cb_saved[BPF_SKB_CB_LEN];
@@ -631,15 +631,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
631 return res; 631 return res;
632} 632}
633 633
634static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
635 struct sk_buff *skb)
636{
637 u32 res;
638
639 preempt_disable();
640 res = __bpf_prog_run_save_cb(prog, skb);
641 preempt_enable();
642 return res;
643}
644
634static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, 645static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
635 struct sk_buff *skb) 646 struct sk_buff *skb)
636{ 647{
637 u8 *cb_data = bpf_skb_cb(skb); 648 u8 *cb_data = bpf_skb_cb(skb);
649 u32 res;
638 650
639 if (unlikely(prog->cb_access)) 651 if (unlikely(prog->cb_access))
640 memset(cb_data, 0, BPF_SKB_CB_LEN); 652 memset(cb_data, 0, BPF_SKB_CB_LEN);
641 653
642 return BPF_PROG_RUN(prog, skb); 654 preempt_disable();
655 res = BPF_PROG_RUN(prog, skb);
656 preempt_enable();
657 return res;
643} 658}
644 659
645static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, 660static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 811c77743dad..29d8e2cfed0e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1479,11 +1479,12 @@ struct super_block {
1479 struct user_namespace *s_user_ns; 1479 struct user_namespace *s_user_ns;
1480 1480
1481 /* 1481 /*
1482 * Keep the lru lists last in the structure so they always sit on their 1482 * The list_lru structure is essentially just a pointer to a table
1483 * own individual cachelines. 1483 * of per-node lru lists, each of which has its own spinlock.
1484 * There is no need to put them into separate cachelines.
1484 */ 1485 */
1485 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; 1486 struct list_lru s_dentry_lru;
1486 struct list_lru s_inode_lru ____cacheline_aligned_in_smp; 1487 struct list_lru s_inode_lru;
1487 struct rcu_head rcu; 1488 struct rcu_head rcu;
1488 struct work_struct destroy_work; 1489 struct work_struct destroy_work;
1489 1490
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
index 8663f216c563..2d6100edf204 100644
--- a/include/linux/hid-debug.h
+++ b/include/linux/hid-debug.h
@@ -24,7 +24,10 @@
24 24
25#ifdef CONFIG_DEBUG_FS 25#ifdef CONFIG_DEBUG_FS
26 26
27#include <linux/kfifo.h>
28
27#define HID_DEBUG_BUFSIZE 512 29#define HID_DEBUG_BUFSIZE 512
30#define HID_DEBUG_FIFOSIZE 512
28 31
29void hid_dump_input(struct hid_device *, struct hid_usage *, __s32); 32void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
30void hid_dump_report(struct hid_device *, int , u8 *, int); 33void hid_dump_report(struct hid_device *, int , u8 *, int);
@@ -37,11 +40,8 @@ void hid_debug_init(void);
37void hid_debug_exit(void); 40void hid_debug_exit(void);
38void hid_debug_event(struct hid_device *, char *); 41void hid_debug_event(struct hid_device *, char *);
39 42
40
41struct hid_debug_list { 43struct hid_debug_list {
42 char *hid_debug_buf; 44 DECLARE_KFIFO_PTR(hid_debug_fifo, char);
43 int head;
44 int tail;
45 struct fasync_struct *fasync; 45 struct fasync_struct *fasync;
46 struct hid_device *hdev; 46 struct hid_device *hdev;
47 struct list_head node; 47 struct list_head node;
@@ -64,4 +64,3 @@ struct hid_debug_list {
64#endif 64#endif
65 65
66#endif 66#endif
67
diff --git a/include/linux/ide.h b/include/linux/ide.h
index e7d29ae633cd..971cf76a78a0 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -615,6 +615,7 @@ struct ide_drive_s {
615 615
616 /* current sense rq and buffer */ 616 /* current sense rq and buffer */
617 bool sense_rq_armed; 617 bool sense_rq_armed;
618 bool sense_rq_active;
618 struct request *sense_rq; 619 struct request *sense_rq;
619 struct request_sense sense_data; 620 struct request_sense sense_data;
620 621
@@ -1219,6 +1220,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
1219extern void ide_timer_expiry(struct timer_list *t); 1220extern void ide_timer_expiry(struct timer_list *t);
1220extern irqreturn_t ide_intr(int irq, void *dev_id); 1221extern irqreturn_t ide_intr(int irq, void *dev_id);
1221extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); 1222extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
1223extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
1222extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq); 1224extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
1223 1225
1224void ide_init_disk(struct gendisk *, ide_drive_t *); 1226void ide_init_disk(struct gendisk *, ide_drive_t *);
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 07da5c6c5ba0..368267c1b71b 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -21,14 +21,16 @@ struct vmem_altmap;
21 * walkers which rely on the fully initialized page->flags and others 21 * walkers which rely on the fully initialized page->flags and others
22 * should use this rather than pfn_valid && pfn_to_page 22 * should use this rather than pfn_valid && pfn_to_page
23 */ 23 */
24#define pfn_to_online_page(pfn) \ 24#define pfn_to_online_page(pfn) \
25({ \ 25({ \
26 struct page *___page = NULL; \ 26 struct page *___page = NULL; \
27 unsigned long ___nr = pfn_to_section_nr(pfn); \ 27 unsigned long ___pfn = pfn; \
28 \ 28 unsigned long ___nr = pfn_to_section_nr(___pfn); \
29 if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\ 29 \
30 ___page = pfn_to_page(pfn); \ 30 if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
31 ___page; \ 31 pfn_valid_within(___pfn)) \
32 ___page = pfn_to_page(___pfn); \
33 ___page; \
32}) 34})
33 35
34/* 36/*
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1d95e634f3fe..1fb733f38a47 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1492,6 +1492,7 @@ struct net_device_ops {
1492 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook 1492 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
1493 * @IFF_FAILOVER: device is a failover master device 1493 * @IFF_FAILOVER: device is a failover master device
1494 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device 1494 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
1495 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
1495 */ 1496 */
1496enum netdev_priv_flags { 1497enum netdev_priv_flags {
1497 IFF_802_1Q_VLAN = 1<<0, 1498 IFF_802_1Q_VLAN = 1<<0,
@@ -1523,6 +1524,7 @@ enum netdev_priv_flags {
1523 IFF_NO_RX_HANDLER = 1<<26, 1524 IFF_NO_RX_HANDLER = 1<<26,
1524 IFF_FAILOVER = 1<<27, 1525 IFF_FAILOVER = 1<<27,
1525 IFF_FAILOVER_SLAVE = 1<<28, 1526 IFF_FAILOVER_SLAVE = 1<<28,
1527 IFF_L3MDEV_RX_HANDLER = 1<<29,
1526}; 1528};
1527 1529
1528#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN 1530#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1553,6 +1555,7 @@ enum netdev_priv_flags {
1553#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER 1555#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
1554#define IFF_FAILOVER IFF_FAILOVER 1556#define IFF_FAILOVER IFF_FAILOVER
1555#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE 1557#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
1558#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
1556 1559
1557/** 1560/**
1558 * struct net_device - The DEVICE structure. 1561 * struct net_device - The DEVICE structure.
@@ -4561,6 +4564,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
4561 return dev->priv_flags & IFF_SUPP_NOFCS; 4564 return dev->priv_flags & IFF_SUPP_NOFCS;
4562} 4565}
4563 4566
4567static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
4568{
4569 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
4570}
4571
4564static inline bool netif_is_l3_master(const struct net_device *dev) 4572static inline bool netif_is_l3_master(const struct net_device *dev)
4565{ 4573{
4566 return dev->priv_flags & IFF_L3MDEV_MASTER; 4574 return dev->priv_flags & IFF_L3MDEV_MASTER;
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 54af4eef169f..fed5be706bc9 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
105 105
106static inline void pm_runtime_mark_last_busy(struct device *dev) 106static inline void pm_runtime_mark_last_busy(struct device *dev)
107{ 107{
108 WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get())); 108 WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
109} 109}
110 110
111static inline bool pm_runtime_is_irq_safe(struct device *dev) 111static inline bool pm_runtime_is_irq_safe(struct device *dev)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d2f90fa92468..bba3afb4e9bf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -995,7 +995,7 @@ struct task_struct {
995 /* cg_list protected by css_set_lock and tsk->alloc_lock: */ 995 /* cg_list protected by css_set_lock and tsk->alloc_lock: */
996 struct list_head cg_list; 996 struct list_head cg_list;
997#endif 997#endif
998#ifdef CONFIG_X86_RESCTRL 998#ifdef CONFIG_X86_CPU_RESCTRL
999 u32 closid; 999 u32 closid;
1000 u32 rmid; 1000 u32 rmid;
1001#endif 1001#endif
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index ec912d01126f..ecdc6542070f 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm)
71#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ 71#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
72#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ 72#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
73#define MMF_OOM_VICTIM 25 /* mm is the oom victim */ 73#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
74#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
74#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) 75#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
75 76
76#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ 77#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 7ddfc65586b0..4335bd771ce5 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -184,6 +184,7 @@ struct plat_stmmacenet_data {
184 struct clk *pclk; 184 struct clk *pclk;
185 struct clk *clk_ptp_ref; 185 struct clk *clk_ptp_ref;
186 unsigned int clk_ptp_rate; 186 unsigned int clk_ptp_rate;
187 unsigned int clk_ref_rate;
187 struct reset_control *stmmac_rst; 188 struct reset_control *stmmac_rst;
188 struct stmmac_axi *axi; 189 struct stmmac_axi *axi;
189 int has_gmac4; 190 int has_gmac4;
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 78fa0ac4613c..5175fd63cd82 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -153,7 +153,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
153 153
154 if (netif_is_l3_slave(skb->dev)) 154 if (netif_is_l3_slave(skb->dev))
155 master = netdev_master_upper_dev_get_rcu(skb->dev); 155 master = netdev_master_upper_dev_get_rcu(skb->dev);
156 else if (netif_is_l3_master(skb->dev)) 156 else if (netif_is_l3_master(skb->dev) ||
157 netif_has_l3_rx_handler(skb->dev))
157 master = skb->dev; 158 master = skb->dev;
158 159
159 if (master && master->l3mdev_ops->l3mdev_l3_rcv) 160 if (master && master->l3mdev_ops->l3mdev_l3_rcv)
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 45eba7d7ab38..a66fcd316734 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -469,9 +469,7 @@ struct nft_set_binding {
469int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, 469int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
470 struct nft_set_binding *binding); 470 struct nft_set_binding *binding);
471void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, 471void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
472 struct nft_set_binding *binding); 472 struct nft_set_binding *binding, bool commit);
473void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
474 struct nft_set_binding *binding);
475void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set); 473void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
476 474
477/** 475/**
@@ -721,6 +719,13 @@ struct nft_expr_type {
721#define NFT_EXPR_STATEFUL 0x1 719#define NFT_EXPR_STATEFUL 0x1
722#define NFT_EXPR_GC 0x2 720#define NFT_EXPR_GC 0x2
723 721
722enum nft_trans_phase {
723 NFT_TRANS_PREPARE,
724 NFT_TRANS_ABORT,
725 NFT_TRANS_COMMIT,
726 NFT_TRANS_RELEASE
727};
728
724/** 729/**
725 * struct nft_expr_ops - nf_tables expression operations 730 * struct nft_expr_ops - nf_tables expression operations
726 * 731 *
@@ -750,7 +755,8 @@ struct nft_expr_ops {
750 void (*activate)(const struct nft_ctx *ctx, 755 void (*activate)(const struct nft_ctx *ctx,
751 const struct nft_expr *expr); 756 const struct nft_expr *expr);
752 void (*deactivate)(const struct nft_ctx *ctx, 757 void (*deactivate)(const struct nft_ctx *ctx,
753 const struct nft_expr *expr); 758 const struct nft_expr *expr,
759 enum nft_trans_phase phase);
754 void (*destroy)(const struct nft_ctx *ctx, 760 void (*destroy)(const struct nft_ctx *ctx,
755 const struct nft_expr *expr); 761 const struct nft_expr *expr);
756 void (*destroy_clone)(const struct nft_ctx *ctx, 762 void (*destroy_clone)(const struct nft_ctx *ctx,
@@ -1335,12 +1341,15 @@ struct nft_trans_rule {
1335struct nft_trans_set { 1341struct nft_trans_set {
1336 struct nft_set *set; 1342 struct nft_set *set;
1337 u32 set_id; 1343 u32 set_id;
1344 bool bound;
1338}; 1345};
1339 1346
1340#define nft_trans_set(trans) \ 1347#define nft_trans_set(trans) \
1341 (((struct nft_trans_set *)trans->data)->set) 1348 (((struct nft_trans_set *)trans->data)->set)
1342#define nft_trans_set_id(trans) \ 1349#define nft_trans_set_id(trans) \
1343 (((struct nft_trans_set *)trans->data)->set_id) 1350 (((struct nft_trans_set *)trans->data)->set_id)
1351#define nft_trans_set_bound(trans) \
1352 (((struct nft_trans_set *)trans->data)->bound)
1344 1353
1345struct nft_trans_chain { 1354struct nft_trans_chain {
1346 bool update; 1355 bool update;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index a3ceed3a040a..80debf5982ac 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2579,9 +2579,10 @@ struct ib_device {
2579 2579
2580 const struct uapi_definition *driver_def; 2580 const struct uapi_definition *driver_def;
2581 enum rdma_driver_id driver_id; 2581 enum rdma_driver_id driver_id;
2582
2582 /* 2583 /*
2583 * Provides synchronization between device unregistration and netlink 2584 * Positive refcount indicates that the device is currently
2584 * commands on a device. To be used only by core. 2585 * registered and cannot be unregistered.
2585 */ 2586 */
2586 refcount_t refcount; 2587 refcount_t refcount;
2587 struct completion unreg_completion; 2588 struct completion unreg_completion;
@@ -3926,6 +3927,25 @@ static inline bool ib_access_writable(int access_flags)
3926int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 3927int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3927 struct ib_mr_status *mr_status); 3928 struct ib_mr_status *mr_status);
3928 3929
3930/**
3931 * ib_device_try_get: Hold a registration lock
3932 * device: The device to lock
3933 *
3934 * A device under an active registration lock cannot become unregistered. It
3935 * is only possible to obtain a registration lock on a device that is fully
3936 * registered, otherwise this function returns false.
3937 *
3938 * The registration lock is only necessary for actions which require the
3939 * device to still be registered. Uses that only require the device pointer to
3940 * be valid should use get_device(&ibdev->dev) to hold the memory.
3941 *
3942 */
3943static inline bool ib_device_try_get(struct ib_device *dev)
3944{
3945 return refcount_inc_not_zero(&dev->refcount);
3946}
3947
3948void ib_device_put(struct ib_device *device);
3929struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, 3949struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3930 u16 pkey, const union ib_gid *gid, 3950 u16 pkey, const union ib_gid *gid,
3931 const struct sockaddr *addr); 3951 const struct sockaddr *addr);
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 0cdc3999ecfa..c5188ff724d1 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -173,7 +173,11 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
173 if (snd_BUG_ON(!stream)) 173 if (snd_BUG_ON(!stream))
174 return; 174 return;
175 175
176 stream->runtime->state = SNDRV_PCM_STATE_SETUP; 176 if (stream->direction == SND_COMPRESS_PLAYBACK)
177 stream->runtime->state = SNDRV_PCM_STATE_SETUP;
178 else
179 stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
180
177 wake_up(&stream->runtime->sleep); 181 wake_up(&stream->runtime->sleep);
178} 182}
179 183
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 7fa48b100936..cc7c8d42d4fd 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -68,6 +68,7 @@ struct hda_bus {
68 unsigned int response_reset:1; /* controller was reset */ 68 unsigned int response_reset:1; /* controller was reset */
69 unsigned int in_reset:1; /* during reset operation */ 69 unsigned int in_reset:1; /* during reset operation */
70 unsigned int no_response_fallback:1; /* don't fallback at RIRB error */ 70 unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
71 unsigned int bus_probing :1; /* during probing process */
71 72
72 int primary_dig_out_type; /* primary digital out PCM type */ 73 int primary_dig_out_type; /* primary digital out PCM type */
73 unsigned int mixer_assigned; /* codec addr for mixer name */ 74 unsigned int mixer_assigned; /* codec addr for mixer name */
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index 1196e1c1d4f6..ff8e7dc9d4dd 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -79,6 +79,12 @@
79#define VIRTIO_F_RING_PACKED 34 79#define VIRTIO_F_RING_PACKED 34
80 80
81/* 81/*
82 * This feature indicates that memory accesses by the driver and the
83 * device are ordered in a way described by the platform.
84 */
85#define VIRTIO_F_ORDER_PLATFORM 36
86
87/*
82 * Does the device support Single Root I/O Virtualization? 88 * Does the device support Single Root I/O Virtualization?
83 */ 89 */
84#define VIRTIO_F_SR_IOV 37 90#define VIRTIO_F_SR_IOV 37
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 2414f8af26b3..4c4e24c291a5 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -213,14 +213,4 @@ struct vring_packed_desc {
213 __le16 flags; 213 __le16 flags;
214}; 214};
215 215
216struct vring_packed {
217 unsigned int num;
218
219 struct vring_packed_desc *desc;
220
221 struct vring_packed_desc_event *driver;
222
223 struct vring_packed_desc_event *device;
224};
225
226#endif /* _UAPI_LINUX_VIRTIO_RING_H */ 216#endif /* _UAPI_LINUX_VIRTIO_RING_H */
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index ef3c7ec793a7..eb76b38a00d4 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -52,6 +52,11 @@ struct hns_roce_ib_create_srq {
52 __aligned_u64 que_addr; 52 __aligned_u64 que_addr;
53}; 53};
54 54
55struct hns_roce_ib_create_srq_resp {
56 __u32 srqn;
57 __u32 reserved;
58};
59
55struct hns_roce_ib_create_qp { 60struct hns_roce_ib_create_qp {
56 __aligned_u64 buf_addr; 61 __aligned_u64 buf_addr;
57 __aligned_u64 db_addr; 62 __aligned_u64 db_addr;
diff --git a/init/Kconfig b/init/Kconfig
index 513fa544a134..c9386a365eea 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -512,6 +512,17 @@ config PSI_DEFAULT_DISABLED
512 per default but can be enabled through passing psi=1 on the 512 per default but can be enabled through passing psi=1 on the
513 kernel commandline during boot. 513 kernel commandline during boot.
514 514
515 This feature adds some code to the task wakeup and sleep
516 paths of the scheduler. The overhead is too low to affect
517 common scheduling-intense workloads in practice (such as
518 webservers, memcache), but it does show up in artificial
519 scheduler stress tests, such as hackbench.
520
521 If you are paranoid and not sure what the kernel will be
522 used for, say Y.
523
524 Say N if unsure.
525
515endmenu # "CPU/Task time and stats accounting" 526endmenu # "CPU/Task time and stats accounting"
516 527
517config CPU_ISOLATION 528config CPU_ISOLATION
@@ -825,7 +836,7 @@ config CGROUP_PIDS
825 PIDs controller is designed to stop this from happening. 836 PIDs controller is designed to stop this from happening.
826 837
827 It should be noted that organisational operations (such as attaching 838 It should be noted that organisational operations (such as attaching
828 to a cgroup hierarchy will *not* be blocked by the PIDs controller), 839 to a cgroup hierarchy) will *not* be blocked by the PIDs controller,
829 since the PIDs limit only affects a process's ability to fork, not to 840 since the PIDs limit only affects a process's ability to fork, not to
830 attach to a cgroup. 841 attach to a cgroup.
831 842
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 7019c1f05cab..bd3921b1514b 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -1530,7 +1530,8 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
1530 1530
1531 /* "typedef void new_void", "const void"...etc */ 1531 /* "typedef void new_void", "const void"...etc */
1532 if (!btf_type_is_void(next_type) && 1532 if (!btf_type_is_void(next_type) &&
1533 !btf_type_is_fwd(next_type)) { 1533 !btf_type_is_fwd(next_type) &&
1534 !btf_type_is_func_proto(next_type)) {
1534 btf_verifier_log_type(env, v->t, "Invalid type_id"); 1535 btf_verifier_log_type(env, v->t, "Invalid type_id");
1535 return -EINVAL; 1536 return -EINVAL;
1536 } 1537 }
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index d78cfec5807d..4e807973aa80 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -573,7 +573,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
573 bpf_compute_and_save_data_end(skb, &saved_data_end); 573 bpf_compute_and_save_data_end(skb, &saved_data_end);
574 574
575 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, 575 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
576 bpf_prog_run_save_cb); 576 __bpf_prog_run_save_cb);
577 bpf_restore_data_end(skb, saved_data_end); 577 bpf_restore_data_end(skb, saved_data_end);
578 __skb_pull(skb, offset); 578 __skb_pull(skb, offset);
579 skb->sk = save_sk; 579 skb->sk = save_sk;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 937776531998..fed15cf94dca 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -686,7 +686,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
686 } 686 }
687 687
688 if (htab_is_prealloc(htab)) { 688 if (htab_is_prealloc(htab)) {
689 pcpu_freelist_push(&htab->freelist, &l->fnode); 689 __pcpu_freelist_push(&htab->freelist, &l->fnode);
690 } else { 690 } else {
691 atomic_dec(&htab->count); 691 atomic_dec(&htab->count);
692 l->htab = htab; 692 l->htab = htab;
@@ -739,7 +739,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
739 } else { 739 } else {
740 struct pcpu_freelist_node *l; 740 struct pcpu_freelist_node *l;
741 741
742 l = pcpu_freelist_pop(&htab->freelist); 742 l = __pcpu_freelist_pop(&htab->freelist);
743 if (!l) 743 if (!l)
744 return ERR_PTR(-E2BIG); 744 return ERR_PTR(-E2BIG);
745 l_new = container_of(l, struct htab_elem, fnode); 745 l_new = container_of(l, struct htab_elem, fnode);
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
index 673fa6fe2d73..0c1b4ba9e90e 100644
--- a/kernel/bpf/percpu_freelist.c
+++ b/kernel/bpf/percpu_freelist.c
@@ -28,8 +28,8 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s)
28 free_percpu(s->freelist); 28 free_percpu(s->freelist);
29} 29}
30 30
31static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head, 31static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
32 struct pcpu_freelist_node *node) 32 struct pcpu_freelist_node *node)
33{ 33{
34 raw_spin_lock(&head->lock); 34 raw_spin_lock(&head->lock);
35 node->next = head->first; 35 node->next = head->first;
@@ -37,12 +37,22 @@ static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
37 raw_spin_unlock(&head->lock); 37 raw_spin_unlock(&head->lock);
38} 38}
39 39
40void pcpu_freelist_push(struct pcpu_freelist *s, 40void __pcpu_freelist_push(struct pcpu_freelist *s,
41 struct pcpu_freelist_node *node) 41 struct pcpu_freelist_node *node)
42{ 42{
43 struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist); 43 struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
44 44
45 __pcpu_freelist_push(head, node); 45 ___pcpu_freelist_push(head, node);
46}
47
48void pcpu_freelist_push(struct pcpu_freelist *s,
49 struct pcpu_freelist_node *node)
50{
51 unsigned long flags;
52
53 local_irq_save(flags);
54 __pcpu_freelist_push(s, node);
55 local_irq_restore(flags);
46} 56}
47 57
48void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, 58void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
@@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
63 for_each_possible_cpu(cpu) { 73 for_each_possible_cpu(cpu) {
64again: 74again:
65 head = per_cpu_ptr(s->freelist, cpu); 75 head = per_cpu_ptr(s->freelist, cpu);
66 __pcpu_freelist_push(head, buf); 76 ___pcpu_freelist_push(head, buf);
67 i++; 77 i++;
68 buf += elem_size; 78 buf += elem_size;
69 if (i == nr_elems) 79 if (i == nr_elems)
@@ -74,14 +84,12 @@ again:
74 local_irq_restore(flags); 84 local_irq_restore(flags);
75} 85}
76 86
77struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) 87struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
78{ 88{
79 struct pcpu_freelist_head *head; 89 struct pcpu_freelist_head *head;
80 struct pcpu_freelist_node *node; 90 struct pcpu_freelist_node *node;
81 unsigned long flags;
82 int orig_cpu, cpu; 91 int orig_cpu, cpu;
83 92
84 local_irq_save(flags);
85 orig_cpu = cpu = raw_smp_processor_id(); 93 orig_cpu = cpu = raw_smp_processor_id();
86 while (1) { 94 while (1) {
87 head = per_cpu_ptr(s->freelist, cpu); 95 head = per_cpu_ptr(s->freelist, cpu);
@@ -89,16 +97,25 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
89 node = head->first; 97 node = head->first;
90 if (node) { 98 if (node) {
91 head->first = node->next; 99 head->first = node->next;
92 raw_spin_unlock_irqrestore(&head->lock, flags); 100 raw_spin_unlock(&head->lock);
93 return node; 101 return node;
94 } 102 }
95 raw_spin_unlock(&head->lock); 103 raw_spin_unlock(&head->lock);
96 cpu = cpumask_next(cpu, cpu_possible_mask); 104 cpu = cpumask_next(cpu, cpu_possible_mask);
97 if (cpu >= nr_cpu_ids) 105 if (cpu >= nr_cpu_ids)
98 cpu = 0; 106 cpu = 0;
99 if (cpu == orig_cpu) { 107 if (cpu == orig_cpu)
100 local_irq_restore(flags);
101 return NULL; 108 return NULL;
102 }
103 } 109 }
104} 110}
111
112struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
113{
114 struct pcpu_freelist_node *ret;
115 unsigned long flags;
116
117 local_irq_save(flags);
118 ret = __pcpu_freelist_pop(s);
119 local_irq_restore(flags);
120 return ret;
121}
diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
index 3049aae8ea1e..c3960118e617 100644
--- a/kernel/bpf/percpu_freelist.h
+++ b/kernel/bpf/percpu_freelist.h
@@ -22,8 +22,12 @@ struct pcpu_freelist_node {
22 struct pcpu_freelist_node *next; 22 struct pcpu_freelist_node *next;
23}; 23};
24 24
25/* pcpu_freelist_* do spin_lock_irqsave. */
25void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); 26void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
26struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *); 27struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *);
28/* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */
29void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
30struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *);
27void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, 31void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
28 u32 nr_elems); 32 u32 nr_elems);
29int pcpu_freelist_init(struct pcpu_freelist *); 33int pcpu_freelist_init(struct pcpu_freelist *);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0834958f1dc4..ec7c552af76b 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -740,8 +740,13 @@ static int map_lookup_elem(union bpf_attr *attr)
740 740
741 if (bpf_map_is_dev_bound(map)) { 741 if (bpf_map_is_dev_bound(map)) {
742 err = bpf_map_offload_lookup_elem(map, key, value); 742 err = bpf_map_offload_lookup_elem(map, key, value);
743 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || 743 goto done;
744 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { 744 }
745
746 preempt_disable();
747 this_cpu_inc(bpf_prog_active);
748 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
749 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
745 err = bpf_percpu_hash_copy(map, key, value); 750 err = bpf_percpu_hash_copy(map, key, value);
746 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 751 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
747 err = bpf_percpu_array_copy(map, key, value); 752 err = bpf_percpu_array_copy(map, key, value);
@@ -777,7 +782,10 @@ static int map_lookup_elem(union bpf_attr *attr)
777 } 782 }
778 rcu_read_unlock(); 783 rcu_read_unlock();
779 } 784 }
785 this_cpu_dec(bpf_prog_active);
786 preempt_enable();
780 787
788done:
781 if (err) 789 if (err)
782 goto free_value; 790 goto free_value;
783 791
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 91d5c38eb7e5..d1c6d152da89 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -376,9 +376,6 @@ void __weak arch_smt_update(void) { }
376 376
377#ifdef CONFIG_HOTPLUG_SMT 377#ifdef CONFIG_HOTPLUG_SMT
378enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED; 378enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
379EXPORT_SYMBOL_GPL(cpu_smt_control);
380
381static bool cpu_smt_available __read_mostly;
382 379
383void __init cpu_smt_disable(bool force) 380void __init cpu_smt_disable(bool force)
384{ 381{
@@ -397,25 +394,11 @@ void __init cpu_smt_disable(bool force)
397 394
398/* 395/*
399 * The decision whether SMT is supported can only be done after the full 396 * The decision whether SMT is supported can only be done after the full
400 * CPU identification. Called from architecture code before non boot CPUs 397 * CPU identification. Called from architecture code.
401 * are brought up.
402 */
403void __init cpu_smt_check_topology_early(void)
404{
405 if (!topology_smt_supported())
406 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
407}
408
409/*
410 * If SMT was disabled by BIOS, detect it here, after the CPUs have been
411 * brought online. This ensures the smt/l1tf sysfs entries are consistent
412 * with reality. cpu_smt_available is set to true during the bringup of non
413 * boot CPUs when a SMT sibling is detected. Note, this may overwrite
414 * cpu_smt_control's previous setting.
415 */ 398 */
416void __init cpu_smt_check_topology(void) 399void __init cpu_smt_check_topology(void)
417{ 400{
418 if (!cpu_smt_available) 401 if (!topology_smt_supported())
419 cpu_smt_control = CPU_SMT_NOT_SUPPORTED; 402 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
420} 403}
421 404
@@ -428,18 +411,10 @@ early_param("nosmt", smt_cmdline_disable);
428 411
429static inline bool cpu_smt_allowed(unsigned int cpu) 412static inline bool cpu_smt_allowed(unsigned int cpu)
430{ 413{
431 if (topology_is_primary_thread(cpu)) 414 if (cpu_smt_control == CPU_SMT_ENABLED)
432 return true; 415 return true;
433 416
434 /* 417 if (topology_is_primary_thread(cpu))
435 * If the CPU is not a 'primary' thread and the booted_once bit is
436 * set then the processor has SMT support. Store this information
437 * for the late check of SMT support in cpu_smt_check_topology().
438 */
439 if (per_cpu(cpuhp_state, cpu).booted_once)
440 cpu_smt_available = true;
441
442 if (cpu_smt_control == CPU_SMT_ENABLED)
443 return true; 418 return true;
444 419
445 /* 420 /*
@@ -2090,10 +2065,8 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2090 */ 2065 */
2091 cpuhp_offline_cpu_device(cpu); 2066 cpuhp_offline_cpu_device(cpu);
2092 } 2067 }
2093 if (!ret) { 2068 if (!ret)
2094 cpu_smt_control = ctrlval; 2069 cpu_smt_control = ctrlval;
2095 arch_smt_update();
2096 }
2097 cpu_maps_update_done(); 2070 cpu_maps_update_done();
2098 return ret; 2071 return ret;
2099} 2072}
@@ -2104,7 +2077,6 @@ static int cpuhp_smt_enable(void)
2104 2077
2105 cpu_maps_update_begin(); 2078 cpu_maps_update_begin();
2106 cpu_smt_control = CPU_SMT_ENABLED; 2079 cpu_smt_control = CPU_SMT_ENABLED;
2107 arch_smt_update();
2108 for_each_present_cpu(cpu) { 2080 for_each_present_cpu(cpu) {
2109 /* Skip online CPUs and CPUs on offline nodes */ 2081 /* Skip online CPUs and CPUs on offline nodes */
2110 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) 2082 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3cd13a30f732..e5ede6918050 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -436,18 +436,18 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
436 void __user *buffer, size_t *lenp, 436 void __user *buffer, size_t *lenp,
437 loff_t *ppos) 437 loff_t *ppos)
438{ 438{
439 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 439 int ret;
440 440 int perf_cpu = sysctl_perf_cpu_time_max_percent;
441 if (ret || !write)
442 return ret;
443
444 /* 441 /*
445 * If throttling is disabled don't allow the write: 442 * If throttling is disabled don't allow the write:
446 */ 443 */
447 if (sysctl_perf_cpu_time_max_percent == 100 || 444 if (write && (perf_cpu == 100 || perf_cpu == 0))
448 sysctl_perf_cpu_time_max_percent == 0)
449 return -EINVAL; 445 return -EINVAL;
450 446
447 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
448 if (ret || !write)
449 return ret;
450
451 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); 451 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
452 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; 452 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
453 update_perf_cpu_limits(); 453 update_perf_cpu_limits();
diff --git a/kernel/exit.c b/kernel/exit.c
index 3fb7be001964..2639a30a8aa5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -558,12 +558,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p)
558 return NULL; 558 return NULL;
559} 559}
560 560
561static struct task_struct *find_child_reaper(struct task_struct *father) 561static struct task_struct *find_child_reaper(struct task_struct *father,
562 struct list_head *dead)
562 __releases(&tasklist_lock) 563 __releases(&tasklist_lock)
563 __acquires(&tasklist_lock) 564 __acquires(&tasklist_lock)
564{ 565{
565 struct pid_namespace *pid_ns = task_active_pid_ns(father); 566 struct pid_namespace *pid_ns = task_active_pid_ns(father);
566 struct task_struct *reaper = pid_ns->child_reaper; 567 struct task_struct *reaper = pid_ns->child_reaper;
568 struct task_struct *p, *n;
567 569
568 if (likely(reaper != father)) 570 if (likely(reaper != father))
569 return reaper; 571 return reaper;
@@ -579,6 +581,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father)
579 panic("Attempted to kill init! exitcode=0x%08x\n", 581 panic("Attempted to kill init! exitcode=0x%08x\n",
580 father->signal->group_exit_code ?: father->exit_code); 582 father->signal->group_exit_code ?: father->exit_code);
581 } 583 }
584
585 list_for_each_entry_safe(p, n, dead, ptrace_entry) {
586 list_del_init(&p->ptrace_entry);
587 release_task(p);
588 }
589
582 zap_pid_ns_processes(pid_ns); 590 zap_pid_ns_processes(pid_ns);
583 write_lock_irq(&tasklist_lock); 591 write_lock_irq(&tasklist_lock);
584 592
@@ -668,7 +676,7 @@ static void forget_original_parent(struct task_struct *father,
668 exit_ptrace(father, dead); 676 exit_ptrace(father, dead);
669 677
670 /* Can drop and reacquire tasklist_lock */ 678 /* Can drop and reacquire tasklist_lock */
671 reaper = find_child_reaper(father); 679 reaper = find_child_reaper(father, dead);
672 if (list_empty(&father->children)) 680 if (list_empty(&father->children))
673 return; 681 return;
674 682
diff --git a/kernel/relay.c b/kernel/relay.c
index 04f248644e06..9e0f52375487 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -428,6 +428,8 @@ static struct dentry *relay_create_buf_file(struct rchan *chan,
428 dentry = chan->cb->create_buf_file(tmpname, chan->parent, 428 dentry = chan->cb->create_buf_file(tmpname, chan->parent,
429 S_IRUSR, buf, 429 S_IRUSR, buf,
430 &chan->is_global); 430 &chan->is_global);
431 if (IS_ERR(dentry))
432 dentry = NULL;
431 433
432 kfree(tmpname); 434 kfree(tmpname);
433 435
@@ -461,7 +463,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
461 dentry = chan->cb->create_buf_file(NULL, NULL, 463 dentry = chan->cb->create_buf_file(NULL, NULL,
462 S_IRUSR, buf, 464 S_IRUSR, buf,
463 &chan->is_global); 465 &chan->is_global);
464 if (WARN_ON(dentry)) 466 if (IS_ERR_OR_NULL(dentry))
465 goto free_buf; 467 goto free_buf;
466 } 468 }
467 469
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 50aa2aba69bd..310d0637fe4b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5980,6 +5980,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
5980 5980
5981#ifdef CONFIG_SCHED_SMT 5981#ifdef CONFIG_SCHED_SMT
5982DEFINE_STATIC_KEY_FALSE(sched_smt_present); 5982DEFINE_STATIC_KEY_FALSE(sched_smt_present);
5983EXPORT_SYMBOL_GPL(sched_smt_present);
5983 5984
5984static inline void set_idle_cores(int cpu, int val) 5985static inline void set_idle_cores(int cpu, int val)
5985{ 5986{
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index fe24de3fbc93..c3484785b179 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -124,6 +124,7 @@
124 * sampling of the aggregate task states would be. 124 * sampling of the aggregate task states would be.
125 */ 125 */
126 126
127#include "../workqueue_internal.h"
127#include <linux/sched/loadavg.h> 128#include <linux/sched/loadavg.h>
128#include <linux/seq_file.h> 129#include <linux/seq_file.h>
129#include <linux/proc_fs.h> 130#include <linux/proc_fs.h>
@@ -480,9 +481,6 @@ static void psi_group_change(struct psi_group *group, int cpu,
480 groupc->tasks[t]++; 481 groupc->tasks[t]++;
481 482
482 write_seqcount_end(&groupc->seq); 483 write_seqcount_end(&groupc->seq);
483
484 if (!delayed_work_pending(&group->clock_work))
485 schedule_delayed_work(&group->clock_work, PSI_FREQ);
486} 484}
487 485
488static struct psi_group *iterate_groups(struct task_struct *task, void **iter) 486static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
@@ -513,6 +511,7 @@ void psi_task_change(struct task_struct *task, int clear, int set)
513{ 511{
514 int cpu = task_cpu(task); 512 int cpu = task_cpu(task);
515 struct psi_group *group; 513 struct psi_group *group;
514 bool wake_clock = true;
516 void *iter = NULL; 515 void *iter = NULL;
517 516
518 if (!task->pid) 517 if (!task->pid)
@@ -530,8 +529,22 @@ void psi_task_change(struct task_struct *task, int clear, int set)
530 task->psi_flags &= ~clear; 529 task->psi_flags &= ~clear;
531 task->psi_flags |= set; 530 task->psi_flags |= set;
532 531
533 while ((group = iterate_groups(task, &iter))) 532 /*
533 * Periodic aggregation shuts off if there is a period of no
534 * task changes, so we wake it back up if necessary. However,
535 * don't do this if the task change is the aggregation worker
536 * itself going to sleep, or we'll ping-pong forever.
537 */
538 if (unlikely((clear & TSK_RUNNING) &&
539 (task->flags & PF_WQ_WORKER) &&
540 wq_worker_last_func(task) == psi_update_work))
541 wake_clock = false;
542
543 while ((group = iterate_groups(task, &iter))) {
534 psi_group_change(group, cpu, clear, set); 544 psi_group_change(group, cpu, clear, set);
545 if (wake_clock && !delayed_work_pending(&group->clock_work))
546 schedule_delayed_work(&group->clock_work, PSI_FREQ);
547 }
535} 548}
536 549
537void psi_memstall_tick(struct task_struct *task, int cpu) 550void psi_memstall_tick(struct task_struct *task, int cpu)
diff --git a/kernel/smp.c b/kernel/smp.c
index 163c451af42e..f4cf1b0bb3b8 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -584,8 +584,6 @@ void __init smp_init(void)
584 num_nodes, (num_nodes > 1 ? "s" : ""), 584 num_nodes, (num_nodes > 1 ? "s" : ""),
585 num_cpus, (num_cpus > 1 ? "s" : "")); 585 num_cpus, (num_cpus > 1 ? "s" : ""));
586 586
587 /* Final decision about SMT support */
588 cpu_smt_check_topology();
589 /* Any cleanup work */ 587 /* Any cleanup work */
590 smp_cpus_done(setup_max_cpus); 588 smp_cpus_done(setup_max_cpus);
591} 589}
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 8b068adb9da1..f1a86a0d881d 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1204,22 +1204,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *
1204 1204
1205int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 1205int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1206{ 1206{
1207 int err; 1207 return __bpf_probe_register(btp, prog);
1208
1209 mutex_lock(&bpf_event_mutex);
1210 err = __bpf_probe_register(btp, prog);
1211 mutex_unlock(&bpf_event_mutex);
1212 return err;
1213} 1208}
1214 1209
1215int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 1210int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1216{ 1211{
1217 int err; 1212 return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1218
1219 mutex_lock(&bpf_event_mutex);
1220 err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1221 mutex_unlock(&bpf_event_mutex);
1222 return err;
1223} 1213}
1224 1214
1225int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, 1215int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index e335576b9411..9bde07c06362 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -5,7 +5,7 @@
5 * Copyright (C) IBM Corporation, 2010-2012 5 * Copyright (C) IBM Corporation, 2010-2012
6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> 6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7 */ 7 */
8#define pr_fmt(fmt) "trace_kprobe: " fmt 8#define pr_fmt(fmt) "trace_uprobe: " fmt
9 9
10#include <linux/ctype.h> 10#include <linux/ctype.h>
11#include <linux/module.h> 11#include <linux/module.h>
@@ -160,6 +160,13 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
160 if (ret >= 0) { 160 if (ret >= 0) {
161 if (ret == maxlen) 161 if (ret == maxlen)
162 dst[ret - 1] = '\0'; 162 dst[ret - 1] = '\0';
163 else
164 /*
165 * Include the terminating null byte. In this case it
166 * was copied by strncpy_from_user but not accounted
167 * for in ret.
168 */
169 ret++;
163 *(u32 *)dest = make_data_loc(ret, (void *)dst - base); 170 *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
164 } 171 }
165 172
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 392be4b252f6..fc5d23d752a5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -910,6 +910,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
910} 910}
911 911
912/** 912/**
913 * wq_worker_last_func - retrieve worker's last work function
914 *
915 * Determine the last function a worker executed. This is called from
916 * the scheduler to get a worker's last known identity.
917 *
918 * CONTEXT:
919 * spin_lock_irq(rq->lock)
920 *
921 * Return:
922 * The last work function %current executed as a worker, NULL if it
923 * hasn't executed any work yet.
924 */
925work_func_t wq_worker_last_func(struct task_struct *task)
926{
927 struct worker *worker = kthread_data(task);
928
929 return worker->last_func;
930}
931
932/**
913 * worker_set_flags - set worker flags and adjust nr_running accordingly 933 * worker_set_flags - set worker flags and adjust nr_running accordingly
914 * @worker: self 934 * @worker: self
915 * @flags: flags to set 935 * @flags: flags to set
@@ -2184,6 +2204,9 @@ __acquires(&pool->lock)
2184 if (unlikely(cpu_intensive)) 2204 if (unlikely(cpu_intensive))
2185 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 2205 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2186 2206
2207 /* tag the worker for identification in schedule() */
2208 worker->last_func = worker->current_func;
2209
2187 /* we're done with it, release */ 2210 /* we're done with it, release */
2188 hash_del(&worker->hentry); 2211 hash_del(&worker->hentry);
2189 worker->current_work = NULL; 2212 worker->current_work = NULL;
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 66fbb5a9e633..cb68b03ca89a 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -53,6 +53,9 @@ struct worker {
53 53
54 /* used only by rescuers to point to the target workqueue */ 54 /* used only by rescuers to point to the target workqueue */
55 struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */ 55 struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */
56
57 /* used by the scheduler to determine a worker's last known identity */
58 work_func_t last_func;
56}; 59};
57 60
58/** 61/**
@@ -67,9 +70,10 @@ static inline struct worker *current_wq_worker(void)
67 70
68/* 71/*
69 * Scheduler hooks for concurrency managed workqueue. Only to be used from 72 * Scheduler hooks for concurrency managed workqueue. Only to be used from
70 * sched/core.c and workqueue.c. 73 * sched/ and workqueue.c.
71 */ 74 */
72void wq_worker_waking_up(struct task_struct *task, int cpu); 75void wq_worker_waking_up(struct task_struct *task, int cpu);
73struct task_struct *wq_worker_sleeping(struct task_struct *task); 76struct task_struct *wq_worker_sleeping(struct task_struct *task);
77work_func_t wq_worker_last_func(struct task_struct *task);
74 78
75#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ 79#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index d82d022111e0..9cf77628fc91 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -632,7 +632,7 @@ static void __kmod_config_free(struct test_config *config)
632 config->test_driver = NULL; 632 config->test_driver = NULL;
633 633
634 kfree_const(config->test_fs); 634 kfree_const(config->test_fs);
635 config->test_driver = NULL; 635 config->test_fs = NULL;
636} 636}
637 637
638static void kmod_config_free(struct kmod_test_device *test_dev) 638static void kmod_config_free(struct kmod_test_device *test_dev)
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 6a8ac7626797..e52f8cafe227 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -541,38 +541,45 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
541static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects, 541static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
542 int cnt, bool slow) 542 int cnt, bool slow)
543{ 543{
544 struct rhltable rhlt; 544 struct rhltable *rhlt;
545 unsigned int i, ret; 545 unsigned int i, ret;
546 const char *key; 546 const char *key;
547 int err = 0; 547 int err = 0;
548 548
549 err = rhltable_init(&rhlt, &test_rht_params_dup); 549 rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
550 if (WARN_ON(err)) 550 if (WARN_ON(!rhlt))
551 return -EINVAL;
552
553 err = rhltable_init(rhlt, &test_rht_params_dup);
554 if (WARN_ON(err)) {
555 kfree(rhlt);
551 return err; 556 return err;
557 }
552 558
553 for (i = 0; i < cnt; i++) { 559 for (i = 0; i < cnt; i++) {
554 rhl_test_objects[i].value.tid = i; 560 rhl_test_objects[i].value.tid = i;
555 key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead); 561 key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead);
556 key += test_rht_params_dup.key_offset; 562 key += test_rht_params_dup.key_offset;
557 563
558 if (slow) { 564 if (slow) {
559 err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key, 565 err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key,
560 &rhl_test_objects[i].list_node.rhead)); 566 &rhl_test_objects[i].list_node.rhead));
561 if (err == -EAGAIN) 567 if (err == -EAGAIN)
562 err = 0; 568 err = 0;
563 } else 569 } else
564 err = rhltable_insert(&rhlt, 570 err = rhltable_insert(rhlt,
565 &rhl_test_objects[i].list_node, 571 &rhl_test_objects[i].list_node,
566 test_rht_params_dup); 572 test_rht_params_dup);
567 if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast")) 573 if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
568 goto skip_print; 574 goto skip_print;
569 } 575 }
570 576
571 ret = print_ht(&rhlt); 577 ret = print_ht(rhlt);
572 WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast"); 578 WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
573 579
574skip_print: 580skip_print:
575 rhltable_destroy(&rhlt); 581 rhltable_destroy(rhlt);
582 kfree(rhlt);
576 583
577 return 0; 584 return 0;
578} 585}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index df2e7dd5ff17..afef61656c1e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4268,7 +4268,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4268 break; 4268 break;
4269 } 4269 }
4270 if (ret & VM_FAULT_RETRY) { 4270 if (ret & VM_FAULT_RETRY) {
4271 if (nonblocking) 4271 if (nonblocking &&
4272 !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4272 *nonblocking = 0; 4273 *nonblocking = 0;
4273 *nr_pages = 0; 4274 *nr_pages = 0;
4274 /* 4275 /*
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 0a14fcff70ed..e2bb06c1b45e 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -5,6 +5,7 @@ UBSAN_SANITIZE_generic.o := n
5UBSAN_SANITIZE_tags.o := n 5UBSAN_SANITIZE_tags.o := n
6KCOV_INSTRUMENT := n 6KCOV_INSTRUMENT := n
7 7
8CFLAGS_REMOVE_common.o = -pg
8CFLAGS_REMOVE_generic.o = -pg 9CFLAGS_REMOVE_generic.o = -pg
9# Function splitter causes unnecessary splits in __asan_load1/__asan_store1 10# Function splitter causes unnecessary splits in __asan_load1/__asan_store1
10# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 11# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 7c72f2a95785..831be5ff5f4d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -372,7 +372,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
372 if (fail || tk->addr_valid == 0) { 372 if (fail || tk->addr_valid == 0) {
373 pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", 373 pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
374 pfn, tk->tsk->comm, tk->tsk->pid); 374 pfn, tk->tsk->comm, tk->tsk->pid);
375 force_sig(SIGKILL, tk->tsk); 375 do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
376 tk->tsk, PIDTYPE_PID);
376 } 377 }
377 378
378 /* 379 /*
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b9a667d36c55..124e794867c5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1233,7 +1233,8 @@ static bool is_pageblock_removable_nolock(struct page *page)
1233bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) 1233bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
1234{ 1234{
1235 struct page *page = pfn_to_page(start_pfn); 1235 struct page *page = pfn_to_page(start_pfn);
1236 struct page *end_page = page + nr_pages; 1236 unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page)));
1237 struct page *end_page = pfn_to_page(end_pfn);
1237 1238
1238 /* Check the starting page of each pageblock within the range */ 1239 /* Check the starting page of each pageblock within the range */
1239 for (; page < end_page; page = next_active_pageblock(page)) { 1240 for (; page < end_page; page = next_active_pageblock(page)) {
@@ -1273,6 +1274,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1273 i++; 1274 i++;
1274 if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn) 1275 if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
1275 continue; 1276 continue;
1277 /* Check if we got outside of the zone */
1278 if (zone && !zone_spans_pfn(zone, pfn + i))
1279 return 0;
1276 page = pfn_to_page(pfn + i); 1280 page = pfn_to_page(pfn + i);
1277 if (zone && page_zone(page) != zone) 1281 if (zone && page_zone(page) != zone)
1278 return 0; 1282 return 0;
@@ -1301,23 +1305,27 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1301static unsigned long scan_movable_pages(unsigned long start, unsigned long end) 1305static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
1302{ 1306{
1303 unsigned long pfn; 1307 unsigned long pfn;
1304 struct page *page; 1308
1305 for (pfn = start; pfn < end; pfn++) { 1309 for (pfn = start; pfn < end; pfn++) {
1306 if (pfn_valid(pfn)) { 1310 struct page *page, *head;
1307 page = pfn_to_page(pfn); 1311 unsigned long skip;
1308 if (PageLRU(page)) 1312
1309 return pfn; 1313 if (!pfn_valid(pfn))
1310 if (__PageMovable(page)) 1314 continue;
1311 return pfn; 1315 page = pfn_to_page(pfn);
1312 if (PageHuge(page)) { 1316 if (PageLRU(page))
1313 if (hugepage_migration_supported(page_hstate(page)) && 1317 return pfn;
1314 page_huge_active(page)) 1318 if (__PageMovable(page))
1315 return pfn; 1319 return pfn;
1316 else 1320
1317 pfn = round_up(pfn + 1, 1321 if (!PageHuge(page))
1318 1 << compound_order(page)) - 1; 1322 continue;
1319 } 1323 head = compound_head(page);
1320 } 1324 if (hugepage_migration_supported(page_hstate(head)) &&
1325 page_huge_active(head))
1326 return pfn;
1327 skip = (1 << compound_order(head)) - (page - head);
1328 pfn += skip - 1;
1321 } 1329 }
1322 return 0; 1330 return 0;
1323} 1331}
@@ -1344,7 +1352,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1344{ 1352{
1345 unsigned long pfn; 1353 unsigned long pfn;
1346 struct page *page; 1354 struct page *page;
1347 int not_managed = 0;
1348 int ret = 0; 1355 int ret = 0;
1349 LIST_HEAD(source); 1356 LIST_HEAD(source);
1350 1357
@@ -1392,7 +1399,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1392 else 1399 else
1393 ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE); 1400 ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
1394 if (!ret) { /* Success */ 1401 if (!ret) { /* Success */
1395 put_page(page);
1396 list_add_tail(&page->lru, &source); 1402 list_add_tail(&page->lru, &source);
1397 if (!__PageMovable(page)) 1403 if (!__PageMovable(page))
1398 inc_node_page_state(page, NR_ISOLATED_ANON + 1404 inc_node_page_state(page, NR_ISOLATED_ANON +
@@ -1401,22 +1407,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1401 } else { 1407 } else {
1402 pr_warn("failed to isolate pfn %lx\n", pfn); 1408 pr_warn("failed to isolate pfn %lx\n", pfn);
1403 dump_page(page, "isolation failed"); 1409 dump_page(page, "isolation failed");
1404 put_page(page);
1405 /* Because we don't have big zone->lock. we should
1406 check this again here. */
1407 if (page_count(page)) {
1408 not_managed++;
1409 ret = -EBUSY;
1410 break;
1411 }
1412 } 1410 }
1411 put_page(page);
1413 } 1412 }
1414 if (!list_empty(&source)) { 1413 if (!list_empty(&source)) {
1415 if (not_managed) {
1416 putback_movable_pages(&source);
1417 goto out;
1418 }
1419
1420 /* Allocate a new page from the nearest neighbor node */ 1414 /* Allocate a new page from the nearest neighbor node */
1421 ret = migrate_pages(&source, new_node_page, NULL, 0, 1415 ret = migrate_pages(&source, new_node_page, NULL, 0,
1422 MIGRATE_SYNC, MR_MEMORY_HOTPLUG); 1416 MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
@@ -1429,7 +1423,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1429 putback_movable_pages(&source); 1423 putback_movable_pages(&source);
1430 } 1424 }
1431 } 1425 }
1432out: 1426
1433 return ret; 1427 return ret;
1434} 1428}
1435 1429
@@ -1576,7 +1570,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
1576 we assume this for now. .*/ 1570 we assume this for now. .*/
1577 if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, 1571 if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
1578 &valid_end)) { 1572 &valid_end)) {
1579 mem_hotplug_done();
1580 ret = -EINVAL; 1573 ret = -EINVAL;
1581 reason = "multizone range"; 1574 reason = "multizone range";
1582 goto failed_removal; 1575 goto failed_removal;
@@ -1591,7 +1584,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
1591 MIGRATE_MOVABLE, 1584 MIGRATE_MOVABLE,
1592 SKIP_HWPOISON | REPORT_FAILURE); 1585 SKIP_HWPOISON | REPORT_FAILURE);
1593 if (ret) { 1586 if (ret) {
1594 mem_hotplug_done();
1595 reason = "failure to isolate range"; 1587 reason = "failure to isolate range";
1596 goto failed_removal; 1588 goto failed_removal;
1597 } 1589 }
diff --git a/mm/migrate.c b/mm/migrate.c
index a16b15090df3..d4fd680be3b0 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -709,7 +709,6 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
709 /* Simple case, sync compaction */ 709 /* Simple case, sync compaction */
710 if (mode != MIGRATE_ASYNC) { 710 if (mode != MIGRATE_ASYNC) {
711 do { 711 do {
712 get_bh(bh);
713 lock_buffer(bh); 712 lock_buffer(bh);
714 bh = bh->b_this_page; 713 bh = bh->b_this_page;
715 714
@@ -720,18 +719,15 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
720 719
721 /* async case, we cannot block on lock_buffer so use trylock_buffer */ 720 /* async case, we cannot block on lock_buffer so use trylock_buffer */
722 do { 721 do {
723 get_bh(bh);
724 if (!trylock_buffer(bh)) { 722 if (!trylock_buffer(bh)) {
725 /* 723 /*
726 * We failed to lock the buffer and cannot stall in 724 * We failed to lock the buffer and cannot stall in
727 * async migration. Release the taken locks 725 * async migration. Release the taken locks
728 */ 726 */
729 struct buffer_head *failed_bh = bh; 727 struct buffer_head *failed_bh = bh;
730 put_bh(failed_bh);
731 bh = head; 728 bh = head;
732 while (bh != failed_bh) { 729 while (bh != failed_bh) {
733 unlock_buffer(bh); 730 unlock_buffer(bh);
734 put_bh(bh);
735 bh = bh->b_this_page; 731 bh = bh->b_this_page;
736 } 732 }
737 return false; 733 return false;
@@ -818,7 +814,6 @@ unlock_buffers:
818 bh = head; 814 bh = head;
819 do { 815 do {
820 unlock_buffer(bh); 816 unlock_buffer(bh);
821 put_bh(bh);
822 bh = bh->b_this_page; 817 bh = bh->b_this_page;
823 818
824 } while (bh != head); 819 } while (bh != head);
@@ -1135,10 +1130,13 @@ out:
1135 * If migration is successful, decrease refcount of the newpage 1130 * If migration is successful, decrease refcount of the newpage
1136 * which will not free the page because new page owner increased 1131 * which will not free the page because new page owner increased
1137 * refcounter. As well, if it is LRU page, add the page to LRU 1132 * refcounter. As well, if it is LRU page, add the page to LRU
1138 * list in here. 1133 * list in here. Use the old state of the isolated source page to
1134 * determine if we migrated a LRU page. newpage was already unlocked
1135 * and possibly modified by its owner - don't rely on the page
1136 * state.
1139 */ 1137 */
1140 if (rc == MIGRATEPAGE_SUCCESS) { 1138 if (rc == MIGRATEPAGE_SUCCESS) {
1141 if (unlikely(__PageMovable(newpage))) 1139 if (unlikely(!is_lru))
1142 put_page(newpage); 1140 put_page(newpage);
1143 else 1141 else
1144 putback_lru_page(newpage); 1142 putback_lru_page(newpage);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f0e8cd9edb1a..26ea8636758f 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -647,8 +647,8 @@ static int oom_reaper(void *unused)
647 647
648static void wake_oom_reaper(struct task_struct *tsk) 648static void wake_oom_reaper(struct task_struct *tsk)
649{ 649{
650 /* tsk is already queued? */ 650 /* mm is already queued? */
651 if (tsk == oom_reaper_list || tsk->oom_reaper_list) 651 if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
652 return; 652 return;
653 653
654 get_task_struct(tsk); 654 get_task_struct(tsk);
@@ -975,6 +975,13 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
975 * still freeing memory. 975 * still freeing memory.
976 */ 976 */
977 read_lock(&tasklist_lock); 977 read_lock(&tasklist_lock);
978
979 /*
980 * The task 'p' might have already exited before reaching here. The
981 * put_task_struct() will free task_struct 'p' while the loop still try
982 * to access the field of 'p', so, get an extra reference.
983 */
984 get_task_struct(p);
978 for_each_thread(p, t) { 985 for_each_thread(p, t) {
979 list_for_each_entry(child, &t->children, sibling) { 986 list_for_each_entry(child, &t->children, sibling) {
980 unsigned int child_points; 987 unsigned int child_points;
@@ -994,6 +1001,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
994 } 1001 }
995 } 1002 }
996 } 1003 }
1004 put_task_struct(p);
997 read_unlock(&tasklist_lock); 1005 read_unlock(&tasklist_lock);
998 1006
999 /* 1007 /*
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 7b80f6f8d4dc..a9b7919c9de5 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -104,6 +104,9 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
104 104
105 ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo); 105 ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
106 106
107 /* free the TID stats immediately */
108 cfg80211_sinfo_release_content(&sinfo);
109
107 dev_put(real_netdev); 110 dev_put(real_netdev);
108 if (ret == -ENOENT) { 111 if (ret == -ENOENT) {
109 /* Node is not associated anymore! It would be 112 /* Node is not associated anymore! It would be
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 28c1fb8d1af0..96ef7c70b4d9 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -20,7 +20,6 @@
20#include "main.h" 20#include "main.h"
21 21
22#include <linux/atomic.h> 22#include <linux/atomic.h>
23#include <linux/bug.h>
24#include <linux/byteorder/generic.h> 23#include <linux/byteorder/generic.h>
25#include <linux/errno.h> 24#include <linux/errno.h>
26#include <linux/gfp.h> 25#include <linux/gfp.h>
@@ -179,8 +178,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
179 parent_dev = __dev_get_by_index((struct net *)parent_net, 178 parent_dev = __dev_get_by_index((struct net *)parent_net,
180 dev_get_iflink(net_dev)); 179 dev_get_iflink(net_dev));
181 /* if we got a NULL parent_dev there is something broken.. */ 180 /* if we got a NULL parent_dev there is something broken.. */
182 if (WARN(!parent_dev, "Cannot find parent device")) 181 if (!parent_dev) {
182 pr_err("Cannot find parent device\n");
183 return false; 183 return false;
184 }
184 185
185 if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net)) 186 if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
186 return false; 187 return false;
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index b14fb3462af7..93a5975c21a4 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -222,6 +222,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
222 222
223 netif_trans_update(soft_iface); 223 netif_trans_update(soft_iface);
224 vid = batadv_get_vid(skb, 0); 224 vid = batadv_get_vid(skb, 0);
225
226 skb_reset_mac_header(skb);
225 ethhdr = eth_hdr(skb); 227 ethhdr = eth_hdr(skb);
226 228
227 proto = ethhdr->h_proto; 229 proto = ethhdr->h_proto;
diff --git a/net/core/filter.c b/net/core/filter.c
index 3a49f68eda10..b5a002d7b263 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4112,10 +4112,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4112 /* Only some socketops are supported */ 4112 /* Only some socketops are supported */
4113 switch (optname) { 4113 switch (optname) {
4114 case SO_RCVBUF: 4114 case SO_RCVBUF:
4115 val = min_t(u32, val, sysctl_rmem_max);
4115 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 4116 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
4116 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); 4117 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
4117 break; 4118 break;
4118 case SO_SNDBUF: 4119 case SO_SNDBUF:
4120 val = min_t(u32, val, sysctl_wmem_max);
4119 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 4121 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
4120 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); 4122 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
4121 break; 4123 break;
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index e76ed8df9f13..ae6f06e45737 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -554,8 +554,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
554 struct sk_psock *psock = container_of(gc, struct sk_psock, gc); 554 struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
555 555
556 /* No sk_callback_lock since already detached. */ 556 /* No sk_callback_lock since already detached. */
557 if (psock->parser.enabled) 557 strp_done(&psock->parser.strp);
558 strp_done(&psock->parser.strp);
559 558
560 cancel_work_sync(&psock->work); 559 cancel_work_sync(&psock->work);
561 560
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 6eb837a47b5c..baaaeb2b2c42 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -202,7 +202,7 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
202static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, 202static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
203 u8 pkt, u8 opt, u8 *val, u8 len) 203 u8 pkt, u8 opt, u8 *val, u8 len)
204{ 204{
205 if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL) 205 if (!ccid || !ccid->ccid_ops->ccid_hc_tx_parse_options)
206 return 0; 206 return 0;
207 return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len); 207 return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
208} 208}
@@ -214,7 +214,7 @@ static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
214static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk, 214static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
215 u8 pkt, u8 opt, u8 *val, u8 len) 215 u8 pkt, u8 opt, u8 *val, u8 len)
216{ 216{
217 if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL) 217 if (!ccid || !ccid->ccid_ops->ccid_hc_rx_parse_options)
218 return 0; 218 return 0;
219 return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len); 219 return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
220} 220}
diff --git a/net/dsa/master.c b/net/dsa/master.c
index 79e97d2f2d9b..c58f33931be1 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -248,6 +248,8 @@ static void dsa_master_reset_mtu(struct net_device *dev)
248 rtnl_unlock(); 248 rtnl_unlock();
249} 249}
250 250
251static struct lock_class_key dsa_master_addr_list_lock_key;
252
251int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) 253int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
252{ 254{
253 int ret; 255 int ret;
@@ -261,6 +263,8 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
261 wmb(); 263 wmb();
262 264
263 dev->dsa_ptr = cpu_dp; 265 dev->dsa_ptr = cpu_dp;
266 lockdep_set_class(&dev->addr_list_lock,
267 &dsa_master_addr_list_lock_key);
264 268
265 ret = dsa_master_ethtool_setup(dev); 269 ret = dsa_master_ethtool_setup(dev);
266 if (ret) 270 if (ret)
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 70395a0ae52e..2e5e7c04821b 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -140,11 +140,14 @@ static int dsa_slave_close(struct net_device *dev)
140static void dsa_slave_change_rx_flags(struct net_device *dev, int change) 140static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
141{ 141{
142 struct net_device *master = dsa_slave_to_master(dev); 142 struct net_device *master = dsa_slave_to_master(dev);
143 143 if (dev->flags & IFF_UP) {
144 if (change & IFF_ALLMULTI) 144 if (change & IFF_ALLMULTI)
145 dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1); 145 dev_set_allmulti(master,
146 if (change & IFF_PROMISC) 146 dev->flags & IFF_ALLMULTI ? 1 : -1);
147 dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1); 147 if (change & IFF_PROMISC)
148 dev_set_promiscuity(master,
149 dev->flags & IFF_PROMISC ? 1 : -1);
150 }
148} 151}
149 152
150static void dsa_slave_set_rx_mode(struct net_device *dev) 153static void dsa_slave_set_rx_mode(struct net_device *dev)
@@ -644,7 +647,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
644 int ret; 647 int ret;
645 648
646 /* Port's PHY and MAC both need to be EEE capable */ 649 /* Port's PHY and MAC both need to be EEE capable */
647 if (!dev->phydev && !dp->pl) 650 if (!dev->phydev || !dp->pl)
648 return -ENODEV; 651 return -ENODEV;
649 652
650 if (!ds->ops->set_mac_eee) 653 if (!ds->ops->set_mac_eee)
@@ -664,7 +667,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
664 int ret; 667 int ret;
665 668
666 /* Port's PHY and MAC both need to be EEE capable */ 669 /* Port's PHY and MAC both need to be EEE capable */
667 if (!dev->phydev && !dp->pl) 670 if (!dev->phydev || !dp->pl)
668 return -ENODEV; 671 return -ENODEV;
669 672
670 if (!ds->ops->get_mac_eee) 673 if (!ds->ops->get_mac_eee)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d1cef66820d3..ccee9411dae1 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1381,12 +1381,17 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1381{ 1381{
1382 struct ip_tunnel *t = netdev_priv(dev); 1382 struct ip_tunnel *t = netdev_priv(dev);
1383 struct ip_tunnel_parm *p = &t->parms; 1383 struct ip_tunnel_parm *p = &t->parms;
1384 __be16 o_flags = p->o_flags;
1385
1386 if ((t->erspan_ver == 1 || t->erspan_ver == 2) &&
1387 !t->collect_md)
1388 o_flags |= TUNNEL_KEY;
1384 1389
1385 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || 1390 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1386 nla_put_be16(skb, IFLA_GRE_IFLAGS, 1391 nla_put_be16(skb, IFLA_GRE_IFLAGS,
1387 gre_tnl_flags_to_gre_flags(p->i_flags)) || 1392 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
1388 nla_put_be16(skb, IFLA_GRE_OFLAGS, 1393 nla_put_be16(skb, IFLA_GRE_OFLAGS,
1389 gre_tnl_flags_to_gre_flags(p->o_flags)) || 1394 gre_tnl_flags_to_gre_flags(o_flags)) ||
1390 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || 1395 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1391 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || 1396 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1392 nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) || 1397 nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index e081e69d534e..65a4f96dc462 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -2098,12 +2098,17 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
2098{ 2098{
2099 struct ip6_tnl *t = netdev_priv(dev); 2099 struct ip6_tnl *t = netdev_priv(dev);
2100 struct __ip6_tnl_parm *p = &t->parms; 2100 struct __ip6_tnl_parm *p = &t->parms;
2101 __be16 o_flags = p->o_flags;
2102
2103 if ((p->erspan_ver == 1 || p->erspan_ver == 2) &&
2104 !p->collect_md)
2105 o_flags |= TUNNEL_KEY;
2101 2106
2102 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || 2107 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
2103 nla_put_be16(skb, IFLA_GRE_IFLAGS, 2108 nla_put_be16(skb, IFLA_GRE_IFLAGS,
2104 gre_tnl_flags_to_gre_flags(p->i_flags)) || 2109 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
2105 nla_put_be16(skb, IFLA_GRE_OFLAGS, 2110 nla_put_be16(skb, IFLA_GRE_OFLAGS,
2106 gre_tnl_flags_to_gre_flags(p->o_flags)) || 2111 gre_tnl_flags_to_gre_flags(o_flags)) ||
2107 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || 2112 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
2108 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || 2113 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
2109 nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) || 2114 nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 8b075f0bc351..6d0b1f3e927b 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
23 struct sock *sk = sk_to_full_sk(skb->sk); 23 struct sock *sk = sk_to_full_sk(skb->sk);
24 unsigned int hh_len; 24 unsigned int hh_len;
25 struct dst_entry *dst; 25 struct dst_entry *dst;
26 int strict = (ipv6_addr_type(&iph->daddr) &
27 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
26 struct flowi6 fl6 = { 28 struct flowi6 fl6 = {
27 .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if : 29 .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
28 rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0, 30 strict ? skb_dst(skb)->dev->ifindex : 0,
29 .flowi6_mark = skb->mark, 31 .flowi6_mark = skb->mark,
30 .flowi6_uid = sock_net_uid(net, sk), 32 .flowi6_uid = sock_net_uid(net, sk),
31 .daddr = iph->daddr, 33 .daddr = iph->daddr,
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 8181ee7e1e27..ee5403cbe655 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -146,6 +146,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
146 } else { 146 } else {
147 ip6_flow_hdr(hdr, 0, flowlabel); 147 ip6_flow_hdr(hdr, 0, flowlabel);
148 hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb)); 148 hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
149
150 memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
149 } 151 }
150 152
151 hdr->nexthdr = NEXTHDR_ROUTING; 153 hdr->nexthdr = NEXTHDR_ROUTING;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1e03305c0549..e8a1dabef803 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
546 } 546 }
547 547
548 err = 0; 548 err = 0;
549 if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len)) 549 if (__in6_dev_get(skb->dev) &&
550 !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
550 goto out; 551 goto out;
551 552
552 if (t->parms.iph.daddr == 0) 553 if (t->parms.iph.daddr == 0)
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 26f1d435696a..fed6becc5daf 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -83,8 +83,7 @@
83#define L2TP_SLFLAG_S 0x40000000 83#define L2TP_SLFLAG_S 0x40000000
84#define L2TP_SL_SEQ_MASK 0x00ffffff 84#define L2TP_SL_SEQ_MASK 0x00ffffff
85 85
86#define L2TP_HDR_SIZE_SEQ 10 86#define L2TP_HDR_SIZE_MAX 14
87#define L2TP_HDR_SIZE_NOSEQ 6
88 87
89/* Default trace flags */ 88/* Default trace flags */
90#define L2TP_DEFAULT_DEBUG_FLAGS 0 89#define L2TP_DEFAULT_DEBUG_FLAGS 0
@@ -808,7 +807,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
808 __skb_pull(skb, sizeof(struct udphdr)); 807 __skb_pull(skb, sizeof(struct udphdr));
809 808
810 /* Short packet? */ 809 /* Short packet? */
811 if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) { 810 if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
812 l2tp_info(tunnel, L2TP_MSG_DATA, 811 l2tp_info(tunnel, L2TP_MSG_DATA,
813 "%s: recv short packet (len=%d)\n", 812 "%s: recv short packet (len=%d)\n",
814 tunnel->name, skb->len); 813 tunnel->name, skb->len);
@@ -884,6 +883,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
884 goto error; 883 goto error;
885 } 884 }
886 885
886 if (tunnel->version == L2TP_HDR_VER_3 &&
887 l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
888 goto error;
889
887 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); 890 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
888 l2tp_session_dec_refcount(session); 891 l2tp_session_dec_refcount(session);
889 892
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 9c9afe94d389..b2ce90260c35 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -301,6 +301,26 @@ static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel)
301} 301}
302#endif 302#endif
303 303
304static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
305 unsigned char **ptr, unsigned char **optr)
306{
307 int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
308
309 if (opt_len > 0) {
310 int off = *ptr - *optr;
311
312 if (!pskb_may_pull(skb, off + opt_len))
313 return -1;
314
315 if (skb->data != *optr) {
316 *optr = skb->data;
317 *ptr = skb->data + off;
318 }
319 }
320
321 return 0;
322}
323
304#define l2tp_printk(ptr, type, func, fmt, ...) \ 324#define l2tp_printk(ptr, type, func, fmt, ...) \
305do { \ 325do { \
306 if (((ptr)->debug) & (type)) \ 326 if (((ptr)->debug) & (type)) \
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 35f6f86d4dcc..d4c60523c549 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -165,6 +165,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
165 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); 165 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
166 } 166 }
167 167
168 if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
169 goto discard_sess;
170
168 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); 171 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
169 l2tp_session_dec_refcount(session); 172 l2tp_session_dec_refcount(session);
170 173
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 237f1a4a0b0c..0ae6899edac0 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -178,6 +178,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
178 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); 178 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
179 } 179 }
180 180
181 if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
182 goto discard_sess;
183
181 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); 184 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
182 l2tp_session_dec_refcount(session); 185 l2tp_session_dec_refcount(session);
183 186
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 61c7ea9de2cc..8a49a74c0a37 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1945,9 +1945,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1945 int head_need, bool may_encrypt) 1945 int head_need, bool may_encrypt)
1946{ 1946{
1947 struct ieee80211_local *local = sdata->local; 1947 struct ieee80211_local *local = sdata->local;
1948 struct ieee80211_hdr *hdr;
1949 bool enc_tailroom;
1948 int tail_need = 0; 1950 int tail_need = 0;
1949 1951
1950 if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) { 1952 hdr = (struct ieee80211_hdr *) skb->data;
1953 enc_tailroom = may_encrypt &&
1954 (sdata->crypto_tx_tailroom_needed_cnt ||
1955 ieee80211_is_mgmt(hdr->frame_control));
1956
1957 if (enc_tailroom) {
1951 tail_need = IEEE80211_ENCRYPT_TAILROOM; 1958 tail_need = IEEE80211_ENCRYPT_TAILROOM;
1952 tail_need -= skb_tailroom(skb); 1959 tail_need -= skb_tailroom(skb);
1953 tail_need = max_t(int, tail_need, 0); 1960 tail_need = max_t(int, tail_need, 0);
@@ -1955,8 +1962,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1955 1962
1956 if (skb_cloned(skb) && 1963 if (skb_cloned(skb) &&
1957 (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) || 1964 (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
1958 !skb_clone_writable(skb, ETH_HLEN) || 1965 !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
1959 (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
1960 I802_DEBUG_INC(local->tx_expand_skb_head_cloned); 1966 I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1961 else if (head_need || tail_need) 1967 else if (head_need || tail_need)
1962 I802_DEBUG_INC(local->tx_expand_skb_head); 1968 I802_DEBUG_INC(local->tx_expand_skb_head);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 815956ac5a76..08ee03407ace 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1042,6 +1042,22 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
1042 } 1042 }
1043 1043
1044 if (nf_ct_key_equal(h, tuple, zone, net)) { 1044 if (nf_ct_key_equal(h, tuple, zone, net)) {
1045 /* Tuple is taken already, so caller will need to find
1046 * a new source port to use.
1047 *
1048 * Only exception:
1049 * If the *original tuples* are identical, then both
1050 * conntracks refer to the same flow.
1051 * This is a rare situation, it can occur e.g. when
1052 * more than one UDP packet is sent from same socket
1053 * in different threads.
1054 *
1055 * Let nf_ct_resolve_clash() deal with this later.
1056 */
1057 if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
1058 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
1059 continue;
1060
1045 NF_CT_STAT_INC_ATOMIC(net, found); 1061 NF_CT_STAT_INC_ATOMIC(net, found);
1046 rcu_read_unlock(); 1062 rcu_read_unlock();
1047 return 1; 1063 return 1;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index e92bedd09cde..5ca5ec8f3cf0 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -131,6 +131,23 @@ static void nft_trans_destroy(struct nft_trans *trans)
131 kfree(trans); 131 kfree(trans);
132} 132}
133 133
134static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
135{
136 struct net *net = ctx->net;
137 struct nft_trans *trans;
138
139 if (!nft_set_is_anonymous(set))
140 return;
141
142 list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
143 if (trans->msg_type == NFT_MSG_NEWSET &&
144 nft_trans_set(trans) == set) {
145 nft_trans_set_bound(trans) = true;
146 break;
147 }
148 }
149}
150
134static int nf_tables_register_hook(struct net *net, 151static int nf_tables_register_hook(struct net *net,
135 const struct nft_table *table, 152 const struct nft_table *table,
136 struct nft_chain *chain) 153 struct nft_chain *chain)
@@ -226,18 +243,6 @@ static int nft_delchain(struct nft_ctx *ctx)
226 return err; 243 return err;
227} 244}
228 245
229/* either expr ops provide both activate/deactivate, or neither */
230static bool nft_expr_check_ops(const struct nft_expr_ops *ops)
231{
232 if (!ops)
233 return true;
234
235 if (WARN_ON_ONCE((!ops->activate ^ !ops->deactivate)))
236 return false;
237
238 return true;
239}
240
241static void nft_rule_expr_activate(const struct nft_ctx *ctx, 246static void nft_rule_expr_activate(const struct nft_ctx *ctx,
242 struct nft_rule *rule) 247 struct nft_rule *rule)
243{ 248{
@@ -253,14 +258,15 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx,
253} 258}
254 259
255static void nft_rule_expr_deactivate(const struct nft_ctx *ctx, 260static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
256 struct nft_rule *rule) 261 struct nft_rule *rule,
262 enum nft_trans_phase phase)
257{ 263{
258 struct nft_expr *expr; 264 struct nft_expr *expr;
259 265
260 expr = nft_expr_first(rule); 266 expr = nft_expr_first(rule);
261 while (expr != nft_expr_last(rule) && expr->ops) { 267 while (expr != nft_expr_last(rule) && expr->ops) {
262 if (expr->ops->deactivate) 268 if (expr->ops->deactivate)
263 expr->ops->deactivate(ctx, expr); 269 expr->ops->deactivate(ctx, expr, phase);
264 270
265 expr = nft_expr_next(expr); 271 expr = nft_expr_next(expr);
266 } 272 }
@@ -311,7 +317,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
311 nft_trans_destroy(trans); 317 nft_trans_destroy(trans);
312 return err; 318 return err;
313 } 319 }
314 nft_rule_expr_deactivate(ctx, rule); 320 nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE);
315 321
316 return 0; 322 return 0;
317} 323}
@@ -1972,9 +1978,6 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
1972 */ 1978 */
1973int nft_register_expr(struct nft_expr_type *type) 1979int nft_register_expr(struct nft_expr_type *type)
1974{ 1980{
1975 if (!nft_expr_check_ops(type->ops))
1976 return -EINVAL;
1977
1978 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1981 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1979 if (type->family == NFPROTO_UNSPEC) 1982 if (type->family == NFPROTO_UNSPEC)
1980 list_add_tail_rcu(&type->list, &nf_tables_expressions); 1983 list_add_tail_rcu(&type->list, &nf_tables_expressions);
@@ -2122,10 +2125,6 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
2122 err = PTR_ERR(ops); 2125 err = PTR_ERR(ops);
2123 goto err1; 2126 goto err1;
2124 } 2127 }
2125 if (!nft_expr_check_ops(ops)) {
2126 err = -EINVAL;
2127 goto err1;
2128 }
2129 } else 2128 } else
2130 ops = type->ops; 2129 ops = type->ops;
2131 2130
@@ -2554,7 +2553,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
2554static void nf_tables_rule_release(const struct nft_ctx *ctx, 2553static void nf_tables_rule_release(const struct nft_ctx *ctx,
2555 struct nft_rule *rule) 2554 struct nft_rule *rule)
2556{ 2555{
2557 nft_rule_expr_deactivate(ctx, rule); 2556 nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
2558 nf_tables_rule_destroy(ctx, rule); 2557 nf_tables_rule_destroy(ctx, rule);
2559} 2558}
2560 2559
@@ -3760,39 +3759,30 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
3760bind: 3759bind:
3761 binding->chain = ctx->chain; 3760 binding->chain = ctx->chain;
3762 list_add_tail_rcu(&binding->list, &set->bindings); 3761 list_add_tail_rcu(&binding->list, &set->bindings);
3762 nft_set_trans_bind(ctx, set);
3763
3763 return 0; 3764 return 0;
3764} 3765}
3765EXPORT_SYMBOL_GPL(nf_tables_bind_set); 3766EXPORT_SYMBOL_GPL(nf_tables_bind_set);
3766 3767
3767void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
3768 struct nft_set_binding *binding)
3769{
3770 if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
3771 nft_is_active(ctx->net, set))
3772 list_add_tail_rcu(&set->list, &ctx->table->sets);
3773
3774 list_add_tail_rcu(&binding->list, &set->bindings);
3775}
3776EXPORT_SYMBOL_GPL(nf_tables_rebind_set);
3777
3778void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, 3768void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
3779 struct nft_set_binding *binding) 3769 struct nft_set_binding *binding, bool event)
3780{ 3770{
3781 list_del_rcu(&binding->list); 3771 list_del_rcu(&binding->list);
3782 3772
3783 if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && 3773 if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
3784 nft_is_active(ctx->net, set))
3785 list_del_rcu(&set->list); 3774 list_del_rcu(&set->list);
3775 if (event)
3776 nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
3777 GFP_KERNEL);
3778 }
3786} 3779}
3787EXPORT_SYMBOL_GPL(nf_tables_unbind_set); 3780EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
3788 3781
3789void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set) 3782void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
3790{ 3783{
3791 if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && 3784 if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
3792 nft_is_active(ctx->net, set)) {
3793 nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
3794 nft_set_destroy(set); 3785 nft_set_destroy(set);
3795 }
3796} 3786}
3797EXPORT_SYMBOL_GPL(nf_tables_destroy_set); 3787EXPORT_SYMBOL_GPL(nf_tables_destroy_set);
3798 3788
@@ -6621,6 +6611,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
6621 nf_tables_rule_notify(&trans->ctx, 6611 nf_tables_rule_notify(&trans->ctx,
6622 nft_trans_rule(trans), 6612 nft_trans_rule(trans),
6623 NFT_MSG_DELRULE); 6613 NFT_MSG_DELRULE);
6614 nft_rule_expr_deactivate(&trans->ctx,
6615 nft_trans_rule(trans),
6616 NFT_TRANS_COMMIT);
6624 break; 6617 break;
6625 case NFT_MSG_NEWSET: 6618 case NFT_MSG_NEWSET:
6626 nft_clear(net, nft_trans_set(trans)); 6619 nft_clear(net, nft_trans_set(trans));
@@ -6707,7 +6700,8 @@ static void nf_tables_abort_release(struct nft_trans *trans)
6707 nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); 6700 nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
6708 break; 6701 break;
6709 case NFT_MSG_NEWSET: 6702 case NFT_MSG_NEWSET:
6710 nft_set_destroy(nft_trans_set(trans)); 6703 if (!nft_trans_set_bound(trans))
6704 nft_set_destroy(nft_trans_set(trans));
6711 break; 6705 break;
6712 case NFT_MSG_NEWSETELEM: 6706 case NFT_MSG_NEWSETELEM:
6713 nft_set_elem_destroy(nft_trans_elem_set(trans), 6707 nft_set_elem_destroy(nft_trans_elem_set(trans),
@@ -6768,7 +6762,9 @@ static int __nf_tables_abort(struct net *net)
6768 case NFT_MSG_NEWRULE: 6762 case NFT_MSG_NEWRULE:
6769 trans->ctx.chain->use--; 6763 trans->ctx.chain->use--;
6770 list_del_rcu(&nft_trans_rule(trans)->list); 6764 list_del_rcu(&nft_trans_rule(trans)->list);
6771 nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans)); 6765 nft_rule_expr_deactivate(&trans->ctx,
6766 nft_trans_rule(trans),
6767 NFT_TRANS_ABORT);
6772 break; 6768 break;
6773 case NFT_MSG_DELRULE: 6769 case NFT_MSG_DELRULE:
6774 trans->ctx.chain->use++; 6770 trans->ctx.chain->use++;
@@ -6778,7 +6774,8 @@ static int __nf_tables_abort(struct net *net)
6778 break; 6774 break;
6779 case NFT_MSG_NEWSET: 6775 case NFT_MSG_NEWSET:
6780 trans->ctx.table->use--; 6776 trans->ctx.table->use--;
6781 list_del_rcu(&nft_trans_set(trans)->list); 6777 if (!nft_trans_set_bound(trans))
6778 list_del_rcu(&nft_trans_set(trans)->list);
6782 break; 6779 break;
6783 case NFT_MSG_DELSET: 6780 case NFT_MSG_DELSET:
6784 trans->ctx.table->use++; 6781 trans->ctx.table->use++;
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 5eb269428832..fe64df848365 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -61,6 +61,21 @@ static struct nft_compat_net *nft_compat_pernet(struct net *net)
61 return net_generic(net, nft_compat_net_id); 61 return net_generic(net, nft_compat_net_id);
62} 62}
63 63
64static void nft_xt_get(struct nft_xt *xt)
65{
66 /* refcount_inc() warns on 0 -> 1 transition, but we can't
67 * init the reference count to 1 in .select_ops -- we can't
68 * undo such an increase when another expression inside the same
69 * rule fails afterwards.
70 */
71 if (xt->listcnt == 0)
72 refcount_set(&xt->refcnt, 1);
73 else
74 refcount_inc(&xt->refcnt);
75
76 xt->listcnt++;
77}
78
64static bool nft_xt_put(struct nft_xt *xt) 79static bool nft_xt_put(struct nft_xt *xt)
65{ 80{
66 if (refcount_dec_and_test(&xt->refcnt)) { 81 if (refcount_dec_and_test(&xt->refcnt)) {
@@ -291,7 +306,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
291 return -EINVAL; 306 return -EINVAL;
292 307
293 nft_xt = container_of(expr->ops, struct nft_xt, ops); 308 nft_xt = container_of(expr->ops, struct nft_xt, ops);
294 refcount_inc(&nft_xt->refcnt); 309 nft_xt_get(nft_xt);
295 return 0; 310 return 0;
296} 311}
297 312
@@ -504,7 +519,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
504 return ret; 519 return ret;
505 520
506 nft_xt = container_of(expr->ops, struct nft_xt, ops); 521 nft_xt = container_of(expr->ops, struct nft_xt, ops);
507 refcount_inc(&nft_xt->refcnt); 522 nft_xt_get(nft_xt);
508 return 0; 523 return 0;
509} 524}
510 525
@@ -558,41 +573,16 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
558 __nft_match_destroy(ctx, expr, nft_expr_priv(expr)); 573 __nft_match_destroy(ctx, expr, nft_expr_priv(expr));
559} 574}
560 575
561static void nft_compat_activate(const struct nft_ctx *ctx,
562 const struct nft_expr *expr,
563 struct list_head *h)
564{
565 struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);
566
567 if (xt->listcnt == 0)
568 list_add(&xt->head, h);
569
570 xt->listcnt++;
571}
572
573static void nft_compat_activate_mt(const struct nft_ctx *ctx,
574 const struct nft_expr *expr)
575{
576 struct nft_compat_net *cn = nft_compat_pernet(ctx->net);
577
578 nft_compat_activate(ctx, expr, &cn->nft_match_list);
579}
580
581static void nft_compat_activate_tg(const struct nft_ctx *ctx,
582 const struct nft_expr *expr)
583{
584 struct nft_compat_net *cn = nft_compat_pernet(ctx->net);
585
586 nft_compat_activate(ctx, expr, &cn->nft_target_list);
587}
588
589static void nft_compat_deactivate(const struct nft_ctx *ctx, 576static void nft_compat_deactivate(const struct nft_ctx *ctx,
590 const struct nft_expr *expr) 577 const struct nft_expr *expr,
578 enum nft_trans_phase phase)
591{ 579{
592 struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops); 580 struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);
593 581
594 if (--xt->listcnt == 0) 582 if (phase == NFT_TRANS_ABORT || phase == NFT_TRANS_COMMIT) {
595 list_del_init(&xt->head); 583 if (--xt->listcnt == 0)
584 list_del_init(&xt->head);
585 }
596} 586}
597 587
598static void 588static void
@@ -848,7 +838,6 @@ nft_match_select_ops(const struct nft_ctx *ctx,
848 nft_match->ops.eval = nft_match_eval; 838 nft_match->ops.eval = nft_match_eval;
849 nft_match->ops.init = nft_match_init; 839 nft_match->ops.init = nft_match_init;
850 nft_match->ops.destroy = nft_match_destroy; 840 nft_match->ops.destroy = nft_match_destroy;
851 nft_match->ops.activate = nft_compat_activate_mt;
852 nft_match->ops.deactivate = nft_compat_deactivate; 841 nft_match->ops.deactivate = nft_compat_deactivate;
853 nft_match->ops.dump = nft_match_dump; 842 nft_match->ops.dump = nft_match_dump;
854 nft_match->ops.validate = nft_match_validate; 843 nft_match->ops.validate = nft_match_validate;
@@ -866,7 +855,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
866 855
867 nft_match->ops.size = matchsize; 856 nft_match->ops.size = matchsize;
868 857
869 nft_match->listcnt = 1; 858 nft_match->listcnt = 0;
870 list_add(&nft_match->head, &cn->nft_match_list); 859 list_add(&nft_match->head, &cn->nft_match_list);
871 860
872 return &nft_match->ops; 861 return &nft_match->ops;
@@ -953,7 +942,6 @@ nft_target_select_ops(const struct nft_ctx *ctx,
953 nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); 942 nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
954 nft_target->ops.init = nft_target_init; 943 nft_target->ops.init = nft_target_init;
955 nft_target->ops.destroy = nft_target_destroy; 944 nft_target->ops.destroy = nft_target_destroy;
956 nft_target->ops.activate = nft_compat_activate_tg;
957 nft_target->ops.deactivate = nft_compat_deactivate; 945 nft_target->ops.deactivate = nft_compat_deactivate;
958 nft_target->ops.dump = nft_target_dump; 946 nft_target->ops.dump = nft_target_dump;
959 nft_target->ops.validate = nft_target_validate; 947 nft_target->ops.validate = nft_target_validate;
@@ -964,7 +952,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
964 else 952 else
965 nft_target->ops.eval = nft_target_eval_xt; 953 nft_target->ops.eval = nft_target_eval_xt;
966 954
967 nft_target->listcnt = 1; 955 nft_target->listcnt = 0;
968 list_add(&nft_target->head, &cn->nft_target_list); 956 list_add(&nft_target->head, &cn->nft_target_list);
969 957
970 return &nft_target->ops; 958 return &nft_target->ops;
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 9658493d37d4..a8a74a16f9c4 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -234,20 +234,17 @@ err1:
234 return err; 234 return err;
235} 235}
236 236
237static void nft_dynset_activate(const struct nft_ctx *ctx,
238 const struct nft_expr *expr)
239{
240 struct nft_dynset *priv = nft_expr_priv(expr);
241
242 nf_tables_rebind_set(ctx, priv->set, &priv->binding);
243}
244
245static void nft_dynset_deactivate(const struct nft_ctx *ctx, 237static void nft_dynset_deactivate(const struct nft_ctx *ctx,
246 const struct nft_expr *expr) 238 const struct nft_expr *expr,
239 enum nft_trans_phase phase)
247{ 240{
248 struct nft_dynset *priv = nft_expr_priv(expr); 241 struct nft_dynset *priv = nft_expr_priv(expr);
249 242
250 nf_tables_unbind_set(ctx, priv->set, &priv->binding); 243 if (phase == NFT_TRANS_PREPARE)
244 return;
245
246 nf_tables_unbind_set(ctx, priv->set, &priv->binding,
247 phase == NFT_TRANS_COMMIT);
251} 248}
252 249
253static void nft_dynset_destroy(const struct nft_ctx *ctx, 250static void nft_dynset_destroy(const struct nft_ctx *ctx,
@@ -295,7 +292,6 @@ static const struct nft_expr_ops nft_dynset_ops = {
295 .eval = nft_dynset_eval, 292 .eval = nft_dynset_eval,
296 .init = nft_dynset_init, 293 .init = nft_dynset_init,
297 .destroy = nft_dynset_destroy, 294 .destroy = nft_dynset_destroy,
298 .activate = nft_dynset_activate,
299 .deactivate = nft_dynset_deactivate, 295 .deactivate = nft_dynset_deactivate,
300 .dump = nft_dynset_dump, 296 .dump = nft_dynset_dump,
301}; 297};
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 3e5ed787b1d4..5ec43124cbca 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -72,10 +72,14 @@ static void nft_immediate_activate(const struct nft_ctx *ctx,
72} 72}
73 73
74static void nft_immediate_deactivate(const struct nft_ctx *ctx, 74static void nft_immediate_deactivate(const struct nft_ctx *ctx,
75 const struct nft_expr *expr) 75 const struct nft_expr *expr,
76 enum nft_trans_phase phase)
76{ 77{
77 const struct nft_immediate_expr *priv = nft_expr_priv(expr); 78 const struct nft_immediate_expr *priv = nft_expr_priv(expr);
78 79
80 if (phase == NFT_TRANS_COMMIT)
81 return;
82
79 return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg)); 83 return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
80} 84}
81 85
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 227b2b15a19c..14496da5141d 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -121,20 +121,17 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
121 return 0; 121 return 0;
122} 122}
123 123
124static void nft_lookup_activate(const struct nft_ctx *ctx,
125 const struct nft_expr *expr)
126{
127 struct nft_lookup *priv = nft_expr_priv(expr);
128
129 nf_tables_rebind_set(ctx, priv->set, &priv->binding);
130}
131
132static void nft_lookup_deactivate(const struct nft_ctx *ctx, 124static void nft_lookup_deactivate(const struct nft_ctx *ctx,
133 const struct nft_expr *expr) 125 const struct nft_expr *expr,
126 enum nft_trans_phase phase)
134{ 127{
135 struct nft_lookup *priv = nft_expr_priv(expr); 128 struct nft_lookup *priv = nft_expr_priv(expr);
136 129
137 nf_tables_unbind_set(ctx, priv->set, &priv->binding); 130 if (phase == NFT_TRANS_PREPARE)
131 return;
132
133 nf_tables_unbind_set(ctx, priv->set, &priv->binding,
134 phase == NFT_TRANS_COMMIT);
138} 135}
139 136
140static void nft_lookup_destroy(const struct nft_ctx *ctx, 137static void nft_lookup_destroy(const struct nft_ctx *ctx,
@@ -225,7 +222,6 @@ static const struct nft_expr_ops nft_lookup_ops = {
225 .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)), 222 .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
226 .eval = nft_lookup_eval, 223 .eval = nft_lookup_eval,
227 .init = nft_lookup_init, 224 .init = nft_lookup_init,
228 .activate = nft_lookup_activate,
229 .deactivate = nft_lookup_deactivate, 225 .deactivate = nft_lookup_deactivate,
230 .destroy = nft_lookup_destroy, 226 .destroy = nft_lookup_destroy,
231 .dump = nft_lookup_dump, 227 .dump = nft_lookup_dump,
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index c1f2adf198a0..79ef074c18ca 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -156,20 +156,17 @@ nla_put_failure:
156 return -1; 156 return -1;
157} 157}
158 158
159static void nft_objref_map_activate(const struct nft_ctx *ctx,
160 const struct nft_expr *expr)
161{
162 struct nft_objref_map *priv = nft_expr_priv(expr);
163
164 nf_tables_rebind_set(ctx, priv->set, &priv->binding);
165}
166
167static void nft_objref_map_deactivate(const struct nft_ctx *ctx, 159static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
168 const struct nft_expr *expr) 160 const struct nft_expr *expr,
161 enum nft_trans_phase phase)
169{ 162{
170 struct nft_objref_map *priv = nft_expr_priv(expr); 163 struct nft_objref_map *priv = nft_expr_priv(expr);
171 164
172 nf_tables_unbind_set(ctx, priv->set, &priv->binding); 165 if (phase == NFT_TRANS_PREPARE)
166 return;
167
168 nf_tables_unbind_set(ctx, priv->set, &priv->binding,
169 phase == NFT_TRANS_COMMIT);
173} 170}
174 171
175static void nft_objref_map_destroy(const struct nft_ctx *ctx, 172static void nft_objref_map_destroy(const struct nft_ctx *ctx,
@@ -186,7 +183,6 @@ static const struct nft_expr_ops nft_objref_map_ops = {
186 .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)), 183 .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
187 .eval = nft_objref_map_eval, 184 .eval = nft_objref_map_eval,
188 .init = nft_objref_map_init, 185 .init = nft_objref_map_init,
189 .activate = nft_objref_map_activate,
190 .deactivate = nft_objref_map_deactivate, 186 .deactivate = nft_objref_map_deactivate,
191 .destroy = nft_objref_map_destroy, 187 .destroy = nft_objref_map_destroy,
192 .dump = nft_objref_map_dump, 188 .dump = nft_objref_map_dump,
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 762d2c6788a3..17c9d9f0c848 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -78,10 +78,10 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
78 __rds_create_bind_key(key, addr, port, scope_id); 78 __rds_create_bind_key(key, addr, port, scope_id);
79 rcu_read_lock(); 79 rcu_read_lock();
80 rs = rhashtable_lookup(&bind_hash_table, key, ht_parms); 80 rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
81 if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) 81 if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) ||
82 rds_sock_addref(rs); 82 !refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt)))
83 else
84 rs = NULL; 83 rs = NULL;
84
85 rcu_read_unlock(); 85 rcu_read_unlock();
86 86
87 rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, 87 rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index eaf19ebaa964..3f7bb11f3290 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -596,6 +596,7 @@ error_requeue_call:
596 } 596 }
597error_no_call: 597error_no_call:
598 release_sock(&rx->sk); 598 release_sock(&rx->sk);
599error_trace:
599 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret); 600 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
600 return ret; 601 return ret;
601 602
@@ -604,7 +605,7 @@ wait_interrupted:
604wait_error: 605wait_error:
605 finish_wait(sk_sleep(&rx->sk), &wait); 606 finish_wait(sk_sleep(&rx->sk), &wait);
606 call = NULL; 607 call = NULL;
607 goto error_no_call; 608 goto error_trace;
608} 609}
609 610
610/** 611/**
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index c5d1db3a3db7..6a341287a527 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1385,7 +1385,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
1385 if (!tc_skip_hw(fnew->flags)) { 1385 if (!tc_skip_hw(fnew->flags)) {
1386 err = fl_hw_replace_filter(tp, fnew, extack); 1386 err = fl_hw_replace_filter(tp, fnew, extack);
1387 if (err) 1387 if (err)
1388 goto errout_mask; 1388 goto errout_mask_ht;
1389 } 1389 }
1390 1390
1391 if (!tc_in_hw(fnew->flags)) 1391 if (!tc_in_hw(fnew->flags))
@@ -1415,6 +1415,10 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
1415 kfree(mask); 1415 kfree(mask);
1416 return 0; 1416 return 0;
1417 1417
1418errout_mask_ht:
1419 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
1420 fnew->mask->filter_ht_params);
1421
1418errout_mask: 1422errout_mask:
1419 fl_mask_put(head, fnew->mask, false); 1423 fl_mask_put(head, fnew->mask, false);
1420 1424
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9644bdc8e85c..a78e55a1bb9c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2027,7 +2027,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
2027 struct sctp_endpoint *ep = sctp_sk(sk)->ep; 2027 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
2028 struct sctp_transport *transport = NULL; 2028 struct sctp_transport *transport = NULL;
2029 struct sctp_sndrcvinfo _sinfo, *sinfo; 2029 struct sctp_sndrcvinfo _sinfo, *sinfo;
2030 struct sctp_association *asoc; 2030 struct sctp_association *asoc, *tmp;
2031 struct sctp_cmsgs cmsgs; 2031 struct sctp_cmsgs cmsgs;
2032 union sctp_addr *daddr; 2032 union sctp_addr *daddr;
2033 bool new = false; 2033 bool new = false;
@@ -2053,7 +2053,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
2053 2053
2054 /* SCTP_SENDALL process */ 2054 /* SCTP_SENDALL process */
2055 if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) { 2055 if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) {
2056 list_for_each_entry(asoc, &ep->asocs, asocs) { 2056 list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) {
2057 err = sctp_sendmsg_check_sflags(asoc, sflags, msg, 2057 err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
2058 msg_len); 2058 msg_len);
2059 if (err == 0) 2059 if (err == 0)
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 80e0ae5534ec..f24633114dfd 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -84,6 +84,19 @@ static void fa_zero(struct flex_array *fa, size_t index, size_t count)
84 } 84 }
85} 85}
86 86
87static size_t fa_index(struct flex_array *fa, void *elem, size_t count)
88{
89 size_t index = 0;
90
91 while (count--) {
92 if (elem == flex_array_get(fa, index))
93 break;
94 index++;
95 }
96
97 return index;
98}
99
87/* Migrates chunks from stream queues to new stream queues if needed, 100/* Migrates chunks from stream queues to new stream queues if needed,
88 * but not across associations. Also, removes those chunks to streams 101 * but not across associations. Also, removes those chunks to streams
89 * higher than the new max. 102 * higher than the new max.
@@ -147,6 +160,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
147 160
148 if (stream->out) { 161 if (stream->out) {
149 fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt)); 162 fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt));
163 if (stream->out_curr) {
164 size_t index = fa_index(stream->out, stream->out_curr,
165 stream->outcnt);
166
167 BUG_ON(index == stream->outcnt);
168 stream->out_curr = flex_array_get(out, index);
169 }
150 fa_free(stream->out); 170 fa_free(stream->out);
151 } 171 }
152 172
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 48ea7669161f..46fa9f3016cc 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1523,6 +1523,11 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1523 1523
1524 smc = smc_sk(sk); 1524 smc = smc_sk(sk);
1525 lock_sock(sk); 1525 lock_sock(sk);
1526 if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
1527 /* socket was connected before, no more data to read */
1528 rc = 0;
1529 goto out;
1530 }
1526 if ((sk->sk_state == SMC_INIT) || 1531 if ((sk->sk_state == SMC_INIT) ||
1527 (sk->sk_state == SMC_LISTEN) || 1532 (sk->sk_state == SMC_LISTEN) ||
1528 (sk->sk_state == SMC_CLOSED)) 1533 (sk->sk_state == SMC_CLOSED))
@@ -1858,7 +1863,11 @@ static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
1858 1863
1859 smc = smc_sk(sk); 1864 smc = smc_sk(sk);
1860 lock_sock(sk); 1865 lock_sock(sk);
1861 1866 if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
1867 /* socket was connected before, no more data to read */
1868 rc = 0;
1869 goto out;
1870 }
1862 if (sk->sk_state == SMC_INIT || 1871 if (sk->sk_state == SMC_INIT ||
1863 sk->sk_state == SMC_LISTEN || 1872 sk->sk_state == SMC_LISTEN ||
1864 sk->sk_state == SMC_CLOSED) 1873 sk->sk_state == SMC_CLOSED)
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index db83332ac1c8..a712c9f8699b 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -21,13 +21,6 @@
21 21
22/********************************** send *************************************/ 22/********************************** send *************************************/
23 23
24struct smc_cdc_tx_pend {
25 struct smc_connection *conn; /* socket connection */
26 union smc_host_cursor cursor; /* tx sndbuf cursor sent */
27 union smc_host_cursor p_cursor; /* rx RMBE cursor produced */
28 u16 ctrl_seq; /* conn. tx sequence # */
29};
30
31/* handler for send/transmission completion of a CDC msg */ 24/* handler for send/transmission completion of a CDC msg */
32static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, 25static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
33 struct smc_link *link, 26 struct smc_link *link,
@@ -61,12 +54,14 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
61 54
62int smc_cdc_get_free_slot(struct smc_connection *conn, 55int smc_cdc_get_free_slot(struct smc_connection *conn,
63 struct smc_wr_buf **wr_buf, 56 struct smc_wr_buf **wr_buf,
57 struct smc_rdma_wr **wr_rdma_buf,
64 struct smc_cdc_tx_pend **pend) 58 struct smc_cdc_tx_pend **pend)
65{ 59{
66 struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; 60 struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
67 int rc; 61 int rc;
68 62
69 rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf, 63 rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
64 wr_rdma_buf,
70 (struct smc_wr_tx_pend_priv **)pend); 65 (struct smc_wr_tx_pend_priv **)pend);
71 if (!conn->alert_token_local) 66 if (!conn->alert_token_local)
72 /* abnormal termination */ 67 /* abnormal termination */
@@ -96,6 +91,7 @@ int smc_cdc_msg_send(struct smc_connection *conn,
96 struct smc_wr_buf *wr_buf, 91 struct smc_wr_buf *wr_buf,
97 struct smc_cdc_tx_pend *pend) 92 struct smc_cdc_tx_pend *pend)
98{ 93{
94 union smc_host_cursor cfed;
99 struct smc_link *link; 95 struct smc_link *link;
100 int rc; 96 int rc;
101 97
@@ -107,10 +103,10 @@ int smc_cdc_msg_send(struct smc_connection *conn,
107 conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; 103 conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
108 smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, 104 smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf,
109 &conn->local_tx_ctrl, conn); 105 &conn->local_tx_ctrl, conn);
106 smc_curs_copy(&cfed, &((struct smc_host_cdc_msg *)wr_buf)->cons, conn);
110 rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); 107 rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
111 if (!rc) 108 if (!rc)
112 smc_curs_copy(&conn->rx_curs_confirmed, 109 smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
113 &conn->local_tx_ctrl.cons, conn);
114 110
115 return rc; 111 return rc;
116} 112}
@@ -121,11 +117,14 @@ static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
121 struct smc_wr_buf *wr_buf; 117 struct smc_wr_buf *wr_buf;
122 int rc; 118 int rc;
123 119
124 rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend); 120 rc = smc_cdc_get_free_slot(conn, &wr_buf, NULL, &pend);
125 if (rc) 121 if (rc)
126 return rc; 122 return rc;
127 123
128 return smc_cdc_msg_send(conn, wr_buf, pend); 124 spin_lock_bh(&conn->send_lock);
125 rc = smc_cdc_msg_send(conn, wr_buf, pend);
126 spin_unlock_bh(&conn->send_lock);
127 return rc;
129} 128}
130 129
131int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) 130int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index e8c214b992b6..e3b6b367f3b6 100644
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -160,7 +160,9 @@ static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
160#endif 160#endif
161} 161}
162 162
163/* calculate cursor difference between old and new, where old <= new */ 163/* calculate cursor difference between old and new, where old <= new and
164 * difference cannot exceed size
165 */
164static inline int smc_curs_diff(unsigned int size, 166static inline int smc_curs_diff(unsigned int size,
165 union smc_host_cursor *old, 167 union smc_host_cursor *old,
166 union smc_host_cursor *new) 168 union smc_host_cursor *new)
@@ -185,6 +187,28 @@ static inline int smc_curs_comp(unsigned int size,
185 return smc_curs_diff(size, old, new); 187 return smc_curs_diff(size, old, new);
186} 188}
187 189
190/* calculate cursor difference between old and new, where old <= new and
191 * difference may exceed size
192 */
193static inline int smc_curs_diff_large(unsigned int size,
194 union smc_host_cursor *old,
195 union smc_host_cursor *new)
196{
197 if (old->wrap < new->wrap)
198 return min_t(int,
199 (size - old->count) + new->count +
200 (new->wrap - old->wrap - 1) * size,
201 size);
202
203 if (old->wrap > new->wrap) /* wrap has switched from 0xffff to 0x0000 */
204 return min_t(int,
205 (size - old->count) + new->count +
206 (new->wrap + 0xffff - old->wrap) * size,
207 size);
208
209 return max_t(int, 0, (new->count - old->count));
210}
211
188static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer, 212static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer,
189 union smc_host_cursor *local, 213 union smc_host_cursor *local,
190 struct smc_connection *conn) 214 struct smc_connection *conn)
@@ -271,10 +295,16 @@ static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
271 smcr_cdc_msg_to_host(local, peer, conn); 295 smcr_cdc_msg_to_host(local, peer, conn);
272} 296}
273 297
274struct smc_cdc_tx_pend; 298struct smc_cdc_tx_pend {
299 struct smc_connection *conn; /* socket connection */
300 union smc_host_cursor cursor; /* tx sndbuf cursor sent */
301 union smc_host_cursor p_cursor; /* rx RMBE cursor produced */
302 u16 ctrl_seq; /* conn. tx sequence # */
303};
275 304
276int smc_cdc_get_free_slot(struct smc_connection *conn, 305int smc_cdc_get_free_slot(struct smc_connection *conn,
277 struct smc_wr_buf **wr_buf, 306 struct smc_wr_buf **wr_buf,
307 struct smc_rdma_wr **wr_rdma_buf,
278 struct smc_cdc_tx_pend **pend); 308 struct smc_cdc_tx_pend **pend);
279void smc_cdc_tx_dismiss_slots(struct smc_connection *conn); 309void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
280int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, 310int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 776e9dfc915d..d53fd588d1f5 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -378,7 +378,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
378 vec.iov_len = sizeof(struct smc_clc_msg_decline); 378 vec.iov_len = sizeof(struct smc_clc_msg_decline);
379 len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, 379 len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
380 sizeof(struct smc_clc_msg_decline)); 380 sizeof(struct smc_clc_msg_decline));
381 if (len < sizeof(struct smc_clc_msg_decline)) 381 if (len < 0 || len < sizeof(struct smc_clc_msg_decline))
382 len = -EPROTO; 382 len = -EPROTO;
383 return len > 0 ? 0 : len; 383 return len > 0 ? 0 : len;
384} 384}
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 0e60dd741698..2ad37e998509 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -345,14 +345,7 @@ static void smc_close_passive_work(struct work_struct *work)
345 345
346 switch (sk->sk_state) { 346 switch (sk->sk_state) {
347 case SMC_INIT: 347 case SMC_INIT:
348 if (atomic_read(&conn->bytes_to_rcv) || 348 sk->sk_state = SMC_APPCLOSEWAIT1;
349 (rxflags->peer_done_writing &&
350 !smc_cdc_rxed_any_close(conn))) {
351 sk->sk_state = SMC_APPCLOSEWAIT1;
352 } else {
353 sk->sk_state = SMC_CLOSED;
354 sock_put(sk); /* passive closing */
355 }
356 break; 349 break;
357 case SMC_ACTIVE: 350 case SMC_ACTIVE:
358 sk->sk_state = SMC_APPCLOSEWAIT1; 351 sk->sk_state = SMC_APPCLOSEWAIT1;
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index a1a6d351ae1b..349d789a9728 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -127,6 +127,8 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
127{ 127{
128 struct smc_link_group *lgr = conn->lgr; 128 struct smc_link_group *lgr = conn->lgr;
129 129
130 if (!lgr)
131 return;
130 write_lock_bh(&lgr->conns_lock); 132 write_lock_bh(&lgr->conns_lock);
131 if (conn->alert_token_local) { 133 if (conn->alert_token_local) {
132 __smc_lgr_unregister_conn(conn); 134 __smc_lgr_unregister_conn(conn);
@@ -299,13 +301,13 @@ static void smc_buf_unuse(struct smc_connection *conn,
299 conn->sndbuf_desc->used = 0; 301 conn->sndbuf_desc->used = 0;
300 if (conn->rmb_desc) { 302 if (conn->rmb_desc) {
301 if (!conn->rmb_desc->regerr) { 303 if (!conn->rmb_desc->regerr) {
302 conn->rmb_desc->used = 0;
303 if (!lgr->is_smcd) { 304 if (!lgr->is_smcd) {
304 /* unregister rmb with peer */ 305 /* unregister rmb with peer */
305 smc_llc_do_delete_rkey( 306 smc_llc_do_delete_rkey(
306 &lgr->lnk[SMC_SINGLE_LINK], 307 &lgr->lnk[SMC_SINGLE_LINK],
307 conn->rmb_desc); 308 conn->rmb_desc);
308 } 309 }
310 conn->rmb_desc->used = 0;
309 } else { 311 } else {
310 /* buf registration failed, reuse not possible */ 312 /* buf registration failed, reuse not possible */
311 write_lock_bh(&lgr->rmbs_lock); 313 write_lock_bh(&lgr->rmbs_lock);
@@ -629,6 +631,8 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
629 local_contact = SMC_REUSE_CONTACT; 631 local_contact = SMC_REUSE_CONTACT;
630 conn->lgr = lgr; 632 conn->lgr = lgr;
631 smc_lgr_register_conn(conn); /* add smc conn to lgr */ 633 smc_lgr_register_conn(conn); /* add smc conn to lgr */
634 if (delayed_work_pending(&lgr->free_work))
635 cancel_delayed_work(&lgr->free_work);
632 write_unlock_bh(&lgr->conns_lock); 636 write_unlock_bh(&lgr->conns_lock);
633 break; 637 break;
634 } 638 }
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index b00287989a3d..8806d2afa6ed 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -52,6 +52,24 @@ enum smc_wr_reg_state {
52 FAILED /* ib_wr_reg_mr response: failure */ 52 FAILED /* ib_wr_reg_mr response: failure */
53}; 53};
54 54
55struct smc_rdma_sge { /* sges for RDMA writes */
56 struct ib_sge wr_tx_rdma_sge[SMC_IB_MAX_SEND_SGE];
57};
58
59#define SMC_MAX_RDMA_WRITES 2 /* max. # of RDMA writes per
60 * message send
61 */
62
63struct smc_rdma_sges { /* sges per message send */
64 struct smc_rdma_sge tx_rdma_sge[SMC_MAX_RDMA_WRITES];
65};
66
67struct smc_rdma_wr { /* work requests per message
68 * send
69 */
70 struct ib_rdma_wr wr_tx_rdma[SMC_MAX_RDMA_WRITES];
71};
72
55struct smc_link { 73struct smc_link {
56 struct smc_ib_device *smcibdev; /* ib-device */ 74 struct smc_ib_device *smcibdev; /* ib-device */
57 u8 ibport; /* port - values 1 | 2 */ 75 u8 ibport; /* port - values 1 | 2 */
@@ -64,6 +82,8 @@ struct smc_link {
64 struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */ 82 struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */
65 struct ib_send_wr *wr_tx_ibs; /* WR send meta data */ 83 struct ib_send_wr *wr_tx_ibs; /* WR send meta data */
66 struct ib_sge *wr_tx_sges; /* WR send gather meta data */ 84 struct ib_sge *wr_tx_sges; /* WR send gather meta data */
85 struct smc_rdma_sges *wr_tx_rdma_sges;/*RDMA WRITE gather meta data*/
86 struct smc_rdma_wr *wr_tx_rdmas; /* WR RDMA WRITE */
67 struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */ 87 struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */
68 /* above four vectors have wr_tx_cnt elements and use the same index */ 88 /* above four vectors have wr_tx_cnt elements and use the same index */
69 dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */ 89 dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index e519ef29c0ff..76487a16934e 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -289,8 +289,8 @@ int smc_ib_create_protection_domain(struct smc_link *lnk)
289 289
290static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) 290static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
291{ 291{
292 struct smc_ib_device *smcibdev = 292 struct smc_link *lnk = (struct smc_link *)priv;
293 (struct smc_ib_device *)ibevent->device; 293 struct smc_ib_device *smcibdev = lnk->smcibdev;
294 u8 port_idx; 294 u8 port_idx;
295 295
296 switch (ibevent->event) { 296 switch (ibevent->event) {
@@ -298,7 +298,7 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
298 case IB_EVENT_GID_CHANGE: 298 case IB_EVENT_GID_CHANGE:
299 case IB_EVENT_PORT_ERR: 299 case IB_EVENT_PORT_ERR:
300 case IB_EVENT_QP_ACCESS_ERR: 300 case IB_EVENT_QP_ACCESS_ERR:
301 port_idx = ibevent->element.port_num - 1; 301 port_idx = ibevent->element.qp->port - 1;
302 set_bit(port_idx, &smcibdev->port_event_mask); 302 set_bit(port_idx, &smcibdev->port_event_mask);
303 schedule_work(&smcibdev->port_event_work); 303 schedule_work(&smcibdev->port_event_work);
304 break; 304 break;
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index a6d3623d06f4..4fd60c522802 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -166,7 +166,8 @@ static int smc_llc_add_pending_send(struct smc_link *link,
166{ 166{
167 int rc; 167 int rc;
168 168
169 rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, pend); 169 rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
170 pend);
170 if (rc < 0) 171 if (rc < 0)
171 return rc; 172 return rc;
172 BUILD_BUG_ON_MSG( 173 BUILD_BUG_ON_MSG(
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 7cb3e4f07c10..632c3109dee5 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -27,7 +27,7 @@
27static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { 27static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = {
28 [SMC_PNETID_NAME] = { 28 [SMC_PNETID_NAME] = {
29 .type = NLA_NUL_STRING, 29 .type = NLA_NUL_STRING,
30 .len = SMC_MAX_PNETID_LEN - 1 30 .len = SMC_MAX_PNETID_LEN
31 }, 31 },
32 [SMC_PNETID_ETHNAME] = { 32 [SMC_PNETID_ETHNAME] = {
33 .type = NLA_NUL_STRING, 33 .type = NLA_NUL_STRING,
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index d8366ed51757..f93f3580c100 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -165,12 +165,11 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
165 conn->local_tx_ctrl.prod_flags.urg_data_pending = 1; 165 conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;
166 166
167 if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) { 167 if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
168 if (send_done)
169 return send_done;
168 rc = smc_tx_wait(smc, msg->msg_flags); 170 rc = smc_tx_wait(smc, msg->msg_flags);
169 if (rc) { 171 if (rc)
170 if (send_done)
171 return send_done;
172 goto out_err; 172 goto out_err;
173 }
174 continue; 173 continue;
175 } 174 }
176 175
@@ -267,27 +266,23 @@ int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
267 266
268/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */ 267/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
269static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset, 268static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
270 int num_sges, struct ib_sge sges[]) 269 int num_sges, struct ib_rdma_wr *rdma_wr)
271{ 270{
272 struct smc_link_group *lgr = conn->lgr; 271 struct smc_link_group *lgr = conn->lgr;
273 struct ib_rdma_wr rdma_wr;
274 struct smc_link *link; 272 struct smc_link *link;
275 int rc; 273 int rc;
276 274
277 memset(&rdma_wr, 0, sizeof(rdma_wr));
278 link = &lgr->lnk[SMC_SINGLE_LINK]; 275 link = &lgr->lnk[SMC_SINGLE_LINK];
279 rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link); 276 rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link);
280 rdma_wr.wr.sg_list = sges; 277 rdma_wr->wr.num_sge = num_sges;
281 rdma_wr.wr.num_sge = num_sges; 278 rdma_wr->remote_addr =
282 rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
283 rdma_wr.remote_addr =
284 lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr + 279 lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
285 /* RMBE within RMB */ 280 /* RMBE within RMB */
286 conn->tx_off + 281 conn->tx_off +
287 /* offset within RMBE */ 282 /* offset within RMBE */
288 peer_rmbe_offset; 283 peer_rmbe_offset;
289 rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey; 284 rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
290 rc = ib_post_send(link->roce_qp, &rdma_wr.wr, NULL); 285 rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
291 if (rc) { 286 if (rc) {
292 conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; 287 conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
293 smc_lgr_terminate(lgr); 288 smc_lgr_terminate(lgr);
@@ -314,24 +309,25 @@ static inline void smc_tx_advance_cursors(struct smc_connection *conn,
314/* SMC-R helper for smc_tx_rdma_writes() */ 309/* SMC-R helper for smc_tx_rdma_writes() */
315static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, 310static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
316 size_t src_off, size_t src_len, 311 size_t src_off, size_t src_len,
317 size_t dst_off, size_t dst_len) 312 size_t dst_off, size_t dst_len,
313 struct smc_rdma_wr *wr_rdma_buf)
318{ 314{
319 dma_addr_t dma_addr = 315 dma_addr_t dma_addr =
320 sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl); 316 sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
321 struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
322 int src_len_sum = src_len, dst_len_sum = dst_len; 317 int src_len_sum = src_len, dst_len_sum = dst_len;
323 struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
324 int sent_count = src_off; 318 int sent_count = src_off;
325 int srcchunk, dstchunk; 319 int srcchunk, dstchunk;
326 int num_sges; 320 int num_sges;
327 int rc; 321 int rc;
328 322
329 for (dstchunk = 0; dstchunk < 2; dstchunk++) { 323 for (dstchunk = 0; dstchunk < 2; dstchunk++) {
324 struct ib_sge *sge =
325 wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list;
326
330 num_sges = 0; 327 num_sges = 0;
331 for (srcchunk = 0; srcchunk < 2; srcchunk++) { 328 for (srcchunk = 0; srcchunk < 2; srcchunk++) {
332 sges[srcchunk].addr = dma_addr + src_off; 329 sge[srcchunk].addr = dma_addr + src_off;
333 sges[srcchunk].length = src_len; 330 sge[srcchunk].length = src_len;
334 sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
335 num_sges++; 331 num_sges++;
336 332
337 src_off += src_len; 333 src_off += src_len;
@@ -344,7 +340,8 @@ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
344 src_len = dst_len - src_len; /* remainder */ 340 src_len = dst_len - src_len; /* remainder */
345 src_len_sum += src_len; 341 src_len_sum += src_len;
346 } 342 }
347 rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges); 343 rc = smc_tx_rdma_write(conn, dst_off, num_sges,
344 &wr_rdma_buf->wr_tx_rdma[dstchunk]);
348 if (rc) 345 if (rc)
349 return rc; 346 return rc;
350 if (dst_len_sum == len) 347 if (dst_len_sum == len)
@@ -403,7 +400,8 @@ static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
403/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit; 400/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
404 * usable snd_wnd as max transmit 401 * usable snd_wnd as max transmit
405 */ 402 */
406static int smc_tx_rdma_writes(struct smc_connection *conn) 403static int smc_tx_rdma_writes(struct smc_connection *conn,
404 struct smc_rdma_wr *wr_rdma_buf)
407{ 405{
408 size_t len, src_len, dst_off, dst_len; /* current chunk values */ 406 size_t len, src_len, dst_off, dst_len; /* current chunk values */
409 union smc_host_cursor sent, prep, prod, cons; 407 union smc_host_cursor sent, prep, prod, cons;
@@ -464,7 +462,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
464 dst_off, dst_len); 462 dst_off, dst_len);
465 else 463 else
466 rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len, 464 rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
467 dst_off, dst_len); 465 dst_off, dst_len, wr_rdma_buf);
468 if (rc) 466 if (rc)
469 return rc; 467 return rc;
470 468
@@ -485,31 +483,30 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
485static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) 483static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
486{ 484{
487 struct smc_cdc_producer_flags *pflags; 485 struct smc_cdc_producer_flags *pflags;
486 struct smc_rdma_wr *wr_rdma_buf;
488 struct smc_cdc_tx_pend *pend; 487 struct smc_cdc_tx_pend *pend;
489 struct smc_wr_buf *wr_buf; 488 struct smc_wr_buf *wr_buf;
490 int rc; 489 int rc;
491 490
492 spin_lock_bh(&conn->send_lock); 491 rc = smc_cdc_get_free_slot(conn, &wr_buf, &wr_rdma_buf, &pend);
493 rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
494 if (rc < 0) { 492 if (rc < 0) {
495 if (rc == -EBUSY) { 493 if (rc == -EBUSY) {
496 struct smc_sock *smc = 494 struct smc_sock *smc =
497 container_of(conn, struct smc_sock, conn); 495 container_of(conn, struct smc_sock, conn);
498 496
499 if (smc->sk.sk_err == ECONNABORTED) { 497 if (smc->sk.sk_err == ECONNABORTED)
500 rc = sock_error(&smc->sk); 498 return sock_error(&smc->sk);
501 goto out_unlock;
502 }
503 rc = 0; 499 rc = 0;
504 if (conn->alert_token_local) /* connection healthy */ 500 if (conn->alert_token_local) /* connection healthy */
505 mod_delayed_work(system_wq, &conn->tx_work, 501 mod_delayed_work(system_wq, &conn->tx_work,
506 SMC_TX_WORK_DELAY); 502 SMC_TX_WORK_DELAY);
507 } 503 }
508 goto out_unlock; 504 return rc;
509 } 505 }
510 506
507 spin_lock_bh(&conn->send_lock);
511 if (!conn->local_tx_ctrl.prod_flags.urg_data_present) { 508 if (!conn->local_tx_ctrl.prod_flags.urg_data_present) {
512 rc = smc_tx_rdma_writes(conn); 509 rc = smc_tx_rdma_writes(conn, wr_rdma_buf);
513 if (rc) { 510 if (rc) {
514 smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], 511 smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
515 (struct smc_wr_tx_pend_priv *)pend); 512 (struct smc_wr_tx_pend_priv *)pend);
@@ -536,7 +533,7 @@ static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
536 533
537 spin_lock_bh(&conn->send_lock); 534 spin_lock_bh(&conn->send_lock);
538 if (!pflags->urg_data_present) 535 if (!pflags->urg_data_present)
539 rc = smc_tx_rdma_writes(conn); 536 rc = smc_tx_rdma_writes(conn, NULL);
540 if (!rc) 537 if (!rc)
541 rc = smcd_cdc_msg_send(conn); 538 rc = smcd_cdc_msg_send(conn);
542 539
@@ -598,7 +595,8 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
598 if (to_confirm > conn->rmbe_update_limit) { 595 if (to_confirm > conn->rmbe_update_limit) {
599 smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn); 596 smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
600 sender_free = conn->rmb_desc->len - 597 sender_free = conn->rmb_desc->len -
601 smc_curs_diff(conn->rmb_desc->len, &prod, &cfed); 598 smc_curs_diff_large(conn->rmb_desc->len,
599 &cfed, &prod);
602 } 600 }
603 601
604 if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || 602 if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index c2694750a6a8..253aa75dc2b6 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -160,6 +160,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
160 * @link: Pointer to smc_link used to later send the message. 160 * @link: Pointer to smc_link used to later send the message.
161 * @handler: Send completion handler function pointer. 161 * @handler: Send completion handler function pointer.
162 * @wr_buf: Out value returns pointer to message buffer. 162 * @wr_buf: Out value returns pointer to message buffer.
163 * @wr_rdma_buf: Out value returns pointer to rdma work request.
163 * @wr_pend_priv: Out value returns pointer serving as handler context. 164 * @wr_pend_priv: Out value returns pointer serving as handler context.
164 * 165 *
165 * Return: 0 on success, or -errno on error. 166 * Return: 0 on success, or -errno on error.
@@ -167,6 +168,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
167int smc_wr_tx_get_free_slot(struct smc_link *link, 168int smc_wr_tx_get_free_slot(struct smc_link *link,
168 smc_wr_tx_handler handler, 169 smc_wr_tx_handler handler,
169 struct smc_wr_buf **wr_buf, 170 struct smc_wr_buf **wr_buf,
171 struct smc_rdma_wr **wr_rdma_buf,
170 struct smc_wr_tx_pend_priv **wr_pend_priv) 172 struct smc_wr_tx_pend_priv **wr_pend_priv)
171{ 173{
172 struct smc_wr_tx_pend *wr_pend; 174 struct smc_wr_tx_pend *wr_pend;
@@ -204,6 +206,8 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
204 wr_ib = &link->wr_tx_ibs[idx]; 206 wr_ib = &link->wr_tx_ibs[idx];
205 wr_ib->wr_id = wr_id; 207 wr_ib->wr_id = wr_id;
206 *wr_buf = &link->wr_tx_bufs[idx]; 208 *wr_buf = &link->wr_tx_bufs[idx];
209 if (wr_rdma_buf)
210 *wr_rdma_buf = &link->wr_tx_rdmas[idx];
207 *wr_pend_priv = &wr_pend->priv; 211 *wr_pend_priv = &wr_pend->priv;
208 return 0; 212 return 0;
209} 213}
@@ -218,10 +222,10 @@ int smc_wr_tx_put_slot(struct smc_link *link,
218 u32 idx = pend->idx; 222 u32 idx = pend->idx;
219 223
220 /* clear the full struct smc_wr_tx_pend including .priv */ 224 /* clear the full struct smc_wr_tx_pend including .priv */
221 memset(&link->wr_tx_pends[pend->idx], 0, 225 memset(&link->wr_tx_pends[idx], 0,
222 sizeof(link->wr_tx_pends[pend->idx])); 226 sizeof(link->wr_tx_pends[idx]));
223 memset(&link->wr_tx_bufs[pend->idx], 0, 227 memset(&link->wr_tx_bufs[idx], 0,
224 sizeof(link->wr_tx_bufs[pend->idx])); 228 sizeof(link->wr_tx_bufs[idx]));
225 test_and_clear_bit(idx, link->wr_tx_mask); 229 test_and_clear_bit(idx, link->wr_tx_mask);
226 return 1; 230 return 1;
227 } 231 }
@@ -465,12 +469,26 @@ static void smc_wr_init_sge(struct smc_link *lnk)
465 lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE; 469 lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
466 lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE; 470 lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
467 lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; 471 lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
472 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey =
473 lnk->roce_pd->local_dma_lkey;
474 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey =
475 lnk->roce_pd->local_dma_lkey;
476 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey =
477 lnk->roce_pd->local_dma_lkey;
478 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey =
479 lnk->roce_pd->local_dma_lkey;
468 lnk->wr_tx_ibs[i].next = NULL; 480 lnk->wr_tx_ibs[i].next = NULL;
469 lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i]; 481 lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
470 lnk->wr_tx_ibs[i].num_sge = 1; 482 lnk->wr_tx_ibs[i].num_sge = 1;
471 lnk->wr_tx_ibs[i].opcode = IB_WR_SEND; 483 lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
472 lnk->wr_tx_ibs[i].send_flags = 484 lnk->wr_tx_ibs[i].send_flags =
473 IB_SEND_SIGNALED | IB_SEND_SOLICITED; 485 IB_SEND_SIGNALED | IB_SEND_SOLICITED;
486 lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE;
487 lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE;
488 lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =
489 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge;
490 lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list =
491 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge;
474 } 492 }
475 for (i = 0; i < lnk->wr_rx_cnt; i++) { 493 for (i = 0; i < lnk->wr_rx_cnt; i++) {
476 lnk->wr_rx_sges[i].addr = 494 lnk->wr_rx_sges[i].addr =
@@ -521,8 +539,12 @@ void smc_wr_free_link_mem(struct smc_link *lnk)
521 lnk->wr_tx_mask = NULL; 539 lnk->wr_tx_mask = NULL;
522 kfree(lnk->wr_tx_sges); 540 kfree(lnk->wr_tx_sges);
523 lnk->wr_tx_sges = NULL; 541 lnk->wr_tx_sges = NULL;
542 kfree(lnk->wr_tx_rdma_sges);
543 lnk->wr_tx_rdma_sges = NULL;
524 kfree(lnk->wr_rx_sges); 544 kfree(lnk->wr_rx_sges);
525 lnk->wr_rx_sges = NULL; 545 lnk->wr_rx_sges = NULL;
546 kfree(lnk->wr_tx_rdmas);
547 lnk->wr_tx_rdmas = NULL;
526 kfree(lnk->wr_rx_ibs); 548 kfree(lnk->wr_rx_ibs);
527 lnk->wr_rx_ibs = NULL; 549 lnk->wr_rx_ibs = NULL;
528 kfree(lnk->wr_tx_ibs); 550 kfree(lnk->wr_tx_ibs);
@@ -552,10 +574,20 @@ int smc_wr_alloc_link_mem(struct smc_link *link)
552 GFP_KERNEL); 574 GFP_KERNEL);
553 if (!link->wr_rx_ibs) 575 if (!link->wr_rx_ibs)
554 goto no_mem_wr_tx_ibs; 576 goto no_mem_wr_tx_ibs;
577 link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT,
578 sizeof(link->wr_tx_rdmas[0]),
579 GFP_KERNEL);
580 if (!link->wr_tx_rdmas)
581 goto no_mem_wr_rx_ibs;
582 link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT,
583 sizeof(link->wr_tx_rdma_sges[0]),
584 GFP_KERNEL);
585 if (!link->wr_tx_rdma_sges)
586 goto no_mem_wr_tx_rdmas;
555 link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]), 587 link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
556 GFP_KERNEL); 588 GFP_KERNEL);
557 if (!link->wr_tx_sges) 589 if (!link->wr_tx_sges)
558 goto no_mem_wr_rx_ibs; 590 goto no_mem_wr_tx_rdma_sges;
559 link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, 591 link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
560 sizeof(link->wr_rx_sges[0]), 592 sizeof(link->wr_rx_sges[0]),
561 GFP_KERNEL); 593 GFP_KERNEL);
@@ -579,6 +611,10 @@ no_mem_wr_rx_sges:
579 kfree(link->wr_rx_sges); 611 kfree(link->wr_rx_sges);
580no_mem_wr_tx_sges: 612no_mem_wr_tx_sges:
581 kfree(link->wr_tx_sges); 613 kfree(link->wr_tx_sges);
614no_mem_wr_tx_rdma_sges:
615 kfree(link->wr_tx_rdma_sges);
616no_mem_wr_tx_rdmas:
617 kfree(link->wr_tx_rdmas);
582no_mem_wr_rx_ibs: 618no_mem_wr_rx_ibs:
583 kfree(link->wr_rx_ibs); 619 kfree(link->wr_rx_ibs);
584no_mem_wr_tx_ibs: 620no_mem_wr_tx_ibs:
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index 1d85bb14fd6f..09bf32fd3959 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -85,6 +85,7 @@ void smc_wr_add_dev(struct smc_ib_device *smcibdev);
85 85
86int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler, 86int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler,
87 struct smc_wr_buf **wr_buf, 87 struct smc_wr_buf **wr_buf,
88 struct smc_rdma_wr **wrs,
88 struct smc_wr_tx_pend_priv **wr_pend_priv); 89 struct smc_wr_tx_pend_priv **wr_pend_priv);
89int smc_wr_tx_put_slot(struct smc_link *link, 90int smc_wr_tx_put_slot(struct smc_link *link,
90 struct smc_wr_tx_pend_priv *wr_pend_priv); 91 struct smc_wr_tx_pend_priv *wr_pend_priv);
diff --git a/net/socket.c b/net/socket.c
index d51930689b98..643a1648fcc2 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -963,8 +963,7 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
963EXPORT_SYMBOL(dlci_ioctl_set); 963EXPORT_SYMBOL(dlci_ioctl_set);
964 964
965static long sock_do_ioctl(struct net *net, struct socket *sock, 965static long sock_do_ioctl(struct net *net, struct socket *sock,
966 unsigned int cmd, unsigned long arg, 966 unsigned int cmd, unsigned long arg)
967 unsigned int ifreq_size)
968{ 967{
969 int err; 968 int err;
970 void __user *argp = (void __user *)arg; 969 void __user *argp = (void __user *)arg;
@@ -990,11 +989,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
990 } else { 989 } else {
991 struct ifreq ifr; 990 struct ifreq ifr;
992 bool need_copyout; 991 bool need_copyout;
993 if (copy_from_user(&ifr, argp, ifreq_size)) 992 if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
994 return -EFAULT; 993 return -EFAULT;
995 err = dev_ioctl(net, cmd, &ifr, &need_copyout); 994 err = dev_ioctl(net, cmd, &ifr, &need_copyout);
996 if (!err && need_copyout) 995 if (!err && need_copyout)
997 if (copy_to_user(argp, &ifr, ifreq_size)) 996 if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
998 return -EFAULT; 997 return -EFAULT;
999 } 998 }
1000 return err; 999 return err;
@@ -1093,8 +1092,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
1093 err = open_related_ns(&net->ns, get_net_ns); 1092 err = open_related_ns(&net->ns, get_net_ns);
1094 break; 1093 break;
1095 default: 1094 default:
1096 err = sock_do_ioctl(net, sock, cmd, arg, 1095 err = sock_do_ioctl(net, sock, cmd, arg);
1097 sizeof(struct ifreq));
1098 break; 1096 break;
1099 } 1097 }
1100 return err; 1098 return err;
@@ -2802,8 +2800,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
2802 int err; 2800 int err;
2803 2801
2804 set_fs(KERNEL_DS); 2802 set_fs(KERNEL_DS);
2805 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv, 2803 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
2806 sizeof(struct compat_ifreq));
2807 set_fs(old_fs); 2804 set_fs(old_fs);
2808 if (!err) 2805 if (!err)
2809 err = compat_put_timeval(&ktv, up); 2806 err = compat_put_timeval(&ktv, up);
@@ -2819,8 +2816,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
2819 int err; 2816 int err;
2820 2817
2821 set_fs(KERNEL_DS); 2818 set_fs(KERNEL_DS);
2822 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts, 2819 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
2823 sizeof(struct compat_ifreq));
2824 set_fs(old_fs); 2820 set_fs(old_fs);
2825 if (!err) 2821 if (!err)
2826 err = compat_put_timespec(&kts, up); 2822 err = compat_put_timespec(&kts, up);
@@ -3016,6 +3012,54 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
3016 return dev_ioctl(net, cmd, &ifreq, NULL); 3012 return dev_ioctl(net, cmd, &ifreq, NULL);
3017} 3013}
3018 3014
3015static int compat_ifreq_ioctl(struct net *net, struct socket *sock,
3016 unsigned int cmd,
3017 struct compat_ifreq __user *uifr32)
3018{
3019 struct ifreq __user *uifr;
3020 int err;
3021
3022 /* Handle the fact that while struct ifreq has the same *layout* on
3023 * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data,
3024 * which are handled elsewhere, it still has different *size* due to
3025 * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit,
3026 * resulting in struct ifreq being 32 and 40 bytes respectively).
3027 * As a result, if the struct happens to be at the end of a page and
3028 * the next page isn't readable/writable, we get a fault. To prevent
3029 * that, copy back and forth to the full size.
3030 */
3031
3032 uifr = compat_alloc_user_space(sizeof(*uifr));
3033 if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
3034 return -EFAULT;
3035
3036 err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
3037
3038 if (!err) {
3039 switch (cmd) {
3040 case SIOCGIFFLAGS:
3041 case SIOCGIFMETRIC:
3042 case SIOCGIFMTU:
3043 case SIOCGIFMEM:
3044 case SIOCGIFHWADDR:
3045 case SIOCGIFINDEX:
3046 case SIOCGIFADDR:
3047 case SIOCGIFBRDADDR:
3048 case SIOCGIFDSTADDR:
3049 case SIOCGIFNETMASK:
3050 case SIOCGIFPFLAGS:
3051 case SIOCGIFTXQLEN:
3052 case SIOCGMIIPHY:
3053 case SIOCGMIIREG:
3054 case SIOCGIFNAME:
3055 if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
3056 err = -EFAULT;
3057 break;
3058 }
3059 }
3060 return err;
3061}
3062
3019static int compat_sioc_ifmap(struct net *net, unsigned int cmd, 3063static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
3020 struct compat_ifreq __user *uifr32) 3064 struct compat_ifreq __user *uifr32)
3021{ 3065{
@@ -3131,8 +3175,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
3131 } 3175 }
3132 3176
3133 set_fs(KERNEL_DS); 3177 set_fs(KERNEL_DS);
3134 ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r, 3178 ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
3135 sizeof(struct compat_ifreq));
3136 set_fs(old_fs); 3179 set_fs(old_fs);
3137 3180
3138out: 3181out:
@@ -3232,21 +3275,22 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
3232 case SIOCSIFTXQLEN: 3275 case SIOCSIFTXQLEN:
3233 case SIOCBRADDIF: 3276 case SIOCBRADDIF:
3234 case SIOCBRDELIF: 3277 case SIOCBRDELIF:
3278 case SIOCGIFNAME:
3235 case SIOCSIFNAME: 3279 case SIOCSIFNAME:
3236 case SIOCGMIIPHY: 3280 case SIOCGMIIPHY:
3237 case SIOCGMIIREG: 3281 case SIOCGMIIREG:
3238 case SIOCSMIIREG: 3282 case SIOCSMIIREG:
3239 case SIOCSARP:
3240 case SIOCGARP:
3241 case SIOCDARP:
3242 case SIOCATMARK:
3243 case SIOCBONDENSLAVE: 3283 case SIOCBONDENSLAVE:
3244 case SIOCBONDRELEASE: 3284 case SIOCBONDRELEASE:
3245 case SIOCBONDSETHWADDR: 3285 case SIOCBONDSETHWADDR:
3246 case SIOCBONDCHANGEACTIVE: 3286 case SIOCBONDCHANGEACTIVE:
3247 case SIOCGIFNAME: 3287 return compat_ifreq_ioctl(net, sock, cmd, argp);
3248 return sock_do_ioctl(net, sock, cmd, arg, 3288
3249 sizeof(struct compat_ifreq)); 3289 case SIOCSARP:
3290 case SIOCGARP:
3291 case SIOCDARP:
3292 case SIOCATMARK:
3293 return sock_do_ioctl(net, sock, cmd, arg);
3250 } 3294 }
3251 3295
3252 return -ENOIOCTLCMD; 3296 return -ENOIOCTLCMD;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index cf51b8f9b15f..1f200119268c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -537,6 +537,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
537 DMA_TO_DEVICE); 537 DMA_TO_DEVICE);
538} 538}
539 539
540/* If the xdr_buf has more elements than the device can
541 * transmit in a single RDMA Send, then the reply will
542 * have to be copied into a bounce buffer.
543 */
544static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
545 struct xdr_buf *xdr,
546 __be32 *wr_lst)
547{
548 int elements;
549
550 /* xdr->head */
551 elements = 1;
552
553 /* xdr->pages */
554 if (!wr_lst) {
555 unsigned int remaining;
556 unsigned long pageoff;
557
558 pageoff = xdr->page_base & ~PAGE_MASK;
559 remaining = xdr->page_len;
560 while (remaining) {
561 ++elements;
562 remaining -= min_t(u32, PAGE_SIZE - pageoff,
563 remaining);
564 pageoff = 0;
565 }
566 }
567
568 /* xdr->tail */
569 if (xdr->tail[0].iov_len)
570 ++elements;
571
572 /* assume 1 SGE is needed for the transport header */
573 return elements >= rdma->sc_max_send_sges;
574}
575
576/* The device is not capable of sending the reply directly.
577 * Assemble the elements of @xdr into the transport header
578 * buffer.
579 */
580static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
581 struct svc_rdma_send_ctxt *ctxt,
582 struct xdr_buf *xdr, __be32 *wr_lst)
583{
584 unsigned char *dst, *tailbase;
585 unsigned int taillen;
586
587 dst = ctxt->sc_xprt_buf;
588 dst += ctxt->sc_sges[0].length;
589
590 memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
591 dst += xdr->head[0].iov_len;
592
593 tailbase = xdr->tail[0].iov_base;
594 taillen = xdr->tail[0].iov_len;
595 if (wr_lst) {
596 u32 xdrpad;
597
598 xdrpad = xdr_padsize(xdr->page_len);
599 if (taillen && xdrpad) {
600 tailbase += xdrpad;
601 taillen -= xdrpad;
602 }
603 } else {
604 unsigned int len, remaining;
605 unsigned long pageoff;
606 struct page **ppages;
607
608 ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
609 pageoff = xdr->page_base & ~PAGE_MASK;
610 remaining = xdr->page_len;
611 while (remaining) {
612 len = min_t(u32, PAGE_SIZE - pageoff, remaining);
613
614 memcpy(dst, page_address(*ppages), len);
615 remaining -= len;
616 dst += len;
617 pageoff = 0;
618 }
619 }
620
621 if (taillen)
622 memcpy(dst, tailbase, taillen);
623
624 ctxt->sc_sges[0].length += xdr->len;
625 ib_dma_sync_single_for_device(rdma->sc_pd->device,
626 ctxt->sc_sges[0].addr,
627 ctxt->sc_sges[0].length,
628 DMA_TO_DEVICE);
629
630 return 0;
631}
632
540/* svc_rdma_map_reply_msg - Map the buffer holding RPC message 633/* svc_rdma_map_reply_msg - Map the buffer holding RPC message
541 * @rdma: controlling transport 634 * @rdma: controlling transport
542 * @ctxt: send_ctxt for the Send WR 635 * @ctxt: send_ctxt for the Send WR
@@ -559,8 +652,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
559 u32 xdr_pad; 652 u32 xdr_pad;
560 int ret; 653 int ret;
561 654
562 if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) 655 if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
563 return -EIO; 656 return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
657
658 ++ctxt->sc_cur_sge_no;
564 ret = svc_rdma_dma_map_buf(rdma, ctxt, 659 ret = svc_rdma_dma_map_buf(rdma, ctxt,
565 xdr->head[0].iov_base, 660 xdr->head[0].iov_base,
566 xdr->head[0].iov_len); 661 xdr->head[0].iov_len);
@@ -591,8 +686,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
591 while (remaining) { 686 while (remaining) {
592 len = min_t(u32, PAGE_SIZE - page_off, remaining); 687 len = min_t(u32, PAGE_SIZE - page_off, remaining);
593 688
594 if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) 689 ++ctxt->sc_cur_sge_no;
595 return -EIO;
596 ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++, 690 ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
597 page_off, len); 691 page_off, len);
598 if (ret < 0) 692 if (ret < 0)
@@ -606,8 +700,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
606 len = xdr->tail[0].iov_len; 700 len = xdr->tail[0].iov_len;
607tail: 701tail:
608 if (len) { 702 if (len) {
609 if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) 703 ++ctxt->sc_cur_sge_no;
610 return -EIO;
611 ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len); 704 ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
612 if (ret < 0) 705 if (ret < 0)
613 return ret; 706 return ret;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 924c17d46903..57f86c63a463 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -419,12 +419,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
419 /* Transport header, head iovec, tail iovec */ 419 /* Transport header, head iovec, tail iovec */
420 newxprt->sc_max_send_sges = 3; 420 newxprt->sc_max_send_sges = 3;
421 /* Add one SGE per page list entry */ 421 /* Add one SGE per page list entry */
422 newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE; 422 newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
423 if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) { 423 if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
424 pr_err("svcrdma: too few Send SGEs available (%d needed)\n", 424 newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
425 newxprt->sc_max_send_sges);
426 goto errout;
427 }
428 newxprt->sc_max_req_size = svcrdma_max_req_size; 425 newxprt->sc_max_req_size = svcrdma_max_req_size;
429 newxprt->sc_max_requests = svcrdma_max_requests; 426 newxprt->sc_max_requests = svcrdma_max_requests;
430 newxprt->sc_max_bc_requests = svcrdma_max_bc_requests; 427 newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 5d3cce9e8744..15eb5d3d4750 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -75,6 +75,9 @@ static u32 virtio_transport_get_local_cid(void)
75{ 75{
76 struct virtio_vsock *vsock = virtio_vsock_get(); 76 struct virtio_vsock *vsock = virtio_vsock_get();
77 77
78 if (!vsock)
79 return VMADDR_CID_ANY;
80
78 return vsock->guest_cid; 81 return vsock->guest_cid;
79} 82}
80 83
@@ -584,10 +587,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
584 587
585 virtio_vsock_update_guest_cid(vsock); 588 virtio_vsock_update_guest_cid(vsock);
586 589
587 ret = vsock_core_init(&virtio_transport.transport);
588 if (ret < 0)
589 goto out_vqs;
590
591 vsock->rx_buf_nr = 0; 590 vsock->rx_buf_nr = 0;
592 vsock->rx_buf_max_nr = 0; 591 vsock->rx_buf_max_nr = 0;
593 atomic_set(&vsock->queued_replies, 0); 592 atomic_set(&vsock->queued_replies, 0);
@@ -618,8 +617,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
618 mutex_unlock(&the_virtio_vsock_mutex); 617 mutex_unlock(&the_virtio_vsock_mutex);
619 return 0; 618 return 0;
620 619
621out_vqs:
622 vsock->vdev->config->del_vqs(vsock->vdev);
623out: 620out:
624 kfree(vsock); 621 kfree(vsock);
625 mutex_unlock(&the_virtio_vsock_mutex); 622 mutex_unlock(&the_virtio_vsock_mutex);
@@ -637,6 +634,9 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
637 flush_work(&vsock->event_work); 634 flush_work(&vsock->event_work);
638 flush_work(&vsock->send_pkt_work); 635 flush_work(&vsock->send_pkt_work);
639 636
637 /* Reset all connected sockets when the device disappear */
638 vsock_for_each_connected_socket(virtio_vsock_reset_sock);
639
640 vdev->config->reset(vdev); 640 vdev->config->reset(vdev);
641 641
642 mutex_lock(&vsock->rx_lock); 642 mutex_lock(&vsock->rx_lock);
@@ -669,7 +669,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
669 669
670 mutex_lock(&the_virtio_vsock_mutex); 670 mutex_lock(&the_virtio_vsock_mutex);
671 the_virtio_vsock = NULL; 671 the_virtio_vsock = NULL;
672 vsock_core_exit();
673 mutex_unlock(&the_virtio_vsock_mutex); 672 mutex_unlock(&the_virtio_vsock_mutex);
674 673
675 vdev->config->del_vqs(vdev); 674 vdev->config->del_vqs(vdev);
@@ -702,14 +701,28 @@ static int __init virtio_vsock_init(void)
702 virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0); 701 virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
703 if (!virtio_vsock_workqueue) 702 if (!virtio_vsock_workqueue)
704 return -ENOMEM; 703 return -ENOMEM;
704
705 ret = register_virtio_driver(&virtio_vsock_driver); 705 ret = register_virtio_driver(&virtio_vsock_driver);
706 if (ret) 706 if (ret)
707 destroy_workqueue(virtio_vsock_workqueue); 707 goto out_wq;
708
709 ret = vsock_core_init(&virtio_transport.transport);
710 if (ret)
711 goto out_vdr;
712
713 return 0;
714
715out_vdr:
716 unregister_virtio_driver(&virtio_vsock_driver);
717out_wq:
718 destroy_workqueue(virtio_vsock_workqueue);
708 return ret; 719 return ret;
720
709} 721}
710 722
711static void __exit virtio_vsock_exit(void) 723static void __exit virtio_vsock_exit(void)
712{ 724{
725 vsock_core_exit();
713 unregister_virtio_driver(&virtio_vsock_driver); 726 unregister_virtio_driver(&virtio_vsock_driver);
714 destroy_workqueue(virtio_vsock_workqueue); 727 destroy_workqueue(virtio_vsock_workqueue);
715} 728}
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index 882d97bdc6bf..550ac9d827fe 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -41,6 +41,8 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
41 cfg80211_sched_dfs_chan_update(rdev); 41 cfg80211_sched_dfs_chan_update(rdev);
42 } 42 }
43 43
44 schedule_work(&cfg80211_disconnect_work);
45
44 return err; 46 return err;
45} 47}
46 48
diff --git a/net/wireless/core.h b/net/wireless/core.h
index c5d6f3418601..f6b40563dc63 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -445,6 +445,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev);
445bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, 445bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
446 u32 center_freq_khz, u32 bw_khz); 446 u32 center_freq_khz, u32 bw_khz);
447 447
448extern struct work_struct cfg80211_disconnect_work;
449
448/** 450/**
449 * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable 451 * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable
450 * @wiphy: the wiphy to validate against 452 * @wiphy: the wiphy to validate against
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index f741d8376a46..7d34cb884840 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -667,7 +667,7 @@ static void disconnect_work(struct work_struct *work)
667 rtnl_unlock(); 667 rtnl_unlock();
668} 668}
669 669
670static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); 670DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
671 671
672 672
673/* 673/*
diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
index 33e67bd1dc34..32234481ad7d 100644
--- a/samples/mei/mei-amt-version.c
+++ b/samples/mei/mei-amt-version.c
@@ -117,7 +117,7 @@ static bool mei_init(struct mei *me, const uuid_le *guid,
117 117
118 me->verbose = verbose; 118 me->verbose = verbose;
119 119
120 me->fd = open("/dev/mei", O_RDWR); 120 me->fd = open("/dev/mei0", O_RDWR);
121 if (me->fd == -1) { 121 if (me->fd == -1) {
122 mei_err(me, "Cannot establish a handle to the Intel MEI driver\n"); 122 mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
123 goto err; 123 goto err;
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 08c88de0ffda..11975ec8d566 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -1444,7 +1444,10 @@ check:
1444 new = aa_label_merge(label, target, GFP_KERNEL); 1444 new = aa_label_merge(label, target, GFP_KERNEL);
1445 if (IS_ERR_OR_NULL(new)) { 1445 if (IS_ERR_OR_NULL(new)) {
1446 info = "failed to build target label"; 1446 info = "failed to build target label";
1447 error = PTR_ERR(new); 1447 if (!new)
1448 error = -ENOMEM;
1449 else
1450 error = PTR_ERR(new);
1448 new = NULL; 1451 new = NULL;
1449 perms.allow = 0; 1452 perms.allow = 0;
1450 goto audit; 1453 goto audit;
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 2c010874329f..8db1731d046a 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -1599,12 +1599,14 @@ static unsigned int apparmor_ipv4_postroute(void *priv,
1599 return apparmor_ip_postroute(priv, skb, state); 1599 return apparmor_ip_postroute(priv, skb, state);
1600} 1600}
1601 1601
1602#if IS_ENABLED(CONFIG_IPV6)
1602static unsigned int apparmor_ipv6_postroute(void *priv, 1603static unsigned int apparmor_ipv6_postroute(void *priv,
1603 struct sk_buff *skb, 1604 struct sk_buff *skb,
1604 const struct nf_hook_state *state) 1605 const struct nf_hook_state *state)
1605{ 1606{
1606 return apparmor_ip_postroute(priv, skb, state); 1607 return apparmor_ip_postroute(priv, skb, state);
1607} 1608}
1609#endif
1608 1610
1609static const struct nf_hook_ops apparmor_nf_ops[] = { 1611static const struct nf_hook_ops apparmor_nf_ops[] = {
1610 { 1612 {
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 40013b26f671..6c99fa8ac5fa 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -2112,6 +2112,13 @@ int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2112 return 0; 2112 return 0;
2113} 2113}
2114 2114
2115/* allow waiting for a capture stream that hasn't been started */
2116#if IS_ENABLED(CONFIG_SND_PCM_OSS)
2117#define wait_capture_start(substream) ((substream)->oss.oss)
2118#else
2119#define wait_capture_start(substream) false
2120#endif
2121
2115/* the common loop for read/write data */ 2122/* the common loop for read/write data */
2116snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, 2123snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2117 void *data, bool interleaved, 2124 void *data, bool interleaved,
@@ -2182,7 +2189,7 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2182 err = snd_pcm_start(substream); 2189 err = snd_pcm_start(substream);
2183 if (err < 0) 2190 if (err < 0)
2184 goto _end_unlock; 2191 goto _end_unlock;
2185 } else { 2192 } else if (!wait_capture_start(substream)) {
2186 /* nothing to do */ 2193 /* nothing to do */
2187 err = 0; 2194 err = 0;
2188 goto _end_unlock; 2195 goto _end_unlock;
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index 9174f1b3a987..1ec706ced75c 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -115,7 +115,8 @@ static int hda_codec_driver_probe(struct device *dev)
115 err = snd_hda_codec_build_controls(codec); 115 err = snd_hda_codec_build_controls(codec);
116 if (err < 0) 116 if (err < 0)
117 goto error_module; 117 goto error_module;
118 if (codec->card->registered) { 118 /* only register after the bus probe finished; otherwise it's racy */
119 if (!codec->bus->bus_probing && codec->card->registered) {
119 err = snd_card_register(codec->card); 120 err = snd_card_register(codec->card);
120 if (err < 0) 121 if (err < 0)
121 goto error_module; 122 goto error_module;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index e784130ea4e0..e5c49003e75f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2185,6 +2185,7 @@ static int azx_probe_continue(struct azx *chip)
2185 int dev = chip->dev_index; 2185 int dev = chip->dev_index;
2186 int err; 2186 int err;
2187 2187
2188 to_hda_bus(bus)->bus_probing = 1;
2188 hda->probe_continued = 1; 2189 hda->probe_continued = 1;
2189 2190
2190 /* bind with i915 if needed */ 2191 /* bind with i915 if needed */
@@ -2269,6 +2270,7 @@ out_free:
2269 if (err < 0) 2270 if (err < 0)
2270 hda->init_failed = 1; 2271 hda->init_failed = 1;
2271 complete_all(&hda->probe_wait); 2272 complete_all(&hda->probe_wait);
2273 to_hda_bus(bus)->bus_probing = 0;
2272 return err; 2274 return err;
2273} 2275}
2274 2276
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index e5bdbc245682..29882bda7632 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -8451,8 +8451,10 @@ static void ca0132_free(struct hda_codec *codec)
8451 ca0132_exit_chip(codec); 8451 ca0132_exit_chip(codec);
8452 8452
8453 snd_hda_power_down(codec); 8453 snd_hda_power_down(codec);
8454 if (IS_ENABLED(CONFIG_PCI) && spec->mem_base) 8454#ifdef CONFIG_PCI
8455 if (spec->mem_base)
8455 pci_iounmap(codec->bus->pci, spec->mem_base); 8456 pci_iounmap(codec->bus->pci, spec->mem_base);
8457#endif
8456 kfree(spec->spec_init_verbs); 8458 kfree(spec->spec_init_verbs);
8457 kfree(codec->spec); 8459 kfree(codec->spec);
8458} 8460}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b4f472157ebd..6df758adff84 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -117,6 +117,7 @@ struct alc_spec {
117 int codec_variant; /* flag for other variants */ 117 int codec_variant; /* flag for other variants */
118 unsigned int has_alc5505_dsp:1; 118 unsigned int has_alc5505_dsp:1;
119 unsigned int no_depop_delay:1; 119 unsigned int no_depop_delay:1;
120 unsigned int done_hp_init:1;
120 121
121 /* for PLL fix */ 122 /* for PLL fix */
122 hda_nid_t pll_nid; 123 hda_nid_t pll_nid;
@@ -514,6 +515,15 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
514 } 515 }
515} 516}
516 517
518/* get a primary headphone pin if available */
519static hda_nid_t alc_get_hp_pin(struct alc_spec *spec)
520{
521 if (spec->gen.autocfg.hp_pins[0])
522 return spec->gen.autocfg.hp_pins[0];
523 if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
524 return spec->gen.autocfg.line_out_pins[0];
525 return 0;
526}
517 527
518/* 528/*
519 * Realtek SSID verification 529 * Realtek SSID verification
@@ -724,9 +734,7 @@ do_sku:
724 * 15 : 1 --> enable the function "Mute internal speaker 734 * 15 : 1 --> enable the function "Mute internal speaker
725 * when the external headphone out jack is plugged" 735 * when the external headphone out jack is plugged"
726 */ 736 */
727 if (!spec->gen.autocfg.hp_pins[0] && 737 if (!alc_get_hp_pin(spec)) {
728 !(spec->gen.autocfg.line_out_pins[0] &&
729 spec->gen.autocfg.line_out_type == AUTO_PIN_HP_OUT)) {
730 hda_nid_t nid; 738 hda_nid_t nid;
731 tmp = (ass >> 11) & 0x3; /* HP to chassis */ 739 tmp = (ass >> 11) & 0x3; /* HP to chassis */
732 nid = ports[tmp]; 740 nid = ports[tmp];
@@ -2958,7 +2966,7 @@ static void alc282_restore_default_value(struct hda_codec *codec)
2958static void alc282_init(struct hda_codec *codec) 2966static void alc282_init(struct hda_codec *codec)
2959{ 2967{
2960 struct alc_spec *spec = codec->spec; 2968 struct alc_spec *spec = codec->spec;
2961 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 2969 hda_nid_t hp_pin = alc_get_hp_pin(spec);
2962 bool hp_pin_sense; 2970 bool hp_pin_sense;
2963 int coef78; 2971 int coef78;
2964 2972
@@ -2995,7 +3003,7 @@ static void alc282_init(struct hda_codec *codec)
2995static void alc282_shutup(struct hda_codec *codec) 3003static void alc282_shutup(struct hda_codec *codec)
2996{ 3004{
2997 struct alc_spec *spec = codec->spec; 3005 struct alc_spec *spec = codec->spec;
2998 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3006 hda_nid_t hp_pin = alc_get_hp_pin(spec);
2999 bool hp_pin_sense; 3007 bool hp_pin_sense;
3000 int coef78; 3008 int coef78;
3001 3009
@@ -3073,14 +3081,9 @@ static void alc283_restore_default_value(struct hda_codec *codec)
3073static void alc283_init(struct hda_codec *codec) 3081static void alc283_init(struct hda_codec *codec)
3074{ 3082{
3075 struct alc_spec *spec = codec->spec; 3083 struct alc_spec *spec = codec->spec;
3076 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3084 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3077 bool hp_pin_sense; 3085 bool hp_pin_sense;
3078 3086
3079 if (!spec->gen.autocfg.hp_outs) {
3080 if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
3081 hp_pin = spec->gen.autocfg.line_out_pins[0];
3082 }
3083
3084 alc283_restore_default_value(codec); 3087 alc283_restore_default_value(codec);
3085 3088
3086 if (!hp_pin) 3089 if (!hp_pin)
@@ -3114,14 +3117,9 @@ static void alc283_init(struct hda_codec *codec)
3114static void alc283_shutup(struct hda_codec *codec) 3117static void alc283_shutup(struct hda_codec *codec)
3115{ 3118{
3116 struct alc_spec *spec = codec->spec; 3119 struct alc_spec *spec = codec->spec;
3117 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3120 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3118 bool hp_pin_sense; 3121 bool hp_pin_sense;
3119 3122
3120 if (!spec->gen.autocfg.hp_outs) {
3121 if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
3122 hp_pin = spec->gen.autocfg.line_out_pins[0];
3123 }
3124
3125 if (!hp_pin) { 3123 if (!hp_pin) {
3126 alc269_shutup(codec); 3124 alc269_shutup(codec);
3127 return; 3125 return;
@@ -3155,7 +3153,7 @@ static void alc283_shutup(struct hda_codec *codec)
3155static void alc256_init(struct hda_codec *codec) 3153static void alc256_init(struct hda_codec *codec)
3156{ 3154{
3157 struct alc_spec *spec = codec->spec; 3155 struct alc_spec *spec = codec->spec;
3158 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3156 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3159 bool hp_pin_sense; 3157 bool hp_pin_sense;
3160 3158
3161 if (!hp_pin) 3159 if (!hp_pin)
@@ -3191,7 +3189,7 @@ static void alc256_init(struct hda_codec *codec)
3191static void alc256_shutup(struct hda_codec *codec) 3189static void alc256_shutup(struct hda_codec *codec)
3192{ 3190{
3193 struct alc_spec *spec = codec->spec; 3191 struct alc_spec *spec = codec->spec;
3194 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3192 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3195 bool hp_pin_sense; 3193 bool hp_pin_sense;
3196 3194
3197 if (!hp_pin) { 3195 if (!hp_pin) {
@@ -3227,7 +3225,7 @@ static void alc256_shutup(struct hda_codec *codec)
3227static void alc225_init(struct hda_codec *codec) 3225static void alc225_init(struct hda_codec *codec)
3228{ 3226{
3229 struct alc_spec *spec = codec->spec; 3227 struct alc_spec *spec = codec->spec;
3230 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3228 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3231 bool hp1_pin_sense, hp2_pin_sense; 3229 bool hp1_pin_sense, hp2_pin_sense;
3232 3230
3233 if (!hp_pin) 3231 if (!hp_pin)
@@ -3270,7 +3268,7 @@ static void alc225_init(struct hda_codec *codec)
3270static void alc225_shutup(struct hda_codec *codec) 3268static void alc225_shutup(struct hda_codec *codec)
3271{ 3269{
3272 struct alc_spec *spec = codec->spec; 3270 struct alc_spec *spec = codec->spec;
3273 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3271 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3274 bool hp1_pin_sense, hp2_pin_sense; 3272 bool hp1_pin_sense, hp2_pin_sense;
3275 3273
3276 if (!hp_pin) { 3274 if (!hp_pin) {
@@ -3314,7 +3312,7 @@ static void alc225_shutup(struct hda_codec *codec)
3314static void alc_default_init(struct hda_codec *codec) 3312static void alc_default_init(struct hda_codec *codec)
3315{ 3313{
3316 struct alc_spec *spec = codec->spec; 3314 struct alc_spec *spec = codec->spec;
3317 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3315 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3318 bool hp_pin_sense; 3316 bool hp_pin_sense;
3319 3317
3320 if (!hp_pin) 3318 if (!hp_pin)
@@ -3343,7 +3341,7 @@ static void alc_default_init(struct hda_codec *codec)
3343static void alc_default_shutup(struct hda_codec *codec) 3341static void alc_default_shutup(struct hda_codec *codec)
3344{ 3342{
3345 struct alc_spec *spec = codec->spec; 3343 struct alc_spec *spec = codec->spec;
3346 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3344 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3347 bool hp_pin_sense; 3345 bool hp_pin_sense;
3348 3346
3349 if (!hp_pin) { 3347 if (!hp_pin) {
@@ -3372,6 +3370,48 @@ static void alc_default_shutup(struct hda_codec *codec)
3372 snd_hda_shutup_pins(codec); 3370 snd_hda_shutup_pins(codec);
3373} 3371}
3374 3372
3373static void alc294_hp_init(struct hda_codec *codec)
3374{
3375 struct alc_spec *spec = codec->spec;
3376 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3377 int i, val;
3378
3379 if (!hp_pin)
3380 return;
3381
3382 snd_hda_codec_write(codec, hp_pin, 0,
3383 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
3384
3385 msleep(100);
3386
3387 snd_hda_codec_write(codec, hp_pin, 0,
3388 AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
3389
3390 alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
3391 alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
3392
3393 /* Wait for depop procedure finish */
3394 val = alc_read_coefex_idx(codec, 0x58, 0x01);
3395 for (i = 0; i < 20 && val & 0x0080; i++) {
3396 msleep(50);
3397 val = alc_read_coefex_idx(codec, 0x58, 0x01);
3398 }
3399 /* Set HP depop to auto mode */
3400 alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
3401 msleep(50);
3402}
3403
3404static void alc294_init(struct hda_codec *codec)
3405{
3406 struct alc_spec *spec = codec->spec;
3407
3408 if (!spec->done_hp_init) {
3409 alc294_hp_init(codec);
3410 spec->done_hp_init = true;
3411 }
3412 alc_default_init(codec);
3413}
3414
3375static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg, 3415static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg,
3376 unsigned int val) 3416 unsigned int val)
3377{ 3417{
@@ -4737,7 +4777,7 @@ static void alc_update_headset_mode(struct hda_codec *codec)
4737 struct alc_spec *spec = codec->spec; 4777 struct alc_spec *spec = codec->spec;
4738 4778
4739 hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]]; 4779 hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]];
4740 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 4780 hda_nid_t hp_pin = alc_get_hp_pin(spec);
4741 4781
4742 int new_headset_mode; 4782 int new_headset_mode;
4743 4783
@@ -5016,7 +5056,7 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
5016static void alc_shutup_dell_xps13(struct hda_codec *codec) 5056static void alc_shutup_dell_xps13(struct hda_codec *codec)
5017{ 5057{
5018 struct alc_spec *spec = codec->spec; 5058 struct alc_spec *spec = codec->spec;
5019 int hp_pin = spec->gen.autocfg.hp_pins[0]; 5059 int hp_pin = alc_get_hp_pin(spec);
5020 5060
5021 /* Prevent pop noises when headphones are plugged in */ 5061 /* Prevent pop noises when headphones are plugged in */
5022 snd_hda_codec_write(codec, hp_pin, 0, 5062 snd_hda_codec_write(codec, hp_pin, 0,
@@ -5109,7 +5149,7 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
5109 5149
5110 if (action == HDA_FIXUP_ACT_PROBE) { 5150 if (action == HDA_FIXUP_ACT_PROBE) {
5111 int mic_pin = find_ext_mic_pin(codec); 5151 int mic_pin = find_ext_mic_pin(codec);
5112 int hp_pin = spec->gen.autocfg.hp_pins[0]; 5152 int hp_pin = alc_get_hp_pin(spec);
5113 5153
5114 if (snd_BUG_ON(!mic_pin || !hp_pin)) 5154 if (snd_BUG_ON(!mic_pin || !hp_pin))
5115 return; 5155 return;
@@ -5591,6 +5631,7 @@ enum {
5591 ALC294_FIXUP_ASUS_HEADSET_MIC, 5631 ALC294_FIXUP_ASUS_HEADSET_MIC,
5592 ALC294_FIXUP_ASUS_SPK, 5632 ALC294_FIXUP_ASUS_SPK,
5593 ALC225_FIXUP_HEADSET_JACK, 5633 ALC225_FIXUP_HEADSET_JACK,
5634 ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
5594}; 5635};
5595 5636
5596static const struct hda_fixup alc269_fixups[] = { 5637static const struct hda_fixup alc269_fixups[] = {
@@ -6537,6 +6578,15 @@ static const struct hda_fixup alc269_fixups[] = {
6537 .type = HDA_FIXUP_FUNC, 6578 .type = HDA_FIXUP_FUNC,
6538 .v.func = alc_fixup_headset_jack, 6579 .v.func = alc_fixup_headset_jack,
6539 }, 6580 },
6581 [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
6582 .type = HDA_FIXUP_PINS,
6583 .v.pins = (const struct hda_pintbl[]) {
6584 { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
6585 { }
6586 },
6587 .chained = true,
6588 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
6589 },
6540}; 6590};
6541 6591
6542static const struct snd_pci_quirk alc269_fixup_tbl[] = { 6592static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6715,6 +6765,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6715 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), 6765 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
6716 SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), 6766 SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
6717 SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), 6767 SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
6768 SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
6718 SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS), 6769 SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
6719 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), 6770 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
6720 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), 6771 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
@@ -7373,37 +7424,6 @@ static void alc269_fill_coef(struct hda_codec *codec)
7373 alc_update_coef_idx(codec, 0x4, 0, 1<<11); 7424 alc_update_coef_idx(codec, 0x4, 0, 1<<11);
7374} 7425}
7375 7426
7376static void alc294_hp_init(struct hda_codec *codec)
7377{
7378 struct alc_spec *spec = codec->spec;
7379 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
7380 int i, val;
7381
7382 if (!hp_pin)
7383 return;
7384
7385 snd_hda_codec_write(codec, hp_pin, 0,
7386 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
7387
7388 msleep(100);
7389
7390 snd_hda_codec_write(codec, hp_pin, 0,
7391 AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
7392
7393 alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
7394 alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
7395
7396 /* Wait for depop procedure finish */
7397 val = alc_read_coefex_idx(codec, 0x58, 0x01);
7398 for (i = 0; i < 20 && val & 0x0080; i++) {
7399 msleep(50);
7400 val = alc_read_coefex_idx(codec, 0x58, 0x01);
7401 }
7402 /* Set HP depop to auto mode */
7403 alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
7404 msleep(50);
7405}
7406
7407/* 7427/*
7408 */ 7428 */
7409static int patch_alc269(struct hda_codec *codec) 7429static int patch_alc269(struct hda_codec *codec)
@@ -7529,7 +7549,7 @@ static int patch_alc269(struct hda_codec *codec)
7529 spec->codec_variant = ALC269_TYPE_ALC294; 7549 spec->codec_variant = ALC269_TYPE_ALC294;
7530 spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */ 7550 spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
7531 alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */ 7551 alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
7532 alc294_hp_init(codec); 7552 spec->init_hook = alc294_init;
7533 break; 7553 break;
7534 case 0x10ec0300: 7554 case 0x10ec0300:
7535 spec->codec_variant = ALC269_TYPE_ALC300; 7555 spec->codec_variant = ALC269_TYPE_ALC300;
@@ -7541,7 +7561,7 @@ static int patch_alc269(struct hda_codec *codec)
7541 spec->codec_variant = ALC269_TYPE_ALC700; 7561 spec->codec_variant = ALC269_TYPE_ALC700;
7542 spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ 7562 spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
7543 alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */ 7563 alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
7544 alc294_hp_init(codec); 7564 spec->init_hook = alc294_init;
7545 break; 7565 break;
7546 7566
7547 } 7567 }
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index ebbadb3a7094..7e65fe853ee3 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1492,6 +1492,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1492 return SNDRV_PCM_FMTBIT_DSD_U32_BE; 1492 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
1493 break; 1493 break;
1494 1494
1495 case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
1495 case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */ 1496 case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */
1496 case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */ 1497 case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
1497 case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */ 1498 case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
@@ -1566,6 +1567,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1566 case 0x20b1: /* XMOS based devices */ 1567 case 0x20b1: /* XMOS based devices */
1567 case 0x152a: /* Thesycon devices */ 1568 case 0x152a: /* Thesycon devices */
1568 case 0x25ce: /* Mytek devices */ 1569 case 0x25ce: /* Mytek devices */
1570 case 0x2ab6: /* T+A devices */
1569 if (fp->dsd_raw) 1571 if (fp->dsd_raw)
1570 return SNDRV_PCM_FMTBIT_DSD_U32_BE; 1572 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
1571 break; 1573 break;
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 897483457bf0..f7261fad45c1 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -297,10 +297,8 @@ char *get_fdinfo(int fd, const char *key)
297 snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd); 297 snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
298 298
299 fdi = fopen(path, "r"); 299 fdi = fopen(path, "r");
300 if (!fdi) { 300 if (!fdi)
301 p_err("can't open fdinfo: %s", strerror(errno));
302 return NULL; 301 return NULL;
303 }
304 302
305 while ((n = getline(&line, &line_n, fdi)) > 0) { 303 while ((n = getline(&line, &line_n, fdi)) > 0) {
306 char *value; 304 char *value;
@@ -313,7 +311,6 @@ char *get_fdinfo(int fd, const char *key)
313 311
314 value = strchr(line, '\t'); 312 value = strchr(line, '\t');
315 if (!value || !value[1]) { 313 if (!value || !value[1]) {
316 p_err("malformed fdinfo!?");
317 free(line); 314 free(line);
318 return NULL; 315 return NULL;
319 } 316 }
@@ -326,7 +323,6 @@ char *get_fdinfo(int fd, const char *key)
326 return line; 323 return line;
327 } 324 }
328 325
329 p_err("key '%s' not found in fdinfo", key);
330 free(line); 326 free(line);
331 fclose(fdi); 327 fclose(fdi);
332 return NULL; 328 return NULL;
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index 2160a8ef17e5..e0c650d91784 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -358,6 +358,20 @@ static char **parse_bytes(char **argv, const char *name, unsigned char *val,
358 return argv + i; 358 return argv + i;
359} 359}
360 360
361/* on per cpu maps we must copy the provided value on all value instances */
362static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
363{
364 unsigned int i, n, step;
365
366 if (!map_is_per_cpu(info->type))
367 return;
368
369 n = get_possible_cpus();
370 step = round_up(info->value_size, 8);
371 for (i = 1; i < n; i++)
372 memcpy(value + i * step, value, info->value_size);
373}
374
361static int parse_elem(char **argv, struct bpf_map_info *info, 375static int parse_elem(char **argv, struct bpf_map_info *info,
362 void *key, void *value, __u32 key_size, __u32 value_size, 376 void *key, void *value, __u32 key_size, __u32 value_size,
363 __u32 *flags, __u32 **value_fd) 377 __u32 *flags, __u32 **value_fd)
@@ -440,6 +454,8 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
440 argv = parse_bytes(argv, "value", value, value_size); 454 argv = parse_bytes(argv, "value", value, value_size);
441 if (!argv) 455 if (!argv)
442 return -1; 456 return -1;
457
458 fill_per_cpu_value(info, value);
443 } 459 }
444 460
445 return parse_elem(argv, info, key, NULL, key_size, value_size, 461 return parse_elem(argv, info, key, NULL, key_size, value_size,
@@ -511,10 +527,9 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
511 jsonw_uint_field(json_wtr, "owner_prog_type", 527 jsonw_uint_field(json_wtr, "owner_prog_type",
512 prog_type); 528 prog_type);
513 } 529 }
514 if (atoi(owner_jited)) 530 if (owner_jited)
515 jsonw_bool_field(json_wtr, "owner_jited", true); 531 jsonw_bool_field(json_wtr, "owner_jited",
516 else 532 !!atoi(owner_jited));
517 jsonw_bool_field(json_wtr, "owner_jited", false);
518 533
519 free(owner_prog_type); 534 free(owner_prog_type);
520 free(owner_jited); 535 free(owner_jited);
@@ -567,7 +582,8 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
567 char *owner_prog_type = get_fdinfo(fd, "owner_prog_type"); 582 char *owner_prog_type = get_fdinfo(fd, "owner_prog_type");
568 char *owner_jited = get_fdinfo(fd, "owner_jited"); 583 char *owner_jited = get_fdinfo(fd, "owner_jited");
569 584
570 printf("\n\t"); 585 if (owner_prog_type || owner_jited)
586 printf("\n\t");
571 if (owner_prog_type) { 587 if (owner_prog_type) {
572 unsigned int prog_type = atoi(owner_prog_type); 588 unsigned int prog_type = atoi(owner_prog_type);
573 589
@@ -577,10 +593,9 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
577 else 593 else
578 printf("owner_prog_type %d ", prog_type); 594 printf("owner_prog_type %d ", prog_type);
579 } 595 }
580 if (atoi(owner_jited)) 596 if (owner_jited)
581 printf("owner jited"); 597 printf("owner%s jited",
582 else 598 atoi(owner_jited) ? "" : " not");
583 printf("owner not jited");
584 599
585 free(owner_prog_type); 600 free(owner_prog_type);
586 free(owner_jited); 601 free(owner_jited);
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 0640e9bc0ada..33ed0806ccc0 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -78,13 +78,14 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
78 78
79static int prog_fd_by_tag(unsigned char *tag) 79static int prog_fd_by_tag(unsigned char *tag)
80{ 80{
81 struct bpf_prog_info info = {};
82 __u32 len = sizeof(info);
83 unsigned int id = 0; 81 unsigned int id = 0;
84 int err; 82 int err;
85 int fd; 83 int fd;
86 84
87 while (true) { 85 while (true) {
86 struct bpf_prog_info info = {};
87 __u32 len = sizeof(info);
88
88 err = bpf_prog_get_next_id(id, &id); 89 err = bpf_prog_get_next_id(id, &id);
89 if (err) { 90 if (err) {
90 p_err("%s", strerror(errno)); 91 p_err("%s", strerror(errno));
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 3040830d7797..84545666a09c 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -330,7 +330,7 @@ static const struct option longopts[] = {
330 330
331int main(int argc, char **argv) 331int main(int argc, char **argv)
332{ 332{
333 unsigned long long num_loops = 2; 333 long long num_loops = 2;
334 unsigned long timedelay = 1000000; 334 unsigned long timedelay = 1000000;
335 unsigned long buf_len = 128; 335 unsigned long buf_len = 128;
336 336
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index d079f36d342d..ac221f137ed2 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -1681,13 +1681,8 @@ static void perf_sample__fprint_metric(struct perf_script *script,
1681 .force_header = false, 1681 .force_header = false,
1682 }; 1682 };
1683 struct perf_evsel *ev2; 1683 struct perf_evsel *ev2;
1684 static bool init;
1685 u64 val; 1684 u64 val;
1686 1685
1687 if (!init) {
1688 perf_stat__init_shadow_stats();
1689 init = true;
1690 }
1691 if (!evsel->stats) 1686 if (!evsel->stats)
1692 perf_evlist__alloc_stats(script->session->evlist, false); 1687 perf_evlist__alloc_stats(script->session->evlist, false);
1693 if (evsel_script(evsel->leader)->gnum++ == 0) 1688 if (evsel_script(evsel->leader)->gnum++ == 0)
@@ -1794,7 +1789,7 @@ static void process_event(struct perf_script *script,
1794 return; 1789 return;
1795 } 1790 }
1796 1791
1797 if (PRINT_FIELD(TRACE)) { 1792 if (PRINT_FIELD(TRACE) && sample->raw_data) {
1798 event_format__fprintf(evsel->tp_format, sample->cpu, 1793 event_format__fprintf(evsel->tp_format, sample->cpu,
1799 sample->raw_data, sample->raw_size, fp); 1794 sample->raw_data, sample->raw_size, fp);
1800 } 1795 }
@@ -2359,6 +2354,8 @@ static int __cmd_script(struct perf_script *script)
2359 2354
2360 signal(SIGINT, sig_handler); 2355 signal(SIGINT, sig_handler);
2361 2356
2357 perf_stat__init_shadow_stats();
2358
2362 /* override event processing functions */ 2359 /* override event processing functions */
2363 if (script->show_task_events) { 2360 if (script->show_task_events) {
2364 script->tool.comm = process_comm_event; 2361 script->tool.comm = process_comm_event;
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 1d00e5ec7906..82e16bf84466 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -224,20 +224,24 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser)
224 return ret; 224 return ret;
225} 225}
226 226
227static int disasm__cmp(struct annotation_line *a, struct annotation_line *b) 227static double disasm__cmp(struct annotation_line *a, struct annotation_line *b,
228 int percent_type)
228{ 229{
229 int i; 230 int i;
230 231
231 for (i = 0; i < a->data_nr; i++) { 232 for (i = 0; i < a->data_nr; i++) {
232 if (a->data[i].percent == b->data[i].percent) 233 if (a->data[i].percent[percent_type] == b->data[i].percent[percent_type])
233 continue; 234 continue;
234 return a->data[i].percent < b->data[i].percent; 235 return a->data[i].percent[percent_type] -
236 b->data[i].percent[percent_type];
235 } 237 }
236 return 0; 238 return 0;
237} 239}
238 240
239static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line *al) 241static void disasm_rb_tree__insert(struct annotate_browser *browser,
242 struct annotation_line *al)
240{ 243{
244 struct rb_root *root = &browser->entries;
241 struct rb_node **p = &root->rb_node; 245 struct rb_node **p = &root->rb_node;
242 struct rb_node *parent = NULL; 246 struct rb_node *parent = NULL;
243 struct annotation_line *l; 247 struct annotation_line *l;
@@ -246,7 +250,7 @@ static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line
246 parent = *p; 250 parent = *p;
247 l = rb_entry(parent, struct annotation_line, rb_node); 251 l = rb_entry(parent, struct annotation_line, rb_node);
248 252
249 if (disasm__cmp(al, l)) 253 if (disasm__cmp(al, l, browser->opts->percent_type) < 0)
250 p = &(*p)->rb_left; 254 p = &(*p)->rb_left;
251 else 255 else
252 p = &(*p)->rb_right; 256 p = &(*p)->rb_right;
@@ -329,7 +333,7 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
329 RB_CLEAR_NODE(&pos->al.rb_node); 333 RB_CLEAR_NODE(&pos->al.rb_node);
330 continue; 334 continue;
331 } 335 }
332 disasm_rb_tree__insert(&browser->entries, &pos->al); 336 disasm_rb_tree__insert(browser, &pos->al);
333 } 337 }
334 pthread_mutex_unlock(&notes->lock); 338 pthread_mutex_unlock(&notes->lock);
335 339
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 1ccbd3342069..383674f448fc 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -134,7 +134,12 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
134 if (!cpu_list) 134 if (!cpu_list)
135 return cpu_map__read_all_cpu_map(); 135 return cpu_map__read_all_cpu_map();
136 136
137 if (!isdigit(*cpu_list)) 137 /*
138 * must handle the case of empty cpumap to cover
139 * TOPOLOGY header for NUMA nodes with no CPU
140 * ( e.g., because of CPU hotplug)
141 */
142 if (!isdigit(*cpu_list) && *cpu_list != '\0')
138 goto out; 143 goto out;
139 144
140 while (isdigit(*cpu_list)) { 145 while (isdigit(*cpu_list)) {
@@ -181,8 +186,10 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
181 186
182 if (nr_cpus > 0) 187 if (nr_cpus > 0)
183 cpus = cpu_map__trim_new(nr_cpus, tmp_cpus); 188 cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
184 else 189 else if (*cpu_list != '\0')
185 cpus = cpu_map__default_new(); 190 cpus = cpu_map__default_new();
191 else
192 cpus = cpu_map__dummy_new();
186invalid: 193invalid:
187 free(tmp_cpus); 194 free(tmp_cpus);
188out: 195out:
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index 897589507d97..ea523d3b248f 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -391,8 +391,10 @@ void ordered_events__free(struct ordered_events *oe)
391 * Current buffer might not have all the events allocated 391 * Current buffer might not have all the events allocated
392 * yet, we need to free only allocated ones ... 392 * yet, we need to free only allocated ones ...
393 */ 393 */
394 list_del(&oe->buffer->list); 394 if (oe->buffer) {
395 ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe); 395 list_del(&oe->buffer->list);
396 ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
397 }
396 398
397 /* ... and continue with the rest */ 399 /* ... and continue with the rest */
398 list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) { 400 list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 63f758c655d5..64d1f36dee99 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -17,6 +17,8 @@ if cc == "clang":
17 vars[var] = sub("-mcet", "", vars[var]) 17 vars[var] = sub("-mcet", "", vars[var])
18 if not clang_has_option("-fcf-protection"): 18 if not clang_has_option("-fcf-protection"):
19 vars[var] = sub("-fcf-protection", "", vars[var]) 19 vars[var] = sub("-fcf-protection", "", vars[var])
20 if not clang_has_option("-fstack-clash-protection"):
21 vars[var] = sub("-fstack-clash-protection", "", vars[var])
20 22
21from distutils.core import setup, Extension 23from distutils.core import setup, Extension
22 24
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 1a2bd15c5b6e..400ee81a3043 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -10,6 +10,7 @@ TARGETS += drivers/dma-buf
10TARGETS += efivarfs 10TARGETS += efivarfs
11TARGETS += exec 11TARGETS += exec
12TARGETS += filesystems 12TARGETS += filesystems
13TARGETS += filesystems/binderfs
13TARGETS += firmware 14TARGETS += firmware
14TARGETS += ftrace 15TARGETS += ftrace
15TARGETS += futex 16TARGETS += futex
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index 315a44fa32af..84fd6f1bf33e 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -13,7 +13,7 @@ static inline unsigned int bpf_num_possible_cpus(void)
13 unsigned int start, end, possible_cpus = 0; 13 unsigned int start, end, possible_cpus = 0;
14 char buff[128]; 14 char buff[128];
15 FILE *fp; 15 FILE *fp;
16 int n; 16 int len, n, i, j = 0;
17 17
18 fp = fopen(fcpu, "r"); 18 fp = fopen(fcpu, "r");
19 if (!fp) { 19 if (!fp) {
@@ -21,17 +21,27 @@ static inline unsigned int bpf_num_possible_cpus(void)
21 exit(1); 21 exit(1);
22 } 22 }
23 23
24 while (fgets(buff, sizeof(buff), fp)) { 24 if (!fgets(buff, sizeof(buff), fp)) {
25 n = sscanf(buff, "%u-%u", &start, &end); 25 printf("Failed to read %s!\n", fcpu);
26 if (n == 0) { 26 exit(1);
27 printf("Failed to retrieve # possible CPUs!\n"); 27 }
28 exit(1); 28
29 } else if (n == 1) { 29 len = strlen(buff);
30 end = start; 30 for (i = 0; i <= len; i++) {
31 if (buff[i] == ',' || buff[i] == '\0') {
32 buff[i] = '\0';
33 n = sscanf(&buff[j], "%u-%u", &start, &end);
34 if (n <= 0) {
35 printf("Failed to retrieve # possible CPUs!\n");
36 exit(1);
37 } else if (n == 1) {
38 end = start;
39 }
40 possible_cpus += end - start + 1;
41 j = i + 1;
31 } 42 }
32 possible_cpus = start == 0 ? end + 1 : 0;
33 break;
34 } 43 }
44
35 fclose(fp); 45 fclose(fp);
36 46
37 return possible_cpus; 47 return possible_cpus;
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 447acc34db94..ee723774015a 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -1902,13 +1902,12 @@ static struct btf_raw_test raw_tests[] = {
1902}, 1902},
1903 1903
1904{ 1904{
1905 .descr = "func proto (CONST=>TYPEDEF=>FUNC_PROTO)", 1905 .descr = "func proto (TYPEDEF=>FUNC_PROTO)",
1906 .raw_types = { 1906 .raw_types = {
1907 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 1907 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
1908 BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ 1908 BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
1909 BTF_CONST_ENC(4), /* [3] */ 1909 BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */
1910 BTF_TYPEDEF_ENC(NAME_TBD, 5), /* [4] */ 1910 BTF_FUNC_PROTO_ENC(0, 2), /* [4] */
1911 BTF_FUNC_PROTO_ENC(0, 2), /* [5] */
1912 BTF_FUNC_PROTO_ARG_ENC(0, 1), 1911 BTF_FUNC_PROTO_ARG_ENC(0, 1),
1913 BTF_FUNC_PROTO_ARG_ENC(0, 2), 1912 BTF_FUNC_PROTO_ARG_ENC(0, 2),
1914 BTF_END_RAW, 1913 BTF_END_RAW,
@@ -1922,8 +1921,6 @@ static struct btf_raw_test raw_tests[] = {
1922 .key_type_id = 1, 1921 .key_type_id = 1,
1923 .value_type_id = 1, 1922 .value_type_id = 1,
1924 .max_entries = 4, 1923 .max_entries = 4,
1925 .btf_load_err = true,
1926 .err_str = "Invalid type_id",
1927}, 1924},
1928 1925
1929{ 1926{
diff --git a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
index bab13dd025a6..0d26b5e3f966 100755
--- a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
+++ b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
@@ -37,6 +37,10 @@ prerequisite()
37 exit $ksft_skip 37 exit $ksft_skip
38 fi 38 fi
39 39
40 present_cpus=`cat $SYSFS/devices/system/cpu/present`
41 present_max=${present_cpus##*-}
42 echo "present_cpus = $present_cpus present_max = $present_max"
43
40 echo -e "\t Cpus in online state: $online_cpus" 44 echo -e "\t Cpus in online state: $online_cpus"
41 45
42 offline_cpus=`cat $SYSFS/devices/system/cpu/offline` 46 offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
@@ -151,6 +155,8 @@ online_cpus=0
151online_max=0 155online_max=0
152offline_cpus=0 156offline_cpus=0
153offline_max=0 157offline_max=0
158present_cpus=0
159present_max=0
154 160
155while getopts e:ahp: opt; do 161while getopts e:ahp: opt; do
156 case $opt in 162 case $opt in
@@ -190,9 +196,10 @@ if [ $allcpus -eq 0 ]; then
190 online_cpu_expect_success $online_max 196 online_cpu_expect_success $online_max
191 197
192 if [[ $offline_cpus -gt 0 ]]; then 198 if [[ $offline_cpus -gt 0 ]]; then
193 echo -e "\t offline to online to offline: cpu $offline_max" 199 echo -e "\t offline to online to offline: cpu $present_max"
194 online_cpu_expect_success $offline_max 200 online_cpu_expect_success $present_max
195 offline_cpu_expect_success $offline_max 201 offline_cpu_expect_success $present_max
202 online_cpu $present_max
196 fi 203 fi
197 exit 0 204 exit 0
198else 205else
diff --git a/tools/testing/selftests/filesystems/binderfs/.gitignore b/tools/testing/selftests/filesystems/binderfs/.gitignore
new file mode 100644
index 000000000000..8a5d9bf63dd4
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/.gitignore
@@ -0,0 +1 @@
binderfs_test
diff --git a/tools/testing/selftests/filesystems/binderfs/Makefile b/tools/testing/selftests/filesystems/binderfs/Makefile
new file mode 100644
index 000000000000..58cb659b56b4
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/Makefile
@@ -0,0 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0
2
3CFLAGS += -I../../../../../usr/include/
4TEST_GEN_PROGS := binderfs_test
5
6include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
new file mode 100644
index 000000000000..8c2ed962e1c7
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
@@ -0,0 +1,275 @@
1// SPDX-License-Identifier: GPL-2.0
2
3#define _GNU_SOURCE
4#include <errno.h>
5#include <fcntl.h>
6#include <sched.h>
7#include <stdbool.h>
8#include <stdio.h>
9#include <stdlib.h>
10#include <string.h>
11#include <sys/ioctl.h>
12#include <sys/mount.h>
13#include <sys/stat.h>
14#include <sys/types.h>
15#include <unistd.h>
16#include <linux/android/binder.h>
17#include <linux/android/binderfs.h>
18#include "../../kselftest.h"
19
20static ssize_t write_nointr(int fd, const void *buf, size_t count)
21{
22 ssize_t ret;
23again:
24 ret = write(fd, buf, count);
25 if (ret < 0 && errno == EINTR)
26 goto again;
27
28 return ret;
29}
30
31static void write_to_file(const char *filename, const void *buf, size_t count,
32 int allowed_errno)
33{
34 int fd, saved_errno;
35 ssize_t ret;
36
37 fd = open(filename, O_WRONLY | O_CLOEXEC);
38 if (fd < 0)
39 ksft_exit_fail_msg("%s - Failed to open file %s\n",
40 strerror(errno), filename);
41
42 ret = write_nointr(fd, buf, count);
43 if (ret < 0) {
44 if (allowed_errno && (errno == allowed_errno)) {
45 close(fd);
46 return;
47 }
48
49 goto on_error;
50 }
51
52 if ((size_t)ret != count)
53 goto on_error;
54
55 close(fd);
56 return;
57
58on_error:
59 saved_errno = errno;
60 close(fd);
61 errno = saved_errno;
62
63 if (ret < 0)
64 ksft_exit_fail_msg("%s - Failed to write to file %s\n",
65 strerror(errno), filename);
66
67 ksft_exit_fail_msg("Failed to write to file %s\n", filename);
68}
69
70static void change_to_userns(void)
71{
72 int ret;
73 uid_t uid;
74 gid_t gid;
75 /* {g,u}id_map files only allow a max of 4096 bytes written to them */
76 char idmap[4096];
77
78 uid = getuid();
79 gid = getgid();
80
81 ret = unshare(CLONE_NEWUSER);
82 if (ret < 0)
83 ksft_exit_fail_msg("%s - Failed to unshare user namespace\n",
84 strerror(errno));
85
86 write_to_file("/proc/self/setgroups", "deny", strlen("deny"), ENOENT);
87
88 ret = snprintf(idmap, sizeof(idmap), "0 %d 1", uid);
89 if (ret < 0 || (size_t)ret >= sizeof(idmap))
90 ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n",
91 strerror(errno));
92
93 write_to_file("/proc/self/uid_map", idmap, strlen(idmap), 0);
94
95 ret = snprintf(idmap, sizeof(idmap), "0 %d 1", gid);
96 if (ret < 0 || (size_t)ret >= sizeof(idmap))
97 ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n",
98 strerror(errno));
99
100 write_to_file("/proc/self/gid_map", idmap, strlen(idmap), 0);
101
102 ret = setgid(0);
103 if (ret)
104 ksft_exit_fail_msg("%s - Failed to setgid(0)\n",
105 strerror(errno));
106
107 ret = setuid(0);
108 if (ret)
109 ksft_exit_fail_msg("%s - Failed to setgid(0)\n",
110 strerror(errno));
111}
112
113static void change_to_mountns(void)
114{
115 int ret;
116
117 ret = unshare(CLONE_NEWNS);
118 if (ret < 0)
119 ksft_exit_fail_msg("%s - Failed to unshare mount namespace\n",
120 strerror(errno));
121
122 ret = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
123 if (ret < 0)
124 ksft_exit_fail_msg("%s - Failed to mount / as private\n",
125 strerror(errno));
126}
127
128static void rmdir_protect_errno(const char *dir)
129{
130 int saved_errno = errno;
131 (void)rmdir(dir);
132 errno = saved_errno;
133}
134
135static void __do_binderfs_test(void)
136{
137 int fd, ret, saved_errno;
138 size_t len;
139 ssize_t wret;
140 bool keep = false;
141 struct binderfs_device device = { 0 };
142 struct binder_version version = { 0 };
143
144 change_to_mountns();
145
146 ret = mkdir("/dev/binderfs", 0755);
147 if (ret < 0) {
148 if (errno != EEXIST)
149 ksft_exit_fail_msg(
150 "%s - Failed to create binderfs mountpoint\n",
151 strerror(errno));
152
153 keep = true;
154 }
155
156 ret = mount(NULL, "/dev/binderfs", "binder", 0, 0);
157 if (ret < 0) {
158 if (errno != ENODEV)
159 ksft_exit_fail_msg("%s - Failed to mount binderfs\n",
160 strerror(errno));
161
162 keep ? : rmdir_protect_errno("/dev/binderfs");
163 ksft_exit_skip(
164 "The Android binderfs filesystem is not available\n");
165 }
166
167 /* binderfs mount test passed */
168 ksft_inc_pass_cnt();
169
170 memcpy(device.name, "my-binder", strlen("my-binder"));
171
172 fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
173 if (fd < 0)
174 ksft_exit_fail_msg(
175 "%s - Failed to open binder-control device\n",
176 strerror(errno));
177
178 ret = ioctl(fd, BINDER_CTL_ADD, &device);
179 saved_errno = errno;
180 close(fd);
181 errno = saved_errno;
182 if (ret < 0) {
183 keep ? : rmdir_protect_errno("/dev/binderfs");
184 ksft_exit_fail_msg(
185 "%s - Failed to allocate new binder device\n",
186 strerror(errno));
187 }
188
189 ksft_print_msg(
190 "Allocated new binder device with major %d, minor %d, and name %s\n",
191 device.major, device.minor, device.name);
192
193 /* binder device allocation test passed */
194 ksft_inc_pass_cnt();
195
196 fd = open("/dev/binderfs/my-binder", O_CLOEXEC | O_RDONLY);
197 if (fd < 0) {
198 keep ? : rmdir_protect_errno("/dev/binderfs");
199 ksft_exit_fail_msg("%s - Failed to open my-binder device\n",
200 strerror(errno));
201 }
202
203 ret = ioctl(fd, BINDER_VERSION, &version);
204 saved_errno = errno;
205 close(fd);
206 errno = saved_errno;
207 if (ret < 0) {
208 keep ? : rmdir_protect_errno("/dev/binderfs");
209 ksft_exit_fail_msg(
210 "%s - Failed to open perform BINDER_VERSION request\n",
211 strerror(errno));
212 }
213
214 ksft_print_msg("Detected binder version: %d\n",
215 version.protocol_version);
216
217 /* binder transaction with binderfs binder device passed */
218 ksft_inc_pass_cnt();
219
220 ret = unlink("/dev/binderfs/my-binder");
221 if (ret < 0) {
222 keep ? : rmdir_protect_errno("/dev/binderfs");
223 ksft_exit_fail_msg("%s - Failed to delete binder device\n",
224 strerror(errno));
225 }
226
227 /* binder device removal passed */
228 ksft_inc_pass_cnt();
229
230 ret = unlink("/dev/binderfs/binder-control");
231 if (!ret) {
232 keep ? : rmdir_protect_errno("/dev/binderfs");
233 ksft_exit_fail_msg("Managed to delete binder-control device\n");
234 } else if (errno != EPERM) {
235 keep ? : rmdir_protect_errno("/dev/binderfs");
236 ksft_exit_fail_msg(
237 "%s - Failed to delete binder-control device but exited with unexpected error code\n",
238 strerror(errno));
239 }
240
241 /* binder-control device removal failed as expected */
242 ksft_inc_xfail_cnt();
243
244on_error:
245 ret = umount2("/dev/binderfs", MNT_DETACH);
246 keep ?: rmdir_protect_errno("/dev/binderfs");
247 if (ret < 0)
248 ksft_exit_fail_msg("%s - Failed to unmount binderfs\n",
249 strerror(errno));
250
251 /* binderfs unmount test passed */
252 ksft_inc_pass_cnt();
253}
254
255static void binderfs_test_privileged()
256{
257 if (geteuid() != 0)
258 ksft_print_msg(
259 "Tests are not run as root. Skipping privileged tests\n");
260 else
261 __do_binderfs_test();
262}
263
264static void binderfs_test_unprivileged()
265{
266 change_to_userns();
267 __do_binderfs_test();
268}
269
270int main(int argc, char *argv[])
271{
272 binderfs_test_privileged();
273 binderfs_test_unprivileged();
274 ksft_exit_pass();
275}
diff --git a/tools/testing/selftests/filesystems/binderfs/config b/tools/testing/selftests/filesystems/binderfs/config
new file mode 100644
index 000000000000..02dd6cc9cf99
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/config
@@ -0,0 +1,3 @@
1CONFIG_ANDROID=y
2CONFIG_ANDROID_BINDERFS=y
3CONFIG_ANDROID_BINDER_IPC=y
diff --git a/tools/testing/selftests/ir/Makefile b/tools/testing/selftests/ir/Makefile
index f4ba8eb84b95..ad06489c22a5 100644
--- a/tools/testing/selftests/ir/Makefile
+++ b/tools/testing/selftests/ir/Makefile
@@ -1,5 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2TEST_PROGS := ir_loopback.sh 2TEST_PROGS := ir_loopback.sh
3TEST_GEN_PROGS_EXTENDED := ir_loopback 3TEST_GEN_PROGS_EXTENDED := ir_loopback
4APIDIR := ../../../include/uapi
5CFLAGS += -Wall -O2 -I$(APIDIR)
4 6
5include ../lib.mk 7include ../lib.mk
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index f8f3e90700c0..1e6d14d2825c 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -21,6 +21,6 @@ TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
21KSFT_KHDR_INSTALL := 1 21KSFT_KHDR_INSTALL := 1
22include ../lib.mk 22include ../lib.mk
23 23
24$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma 24$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
25$(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread 25$(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread
26$(OUTPUT)/tcp_inq: LDFLAGS += -lpthread 26$(OUTPUT)/tcp_inq: LDFLAGS += -lpthread
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index 47ed6cef93fb..c9ff2b47bd1c 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -1,6 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2# Makefile for netfilter selftests 2# Makefile for netfilter selftests
3 3
4TEST_PROGS := nft_trans_stress.sh 4TEST_PROGS := nft_trans_stress.sh nft_nat.sh
5 5
6include ../lib.mk 6include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
index 1017313e41a8..59caa8f71cd8 100644
--- a/tools/testing/selftests/netfilter/config
+++ b/tools/testing/selftests/netfilter/config
@@ -1,2 +1,2 @@
1CONFIG_NET_NS=y 1CONFIG_NET_NS=y
2NF_TABLES_INET=y 2CONFIG_NF_TABLES_INET=y
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
new file mode 100755
index 000000000000..8ec76681605c
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -0,0 +1,762 @@
#!/bin/bash
#
# This test is for basic NAT functionality: snat, dnat, redirect, masquerade.
#

# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
ret=0

# Both userspace tools are mandatory; skip (not fail) when unavailable.
nft --version > /dev/null 2>&1
if [ $? -ne 0 ];then
	echo "SKIP: Could not run test without nft tool"
	exit $ksft_skip
fi

ip -Version > /dev/null 2>&1
if [ $? -ne 0 ];then
	echo "SKIP: Could not run test without ip tool"
	exit $ksft_skip
fi

# Topology: ns0 sits in the middle as the router, connected to ns1 and
# ns2 through one veth pair each:
#   ns1 (10.0.1.99 / dead:1::99) <-> ns0 <-> ns2 (10.0.2.99 / dead:2::99)
ip netns add ns0
ip netns add ns1
ip netns add ns2

ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
ip link add veth1 netns ns0 type veth peer name eth0 netns ns2

ip -net ns0 link set lo up
ip -net ns0 link set veth0 up
ip -net ns0 addr add 10.0.1.1/24 dev veth0
ip -net ns0 addr add dead:1::1/64 dev veth0

ip -net ns0 link set veth1 up
ip -net ns0 addr add 10.0.2.1/24 dev veth1
ip -net ns0 addr add dead:2::1/64 dev veth1

# ns1 and ns2 route all traffic through ns0.
for i in 1 2; do
	ip -net ns$i link set lo up
	ip -net ns$i link set eth0 up
	ip -net ns$i addr add 10.0.$i.99/24 dev eth0
	ip -net ns$i route add default via 10.0.$i.1
	ip -net ns$i addr add dead:$i::99/64 dev eth0
	ip -net ns$i route add default via dead:$i::1
done
46
# Report a counter mismatch: print an error on stderr and dump the
# offending counter's current state for debugging.
#   $1 - namespace, $2 - counter name, $3 - expected "packets N bytes M"
bad_counter()
{
	local ns=$1 counter=$2 expect=$3

	echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2
	ip netns exec $ns nft list counter inet filter $counter 1>&2
}
56
# Verify that namespace $1 saw exactly one echo request/reply exchange
# with ns0: the ipv4 counters must show one 84 byte packet in each
# direction and the ipv6 counters one 104 byte packet each.
# Returns 0 on success, 1 on any mismatch.
# Fix: declare ns with "local" — it previously leaked into the global
# scope, unlike the sibling helpers.
check_counters()
{
	local ns=$1
	local lret=0

	cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84")
	if [ $? -ne 0 ]; then
		bad_counter $ns ns0in "packets 1 bytes 84"
		lret=1
	fi
	cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84")
	if [ $? -ne 0 ]; then
		bad_counter $ns ns0out "packets 1 bytes 84"
		lret=1
	fi

	expect="packets 1 bytes 104"
	cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect")
	if [ $? -ne 0 ]; then
		bad_counter $ns ns0in6 "$expect"
		lret=1
	fi
	cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect")
	if [ $? -ne 0 ]; then
		bad_counter $ns ns0out6 "$expect"
		lret=1
	fi

	return $lret
}
87
# Verify ns0's view after a ping exchange with namespace $1: ns0's own
# per-ns0 counters must be untouched (loopback traffic is not counted)
# while its counters for $1 must show one packet in each direction.
# Fix: the ipv6 error path expanded the undefined variable "$dir6"
# instead of "${dir}6", producing a truncated counter name in the
# diagnostic output.
check_ns0_counters()
{
	local ns=$1
	local lret=0

	cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0")
	if [ $? -ne 0 ]; then
		bad_counter ns0 ns0in "packets 0 bytes 0"
		lret=1
	fi

	cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0")
	if [ $? -ne 0 ]; then
		bad_counter ns0 ns0in6 "packets 0 bytes 0"
		lret=1
	fi

	cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0")
	if [ $? -ne 0 ]; then
		bad_counter ns0 ns0out "packets 0 bytes 0"
		lret=1
	fi
	cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0")
	if [ $? -ne 0 ]; then
		bad_counter ns0 ns0out6 "packets 0 bytes 0"
		lret=1
	fi

	for dir in "in" "out" ; do
		expect="packets 1 bytes 84"
		cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns0 $ns$dir "$expect"
			lret=1
		fi

		expect="packets 1 bytes 104"
		cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns0 ${ns}${dir}6 "$expect"
			lret=1
		fi
	done

	return $lret
}
134
# Zero all inet-family counters in every namespace so each sub-test
# starts from a clean slate.
reset_counters()
{
	for i in 0 1 2;do
		ip netns exec ns$i nft reset counters inet > /dev/null
	done
}
141
# Verify ipv6 local dnat: an ip6 nat output hook in ns0 rewrites
# locally generated traffic for ns1 (dead:1::99) to ns2 (dead:2::99).
# ns2's counters must see the exchange while ns1's stay at zero.
# Fix: duplicated word in the SKIP message ("add add").
test_local_dnat6()
{
	local lret=0
ip netns exec ns0 nft -f - <<EOF
table ip6 nat {
	chain output {
		type nat hook output priority 0; policy accept;
		ip6 daddr dead:1::99 dnat to dead:2::99
	}
}
EOF
	if [ $? -ne 0 ]; then
		echo "SKIP: Could not add ip6 dnat hook"
		return $ksft_skip
	fi

	# ping netns1, expect rewrite to netns2
	ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null
	if [ $? -ne 0 ]; then
		lret=1
		echo "ERROR: ping6 failed"
		return $lret
	fi

	# ns0 must have counted the traffic against ns2, not ns1
	expect="packets 0 bytes 0"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns0 ns1$dir "$expect"
			lret=1
		fi
	done

	expect="packets 1 bytes 104"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns0 ns2$dir "$expect"
			lret=1
		fi
	done

	# expect 0 count in ns1
	expect="packets 0 bytes 0"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns0$dir "$expect"
			lret=1
		fi
	done

	# expect 1 packet in ns2
	expect="packets 1 bytes 104"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns0$dir "$expect"
			lret=1
		fi
	done

	test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2"
	ip netns exec ns0 nft flush chain ip6 nat output

	return $lret
}
209
# Verify ipv4 local dnat: an ip nat output hook in ns0 rewrites
# locally generated traffic for ns1 (10.0.1.99) to ns2 (10.0.2.99).
# The chain is then flushed and a second ping must reach ns1 again.
# Fix: three bad_counter calls reported the wrong namespace/counter
# compared with the command actually being checked.
test_local_dnat()
{
	local lret=0
ip netns exec ns0 nft -f - <<EOF
table ip nat {
	chain output {
		type nat hook output priority 0; policy accept;
		ip daddr 10.0.1.99 dnat to 10.0.2.99
	}
}
EOF
	# ping netns1, expect rewrite to netns2
	ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
	if [ $? -ne 0 ]; then
		lret=1
		echo "ERROR: ping failed"
		return $lret
	fi

	expect="packets 0 bytes 0"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns0 ns1$dir "$expect"
			lret=1
		fi
	done

	expect="packets 1 bytes 84"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns0 ns2$dir "$expect"
			lret=1
		fi
	done

	# expect 0 count in ns1
	expect="packets 0 bytes 0"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns0$dir "$expect"
			lret=1
		fi
	done

	# expect 1 packet in ns2
	expect="packets 1 bytes 84"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns0$dir "$expect"
			lret=1
		fi
	done

	test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2"

	ip netns exec ns0 nft flush chain ip nat output

	# Second round: without the dnat rule, the ping must reach ns1.
	reset_counters
	ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
	if [ $? -ne 0 ]; then
		lret=1
		echo "ERROR: ping failed"
		return $lret
	fi

	expect="packets 1 bytes 84"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns0 ns1$dir "$expect"
			lret=1
		fi
	done
	expect="packets 0 bytes 0"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns0 ns2$dir "$expect"
			lret=1
		fi
	done

	# expect 1 count in ns1
	expect="packets 1 bytes 84"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns0$dir "$expect"
			lret=1
		fi
	done

	# expect 0 packet in ns2
	expect="packets 0 bytes 0"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns0$dir "$expect"
			lret=1
		fi
	done

	test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush"

	return $lret
}
320
321
# Verify ipv6 masquerading: with a masquerade rule on ns0's veth0,
# packets ns2 sends to ns1 must show up in ns1 with ns0's source
# address, and ns1 must never see ns2's address.
# Fixes: removed unreachable "lret=1" that followed "return 1"; the
# expect-0 bad_counter labels now match the counters actually checked.
test_masquerade6()
{
	local lret=0

	ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null

	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2 via ipv6"
		# Without basic connectivity the checks below are meaningless.
		return 1
	fi

	expect="packets 1 bytes 104"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi

		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns1$dir "$expect"
			lret=1
		fi
	done

	reset_counters

# add masquerading rule
ip netns exec ns0 nft -f - <<EOF
table ip6 nat {
	chain postrouting {
		type nat hook postrouting priority 0; policy accept;
		meta oif veth0 masquerade
	}
}
EOF
	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
		lret=1
	fi

	# ns1 should have seen packets from ns0, due to masquerade
	expect="packets 1 bytes 104"
	for dir in "in6" "out6" ; do

		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns0$dir "$expect"
			lret=1
		fi

		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns1$dir "$expect"
			lret=1
		fi
	done

	# ns1 should not have seen packets from ns2, due to masquerade
	expect="packets 0 bytes 0"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi

		# NOTE(review): re-checks the same ns1 counter as above;
		# possibly meant to inspect a different namespace - confirm.
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi
	done

	ip netns exec ns0 nft flush chain ip6 nat postrouting
	if [ $? -ne 0 ]; then
		echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
		lret=1
	fi

	test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"

	return $lret
}
410
# Verify ipv4 masquerading: with a masquerade rule on ns0's veth0,
# packets ns2 sends to ns1 must show up in ns1 with ns0's source
# address, and ns1 must never see ns2's address.
# Fixes: "canot" typo in the error message; the expect-0 bad_counter
# labels now match the counters actually checked.
test_masquerade()
{
	local lret=0

	ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
	ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null

	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2"
		lret=1
	fi

	expect="packets 1 bytes 84"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi

		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns1$dir "$expect"
			lret=1
		fi
	done

	reset_counters

# add masquerading rule
ip netns exec ns0 nft -f - <<EOF
table ip nat {
	chain postrouting {
		type nat hook postrouting priority 0; policy accept;
		meta oif veth0 masquerade
	}
}
EOF
	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
		lret=1
	fi

	# ns1 should have seen packets from ns0, due to masquerade
	expect="packets 1 bytes 84"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns0$dir "$expect"
			lret=1
		fi

		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns1$dir "$expect"
			lret=1
		fi
	done

	# ns1 should not have seen packets from ns2, due to masquerade
	expect="packets 0 bytes 0"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi

		# NOTE(review): re-checks the same ns1 counter as above;
		# possibly meant to inspect a different namespace - confirm.
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi
	done

	ip netns exec ns0 nft flush chain ip nat postrouting
	if [ $? -ne 0 ]; then
		echo "ERROR: Could not flush nat postrouting" 1>&2
		lret=1
	fi

	test $lret -eq 0 && echo "PASS: IP masquerade for ns2"

	return $lret
}
498
# Verify ipv6 redirect: an ip6 nat prerouting redirect rule on ns0
# diverts ns2's pings for ns1 to ns0 itself, so ns1 must see nothing
# while ns0 answers in ns1's stead.
# Fixes: "cannnot" typo; "due to masquerade" comment corrected to
# redirect; bad_counter labels now match the counters checked.
test_redirect6()
{
	local lret=0

	ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null

	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2 via ipv6"
		lret=1
	fi

	expect="packets 1 bytes 104"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi

		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns1$dir "$expect"
			lret=1
		fi
	done

	reset_counters

# add redirect rule
ip netns exec ns0 nft -f - <<EOF
table ip6 nat {
	chain prerouting {
		type nat hook prerouting priority 0; policy accept;
		meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
	}
}
EOF
	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
		lret=1
	fi

	# ns1 should have seen no packets from ns2, due to redirection
	expect="packets 0 bytes 0"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi
	done

	# ns0 should have seen packets from ns2, due to redirect
	expect="packets 1 bytes 104"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns0 ns2$dir "$expect"
			lret=1
		fi
	done

	ip netns exec ns0 nft delete table ip6 nat
	if [ $? -ne 0 ]; then
		echo "ERROR: Could not delete ip6 nat table" 1>&2
		lret=1
	fi

	test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"

	return $lret
}
573
# Verify ipv4 redirect: an ip nat prerouting redirect rule on ns0
# diverts ns2's pings for ns1 to ns0 itself, so ns1 must see nothing
# while ns0 answers in ns1's stead.
# Fixes: "due to masquerade" comment corrected to redirect; bad_counter
# labels now match the counters checked.
test_redirect()
{
	local lret=0

	ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
	ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null

	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2"
		lret=1
	fi

	expect="packets 1 bytes 84"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi

		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns1$dir "$expect"
			lret=1
		fi
	done

	reset_counters

# add redirect rule
ip netns exec ns0 nft -f - <<EOF
table ip nat {
	chain prerouting {
		type nat hook prerouting priority 0; policy accept;
		meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
	}
}
EOF
	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
		lret=1
	fi

	# ns1 should have seen no packets from ns2, due to redirection
	expect="packets 0 bytes 0"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi
	done

	# ns0 should have seen packets from ns2, due to redirect
	expect="packets 1 bytes 84"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns0 ns2$dir "$expect"
			lret=1
		fi
	done

	ip netns exec ns0 nft delete table ip nat
	if [ $? -ne 0 ]; then
		echo "ERROR: Could not delete nat table" 1>&2
		lret=1
	fi

	test $lret -eq 0 && echo "PASS: IP redirection for ns2"

	return $lret
}
650
651
# Install identical inet filter counter sets in all three namespaces:
# per-peer in/out counters for ipv4 and ipv6, driven by verdict maps
# keyed on the packet's source (input hook) or destination (output
# hook) address.
# ip netns exec ns0 ping -c 1 -q 10.0.$i.99
for i in 0 1 2; do
ip netns exec ns$i nft -f - <<EOF
table inet filter {
	counter ns0in {}
	counter ns1in {}
	counter ns2in {}

	counter ns0out {}
	counter ns1out {}
	counter ns2out {}

	counter ns0in6 {}
	counter ns1in6 {}
	counter ns2in6 {}

	counter ns0out6 {}
	counter ns1out6 {}
	counter ns2out6 {}

	map nsincounter {
		type ipv4_addr : counter
		elements = { 10.0.1.1 : "ns0in",
			     10.0.2.1 : "ns0in",
			     10.0.1.99 : "ns1in",
			     10.0.2.99 : "ns2in" }
	}

	map nsincounter6 {
		type ipv6_addr : counter
		elements = { dead:1::1 : "ns0in6",
			     dead:2::1 : "ns0in6",
			     dead:1::99 : "ns1in6",
			     dead:2::99 : "ns2in6" }
	}

	map nsoutcounter {
		type ipv4_addr : counter
		elements = { 10.0.1.1 : "ns0out",
			     10.0.2.1 : "ns0out",
			     10.0.1.99: "ns1out",
			     10.0.2.99: "ns2out" }
	}

	map nsoutcounter6 {
		type ipv6_addr : counter
		elements = { dead:1::1 : "ns0out6",
			     dead:2::1 : "ns0out6",
			     dead:1::99 : "ns1out6",
			     dead:2::99 : "ns2out6" }
	}

	chain input {
		type filter hook input priority 0; policy accept;
		counter name ip saddr map @nsincounter
		icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6
	}
	chain output {
		type filter hook output priority 0; policy accept;
		counter name ip daddr map @nsoutcounter
		icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6
	}
}
EOF
done

# Give the veth links time to finish IPv6 DAD before pinging.
sleep 3
# test basic connectivity
for i in 1 2; do
	ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null
	if [ $? -ne 0 ];then
		echo "ERROR: Could not reach other namespace(s)" 1>&2
		ret=1
	fi

	ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null
	if [ $? -ne 0 ];then
		echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2
		ret=1
	fi
	check_counters ns$i
	if [ $? -ne 0 ]; then
		ret=1
	fi

	check_ns0_counters ns$i
	if [ $? -ne 0 ]; then
		ret=1
	fi
	reset_counters
done

if [ $ret -eq 0 ];then
	echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2"
fi

# Run the NAT sub-tests, resetting counters between families.
# NOTE(review): the test_* return values are not folded into $ret, so
# the script's exit code only reflects the connectivity checks above -
# confirm whether sub-test failures should fail the run.
reset_counters
test_local_dnat
test_local_dnat6

reset_counters
test_masquerade
test_masquerade6

reset_counters
test_redirect
test_redirect6

for i in 0 1 2; do ip netns del ns$i;done

exit $ret
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
index 82121a81681f..29bac5ef9a93 100644
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -10,4 +10,5 @@
10/proc-uptime-002 10/proc-uptime-002
11/read 11/read
12/self 12/self
13/setns-dcache
13/thread-self 14/thread-self
diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
index 1c12c34cf85d..434d033ee067 100644
--- a/tools/testing/selftests/proc/Makefile
+++ b/tools/testing/selftests/proc/Makefile
@@ -14,6 +14,7 @@ TEST_GEN_PROGS += proc-uptime-001
14TEST_GEN_PROGS += proc-uptime-002 14TEST_GEN_PROGS += proc-uptime-002
15TEST_GEN_PROGS += read 15TEST_GEN_PROGS += read
16TEST_GEN_PROGS += self 16TEST_GEN_PROGS += self
17TEST_GEN_PROGS += setns-dcache
17TEST_GEN_PROGS += thread-self 18TEST_GEN_PROGS += thread-self
18 19
19include ../lib.mk 20include ../lib.mk
diff --git a/tools/testing/selftests/proc/setns-dcache.c b/tools/testing/selftests/proc/setns-dcache.c
new file mode 100644
index 000000000000..60ab197a73fc
--- /dev/null
+++ b/tools/testing/selftests/proc/setns-dcache.c
@@ -0,0 +1,129 @@
1/*
2 * Copyright © 2019 Alexey Dobriyan <adobriyan@gmail.com>
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16/*
17 * Test that setns(CLONE_NEWNET) points to new /proc/net content even
18 * if old one is in dcache.
19 *
20 * FIXME /proc/net/unix is under CONFIG_UNIX which can be disabled.
21 */
22#undef NDEBUG
23#include <assert.h>
24#include <errno.h>
25#include <sched.h>
26#include <signal.h>
27#include <stdio.h>
28#include <stdlib.h>
29#include <string.h>
30#include <unistd.h>
31#include <sys/types.h>
32#include <sys/stat.h>
33#include <fcntl.h>
34#include <sys/socket.h>
35
/* Pid of the forked helper that holds the second netns; -1 before fork,
 * reset to 0 once the child has been reaped intentionally. */
static pid_t pid = -1;

/* atexit() handler: make sure the paused child does not outlive us. */
static void f(void)
{
	if (pid > 0) {
		kill(pid, SIGTERM);
	}
}
44
/*
 * Test flow:
 *  1. unshare(CLONE_NEWNET) early to detect missing privilege or
 *     syscall support (exit 4 = kselftest SKIP).
 *  2. Create an AF_UNIX socket so this namespace's /proc/net/unix
 *     differs from an empty namespace's.
 *  3. Fork a child that unshares a second, empty netns, signals
 *     readiness over a pipe, then pauses.
 *  4. Pin /proc/net/unix into the dcache, setns() into the child's
 *     namespace and re-read it: only the header line must remain.
 */
int main(void)
{
	int fd[2];
	char _ = 0;
	int nsfd;

	/* Kill the paused child on any exit path. */
	atexit(f);

	/* Check for privileges and syscall availability straight away. */
	if (unshare(CLONE_NEWNET) == -1) {
		if (errno == ENOSYS || errno == EPERM) {
			return 4;
		}
		return 1;
	}
	/* Distinguisher between two otherwise empty net namespaces. */
	if (socket(AF_UNIX, SOCK_STREAM, 0) == -1) {
		return 1;
	}

	if (pipe(fd) == -1) {
		return 1;
	}

	pid = fork();
	if (pid == -1) {
		return 1;
	}

	if (pid == 0) {
		/* Child: fresh empty netns, tell parent, then wait. */
		if (unshare(CLONE_NEWNET) == -1) {
			return 1;
		}

		if (write(fd[1], &_, 1) != 1) {
			return 1;
		}

		pause();

		return 0;
	}

	/* Parent: block until the child has finished unsharing. */
	if (read(fd[0], &_, 1) != 1) {
		return 1;
	}

	{
		char buf[64];
		snprintf(buf, sizeof(buf), "/proc/%u/ns/net", pid);
		nsfd = open(buf, O_RDONLY);
		if (nsfd == -1) {
			return 1;
		}
	}

	/* Reliably pin dentry into dcache. */
	(void)open("/proc/net/unix", O_RDONLY);

	if (setns(nsfd, CLONE_NEWNET) == -1) {
		return 1;
	}

	/* Child no longer needed; clear pid so f() won't signal again. */
	kill(pid, SIGTERM);
	pid = 0;

	{
		char buf[4096];
		ssize_t rv;
		int fd;

		/* In the new namespace the cached dentry must not serve
		 * the old content: expect the header line only. */
		fd = open("/proc/net/unix", O_RDONLY);
		if (fd == -1) {
			return 1;
		}

#define S "Num RefCount Protocol Flags Type St Inode Path\n"
		rv = read(fd, buf, sizeof(buf));

		assert(rv == strlen(S));
		assert(memcmp(buf, S, strlen(S)) == 0);
	}

	return 0;
}
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 496a9a8c773a..7e632b465ab4 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1608,7 +1608,16 @@ TEST_F(TRACE_poke, getpid_runs_normally)
1608#ifdef SYSCALL_NUM_RET_SHARE_REG 1608#ifdef SYSCALL_NUM_RET_SHARE_REG
1609# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action) 1609# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action)
1610#else 1610#else
1611# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(val, action) 1611# define EXPECT_SYSCALL_RETURN(val, action) \
1612 do { \
1613 errno = 0; \
1614 if (val < 0) { \
1615 EXPECT_EQ(-1, action); \
1616 EXPECT_EQ(-(val), errno); \
1617 } else { \
1618 EXPECT_EQ(val, action); \
1619 } \
1620 } while (0)
1612#endif 1621#endif
1613 1622
1614/* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for 1623/* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
@@ -1647,7 +1656,7 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
1647 1656
1648/* Architecture-specific syscall changing routine. */ 1657/* Architecture-specific syscall changing routine. */
1649void change_syscall(struct __test_metadata *_metadata, 1658void change_syscall(struct __test_metadata *_metadata,
1650 pid_t tracee, int syscall) 1659 pid_t tracee, int syscall, int result)
1651{ 1660{
1652 int ret; 1661 int ret;
1653 ARCH_REGS regs; 1662 ARCH_REGS regs;
@@ -1706,7 +1715,7 @@ void change_syscall(struct __test_metadata *_metadata,
1706#ifdef SYSCALL_NUM_RET_SHARE_REG 1715#ifdef SYSCALL_NUM_RET_SHARE_REG
1707 TH_LOG("Can't modify syscall return on this architecture"); 1716 TH_LOG("Can't modify syscall return on this architecture");
1708#else 1717#else
1709 regs.SYSCALL_RET = EPERM; 1718 regs.SYSCALL_RET = result;
1710#endif 1719#endif
1711 1720
1712#ifdef HAVE_GETREGS 1721#ifdef HAVE_GETREGS
@@ -1734,14 +1743,19 @@ void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
1734 case 0x1002: 1743 case 0x1002:
1735 /* change getpid to getppid. */ 1744 /* change getpid to getppid. */
1736 EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee)); 1745 EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
1737 change_syscall(_metadata, tracee, __NR_getppid); 1746 change_syscall(_metadata, tracee, __NR_getppid, 0);
1738 break; 1747 break;
1739 case 0x1003: 1748 case 0x1003:
1740 /* skip gettid. */ 1749 /* skip gettid with valid return code. */
1741 EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee)); 1750 EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
1742 change_syscall(_metadata, tracee, -1); 1751 change_syscall(_metadata, tracee, -1, 45000);
1743 break; 1752 break;
1744 case 0x1004: 1753 case 0x1004:
1754 /* skip openat with error. */
1755 EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
1756 change_syscall(_metadata, tracee, -1, -ESRCH);
1757 break;
1758 case 0x1005:
1745 /* do nothing (allow getppid) */ 1759 /* do nothing (allow getppid) */
1746 EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee)); 1760 EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
1747 break; 1761 break;
@@ -1774,9 +1788,11 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
1774 nr = get_syscall(_metadata, tracee); 1788 nr = get_syscall(_metadata, tracee);
1775 1789
1776 if (nr == __NR_getpid) 1790 if (nr == __NR_getpid)
1777 change_syscall(_metadata, tracee, __NR_getppid); 1791 change_syscall(_metadata, tracee, __NR_getppid, 0);
1792 if (nr == __NR_gettid)
1793 change_syscall(_metadata, tracee, -1, 45000);
1778 if (nr == __NR_openat) 1794 if (nr == __NR_openat)
1779 change_syscall(_metadata, tracee, -1); 1795 change_syscall(_metadata, tracee, -1, -ESRCH);
1780} 1796}
1781 1797
1782FIXTURE_DATA(TRACE_syscall) { 1798FIXTURE_DATA(TRACE_syscall) {
@@ -1793,8 +1809,10 @@ FIXTURE_SETUP(TRACE_syscall)
1793 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002), 1809 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
1794 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1), 1810 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
1795 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003), 1811 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
1796 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), 1812 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
1797 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004), 1813 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
1814 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
1815 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
1798 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 1816 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1799 }; 1817 };
1800 1818
@@ -1842,15 +1860,26 @@ TEST_F(TRACE_syscall, ptrace_syscall_redirected)
1842 EXPECT_NE(self->mypid, syscall(__NR_getpid)); 1860 EXPECT_NE(self->mypid, syscall(__NR_getpid));
1843} 1861}
1844 1862
1845TEST_F(TRACE_syscall, ptrace_syscall_dropped) 1863TEST_F(TRACE_syscall, ptrace_syscall_errno)
1864{
1865 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
1866 teardown_trace_fixture(_metadata, self->tracer);
1867 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
1868 true);
1869
1870 /* Tracer should skip the open syscall, resulting in ESRCH. */
1871 EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
1872}
1873
1874TEST_F(TRACE_syscall, ptrace_syscall_faked)
1846{ 1875{
1847 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ 1876 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
1848 teardown_trace_fixture(_metadata, self->tracer); 1877 teardown_trace_fixture(_metadata, self->tracer);
1849 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, 1878 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
1850 true); 1879 true);
1851 1880
1852 /* Tracer should skip the open syscall, resulting in EPERM. */ 1881 /* Tracer should skip the gettid syscall, resulting fake pid. */
1853 EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat)); 1882 EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
1854} 1883}
1855 1884
1856TEST_F(TRACE_syscall, syscall_allowed) 1885TEST_F(TRACE_syscall, syscall_allowed)
@@ -1883,7 +1912,21 @@ TEST_F(TRACE_syscall, syscall_redirected)
1883 EXPECT_NE(self->mypid, syscall(__NR_getpid)); 1912 EXPECT_NE(self->mypid, syscall(__NR_getpid));
1884} 1913}
1885 1914
1886TEST_F(TRACE_syscall, syscall_dropped) 1915TEST_F(TRACE_syscall, syscall_errno)
1916{
1917 long ret;
1918
1919 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1920 ASSERT_EQ(0, ret);
1921
1922 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1923 ASSERT_EQ(0, ret);
1924
1925 /* openat has been skipped and an errno return. */
1926 EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
1927}
1928
1929TEST_F(TRACE_syscall, syscall_faked)
1887{ 1930{
1888 long ret; 1931 long ret;
1889 1932
@@ -1894,8 +1937,7 @@ TEST_F(TRACE_syscall, syscall_dropped)
1894 ASSERT_EQ(0, ret); 1937 ASSERT_EQ(0, ret);
1895 1938
1896 /* gettid has been skipped and an altered return value stored. */ 1939 /* gettid has been skipped and an altered return value stored. */
1897 EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid)); 1940 EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
1898 EXPECT_NE(self->mytid, syscall(__NR_gettid));
1899} 1941}
1900 1942
1901TEST_F(TRACE_syscall, skip_after_RET_TRACE) 1943TEST_F(TRACE_syscall, skip_after_RET_TRACE)
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index c02683cfb6c9..7656c7ce79d9 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -1,6 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2CFLAGS += -O3 -Wl,-no-as-needed -Wall 2CFLAGS += -O3 -Wl,-no-as-needed -Wall
3LDFLAGS += -lrt -lpthread -lm 3LDLIBS += -lrt -lpthread -lm
4 4
5# these are all "safe" tests that don't modify 5# these are all "safe" tests that don't modify
6# system time or require escalated privileges 6# system time or require escalated privileges
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5ecea812cb6a..585845203db8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3000,8 +3000,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
3000 if (ops->init) 3000 if (ops->init)
3001 ops->init(dev); 3001 ops->init(dev);
3002 3002
3003 kvm_get_kvm(kvm);
3003 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 3004 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
3004 if (ret < 0) { 3005 if (ret < 0) {
3006 kvm_put_kvm(kvm);
3005 mutex_lock(&kvm->lock); 3007 mutex_lock(&kvm->lock);
3006 list_del(&dev->vm_node); 3008 list_del(&dev->vm_node);
3007 mutex_unlock(&kvm->lock); 3009 mutex_unlock(&kvm->lock);
@@ -3009,7 +3011,6 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
3009 return ret; 3011 return ret;
3010 } 3012 }
3011 3013
3012 kvm_get_kvm(kvm);
3013 cd->fd = ret; 3014 cd->fd = ret;
3014 return 0; 3015 return 0;
3015} 3016}