summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.mailmap4
-rw-r--r--Documentation/networking/tls-offload.rst4
-rw-r--r--MAINTAINERS13
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabreauto.dtsi8
-rw-r--r--arch/arm/boot/dts/stm32mp157c-ev1.dts13
-rw-r--r--arch/arm/boot/dts/stm32mp157c.dtsi4
-rw-r--r--arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts1
-rw-r--r--arch/arm/mach-sunxi/mc_smp.c6
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm.dtsi6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn.dtsi6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi2
-rw-r--r--arch/arm64/include/asm/pgtable.h17
-rw-r--r--arch/arm64/include/asm/vdso/vsyscall.h7
-rw-r--r--arch/mips/include/asm/vdso/vsyscall.h7
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c13
-rw-r--r--arch/x86/kernel/apic/apic.c28
-rw-r--r--arch/x86/kernel/cpu/resctrl/ctrlmondata.c4
-rw-r--r--arch/x86/kernel/dumpstack_64.c7
-rw-r--r--arch/x86/kernel/tsc.c3
-rw-r--r--block/blk-cgroup.c13
-rw-r--r--drivers/block/drbd/drbd_main.c1
-rw-r--r--drivers/clk/at91/clk-main.c5
-rw-r--r--drivers/clk/at91/sam9x60.c1
-rw-r--r--drivers/clk/at91/sckc.c20
-rw-r--r--drivers/clk/clk-ast2600.c7
-rw-r--r--drivers/clk/imx/clk-imx8mm.c2
-rw-r--r--drivers/clk/imx/clk-imx8mn.c2
-rw-r--r--drivers/clk/meson/g12a.c13
-rw-r--r--drivers/clk/meson/gxbb.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c27
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c14
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80.c2
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c4
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c6
-rw-r--r--drivers/clk/ti/clkctrl.c5
-rw-r--r--drivers/clocksource/sh_mtu2.c16
-rw-r--r--drivers/clocksource/timer-mediatek.c10
-rw-r--r--drivers/cpufreq/intel_pstate.c4
-rw-r--r--drivers/gpio/gpio-merrifield.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c2
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c15
-rw-r--r--drivers/gpu/drm/drm_self_refresh_helper.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c6
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c4
-rw-r--r--drivers/hid/wacom.h15
-rw-r--r--drivers/hid/wacom_wac.c10
-rw-r--r--drivers/hwtracing/intel_th/gth.c3
-rw-r--r--drivers/hwtracing/intel_th/msu.c11
-rw-r--r--drivers/hwtracing/intel_th/pci.c10
-rw-r--r--drivers/iio/adc/stm32-adc.c4
-rw-r--r--drivers/iio/imu/adis16480.c5
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c9
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c15
-rw-r--r--drivers/iio/proximity/srf04.c29
-rw-r--r--drivers/interconnect/core.c4
-rw-r--r--drivers/interconnect/qcom/qcs404.c3
-rw-r--r--drivers/interconnect/qcom/sdm845.c3
-rw-r--r--drivers/net/bonding/bond_main.c44
-rw-r--r--drivers/net/can/c_can/c_can.c71
-rw-r--r--drivers/net/can/c_can/c_can.h1
-rw-r--r--drivers/net/can/dev.c1
-rw-r--r--drivers/net/can/flexcan.c11
-rw-r--r--drivers/net/can/rx-offload.c102
-rw-r--r--drivers/net/can/spi/mcp251x.c2
-rw-r--r--drivers/net/can/ti_hecc.c232
-rw-r--r--drivers/net/can/usb/gs_usb.c1
-rw-r--r--drivers/net/can/usb/mcba_usb.c3
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c32
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c2
-rw-r--r--drivers/net/can/usb/usb_8dev.c3
-rw-r--r--drivers/net/can/xilinx_can.c1
-rw-r--r--drivers/net/dsa/bcm_sf2.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c35
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c145
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c18
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c10
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c9
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c12
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c70
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c134
-rw-r--r--drivers/net/usb/cdc_ncm.c6
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/nfc/fdp/i2c.c2
-rw-r--r--drivers/nfc/st21nfca/core.c1
-rw-r--r--drivers/nvme/host/multipath.c2
-rw-r--r--drivers/nvme/host/rdma.c8
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c26
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c21
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c14
-rw-r--r--drivers/pwm/pwm-bcm-iproc.c1
-rw-r--r--drivers/reset/core.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c8
-rw-r--r--drivers/scsi/scsi_lib.c3
-rw-r--r--drivers/scsi/sd_zbc.c29
-rw-r--r--drivers/soc/imx/gpc.c8
-rw-r--r--drivers/soundwire/Kconfig1
-rw-r--r--drivers/soundwire/intel.c4
-rw-r--r--drivers/soundwire/slave.c3
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/vboxsf/Kconfig10
-rw-r--r--drivers/staging/vboxsf/Makefile5
-rw-r--r--drivers/staging/vboxsf/TODO7
-rw-r--r--drivers/staging/vboxsf/dir.c418
-rw-r--r--drivers/staging/vboxsf/file.c370
-rw-r--r--drivers/staging/vboxsf/shfl_hostintf.h901
-rw-r--r--drivers/staging/vboxsf/super.c501
-rw-r--r--drivers/staging/vboxsf/utils.c551
-rw-r--r--drivers/staging/vboxsf/vboxsf_wrappers.c371
-rw-r--r--drivers/staging/vboxsf/vfsmod.h137
-rw-r--r--drivers/thunderbolt/nhi_ops.c1
-rw-r--r--drivers/thunderbolt/switch.c28
-rw-r--r--drivers/video/fbdev/c2p_core.h8
-rw-r--r--drivers/watchdog/bd70528_wdt.c1
-rw-r--r--drivers/watchdog/cpwd.c8
-rw-r--r--drivers/watchdog/imx_sc_wdt.c8
-rw-r--r--drivers/watchdog/meson_gxbb_wdt.c4
-rw-r--r--drivers/watchdog/pm8916_wdt.c15
-rw-r--r--fs/btrfs/inode.c15
-rw-r--r--fs/btrfs/ioctl.c6
-rw-r--r--fs/btrfs/space-info.c21
-rw-r--r--fs/btrfs/tree-checker.c8
-rw-r--r--fs/btrfs/volumes.c1
-rw-r--r--fs/ceph/caps.c10
-rw-r--r--fs/ceph/dir.c15
-rw-r--r--fs/ceph/file.c15
-rw-r--r--fs/ceph/inode.c1
-rw-r--r--fs/ceph/super.c11
-rw-r--r--fs/cifs/smb2pdu.h1
-rw-r--r--fs/configfs/symlink.c2
-rw-r--r--fs/fs-writeback.c9
-rw-r--r--fs/ocfs2/file.c134
-rw-r--r--include/asm-generic/vdso/vsyscall.h7
-rw-r--r--include/drm/drm_gem_shmem_helper.h13
-rw-r--r--include/drm/drm_self_refresh_helper.h3
-rw-r--r--include/linux/bpf.h4
-rw-r--r--include/linux/idr.h2
-rw-r--r--include/linux/mm.h5
-rw-r--r--include/linux/mm_types.h5
-rw-r--r--include/linux/page-flags.h20
-rw-r--r--include/linux/radix-tree.h18
-rw-r--r--include/linux/reset-controller.h4
-rw-r--r--include/linux/reset.h2
-rw-r--r--include/linux/skmsg.h9
-rw-r--r--include/net/bonding.h3
-rw-r--r--include/net/fq_impl.h4
-rw-r--r--include/net/neighbour.h4
-rw-r--r--include/net/netfilter/nf_tables.h3
-rw-r--r--include/net/sch_generic.h4
-rw-r--r--include/net/sock.h4
-rw-r--r--include/net/tls.h5
-rw-r--r--include/uapi/linux/can.h2
-rw-r--r--include/uapi/linux/can/bcm.h2
-rw-r--r--include/uapi/linux/can/error.h2
-rw-r--r--include/uapi/linux/can/gw.h2
-rw-r--r--include/uapi/linux/can/j1939.h2
-rw-r--r--include/uapi/linux/can/netlink.h2
-rw-r--r--include/uapi/linux/can/raw.h2
-rw-r--r--include/uapi/linux/can/vxcan.h2
-rw-r--r--include/uapi/linux/nvme_ioctl.h1
-rw-r--r--include/uapi/linux/sched.h4
-rw-r--r--kernel/bpf/cgroup.c4
-rw-r--r--kernel/bpf/syscall.c7
-rw-r--r--kernel/fork.c33
-rw-r--r--kernel/irq/irqdomain.c2
-rw-r--r--kernel/sched/core.c23
-rw-r--r--kernel/sched/deadline.c40
-rw-r--r--kernel/sched/fair.c15
-rw-r--r--kernel/sched/idle.c9
-rw-r--r--kernel/sched/rt.c37
-rw-r--r--kernel/sched/sched.h30
-rw-r--r--kernel/sched/stop_task.c18
-rw-r--r--kernel/signal.c2
-rw-r--r--kernel/stacktrace.c6
-rw-r--r--kernel/time/vsyscall.c9
-rw-r--r--lib/Kconfig1
-rw-r--r--lib/dump_stack.c7
-rw-r--r--lib/idr.c31
-rw-r--r--lib/radix-tree.c2
-rw-r--r--lib/test_xarray.c24
-rw-r--r--lib/xarray.c4
-rw-r--r--mm/khugepaged.c7
-rw-r--r--mm/memcontrol.c23
-rw-r--r--mm/memory_hotplug.c8
-rw-r--r--mm/mmu_notifier.c2
-rw-r--r--mm/page_alloc.c17
-rw-r--r--mm/slab.h4
-rw-r--r--mm/vmstat.c25
-rw-r--r--net/bridge/netfilter/ebt_dnat.c19
-rw-r--r--net/can/j1939/socket.c9
-rw-r--r--net/can/j1939/transport.c20
-rw-r--r--net/core/skmsg.c20
-rw-r--r--net/dccp/ipv4.c2
-rw-r--r--net/ipv4/fib_semantics.c2
-rw-r--r--net/ipv6/route.c13
-rw-r--r--net/mac80211/main.c2
-rw-r--r--net/mac80211/sta_info.c3
-rw-r--r--net/netfilter/ipset/ip_set_core.c49
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipmac.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c1
-rw-r--r--net/netfilter/ipset/ip_set_hash_netnet.c1
-rw-r--r--net/netfilter/nf_tables_api.c7
-rw-r--r--net/netfilter/nf_tables_offload.c3
-rw-r--r--net/netfilter/nft_bitwise.c5
-rw-r--r--net/netfilter/nft_cmp.c2
-rw-r--r--net/nfc/netlink.c2
-rw-r--r--net/sched/cls_api.c83
-rw-r--r--net/sched/sch_taprio.c5
-rw-r--r--net/smc/smc_pnet.c2
-rw-r--r--net/tls/tls_device.c10
-rw-r--r--net/tls/tls_main.c2
-rw-r--r--net/tls/tls_sw.c30
-rw-r--r--net/vmw_vsock/virtio_transport_common.c8
-rw-r--r--samples/bpf/Makefile1
-rw-r--r--scripts/gdb/linux/symbols.py3
-rw-r--r--scripts/nsdeps6
-rw-r--r--sound/core/compress_offload.c2
-rw-r--r--sound/core/timer.c6
-rw-r--r--sound/firewire/bebob/bebob_focusrite.c3
-rw-r--r--sound/pci/hda/patch_ca0132.c2
-rw-r--r--sound/pci/hda/patch_hdmi.c13
-rw-r--r--sound/soc/codecs/hdac_hda.c2
-rw-r--r--sound/soc/codecs/hdmi-codec.c12
-rw-r--r--sound/soc/codecs/max98373.c4
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c4
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c11
-rw-r--r--sound/soc/rockchip/rockchip_max98090.c7
-rw-r--r--sound/soc/sh/rcar/dma.c4
-rw-r--r--sound/soc/sof/debug.c6
-rw-r--r--sound/soc/sof/intel/hda-stream.c4
-rw-r--r--sound/soc/sof/ipc.c4
-rw-r--r--sound/soc/sof/topology.c11
-rw-r--r--sound/soc/stm/stm32_sai_sub.c12
-rw-r--r--sound/soc/ti/sdma-pcm.c2
-rw-r--r--tools/gpio/Makefile6
-rw-r--r--tools/perf/perf-sys.h6
-rw-r--r--tools/perf/util/hist.c2
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c8
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c9
-rw-r--r--tools/perf/util/trace-event-parse.c31
-rw-r--r--tools/perf/util/trace-event.h2
-rw-r--r--tools/testing/selftests/bpf/test_sysctl.c8
-rw-r--r--tools/testing/selftests/net/tls.c108
-rw-r--r--tools/testing/selftests/vm/gup_benchmark.c2
299 files changed, 5508 insertions, 1090 deletions
diff --git a/.mailmap b/.mailmap
index 83d7e750c2fc..fd6219293057 100644
--- a/.mailmap
+++ b/.mailmap
@@ -108,6 +108,10 @@ Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com>
108Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com> 108Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
109Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com> 109Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
110<javier@osg.samsung.com> <javier.martinez@collabora.co.uk> 110<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
111Jayachandran C <c.jayachandran@gmail.com> <jayachandranc@netlogicmicro.com>
112Jayachandran C <c.jayachandran@gmail.com> <jchandra@broadcom.com>
113Jayachandran C <c.jayachandran@gmail.com> <jchandra@digeo.com>
114Jayachandran C <c.jayachandran@gmail.com> <jnair@caviumnetworks.com>
111Jean Tourrilhes <jt@hpl.hp.com> 115Jean Tourrilhes <jt@hpl.hp.com>
112<jean-philippe@linaro.org> <jean-philippe.brucker@arm.com> 116<jean-philippe@linaro.org> <jean-philippe.brucker@arm.com>
113Jeff Garzik <jgarzik@pretzel.yyz.us> 117Jeff Garzik <jgarzik@pretzel.yyz.us>
diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst
index 0dd3f748239f..f914e81fd3a6 100644
--- a/Documentation/networking/tls-offload.rst
+++ b/Documentation/networking/tls-offload.rst
@@ -436,6 +436,10 @@ by the driver:
436 encryption. 436 encryption.
437 * ``tx_tls_ooo`` - number of TX packets which were part of a TLS stream 437 * ``tx_tls_ooo`` - number of TX packets which were part of a TLS stream
438 but did not arrive in the expected order. 438 but did not arrive in the expected order.
439 * ``tx_tls_skip_no_sync_data`` - number of TX packets which were part of
440 a TLS stream and arrived out-of-order, but skipped the HW offload routine
441 and went to the regular transmit flow as they were retransmissions of the
442 connection handshake.
439 * ``tx_tls_drop_no_sync_data`` - number of TX packets which were part of 443 * ``tx_tls_drop_no_sync_data`` - number of TX packets which were part of
440 a TLS stream dropped, because they arrived out of order and associated 444 a TLS stream dropped, because they arrived out of order and associated
441 record could not be found. 445 record could not be found.
diff --git a/MAINTAINERS b/MAINTAINERS
index cba1095547fd..eb19fad370d7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3053,6 +3053,7 @@ M: Daniel Borkmann <daniel@iogearbox.net>
3053R: Martin KaFai Lau <kafai@fb.com> 3053R: Martin KaFai Lau <kafai@fb.com>
3054R: Song Liu <songliubraving@fb.com> 3054R: Song Liu <songliubraving@fb.com>
3055R: Yonghong Song <yhs@fb.com> 3055R: Yonghong Song <yhs@fb.com>
3056R: Andrii Nakryiko <andriin@fb.com>
3056L: netdev@vger.kernel.org 3057L: netdev@vger.kernel.org
3057L: bpf@vger.kernel.org 3058L: bpf@vger.kernel.org
3058T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git 3059T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
@@ -3737,7 +3738,6 @@ F: drivers/crypto/cavium/cpt/
3737 3738
3738CAVIUM THUNDERX2 ARM64 SOC 3739CAVIUM THUNDERX2 ARM64 SOC
3739M: Robert Richter <rrichter@cavium.com> 3740M: Robert Richter <rrichter@cavium.com>
3740M: Jayachandran C <jnair@caviumnetworks.com>
3741L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 3741L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
3742S: Maintained 3742S: Maintained
3743F: arch/arm64/boot/dts/cavium/thunder2-99xx* 3743F: arch/arm64/boot/dts/cavium/thunder2-99xx*
@@ -10519,8 +10519,12 @@ F: mm/memblock.c
10519F: Documentation/core-api/boot-time-mm.rst 10519F: Documentation/core-api/boot-time-mm.rst
10520 10520
10521MEMORY MANAGEMENT 10521MEMORY MANAGEMENT
10522M: Andrew Morton <akpm@linux-foundation.org>
10522L: linux-mm@kvack.org 10523L: linux-mm@kvack.org
10523W: http://www.linux-mm.org 10524W: http://www.linux-mm.org
10525T: quilt https://ozlabs.org/~akpm/mmotm/
10526T: quilt https://ozlabs.org/~akpm/mmots/
10527T: git git://github.com/hnaz/linux-mm.git
10524S: Maintained 10528S: Maintained
10525F: include/linux/mm.h 10529F: include/linux/mm.h
10526F: include/linux/gfp.h 10530F: include/linux/gfp.h
@@ -17334,6 +17338,12 @@ F: include/linux/vbox_utils.h
17334F: include/uapi/linux/vbox*.h 17338F: include/uapi/linux/vbox*.h
17335F: drivers/virt/vboxguest/ 17339F: drivers/virt/vboxguest/
17336 17340
17341VIRTUAL BOX SHARED FOLDER VFS DRIVER:
17342M: Hans de Goede <hdegoede@redhat.com>
17343L: linux-fsdevel@vger.kernel.org
17344S: Maintained
17345F: drivers/staging/vboxsf/*
17346
17337VIRTUAL SERIO DEVICE DRIVER 17347VIRTUAL SERIO DEVICE DRIVER
17338M: Stephen Chandler Paul <thatslyude@gmail.com> 17348M: Stephen Chandler Paul <thatslyude@gmail.com>
17339S: Maintained 17349S: Maintained
@@ -18034,6 +18044,7 @@ F: Documentation/vm/zsmalloc.rst
18034ZSWAP COMPRESSED SWAP CACHING 18044ZSWAP COMPRESSED SWAP CACHING
18035M: Seth Jennings <sjenning@redhat.com> 18045M: Seth Jennings <sjenning@redhat.com>
18036M: Dan Streetman <ddstreet@ieee.org> 18046M: Dan Streetman <ddstreet@ieee.org>
18047M: Vitaly Wool <vitaly.wool@konsulko.com>
18037L: linux-mm@kvack.org 18048L: linux-mm@kvack.org
18038S: Maintained 18049S: Maintained
18039F: mm/zswap.c 18050F: mm/zswap.c
diff --git a/Makefile b/Makefile
index b37d0e8fc61d..1d5298356ea8 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 5 2VERSION = 5
3PATCHLEVEL = 4 3PATCHLEVEL = 4
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc6 5EXTRAVERSION = -rc7
6NAME = Kleptomaniac Octopus 6NAME = Kleptomaniac Octopus
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi b/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
index 2a6ce87071f9..9e027b9a5f91 100644
--- a/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
+++ b/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
@@ -328,6 +328,10 @@
328 pinctrl-0 = <&pinctrl_pwm3>; 328 pinctrl-0 = <&pinctrl_pwm3>;
329}; 329};
330 330
331&snvs_pwrkey {
332 status = "okay";
333};
334
331&ssi2 { 335&ssi2 {
332 status = "okay"; 336 status = "okay";
333}; 337};
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index f3404dd10537..cf628465cd0a 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -230,6 +230,8 @@
230 accelerometer@1c { 230 accelerometer@1c {
231 compatible = "fsl,mma8451"; 231 compatible = "fsl,mma8451";
232 reg = <0x1c>; 232 reg = <0x1c>;
233 pinctrl-names = "default";
234 pinctrl-0 = <&pinctrl_mma8451_int>;
233 interrupt-parent = <&gpio6>; 235 interrupt-parent = <&gpio6>;
234 interrupts = <31 IRQ_TYPE_LEVEL_LOW>; 236 interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
235 }; 237 };
@@ -628,6 +630,12 @@
628 >; 630 >;
629 }; 631 };
630 632
633 pinctrl_mma8451_int: mma8451intgrp {
634 fsl,pins = <
635 MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0xb0b1
636 >;
637 };
638
631 pinctrl_pwm3: pwm1grp { 639 pinctrl_pwm3: pwm1grp {
632 fsl,pins = < 640 fsl,pins = <
633 MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1 641 MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1
diff --git a/arch/arm/boot/dts/stm32mp157c-ev1.dts b/arch/arm/boot/dts/stm32mp157c-ev1.dts
index 89d29b50c3f4..91fc0a315c49 100644
--- a/arch/arm/boot/dts/stm32mp157c-ev1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-ev1.dts
@@ -183,14 +183,12 @@
183 183
184 ov5640: camera@3c { 184 ov5640: camera@3c {
185 compatible = "ovti,ov5640"; 185 compatible = "ovti,ov5640";
186 pinctrl-names = "default";
187 pinctrl-0 = <&ov5640_pins>;
188 reg = <0x3c>; 186 reg = <0x3c>;
189 clocks = <&clk_ext_camera>; 187 clocks = <&clk_ext_camera>;
190 clock-names = "xclk"; 188 clock-names = "xclk";
191 DOVDD-supply = <&v2v8>; 189 DOVDD-supply = <&v2v8>;
192 powerdown-gpios = <&stmfx_pinctrl 18 GPIO_ACTIVE_HIGH>; 190 powerdown-gpios = <&stmfx_pinctrl 18 (GPIO_ACTIVE_HIGH | GPIO_PUSH_PULL)>;
193 reset-gpios = <&stmfx_pinctrl 19 GPIO_ACTIVE_LOW>; 191 reset-gpios = <&stmfx_pinctrl 19 (GPIO_ACTIVE_LOW | GPIO_PUSH_PULL)>;
194 rotation = <180>; 192 rotation = <180>;
195 status = "okay"; 193 status = "okay";
196 194
@@ -223,15 +221,8 @@
223 221
224 joystick_pins: joystick { 222 joystick_pins: joystick {
225 pins = "gpio0", "gpio1", "gpio2", "gpio3", "gpio4"; 223 pins = "gpio0", "gpio1", "gpio2", "gpio3", "gpio4";
226 drive-push-pull;
227 bias-pull-down; 224 bias-pull-down;
228 }; 225 };
229
230 ov5640_pins: camera {
231 pins = "agpio2", "agpio3"; /* stmfx pins 18 & 19 */
232 drive-push-pull;
233 output-low;
234 };
235 }; 226 };
236 }; 227 };
237}; 228};
diff --git a/arch/arm/boot/dts/stm32mp157c.dtsi b/arch/arm/boot/dts/stm32mp157c.dtsi
index 9b11654a0a39..f98e0370c0bc 100644
--- a/arch/arm/boot/dts/stm32mp157c.dtsi
+++ b/arch/arm/boot/dts/stm32mp157c.dtsi
@@ -932,7 +932,7 @@
932 interrupt-names = "int0", "int1"; 932 interrupt-names = "int0", "int1";
933 clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>; 933 clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
934 clock-names = "hclk", "cclk"; 934 clock-names = "hclk", "cclk";
935 bosch,mram-cfg = <0x1400 0 0 32 0 0 2 2>; 935 bosch,mram-cfg = <0x0 0 0 32 0 0 2 2>;
936 status = "disabled"; 936 status = "disabled";
937 }; 937 };
938 938
@@ -945,7 +945,7 @@
945 interrupt-names = "int0", "int1"; 945 interrupt-names = "int0", "int1";
946 clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>; 946 clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>;
947 clock-names = "hclk", "cclk"; 947 clock-names = "hclk", "cclk";
948 bosch,mram-cfg = <0x0 0 0 32 0 0 2 2>; 948 bosch,mram-cfg = <0x1400 0 0 32 0 0 2 2>;
949 status = "disabled"; 949 status = "disabled";
950 }; 950 };
951 951
diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
index 568b90ece342..3bec3e0a81b2 100644
--- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
@@ -192,6 +192,7 @@
192 vqmmc-supply = <&reg_dldo1>; 192 vqmmc-supply = <&reg_dldo1>;
193 non-removable; 193 non-removable;
194 wakeup-source; 194 wakeup-source;
195 keep-power-in-suspend;
195 status = "okay"; 196 status = "okay";
196 197
197 brcmf: wifi@1 { 198 brcmf: wifi@1 {
diff --git a/arch/arm/mach-sunxi/mc_smp.c b/arch/arm/mach-sunxi/mc_smp.c
index 239084cf8192..26cbce135338 100644
--- a/arch/arm/mach-sunxi/mc_smp.c
+++ b/arch/arm/mach-sunxi/mc_smp.c
@@ -481,14 +481,18 @@ static void sunxi_mc_smp_cpu_die(unsigned int l_cpu)
481static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster) 481static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster)
482{ 482{
483 u32 reg; 483 u32 reg;
484 int gating_bit = cpu;
484 485
485 pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu); 486 pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
486 if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS) 487 if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
487 return -EINVAL; 488 return -EINVAL;
488 489
490 if (is_a83t && cpu == 0)
491 gating_bit = 4;
492
489 /* gate processor power */ 493 /* gate processor power */
490 reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster)); 494 reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
491 reg |= PRCM_PWROFF_GATING_REG_CORE(cpu); 495 reg |= PRCM_PWROFF_GATING_REG_CORE(gating_bit);
492 writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster)); 496 writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
493 udelay(20); 497 udelay(20);
494 498
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
index d98346da01df..078a5010228c 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
@@ -127,7 +127,7 @@
127 status = "okay"; 127 status = "okay";
128 128
129 i2c-mux@77 { 129 i2c-mux@77 {
130 compatible = "nxp,pca9847"; 130 compatible = "nxp,pca9547";
131 reg = <0x77>; 131 reg = <0x77>;
132 #address-cells = <1>; 132 #address-cells = <1>;
133 #size-cells = <0>; 133 #size-cells = <0>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
index 58b8cd06cae7..23c8fad7932b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
@@ -394,7 +394,7 @@
394 }; 394 };
395 395
396 sdma2: dma-controller@302c0000 { 396 sdma2: dma-controller@302c0000 {
397 compatible = "fsl,imx8mm-sdma", "fsl,imx7d-sdma"; 397 compatible = "fsl,imx8mm-sdma", "fsl,imx8mq-sdma";
398 reg = <0x302c0000 0x10000>; 398 reg = <0x302c0000 0x10000>;
399 interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>; 399 interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
400 clocks = <&clk IMX8MM_CLK_SDMA2_ROOT>, 400 clocks = <&clk IMX8MM_CLK_SDMA2_ROOT>,
@@ -405,7 +405,7 @@
405 }; 405 };
406 406
407 sdma3: dma-controller@302b0000 { 407 sdma3: dma-controller@302b0000 {
408 compatible = "fsl,imx8mm-sdma", "fsl,imx7d-sdma"; 408 compatible = "fsl,imx8mm-sdma", "fsl,imx8mq-sdma";
409 reg = <0x302b0000 0x10000>; 409 reg = <0x302b0000 0x10000>;
410 interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>; 410 interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
411 clocks = <&clk IMX8MM_CLK_SDMA3_ROOT>, 411 clocks = <&clk IMX8MM_CLK_SDMA3_ROOT>,
@@ -737,7 +737,7 @@
737 }; 737 };
738 738
739 sdma1: dma-controller@30bd0000 { 739 sdma1: dma-controller@30bd0000 {
740 compatible = "fsl,imx8mm-sdma", "fsl,imx7d-sdma"; 740 compatible = "fsl,imx8mm-sdma", "fsl,imx8mq-sdma";
741 reg = <0x30bd0000 0x10000>; 741 reg = <0x30bd0000 0x10000>;
742 interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>; 742 interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
743 clocks = <&clk IMX8MM_CLK_SDMA1_ROOT>, 743 clocks = <&clk IMX8MM_CLK_SDMA1_ROOT>,
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index 98496f570720..43c4db312146 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -288,7 +288,7 @@
288 }; 288 };
289 289
290 sdma3: dma-controller@302b0000 { 290 sdma3: dma-controller@302b0000 {
291 compatible = "fsl,imx8mn-sdma", "fsl,imx7d-sdma"; 291 compatible = "fsl,imx8mn-sdma", "fsl,imx8mq-sdma";
292 reg = <0x302b0000 0x10000>; 292 reg = <0x302b0000 0x10000>;
293 interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>; 293 interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
294 clocks = <&clk IMX8MN_CLK_SDMA3_ROOT>, 294 clocks = <&clk IMX8MN_CLK_SDMA3_ROOT>,
@@ -299,7 +299,7 @@
299 }; 299 };
300 300
301 sdma2: dma-controller@302c0000 { 301 sdma2: dma-controller@302c0000 {
302 compatible = "fsl,imx8mn-sdma", "fsl,imx7d-sdma"; 302 compatible = "fsl,imx8mn-sdma", "fsl,imx8mq-sdma";
303 reg = <0x302c0000 0x10000>; 303 reg = <0x302c0000 0x10000>;
304 interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>; 304 interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
305 clocks = <&clk IMX8MN_CLK_SDMA2_ROOT>, 305 clocks = <&clk IMX8MN_CLK_SDMA2_ROOT>,
@@ -612,7 +612,7 @@
612 }; 612 };
613 613
614 sdma1: dma-controller@30bd0000 { 614 sdma1: dma-controller@30bd0000 {
615 compatible = "fsl,imx8mn-sdma", "fsl,imx7d-sdma"; 615 compatible = "fsl,imx8mn-sdma", "fsl,imx8mq-sdma";
616 reg = <0x30bd0000 0x10000>; 616 reg = <0x30bd0000 0x10000>;
617 interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>; 617 interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
618 clocks = <&clk IMX8MN_CLK_SDMA1_ROOT>, 618 clocks = <&clk IMX8MN_CLK_SDMA1_ROOT>,
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
index 087b5b6ebe89..32ce14936b01 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
@@ -88,7 +88,7 @@
88 regulator-name = "0V9_ARM"; 88 regulator-name = "0V9_ARM";
89 regulator-min-microvolt = <900000>; 89 regulator-min-microvolt = <900000>;
90 regulator-max-microvolt = <1000000>; 90 regulator-max-microvolt = <1000000>;
91 gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>; 91 gpios = <&gpio3 16 GPIO_ACTIVE_HIGH>;
92 states = <1000000 0x1 92 states = <1000000 0x1
93 900000 0x0>; 93 900000 0x0>;
94 regulator-always-on; 94 regulator-always-on;
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 8330810f699e..565aa45ef134 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -283,23 +283,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
283 set_pte(ptep, pte); 283 set_pte(ptep, pte);
284} 284}
285 285
286#define __HAVE_ARCH_PTE_SAME
287static inline int pte_same(pte_t pte_a, pte_t pte_b)
288{
289 pteval_t lhs, rhs;
290
291 lhs = pte_val(pte_a);
292 rhs = pte_val(pte_b);
293
294 if (pte_present(pte_a))
295 lhs &= ~PTE_RDONLY;
296
297 if (pte_present(pte_b))
298 rhs &= ~PTE_RDONLY;
299
300 return (lhs == rhs);
301}
302
303/* 286/*
304 * Huge pte definitions. 287 * Huge pte definitions.
305 */ 288 */
diff --git a/arch/arm64/include/asm/vdso/vsyscall.h b/arch/arm64/include/asm/vdso/vsyscall.h
index 0c731bfc7c8c..0c20a7c1bee5 100644
--- a/arch/arm64/include/asm/vdso/vsyscall.h
+++ b/arch/arm64/include/asm/vdso/vsyscall.h
@@ -31,13 +31,6 @@ int __arm64_get_clock_mode(struct timekeeper *tk)
31#define __arch_get_clock_mode __arm64_get_clock_mode 31#define __arch_get_clock_mode __arm64_get_clock_mode
32 32
33static __always_inline 33static __always_inline
34int __arm64_use_vsyscall(struct vdso_data *vdata)
35{
36 return !vdata[CS_HRES_COARSE].clock_mode;
37}
38#define __arch_use_vsyscall __arm64_use_vsyscall
39
40static __always_inline
41void __arm64_update_vsyscall(struct vdso_data *vdata, struct timekeeper *tk) 34void __arm64_update_vsyscall(struct vdso_data *vdata, struct timekeeper *tk)
42{ 35{
43 vdata[CS_HRES_COARSE].mask = VDSO_PRECISION_MASK; 36 vdata[CS_HRES_COARSE].mask = VDSO_PRECISION_MASK;
diff --git a/arch/mips/include/asm/vdso/vsyscall.h b/arch/mips/include/asm/vdso/vsyscall.h
index 195314732233..00d41b94ba31 100644
--- a/arch/mips/include/asm/vdso/vsyscall.h
+++ b/arch/mips/include/asm/vdso/vsyscall.h
@@ -28,13 +28,6 @@ int __mips_get_clock_mode(struct timekeeper *tk)
28} 28}
29#define __arch_get_clock_mode __mips_get_clock_mode 29#define __arch_get_clock_mode __mips_get_clock_mode
30 30
31static __always_inline
32int __mips_use_vsyscall(struct vdso_data *vdata)
33{
34 return (vdata[CS_HRES_COARSE].clock_mode != VDSO_CLOCK_NONE);
35}
36#define __arch_use_vsyscall __mips_use_vsyscall
37
38/* The asm-generic header needs to be included after the definitions above */ 31/* The asm-generic header needs to be included after the definitions above */
39#include <asm-generic/vdso/vsyscall.h> 32#include <asm-generic/vdso/vsyscall.h>
40 33
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 02a59946a78a..be3517ef0574 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -1142,6 +1142,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
1142 } 1142 }
1143 1143
1144 /* 1144 /*
1145 * If we have seen a tail call, we need a second pass.
1146 * This is because bpf_jit_emit_common_epilogue() is called
1147 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
1148 */
1149 if (cgctx.seen & SEEN_TAILCALL) {
1150 cgctx.idx = 0;
1151 if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
1152 fp = org_fp;
1153 goto out_addrs;
1154 }
1155 }
1156
1157 /*
1145 * Pretend to build prologue, given the features we've seen. This will 1158 * Pretend to build prologue, given the features we've seen. This will
1146 * update ctgtx.idx as it pretends to output instructions, then we can 1159 * update ctgtx.idx as it pretends to output instructions, then we can
1147 * calculate total size from idx. 1160 * calculate total size from idx.
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 9e2dd2b296cd..2b0faf86da1b 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1586,9 +1586,6 @@ static void setup_local_APIC(void)
1586{ 1586{
1587 int cpu = smp_processor_id(); 1587 int cpu = smp_processor_id();
1588 unsigned int value; 1588 unsigned int value;
1589#ifdef CONFIG_X86_32
1590 int logical_apicid, ldr_apicid;
1591#endif
1592 1589
1593 if (disable_apic) { 1590 if (disable_apic) {
1594 disable_ioapic_support(); 1591 disable_ioapic_support();
@@ -1626,16 +1623,21 @@ static void setup_local_APIC(void)
1626 apic->init_apic_ldr(); 1623 apic->init_apic_ldr();
1627 1624
1628#ifdef CONFIG_X86_32 1625#ifdef CONFIG_X86_32
1629 /* 1626 if (apic->dest_logical) {
1630 * APIC LDR is initialized. If logical_apicid mapping was 1627 int logical_apicid, ldr_apicid;
1631 * initialized during get_smp_config(), make sure it matches the 1628
1632 * actual value. 1629 /*
1633 */ 1630 * APIC LDR is initialized. If logical_apicid mapping was
1634 logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); 1631 * initialized during get_smp_config(), make sure it matches
1635 ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR)); 1632 * the actual value.
1636 WARN_ON(logical_apicid != BAD_APICID && logical_apicid != ldr_apicid); 1633 */
1637 /* always use the value from LDR */ 1634 logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
1638 early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid; 1635 ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
1636 if (logical_apicid != BAD_APICID)
1637 WARN_ON(logical_apicid != ldr_apicid);
1638 /* Always use the value from LDR. */
1639 early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid;
1640 }
1639#endif 1641#endif
1640 1642
1641 /* 1643 /*
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index efbd54cc4e69..055c8613b531 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -522,6 +522,10 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
522 int ret = 0; 522 int ret = 0;
523 523
524 rdtgrp = rdtgroup_kn_lock_live(of->kn); 524 rdtgrp = rdtgroup_kn_lock_live(of->kn);
525 if (!rdtgrp) {
526 ret = -ENOENT;
527 goto out;
528 }
525 529
526 md.priv = of->kn->priv; 530 md.priv = of->kn->priv;
527 resid = md.u.rid; 531 resid = md.u.rid;
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 753b8cfe8b8a..87b97897a881 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -94,6 +94,13 @@ static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
94 BUILD_BUG_ON(N_EXCEPTION_STACKS != 6); 94 BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
95 95
96 begin = (unsigned long)__this_cpu_read(cea_exception_stacks); 96 begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
97 /*
98 * Handle the case where stack trace is collected _before_
99 * cea_exception_stacks had been initialized.
100 */
101 if (!begin)
102 return false;
103
97 end = begin + sizeof(struct cea_exception_stacks); 104 end = begin + sizeof(struct cea_exception_stacks);
98 /* Bail if @stack is outside the exception stack area. */ 105 /* Bail if @stack is outside the exception stack area. */
99 if (stk < begin || stk >= end) 106 if (stk < begin || stk >= end)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c59454c382fd..7e322e2daaf5 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1505,6 +1505,9 @@ void __init tsc_init(void)
1505 return; 1505 return;
1506 } 1506 }
1507 1507
1508 if (tsc_clocksource_reliable || no_tsc_watchdog)
1509 clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
1510
1508 clocksource_register_khz(&clocksource_tsc_early, tsc_khz); 1511 clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
1509 detect_art(); 1512 detect_art();
1510} 1513}
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 5d21027b1faf..1eb8895be4c6 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -934,9 +934,14 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
934 int i; 934 int i;
935 bool has_stats = false; 935 bool has_stats = false;
936 936
937 spin_lock_irq(&blkg->q->queue_lock);
938
939 if (!blkg->online)
940 goto skip;
941
937 dname = blkg_dev_name(blkg); 942 dname = blkg_dev_name(blkg);
938 if (!dname) 943 if (!dname)
939 continue; 944 goto skip;
940 945
941 /* 946 /*
942 * Hooray string manipulation, count is the size written NOT 947 * Hooray string manipulation, count is the size written NOT
@@ -946,8 +951,6 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
946 */ 951 */
947 off += scnprintf(buf+off, size-off, "%s ", dname); 952 off += scnprintf(buf+off, size-off, "%s ", dname);
948 953
949 spin_lock_irq(&blkg->q->queue_lock);
950
951 blkg_rwstat_recursive_sum(blkg, NULL, 954 blkg_rwstat_recursive_sum(blkg, NULL,
952 offsetof(struct blkcg_gq, stat_bytes), &rwstat); 955 offsetof(struct blkcg_gq, stat_bytes), &rwstat);
953 rbytes = rwstat.cnt[BLKG_RWSTAT_READ]; 956 rbytes = rwstat.cnt[BLKG_RWSTAT_READ];
@@ -960,8 +963,6 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
960 wios = rwstat.cnt[BLKG_RWSTAT_WRITE]; 963 wios = rwstat.cnt[BLKG_RWSTAT_WRITE];
961 dios = rwstat.cnt[BLKG_RWSTAT_DISCARD]; 964 dios = rwstat.cnt[BLKG_RWSTAT_DISCARD];
962 965
963 spin_unlock_irq(&blkg->q->queue_lock);
964
965 if (rbytes || wbytes || rios || wios) { 966 if (rbytes || wbytes || rios || wios) {
966 has_stats = true; 967 has_stats = true;
967 off += scnprintf(buf+off, size-off, 968 off += scnprintf(buf+off, size-off,
@@ -999,6 +1000,8 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
999 seq_commit(sf, -1); 1000 seq_commit(sf, -1);
1000 } 1001 }
1001 } 1002 }
1003 skip:
1004 spin_unlock_irq(&blkg->q->queue_lock);
1002 } 1005 }
1003 1006
1004 rcu_read_unlock(); 1007 rcu_read_unlock();
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 5b248763a672..a18155cdce41 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -786,7 +786,6 @@ int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cm
786 786
787 if (nc->tentative && connection->agreed_pro_version < 92) { 787 if (nc->tentative && connection->agreed_pro_version < 92) {
788 rcu_read_unlock(); 788 rcu_read_unlock();
789 mutex_unlock(&sock->mutex);
790 drbd_err(connection, "--dry-run is not supported by peer"); 789 drbd_err(connection, "--dry-run is not supported by peer");
791 return -EOPNOTSUPP; 790 return -EOPNOTSUPP;
792 } 791 }
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index 87083b3a2769..37c22667e831 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -297,7 +297,10 @@ static int clk_main_probe_frequency(struct regmap *regmap)
297 regmap_read(regmap, AT91_CKGR_MCFR, &mcfr); 297 regmap_read(regmap, AT91_CKGR_MCFR, &mcfr);
298 if (mcfr & AT91_PMC_MAINRDY) 298 if (mcfr & AT91_PMC_MAINRDY)
299 return 0; 299 return 0;
300 usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT); 300 if (system_state < SYSTEM_RUNNING)
301 udelay(MAINF_LOOP_MIN_WAIT);
302 else
303 usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
301 } while (time_before(prep_time, timeout)); 304 } while (time_before(prep_time, timeout));
302 305
303 return -ETIMEDOUT; 306 return -ETIMEDOUT;
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index 9790ddfa5b3c..86238d5ecb4d 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -43,6 +43,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
43}; 43};
44 44
45static const struct clk_programmable_layout sam9x60_programmable_layout = { 45static const struct clk_programmable_layout sam9x60_programmable_layout = {
46 .pres_mask = 0xff,
46 .pres_shift = 8, 47 .pres_shift = 8,
47 .css_mask = 0x1f, 48 .css_mask = 0x1f,
48 .have_slck_mck = 0, 49 .have_slck_mck = 0,
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
index 9bfe9a28294a..fac0ca56d42d 100644
--- a/drivers/clk/at91/sckc.c
+++ b/drivers/clk/at91/sckc.c
@@ -76,7 +76,10 @@ static int clk_slow_osc_prepare(struct clk_hw *hw)
76 76
77 writel(tmp | osc->bits->cr_osc32en, sckcr); 77 writel(tmp | osc->bits->cr_osc32en, sckcr);
78 78
79 usleep_range(osc->startup_usec, osc->startup_usec + 1); 79 if (system_state < SYSTEM_RUNNING)
80 udelay(osc->startup_usec);
81 else
82 usleep_range(osc->startup_usec, osc->startup_usec + 1);
80 83
81 return 0; 84 return 0;
82} 85}
@@ -187,7 +190,10 @@ static int clk_slow_rc_osc_prepare(struct clk_hw *hw)
187 190
188 writel(readl(sckcr) | osc->bits->cr_rcen, sckcr); 191 writel(readl(sckcr) | osc->bits->cr_rcen, sckcr);
189 192
190 usleep_range(osc->startup_usec, osc->startup_usec + 1); 193 if (system_state < SYSTEM_RUNNING)
194 udelay(osc->startup_usec);
195 else
196 usleep_range(osc->startup_usec, osc->startup_usec + 1);
191 197
192 return 0; 198 return 0;
193} 199}
@@ -288,7 +294,10 @@ static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index)
288 294
289 writel(tmp, sckcr); 295 writel(tmp, sckcr);
290 296
291 usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1); 297 if (system_state < SYSTEM_RUNNING)
298 udelay(SLOWCK_SW_TIME_USEC);
299 else
300 usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);
292 301
293 return 0; 302 return 0;
294} 303}
@@ -533,7 +542,10 @@ static int clk_sama5d4_slow_osc_prepare(struct clk_hw *hw)
533 return 0; 542 return 0;
534 } 543 }
535 544
536 usleep_range(osc->startup_usec, osc->startup_usec + 1); 545 if (system_state < SYSTEM_RUNNING)
546 udelay(osc->startup_usec);
547 else
548 usleep_range(osc->startup_usec, osc->startup_usec + 1);
537 osc->prepared = true; 549 osc->prepared = true;
538 550
539 return 0; 551 return 0;
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index 1c1bb39bb04e..b1318e6b655b 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -266,10 +266,11 @@ static int aspeed_g6_clk_enable(struct clk_hw *hw)
266 266
267 /* Enable clock */ 267 /* Enable clock */
268 if (gate->flags & CLK_GATE_SET_TO_DISABLE) { 268 if (gate->flags & CLK_GATE_SET_TO_DISABLE) {
269 regmap_write(gate->map, get_clock_reg(gate), clk); 269 /* Clock is clear to enable, so use set to clear register */
270 } else {
271 /* Use set to clear register */
272 regmap_write(gate->map, get_clock_reg(gate) + 0x04, clk); 270 regmap_write(gate->map, get_clock_reg(gate) + 0x04, clk);
271 } else {
272 /* Clock is set to enable, so use write to set register */
273 regmap_write(gate->map, get_clock_reg(gate), clk);
273 } 274 }
274 275
275 if (gate->reset_idx >= 0) { 276 if (gate->reset_idx >= 0) {
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 067ab876911d..172589e94f60 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -638,7 +638,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
638 clks[IMX8MM_CLK_A53_DIV], 638 clks[IMX8MM_CLK_A53_DIV],
639 clks[IMX8MM_CLK_A53_SRC], 639 clks[IMX8MM_CLK_A53_SRC],
640 clks[IMX8MM_ARM_PLL_OUT], 640 clks[IMX8MM_ARM_PLL_OUT],
641 clks[IMX8MM_CLK_24M]); 641 clks[IMX8MM_SYS_PLL1_800M]);
642 642
643 imx_check_clocks(clks, ARRAY_SIZE(clks)); 643 imx_check_clocks(clks, ARRAY_SIZE(clks));
644 644
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 47a4b44ba3cb..58b5acee3830 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -610,7 +610,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
610 clks[IMX8MN_CLK_A53_DIV], 610 clks[IMX8MN_CLK_A53_DIV],
611 clks[IMX8MN_CLK_A53_SRC], 611 clks[IMX8MN_CLK_A53_SRC],
612 clks[IMX8MN_ARM_PLL_OUT], 612 clks[IMX8MN_ARM_PLL_OUT],
613 clks[IMX8MN_CLK_24M]); 613 clks[IMX8MN_SYS_PLL1_800M]);
614 614
615 imx_check_clocks(clks, ARRAY_SIZE(clks)); 615 imx_check_clocks(clks, ARRAY_SIZE(clks));
616 616
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index ea4c791f106d..b3af61cc6fb9 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -343,6 +343,7 @@ static struct clk_regmap g12a_cpu_clk_premux0 = {
343 .offset = HHI_SYS_CPU_CLK_CNTL0, 343 .offset = HHI_SYS_CPU_CLK_CNTL0,
344 .mask = 0x3, 344 .mask = 0x3,
345 .shift = 0, 345 .shift = 0,
346 .flags = CLK_MUX_ROUND_CLOSEST,
346 }, 347 },
347 .hw.init = &(struct clk_init_data){ 348 .hw.init = &(struct clk_init_data){
348 .name = "cpu_clk_dyn0_sel", 349 .name = "cpu_clk_dyn0_sel",
@@ -353,8 +354,7 @@ static struct clk_regmap g12a_cpu_clk_premux0 = {
353 { .hw = &g12a_fclk_div3.hw }, 354 { .hw = &g12a_fclk_div3.hw },
354 }, 355 },
355 .num_parents = 3, 356 .num_parents = 3,
356 /* This sub-tree is used a parking clock */ 357 .flags = CLK_SET_RATE_PARENT,
357 .flags = CLK_SET_RATE_NO_REPARENT,
358 }, 358 },
359}; 359};
360 360
@@ -410,6 +410,7 @@ static struct clk_regmap g12a_cpu_clk_postmux0 = {
410 .offset = HHI_SYS_CPU_CLK_CNTL0, 410 .offset = HHI_SYS_CPU_CLK_CNTL0,
411 .mask = 0x1, 411 .mask = 0x1,
412 .shift = 2, 412 .shift = 2,
413 .flags = CLK_MUX_ROUND_CLOSEST,
413 }, 414 },
414 .hw.init = &(struct clk_init_data){ 415 .hw.init = &(struct clk_init_data){
415 .name = "cpu_clk_dyn0", 416 .name = "cpu_clk_dyn0",
@@ -466,6 +467,7 @@ static struct clk_regmap g12a_cpu_clk_dyn = {
466 .offset = HHI_SYS_CPU_CLK_CNTL0, 467 .offset = HHI_SYS_CPU_CLK_CNTL0,
467 .mask = 0x1, 468 .mask = 0x1,
468 .shift = 10, 469 .shift = 10,
470 .flags = CLK_MUX_ROUND_CLOSEST,
469 }, 471 },
470 .hw.init = &(struct clk_init_data){ 472 .hw.init = &(struct clk_init_data){
471 .name = "cpu_clk_dyn", 473 .name = "cpu_clk_dyn",
@@ -485,6 +487,7 @@ static struct clk_regmap g12a_cpu_clk = {
485 .offset = HHI_SYS_CPU_CLK_CNTL0, 487 .offset = HHI_SYS_CPU_CLK_CNTL0,
486 .mask = 0x1, 488 .mask = 0x1,
487 .shift = 11, 489 .shift = 11,
490 .flags = CLK_MUX_ROUND_CLOSEST,
488 }, 491 },
489 .hw.init = &(struct clk_init_data){ 492 .hw.init = &(struct clk_init_data){
490 .name = "cpu_clk", 493 .name = "cpu_clk",
@@ -504,6 +507,7 @@ static struct clk_regmap g12b_cpu_clk = {
504 .offset = HHI_SYS_CPU_CLK_CNTL0, 507 .offset = HHI_SYS_CPU_CLK_CNTL0,
505 .mask = 0x1, 508 .mask = 0x1,
506 .shift = 11, 509 .shift = 11,
510 .flags = CLK_MUX_ROUND_CLOSEST,
507 }, 511 },
508 .hw.init = &(struct clk_init_data){ 512 .hw.init = &(struct clk_init_data){
509 .name = "cpu_clk", 513 .name = "cpu_clk",
@@ -523,6 +527,7 @@ static struct clk_regmap g12b_cpub_clk_premux0 = {
523 .offset = HHI_SYS_CPUB_CLK_CNTL, 527 .offset = HHI_SYS_CPUB_CLK_CNTL,
524 .mask = 0x3, 528 .mask = 0x3,
525 .shift = 0, 529 .shift = 0,
530 .flags = CLK_MUX_ROUND_CLOSEST,
526 }, 531 },
527 .hw.init = &(struct clk_init_data){ 532 .hw.init = &(struct clk_init_data){
528 .name = "cpub_clk_dyn0_sel", 533 .name = "cpub_clk_dyn0_sel",
@@ -533,6 +538,7 @@ static struct clk_regmap g12b_cpub_clk_premux0 = {
533 { .hw = &g12a_fclk_div3.hw }, 538 { .hw = &g12a_fclk_div3.hw },
534 }, 539 },
535 .num_parents = 3, 540 .num_parents = 3,
541 .flags = CLK_SET_RATE_PARENT,
536 }, 542 },
537}; 543};
538 544
@@ -567,6 +573,7 @@ static struct clk_regmap g12b_cpub_clk_postmux0 = {
567 .offset = HHI_SYS_CPUB_CLK_CNTL, 573 .offset = HHI_SYS_CPUB_CLK_CNTL,
568 .mask = 0x1, 574 .mask = 0x1,
569 .shift = 2, 575 .shift = 2,
576 .flags = CLK_MUX_ROUND_CLOSEST,
570 }, 577 },
571 .hw.init = &(struct clk_init_data){ 578 .hw.init = &(struct clk_init_data){
572 .name = "cpub_clk_dyn0", 579 .name = "cpub_clk_dyn0",
@@ -644,6 +651,7 @@ static struct clk_regmap g12b_cpub_clk_dyn = {
644 .offset = HHI_SYS_CPUB_CLK_CNTL, 651 .offset = HHI_SYS_CPUB_CLK_CNTL,
645 .mask = 0x1, 652 .mask = 0x1,
646 .shift = 10, 653 .shift = 10,
654 .flags = CLK_MUX_ROUND_CLOSEST,
647 }, 655 },
648 .hw.init = &(struct clk_init_data){ 656 .hw.init = &(struct clk_init_data){
649 .name = "cpub_clk_dyn", 657 .name = "cpub_clk_dyn",
@@ -663,6 +671,7 @@ static struct clk_regmap g12b_cpub_clk = {
663 .offset = HHI_SYS_CPUB_CLK_CNTL, 671 .offset = HHI_SYS_CPUB_CLK_CNTL,
664 .mask = 0x1, 672 .mask = 0x1,
665 .shift = 11, 673 .shift = 11,
674 .flags = CLK_MUX_ROUND_CLOSEST,
666 }, 675 },
667 .hw.init = &(struct clk_init_data){ 676 .hw.init = &(struct clk_init_data){
668 .name = "cpub_clk", 677 .name = "cpub_clk",
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 7cfb998eeb3e..1f9c056e684c 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -935,6 +935,7 @@ static struct clk_regmap gxbb_sar_adc_clk_div = {
935 &gxbb_sar_adc_clk_sel.hw 935 &gxbb_sar_adc_clk_sel.hw
936 }, 936 },
937 .num_parents = 1, 937 .num_parents = 1,
938 .flags = CLK_SET_RATE_PARENT,
938 }, 939 },
939}; 940};
940 941
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 7670cc596c74..31466cd1842f 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -165,12 +165,18 @@ static const unsigned long exynos5x_clk_regs[] __initconst = {
165 GATE_BUS_CPU, 165 GATE_BUS_CPU,
166 GATE_SCLK_CPU, 166 GATE_SCLK_CPU,
167 CLKOUT_CMU_CPU, 167 CLKOUT_CMU_CPU,
168 CPLL_CON0,
169 DPLL_CON0,
168 EPLL_CON0, 170 EPLL_CON0,
169 EPLL_CON1, 171 EPLL_CON1,
170 EPLL_CON2, 172 EPLL_CON2,
171 RPLL_CON0, 173 RPLL_CON0,
172 RPLL_CON1, 174 RPLL_CON1,
173 RPLL_CON2, 175 RPLL_CON2,
176 IPLL_CON0,
177 SPLL_CON0,
178 VPLL_CON0,
179 MPLL_CON0,
174 SRC_TOP0, 180 SRC_TOP0,
175 SRC_TOP1, 181 SRC_TOP1,
176 SRC_TOP2, 182 SRC_TOP2,
@@ -1172,8 +1178,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
1172 GATE(CLK_SCLK_ISP_SENSOR2, "sclk_isp_sensor2", "dout_isp_sensor2", 1178 GATE(CLK_SCLK_ISP_SENSOR2, "sclk_isp_sensor2", "dout_isp_sensor2",
1173 GATE_TOP_SCLK_ISP, 12, CLK_SET_RATE_PARENT, 0), 1179 GATE_TOP_SCLK_ISP, 12, CLK_SET_RATE_PARENT, 0),
1174 1180
1175 GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
1176
1177 /* CDREX */ 1181 /* CDREX */
1178 GATE(CLK_CLKM_PHY0, "clkm_phy0", "dout_sclk_cdrex", 1182 GATE(CLK_CLKM_PHY0, "clkm_phy0", "dout_sclk_cdrex",
1179 GATE_BUS_CDREX0, 0, 0, 0), 1183 GATE_BUS_CDREX0, 0, 0, 0),
@@ -1248,6 +1252,15 @@ static struct exynos5_subcmu_reg_dump exynos5x_gsc_suspend_regs[] = {
1248 { DIV2_RATIO0, 0, 0x30 }, /* DIV dout_gscl_blk_300 */ 1252 { DIV2_RATIO0, 0, 0x30 }, /* DIV dout_gscl_blk_300 */
1249}; 1253};
1250 1254
1255static const struct samsung_gate_clock exynos5x_g3d_gate_clks[] __initconst = {
1256 GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
1257};
1258
1259static struct exynos5_subcmu_reg_dump exynos5x_g3d_suspend_regs[] = {
1260 { GATE_IP_G3D, 0x3ff, 0x3ff }, /* G3D gates */
1261 { SRC_TOP5, 0, BIT(16) }, /* MUX mout_user_aclk_g3d */
1262};
1263
1251static const struct samsung_div_clock exynos5x_mfc_div_clks[] __initconst = { 1264static const struct samsung_div_clock exynos5x_mfc_div_clks[] __initconst = {
1252 DIV(0, "dout_mfc_blk", "mout_user_aclk333", DIV4_RATIO, 0, 2), 1265 DIV(0, "dout_mfc_blk", "mout_user_aclk333", DIV4_RATIO, 0, 2),
1253}; 1266};
@@ -1320,6 +1333,14 @@ static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = {
1320 .pd_name = "GSC", 1333 .pd_name = "GSC",
1321}; 1334};
1322 1335
1336static const struct exynos5_subcmu_info exynos5x_g3d_subcmu = {
1337 .gate_clks = exynos5x_g3d_gate_clks,
1338 .nr_gate_clks = ARRAY_SIZE(exynos5x_g3d_gate_clks),
1339 .suspend_regs = exynos5x_g3d_suspend_regs,
1340 .nr_suspend_regs = ARRAY_SIZE(exynos5x_g3d_suspend_regs),
1341 .pd_name = "G3D",
1342};
1343
1323static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = { 1344static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
1324 .div_clks = exynos5x_mfc_div_clks, 1345 .div_clks = exynos5x_mfc_div_clks,
1325 .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks), 1346 .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
@@ -1351,6 +1372,7 @@ static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
1351static const struct exynos5_subcmu_info *exynos5x_subcmus[] = { 1372static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
1352 &exynos5x_disp_subcmu, 1373 &exynos5x_disp_subcmu,
1353 &exynos5x_gsc_subcmu, 1374 &exynos5x_gsc_subcmu,
1375 &exynos5x_g3d_subcmu,
1354 &exynos5x_mfc_subcmu, 1376 &exynos5x_mfc_subcmu,
1355 &exynos5x_mscl_subcmu, 1377 &exynos5x_mscl_subcmu,
1356}; 1378};
@@ -1358,6 +1380,7 @@ static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
1358static const struct exynos5_subcmu_info *exynos5800_subcmus[] = { 1380static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
1359 &exynos5x_disp_subcmu, 1381 &exynos5x_disp_subcmu,
1360 &exynos5x_gsc_subcmu, 1382 &exynos5x_gsc_subcmu,
1383 &exynos5x_g3d_subcmu,
1361 &exynos5x_mfc_subcmu, 1384 &exynos5x_mfc_subcmu,
1362 &exynos5x_mscl_subcmu, 1385 &exynos5x_mscl_subcmu,
1363 &exynos5800_mau_subcmu, 1386 &exynos5800_mau_subcmu,
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 7824c2ba3d8e..4b1aa9382ad2 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -13,6 +13,7 @@
13#include <linux/of_device.h> 13#include <linux/of_device.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/pm_runtime.h> 15#include <linux/pm_runtime.h>
16#include <linux/slab.h>
16 17
17#include <dt-bindings/clock/exynos5433.h> 18#include <dt-bindings/clock/exynos5433.h>
18 19
@@ -5584,6 +5585,8 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)
5584 5585
5585 data->clk_save = samsung_clk_alloc_reg_dump(info->clk_regs, 5586 data->clk_save = samsung_clk_alloc_reg_dump(info->clk_regs,
5586 info->nr_clk_regs); 5587 info->nr_clk_regs);
5588 if (!data->clk_save)
5589 return -ENOMEM;
5587 data->nr_clk_save = info->nr_clk_regs; 5590 data->nr_clk_save = info->nr_clk_regs;
5588 data->clk_suspend = info->suspend_regs; 5591 data->clk_suspend = info->suspend_regs;
5589 data->nr_clk_suspend = info->nr_suspend_regs; 5592 data->nr_clk_suspend = info->nr_suspend_regs;
@@ -5592,12 +5595,19 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)
5592 if (data->nr_pclks > 0) { 5595 if (data->nr_pclks > 0) {
5593 data->pclks = devm_kcalloc(dev, sizeof(struct clk *), 5596 data->pclks = devm_kcalloc(dev, sizeof(struct clk *),
5594 data->nr_pclks, GFP_KERNEL); 5597 data->nr_pclks, GFP_KERNEL);
5595 5598 if (!data->pclks) {
5599 kfree(data->clk_save);
5600 return -ENOMEM;
5601 }
5596 for (i = 0; i < data->nr_pclks; i++) { 5602 for (i = 0; i < data->nr_pclks; i++) {
5597 struct clk *clk = of_clk_get(dev->of_node, i); 5603 struct clk *clk = of_clk_get(dev->of_node, i);
5598 5604
5599 if (IS_ERR(clk)) 5605 if (IS_ERR(clk)) {
5606 kfree(data->clk_save);
5607 while (--i >= 0)
5608 clk_put(data->pclks[i]);
5600 return PTR_ERR(clk); 5609 return PTR_ERR(clk);
5610 }
5601 data->pclks[i] = clk; 5611 data->pclks[i] = clk;
5602 } 5612 }
5603 } 5613 }
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
index dcac1391767f..ef29582676f6 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
@@ -1224,7 +1224,7 @@ static int sun9i_a80_ccu_probe(struct platform_device *pdev)
1224 1224
1225 /* Enforce d1 = 0, d2 = 0 for Audio PLL */ 1225 /* Enforce d1 = 0, d2 = 0 for Audio PLL */
1226 val = readl(reg + SUN9I_A80_PLL_AUDIO_REG); 1226 val = readl(reg + SUN9I_A80_PLL_AUDIO_REG);
1227 val &= (BIT(16) & BIT(18)); 1227 val &= ~(BIT(16) | BIT(18));
1228 writel(val, reg + SUN9I_A80_PLL_AUDIO_REG); 1228 writel(val, reg + SUN9I_A80_PLL_AUDIO_REG);
1229 1229
1230 /* Enforce P = 1 for both CPU cluster PLLs */ 1230 /* Enforce P = 1 for both CPU cluster PLLs */
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index d3a43381a792..27201fd26e44 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -1080,8 +1080,8 @@ static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node,
1080 rate_hw, rate_ops, 1080 rate_hw, rate_ops,
1081 gate_hw, &clk_gate_ops, 1081 gate_hw, &clk_gate_ops,
1082 clkflags | 1082 clkflags |
1083 data->div[i].critical ? 1083 (data->div[i].critical ?
1084 CLK_IS_CRITICAL : 0); 1084 CLK_IS_CRITICAL : 0));
1085 1085
1086 WARN_ON(IS_ERR(clk_data->clks[i])); 1086 WARN_ON(IS_ERR(clk_data->clks[i]));
1087 } 1087 }
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index a01ca9395179..f65e16c4f3c4 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -174,7 +174,6 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
174 struct clk_init_data init = { NULL }; 174 struct clk_init_data init = { NULL };
175 const char **parent_names = NULL; 175 const char **parent_names = NULL;
176 struct clk *clk; 176 struct clk *clk;
177 int ret;
178 177
179 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); 178 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
180 if (!clk_hw) { 179 if (!clk_hw) {
@@ -207,11 +206,6 @@ static void __init of_dra7_atl_clock_setup(struct device_node *node)
207 clk = ti_clk_register(NULL, &clk_hw->hw, node->name); 206 clk = ti_clk_register(NULL, &clk_hw->hw, node->name);
208 207
209 if (!IS_ERR(clk)) { 208 if (!IS_ERR(clk)) {
210 ret = ti_clk_add_alias(NULL, clk, node->name);
211 if (ret) {
212 clk_unregister(clk);
213 goto cleanup;
214 }
215 of_clk_add_provider(node, of_clk_src_simple_get, clk); 209 of_clk_add_provider(node, of_clk_src_simple_get, clk);
216 kfree(parent_names); 210 kfree(parent_names);
217 return; 211 return;
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 975995eea15c..b0c0690a5a12 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -100,11 +100,12 @@ static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
100 * can be from a timer that requires pm_runtime access, which 100 * can be from a timer that requires pm_runtime access, which
101 * will eventually bring us here with timekeeping_suspended, 101 * will eventually bring us here with timekeeping_suspended,
102 * during both suspend entry and resume paths. This happens 102 * during both suspend entry and resume paths. This happens
103 * at least on am43xx platform. 103 * at least on am43xx platform. Account for flakeyness
104 * with udelay() by multiplying the timeout value by 2.
104 */ 105 */
105 if (unlikely(_early_timeout || timekeeping_suspended)) { 106 if (unlikely(_early_timeout || timekeeping_suspended)) {
106 if (time->cycles++ < timeout) { 107 if (time->cycles++ < timeout) {
107 udelay(1); 108 udelay(1 * 2);
108 return false; 109 return false;
109 } 110 }
110 } else { 111 } else {
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 354b27d14a19..62812f80b5cc 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -328,12 +328,13 @@ static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name)
328 return 0; 328 return 0;
329} 329}
330 330
331static const unsigned int sh_mtu2_channel_offsets[] = {
332 0x300, 0x380, 0x000,
333};
334
331static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index, 335static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
332 struct sh_mtu2_device *mtu) 336 struct sh_mtu2_device *mtu)
333{ 337{
334 static const unsigned int channel_offsets[] = {
335 0x300, 0x380, 0x000,
336 };
337 char name[6]; 338 char name[6];
338 int irq; 339 int irq;
339 int ret; 340 int ret;
@@ -356,7 +357,7 @@ static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
356 return ret; 357 return ret;
357 } 358 }
358 359
359 ch->base = mtu->mapbase + channel_offsets[index]; 360 ch->base = mtu->mapbase + sh_mtu2_channel_offsets[index];
360 ch->index = index; 361 ch->index = index;
361 362
362 return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev)); 363 return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev));
@@ -408,7 +409,12 @@ static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
408 } 409 }
409 410
410 /* Allocate and setup the channels. */ 411 /* Allocate and setup the channels. */
411 mtu->num_channels = 3; 412 ret = platform_irq_count(pdev);
413 if (ret < 0)
414 goto err_unmap;
415
416 mtu->num_channels = min_t(unsigned int, ret,
417 ARRAY_SIZE(sh_mtu2_channel_offsets));
412 418
413 mtu->channels = kcalloc(mtu->num_channels, sizeof(*mtu->channels), 419 mtu->channels = kcalloc(mtu->num_channels, sizeof(*mtu->channels),
414 GFP_KERNEL); 420 GFP_KERNEL);
diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c
index a562f491b0f8..9318edcd8963 100644
--- a/drivers/clocksource/timer-mediatek.c
+++ b/drivers/clocksource/timer-mediatek.c
@@ -268,15 +268,12 @@ static int __init mtk_syst_init(struct device_node *node)
268 268
269 ret = timer_of_init(node, &to); 269 ret = timer_of_init(node, &to);
270 if (ret) 270 if (ret)
271 goto err; 271 return ret;
272 272
273 clockevents_config_and_register(&to.clkevt, timer_of_rate(&to), 273 clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
274 TIMER_SYNC_TICKS, 0xffffffff); 274 TIMER_SYNC_TICKS, 0xffffffff);
275 275
276 return 0; 276 return 0;
277err:
278 timer_of_cleanup(&to);
279 return ret;
280} 277}
281 278
282static int __init mtk_gpt_init(struct device_node *node) 279static int __init mtk_gpt_init(struct device_node *node)
@@ -293,7 +290,7 @@ static int __init mtk_gpt_init(struct device_node *node)
293 290
294 ret = timer_of_init(node, &to); 291 ret = timer_of_init(node, &to);
295 if (ret) 292 if (ret)
296 goto err; 293 return ret;
297 294
298 /* Configure clock source */ 295 /* Configure clock source */
299 mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN); 296 mtk_gpt_setup(&to, TIMER_CLK_SRC, GPT_CTRL_OP_FREERUN);
@@ -311,9 +308,6 @@ static int __init mtk_gpt_init(struct device_node *node)
311 mtk_gpt_enable_irq(&to, TIMER_CLK_EVT); 308 mtk_gpt_enable_irq(&to, TIMER_CLK_EVT);
312 309
313 return 0; 310 return 0;
314err:
315 timer_of_cleanup(&to);
316 return ret;
317} 311}
318TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init); 312TIMER_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_gpt_init);
319TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init); 313TIMER_OF_DECLARE(mtk_mt6765, "mediatek,mt6765-timer", mtk_syst_init);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 53a51c169451..8ab31702cf6a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -847,11 +847,9 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
847 value |= HWP_MAX_PERF(min_perf); 847 value |= HWP_MAX_PERF(min_perf);
848 value |= HWP_MIN_PERF(min_perf); 848 value |= HWP_MIN_PERF(min_perf);
849 849
850 /* Set EPP/EPB to min */ 850 /* Set EPP to min */
851 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) 851 if (boot_cpu_has(X86_FEATURE_HWP_EPP))
852 value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE); 852 value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
853 else
854 intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);
855 853
856 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); 854 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
857} 855}
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 2f1e9da81c1e..3302125e5265 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -362,9 +362,8 @@ static void mrfld_irq_handler(struct irq_desc *desc)
362 chained_irq_exit(irqchip, desc); 362 chained_irq_exit(irqchip, desc);
363} 363}
364 364
365static int mrfld_irq_init_hw(struct gpio_chip *chip) 365static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
366{ 366{
367 struct mrfld_gpio *priv = gpiochip_get_data(chip);
368 void __iomem *reg; 367 void __iomem *reg;
369 unsigned int base; 368 unsigned int base;
370 369
@@ -376,8 +375,6 @@ static int mrfld_irq_init_hw(struct gpio_chip *chip)
376 reg = gpio_reg(&priv->chip, base, GFER); 375 reg = gpio_reg(&priv->chip, base, GFER);
377 writel(0, reg); 376 writel(0, reg);
378 } 377 }
379
380 return 0;
381} 378}
382 379
383static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv) 380static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
@@ -400,7 +397,6 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
400{ 397{
401 const struct mrfld_gpio_pinrange *range; 398 const struct mrfld_gpio_pinrange *range;
402 const char *pinctrl_dev_name; 399 const char *pinctrl_dev_name;
403 struct gpio_irq_chip *girq;
404 struct mrfld_gpio *priv; 400 struct mrfld_gpio *priv;
405 u32 gpio_base, irq_base; 401 u32 gpio_base, irq_base;
406 void __iomem *base; 402 void __iomem *base;
@@ -448,21 +444,6 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
448 444
449 raw_spin_lock_init(&priv->lock); 445 raw_spin_lock_init(&priv->lock);
450 446
451 girq = &priv->chip.irq;
452 girq->chip = &mrfld_irqchip;
453 girq->init_hw = mrfld_irq_init_hw;
454 girq->parent_handler = mrfld_irq_handler;
455 girq->num_parents = 1;
456 girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
457 sizeof(*girq->parents),
458 GFP_KERNEL);
459 if (!girq->parents)
460 return -ENOMEM;
461 girq->parents[0] = pdev->irq;
462 girq->first = irq_base;
463 girq->default_type = IRQ_TYPE_NONE;
464 girq->handler = handle_bad_irq;
465
466 pci_set_drvdata(pdev, priv); 447 pci_set_drvdata(pdev, priv);
467 retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv); 448 retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
468 if (retval) { 449 if (retval) {
@@ -484,6 +465,18 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
484 } 465 }
485 } 466 }
486 467
468 retval = gpiochip_irqchip_add(&priv->chip, &mrfld_irqchip, irq_base,
469 handle_bad_irq, IRQ_TYPE_NONE);
470 if (retval) {
471 dev_err(&pdev->dev, "could not connect irqchip to gpiochip\n");
472 return retval;
473 }
474
475 mrfld_irq_init_hw(priv);
476
477 gpiochip_set_chained_irqchip(&priv->chip, &mrfld_irqchip, pdev->irq,
478 mrfld_irq_handler);
479
487 return 0; 480 return 0;
488} 481}
489 482
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6614d8a6f4c8..2cdaf3b2a721 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -604,8 +604,11 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
604 continue; 604 continue;
605 } 605 }
606 606
607 for (i = 0; i < num_entities; i++) 607 for (i = 0; i < num_entities; i++) {
608 mutex_lock(&ctx->adev->lock_reset);
608 drm_sched_entity_fini(&ctx->entities[0][i].entity); 609 drm_sched_entity_fini(&ctx->entities[0][i].entity);
610 mutex_unlock(&ctx->adev->lock_reset);
611 }
609 } 612 }
610} 613}
611 614
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5a1939dbd4e3..7a6c837c0a85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2885,6 +2885,13 @@ fence_driver_init:
2885 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n"); 2885 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2886 } 2886 }
2887 2887
2888 /*
2889 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
2890 * Otherwise the mgpu fan boost feature will be skipped due to the
2891 * gpu instance is counted less.
2892 */
2893 amdgpu_register_gpu_instance(adev);
2894
2888 /* enable clockgating, etc. after ib tests, etc. since some blocks require 2895 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2889 * explicit gating rather than handling it automatically. 2896 * explicit gating rather than handling it automatically.
2890 */ 2897 */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 2a00a36106b2..e1c15721611a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1016,6 +1016,7 @@ static const struct pci_device_id pciidlist[] = {
1016 {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, 1016 {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
1017 {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, 1017 {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
1018 {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT}, 1018 {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
1019 {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
1019 1020
1020 /* Renoir */ 1021 /* Renoir */
1021 {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT}, 1022 {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 6ee4021910e2..6d19183b478b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -289,6 +289,7 @@ struct amdgpu_gfx {
289 uint32_t mec2_feature_version; 289 uint32_t mec2_feature_version;
290 bool mec_fw_write_wait; 290 bool mec_fw_write_wait;
291 bool me_fw_write_wait; 291 bool me_fw_write_wait;
292 bool cp_fw_write_wait;
292 struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; 293 struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
293 unsigned num_gfx_rings; 294 unsigned num_gfx_rings;
294 struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; 295 struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d55f5baa83d3..a042ef471fbd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -190,7 +190,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
190 pm_runtime_put_autosuspend(dev->dev); 190 pm_runtime_put_autosuspend(dev->dev);
191 } 191 }
192 192
193 amdgpu_register_gpu_instance(adev);
194out: 193out:
195 if (r) { 194 if (r) {
196 /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */ 195 /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 8dfc775626a7..53090eae0082 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -564,6 +564,32 @@ static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
564 kfree(adev->gfx.rlc.register_list_format); 564 kfree(adev->gfx.rlc.register_list_format);
565} 565}
566 566
567static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
568{
569 adev->gfx.cp_fw_write_wait = false;
570
571 switch (adev->asic_type) {
572 case CHIP_NAVI10:
573 case CHIP_NAVI12:
574 case CHIP_NAVI14:
575 if ((adev->gfx.me_fw_version >= 0x00000046) &&
576 (adev->gfx.me_feature_version >= 27) &&
577 (adev->gfx.pfp_fw_version >= 0x00000068) &&
578 (adev->gfx.pfp_feature_version >= 27) &&
579 (adev->gfx.mec_fw_version >= 0x0000005b) &&
580 (adev->gfx.mec_feature_version >= 27))
581 adev->gfx.cp_fw_write_wait = true;
582 break;
583 default:
584 break;
585 }
586
587 if (adev->gfx.cp_fw_write_wait == false)
588 DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
589 GRBM requires 1-cycle delay in cp firmware\n");
590}
591
592
567static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev) 593static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
568{ 594{
569 const struct rlc_firmware_header_v2_1 *rlc_hdr; 595 const struct rlc_firmware_header_v2_1 *rlc_hdr;
@@ -832,6 +858,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
832 } 858 }
833 } 859 }
834 860
861 gfx_v10_0_check_fw_write_wait(adev);
835out: 862out:
836 if (err) { 863 if (err) {
837 dev_err(adev->dev, 864 dev_err(adev->dev,
@@ -4765,6 +4792,24 @@ static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
4765 gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 4792 gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
4766} 4793}
4767 4794
4795static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
4796 uint32_t reg0, uint32_t reg1,
4797 uint32_t ref, uint32_t mask)
4798{
4799 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4800 struct amdgpu_device *adev = ring->adev;
4801 bool fw_version_ok = false;
4802
4803 fw_version_ok = adev->gfx.cp_fw_write_wait;
4804
4805 if (fw_version_ok)
4806 gfx_v10_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
4807 ref, mask, 0x20);
4808 else
4809 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
4810 ref, mask);
4811}
4812
4768static void 4813static void
4769gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 4814gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4770 uint32_t me, uint32_t pipe, 4815 uint32_t me, uint32_t pipe,
@@ -5155,6 +5200,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
5155 .emit_tmz = gfx_v10_0_ring_emit_tmz, 5200 .emit_tmz = gfx_v10_0_ring_emit_tmz,
5156 .emit_wreg = gfx_v10_0_ring_emit_wreg, 5201 .emit_wreg = gfx_v10_0_ring_emit_wreg,
5157 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, 5202 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5203 .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
5158}; 5204};
5159 5205
5160static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { 5206static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -5188,6 +5234,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
5188 .pad_ib = amdgpu_ring_generic_pad_ib, 5234 .pad_ib = amdgpu_ring_generic_pad_ib,
5189 .emit_wreg = gfx_v10_0_ring_emit_wreg, 5235 .emit_wreg = gfx_v10_0_ring_emit_wreg,
5190 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, 5236 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5237 .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
5191}; 5238};
5192 5239
5193static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { 5240static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
@@ -5218,6 +5265,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
5218 .emit_rreg = gfx_v10_0_ring_emit_rreg, 5265 .emit_rreg = gfx_v10_0_ring_emit_rreg,
5219 .emit_wreg = gfx_v10_0_ring_emit_wreg, 5266 .emit_wreg = gfx_v10_0_ring_emit_wreg,
5220 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, 5267 .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
5268 .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
5221}; 5269};
5222 5270
5223static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev) 5271static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index dcadc73bffd2..dfca83a2de47 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -973,6 +973,13 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
973 adev->gfx.me_fw_write_wait = false; 973 adev->gfx.me_fw_write_wait = false;
974 adev->gfx.mec_fw_write_wait = false; 974 adev->gfx.mec_fw_write_wait = false;
975 975
976 if ((adev->gfx.mec_fw_version < 0x000001a5) ||
977 (adev->gfx.mec_feature_version < 46) ||
978 (adev->gfx.pfp_fw_version < 0x000000b7) ||
979 (adev->gfx.pfp_feature_version < 46))
980 DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
981 GRBM requires 1-cycle delay in cp firmware\n");
982
976 switch (adev->asic_type) { 983 switch (adev->asic_type) {
977 case CHIP_VEGA10: 984 case CHIP_VEGA10:
978 if ((adev->gfx.me_fw_version >= 0x0000009c) && 985 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
@@ -1044,6 +1051,12 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
1044 AMD_PG_SUPPORT_CP | 1051 AMD_PG_SUPPORT_CP |
1045 AMD_PG_SUPPORT_RLC_SMU_HS; 1052 AMD_PG_SUPPORT_RLC_SMU_HS;
1046 break; 1053 break;
1054 case CHIP_RENOIR:
1055 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1056 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1057 AMD_PG_SUPPORT_CP |
1058 AMD_PG_SUPPORT_RLC_SMU_HS;
1059 break;
1047 default: 1060 default:
1048 break; 1061 break;
1049 } 1062 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 354e6200ca9a..5c7d5f73f54f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -344,11 +344,9 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
344 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid), 344 amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
345 upper_32_bits(pd_addr)); 345 upper_32_bits(pd_addr));
346 346
347 amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req); 347 amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
348 348 hub->vm_inv_eng0_ack + eng,
349 /* wait for the invalidate to complete */ 349 req, 1 << vmid);
350 amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
351 1 << vmid, 1 << vmid);
352 350
353 return pd_addr; 351 return pd_addr;
354} 352}
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 0cf7ef44b4b5..9ed178fa241c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -219,6 +219,15 @@ static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
219 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); 219 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
220 220
221 tmp = mmVML2PF0_VM_L2_CNTL3_DEFAULT; 221 tmp = mmVML2PF0_VM_L2_CNTL3_DEFAULT;
222 if (adev->gmc.translate_further) {
223 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 12);
224 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
225 L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
226 } else {
227 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 9);
228 tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
229 L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
230 }
222 WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3, 231 WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
223 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); 232 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
224 233
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index f6e81680dd7e..8493bfbbc148 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1173,6 +1173,16 @@ static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1173 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); 1173 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
1174} 1174}
1175 1175
1176static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
1177 uint32_t reg0, uint32_t reg1,
1178 uint32_t ref, uint32_t mask)
1179{
1180 amdgpu_ring_emit_wreg(ring, reg0, ref);
1181 /* wait for a cycle to reset vm_inv_eng*_ack */
1182 amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
1183 amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
1184}
1185
1176static int sdma_v5_0_early_init(void *handle) 1186static int sdma_v5_0_early_init(void *handle)
1177{ 1187{
1178 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1188 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1588,7 +1598,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
1588 6 + /* sdma_v5_0_ring_emit_pipeline_sync */ 1598 6 + /* sdma_v5_0_ring_emit_pipeline_sync */
1589 /* sdma_v5_0_ring_emit_vm_flush */ 1599 /* sdma_v5_0_ring_emit_vm_flush */
1590 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + 1600 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1591 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + 1601 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
1592 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */ 1602 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
1593 .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */ 1603 .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
1594 .emit_ib = sdma_v5_0_ring_emit_ib, 1604 .emit_ib = sdma_v5_0_ring_emit_ib,
@@ -1602,6 +1612,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
1602 .pad_ib = sdma_v5_0_ring_pad_ib, 1612 .pad_ib = sdma_v5_0_ring_pad_ib,
1603 .emit_wreg = sdma_v5_0_ring_emit_wreg, 1613 .emit_wreg = sdma_v5_0_ring_emit_wreg,
1604 .emit_reg_wait = sdma_v5_0_ring_emit_reg_wait, 1614 .emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
1615 .emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
1605 .init_cond_exec = sdma_v5_0_ring_init_cond_exec, 1616 .init_cond_exec = sdma_v5_0_ring_init_cond_exec,
1606 .patch_cond_exec = sdma_v5_0_ring_patch_cond_exec, 1617 .patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
1607 .preempt_ib = sdma_v5_0_ring_preempt_ib, 1618 .preempt_ib = sdma_v5_0_ring_preempt_ib,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f8ab80c8801b..4ccfcdf8f16a 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1186,11 +1186,6 @@ static int soc15_common_early_init(void *handle)
1186 AMD_PG_SUPPORT_VCN | 1186 AMD_PG_SUPPORT_VCN |
1187 AMD_PG_SUPPORT_VCN_DPG; 1187 AMD_PG_SUPPORT_VCN_DPG;
1188 adev->external_rev_id = adev->rev_id + 0x91; 1188 adev->external_rev_id = adev->rev_id + 0x91;
1189
1190 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1191 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1192 AMD_PG_SUPPORT_CP |
1193 AMD_PG_SUPPORT_RLC_SMU_HS;
1194 break; 1189 break;
1195 default: 1190 default:
1196 /* FIXME: not supported yet */ 1191 /* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 9c58670d5414..ca20b150afcc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2767,15 +2767,6 @@ void core_link_enable_stream(
2767 CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, 2767 CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
2768 COLOR_DEPTH_UNDEFINED); 2768 COLOR_DEPTH_UNDEFINED);
2769 2769
2770 /* This second call is needed to reconfigure the DIG
2771 * as a workaround for the incorrect value being applied
2772 * from transmitter control.
2773 */
2774 if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
2775 stream->link->link_enc->funcs->setup(
2776 stream->link->link_enc,
2777 pipe_ctx->stream->signal);
2778
2779#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT 2770#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
2780 if (pipe_ctx->stream->timing.flags.DSC) { 2771 if (pipe_ctx->stream->timing.flags.DSC) {
2781 if (dc_is_dp_signal(pipe_ctx->stream->signal) || 2772 if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index dfb208285a9c..6b2f2f1a1c9c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1107,6 +1107,11 @@ struct stream_encoder *dcn20_stream_encoder_create(
1107 if (!enc1) 1107 if (!enc1)
1108 return NULL; 1108 return NULL;
1109 1109
1110 if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
1111 if (eng_id >= ENGINE_ID_DIGD)
1112 eng_id++;
1113 }
1114
1110 dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, 1115 dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
1111 &stream_enc_regs[eng_id], 1116 &stream_enc_regs[eng_id],
1112 &se_shift, &se_mask); 1117 &se_shift, &se_mask);
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 0b461404af6b..3ec5a10a7c4d 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -205,7 +205,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE
205 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), 205 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
206 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), 206 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
207 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), 207 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
208 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT), 208 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
209 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 209 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
210}; 210};
211 211
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index bbd8ebd58434..92c393f613d3 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -219,7 +219,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_workload_map[PP_SMC_POWER_PROFILE
219 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), 219 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
220 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), 220 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
221 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), 221 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
222 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT), 222 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
223 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 223 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
224}; 224};
225 225
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 3ef2ac52ce94..2dd2cd87cdbb 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1581,8 +1581,11 @@ static void commit_tail(struct drm_atomic_state *old_state)
1581{ 1581{
1582 struct drm_device *dev = old_state->dev; 1582 struct drm_device *dev = old_state->dev;
1583 const struct drm_mode_config_helper_funcs *funcs; 1583 const struct drm_mode_config_helper_funcs *funcs;
1584 struct drm_crtc_state *new_crtc_state;
1585 struct drm_crtc *crtc;
1584 ktime_t start; 1586 ktime_t start;
1585 s64 commit_time_ms; 1587 s64 commit_time_ms;
1588 unsigned int i, new_self_refresh_mask = 0;
1586 1589
1587 funcs = dev->mode_config.helper_private; 1590 funcs = dev->mode_config.helper_private;
1588 1591
@@ -1602,6 +1605,15 @@ static void commit_tail(struct drm_atomic_state *old_state)
1602 1605
1603 drm_atomic_helper_wait_for_dependencies(old_state); 1606 drm_atomic_helper_wait_for_dependencies(old_state);
1604 1607
1608 /*
1609 * We cannot safely access new_crtc_state after
1610 * drm_atomic_helper_commit_hw_done() so figure out which crtc's have
1611 * self-refresh active beforehand:
1612 */
1613 for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i)
1614 if (new_crtc_state->self_refresh_active)
1615 new_self_refresh_mask |= BIT(i);
1616
1605 if (funcs && funcs->atomic_commit_tail) 1617 if (funcs && funcs->atomic_commit_tail)
1606 funcs->atomic_commit_tail(old_state); 1618 funcs->atomic_commit_tail(old_state);
1607 else 1619 else
@@ -1610,7 +1622,8 @@ static void commit_tail(struct drm_atomic_state *old_state)
1610 commit_time_ms = ktime_ms_delta(ktime_get(), start); 1622 commit_time_ms = ktime_ms_delta(ktime_get(), start);
1611 if (commit_time_ms > 0) 1623 if (commit_time_ms > 0)
1612 drm_self_refresh_helper_update_avg_times(old_state, 1624 drm_self_refresh_helper_update_avg_times(old_state,
1613 (unsigned long)commit_time_ms); 1625 (unsigned long)commit_time_ms,
1626 new_self_refresh_mask);
1614 1627
1615 drm_atomic_helper_commit_cleanup_done(old_state); 1628 drm_atomic_helper_commit_cleanup_done(old_state);
1616 1629
diff --git a/drivers/gpu/drm/drm_self_refresh_helper.c b/drivers/gpu/drm/drm_self_refresh_helper.c
index 68f4765a5896..dd33fec5aabd 100644
--- a/drivers/gpu/drm/drm_self_refresh_helper.c
+++ b/drivers/gpu/drm/drm_self_refresh_helper.c
@@ -133,29 +133,33 @@ out_drop_locks:
133 * drm_self_refresh_helper_update_avg_times - Updates a crtc's SR time averages 133 * drm_self_refresh_helper_update_avg_times - Updates a crtc's SR time averages
134 * @state: the state which has just been applied to hardware 134 * @state: the state which has just been applied to hardware
135 * @commit_time_ms: the amount of time in ms that this commit took to complete 135 * @commit_time_ms: the amount of time in ms that this commit took to complete
136 * @new_self_refresh_mask: bitmask of crtc's that have self_refresh_active in
137 * new state
136 * 138 *
137 * Called after &drm_mode_config_funcs.atomic_commit_tail, this function will 139 * Called after &drm_mode_config_funcs.atomic_commit_tail, this function will
138 * update the average entry/exit self refresh times on self refresh transitions. 140 * update the average entry/exit self refresh times on self refresh transitions.
139 * These averages will be used when calculating how long to delay before 141 * These averages will be used when calculating how long to delay before
140 * entering self refresh mode after activity. 142 * entering self refresh mode after activity.
141 */ 143 */
142void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state, 144void
143 unsigned int commit_time_ms) 145drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
146 unsigned int commit_time_ms,
147 unsigned int new_self_refresh_mask)
144{ 148{
145 struct drm_crtc *crtc; 149 struct drm_crtc *crtc;
146 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 150 struct drm_crtc_state *old_crtc_state;
147 int i; 151 int i;
148 152
149 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 153 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
150 new_crtc_state, i) { 154 bool new_self_refresh_active = new_self_refresh_mask & BIT(i);
151 struct drm_self_refresh_data *sr_data = crtc->self_refresh_data; 155 struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;
152 struct ewma_psr_time *time; 156 struct ewma_psr_time *time;
153 157
154 if (old_crtc_state->self_refresh_active == 158 if (old_crtc_state->self_refresh_active ==
155 new_crtc_state->self_refresh_active) 159 new_self_refresh_active)
156 continue; 160 continue;
157 161
158 if (new_crtc_state->self_refresh_active) 162 if (new_self_refresh_active)
159 time = &sr_data->entry_avg_ms; 163 time = &sr_data->entry_avg_ms;
160 else 164 else
161 time = &sr_data->exit_avg_ms; 165 time = &sr_data->exit_avg_ms;
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index e6e8d4a82044..0a08354a6183 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -864,6 +864,13 @@ load_detect:
864 864
865out: 865out:
866 intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); 866 intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
867
868 /*
869 * Make sure the refs for power wells enabled during detect are
870 * dropped to avoid a new detect cycle triggered by HPD polling.
871 */
872 intel_display_power_flush_work(dev_priv);
873
867 return status; 874 return status;
868} 875}
869 876
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 57e9f0ba331b..9b15ac4f2fb6 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1256,6 +1256,9 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1256 u32 unused) 1256 u32 unused)
1257{ 1257{
1258 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1258 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1259 struct drm_i915_private *i915 =
1260 to_i915(intel_dig_port->base.base.dev);
1261 enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
1259 u32 ret; 1262 u32 ret;
1260 1263
1261 ret = DP_AUX_CH_CTL_SEND_BUSY | 1264 ret = DP_AUX_CH_CTL_SEND_BUSY |
@@ -1268,7 +1271,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1268 DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | 1271 DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1269 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); 1272 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1270 1273
1271 if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT) 1274 if (intel_phy_is_tc(i915, phy) &&
1275 intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
1272 ret |= DP_AUX_CH_CTL_TBT_IO; 1276 ret |= DP_AUX_CH_CTL_TBT_IO;
1273 1277
1274 return ret; 1278 return ret;
@@ -5436,6 +5440,12 @@ out:
5436 if (status != connector_status_connected && !intel_dp->is_mst) 5440 if (status != connector_status_connected && !intel_dp->is_mst)
5437 intel_dp_unset_edid(intel_dp); 5441 intel_dp_unset_edid(intel_dp);
5438 5442
5443 /*
5444 * Make sure the refs for power wells enabled during detect are
5445 * dropped to avoid a new detect cycle triggered by HPD polling.
5446 */
5447 intel_display_power_flush_work(dev_priv);
5448
5439 return status; 5449 return status;
5440} 5450}
5441 5451
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index e02f0faecf02..b030f7ae3302 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -2565,6 +2565,12 @@ out:
2565 if (status != connector_status_connected) 2565 if (status != connector_status_connected)
2566 cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier); 2566 cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
2567 2567
2568 /*
2569 * Make sure the refs for power wells enabled during detect are
2570 * dropped to avoid a new detect cycle triggered by HPD polling.
2571 */
2572 intel_display_power_flush_work(dev_priv);
2573
2568 return status; 2574 return status;
2569} 2575}
2570 2576
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 460fd98e40a7..a0b382a637a6 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -1958,6 +1958,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
1958 case 0x682C: 1958 case 0x682C:
1959 si_pi->cac_weights = cac_weights_cape_verde_pro; 1959 si_pi->cac_weights = cac_weights_cape_verde_pro;
1960 si_pi->dte_data = dte_data_sun_xt; 1960 si_pi->dte_data = dte_data_sun_xt;
1961 update_dte_from_pl2 = true;
1961 break; 1962 break;
1962 case 0x6825: 1963 case 0x6825:
1963 case 0x6827: 1964 case 0x6827:
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index d9c55e30f986..04c088131e04 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -447,8 +447,12 @@ static int i2c_hid_hwreset(struct i2c_client *client)
447 if (ret) { 447 if (ret) {
448 dev_err(&client->dev, "failed to reset device.\n"); 448 dev_err(&client->dev, "failed to reset device.\n");
449 i2c_hid_set_power(client, I2C_HID_PWR_SLEEP); 449 i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
450 goto out_unlock;
450 } 451 }
451 452
453 /* At least some SIS devices need this after reset */
454 ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
455
452out_unlock: 456out_unlock:
453 mutex_unlock(&ihid->reset_lock); 457 mutex_unlock(&ihid->reset_lock);
454 return ret; 458 return ret;
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index 4a7f8d363220..203d27d198b8 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -202,6 +202,21 @@ static inline void wacom_schedule_work(struct wacom_wac *wacom_wac,
202 } 202 }
203} 203}
204 204
205/*
206 * Convert a signed 32-bit integer to an unsigned n-bit integer. Undoes
207 * the normally-helpful work of 'hid_snto32' for fields that use signed
208 * ranges for questionable reasons.
209 */
210static inline __u32 wacom_s32tou(s32 value, __u8 n)
211{
212 switch (n) {
213 case 8: return ((__u8)value);
214 case 16: return ((__u16)value);
215 case 32: return ((__u32)value);
216 }
217 return value & (1 << (n - 1)) ? value & (~(~0U << n)) : value;
218}
219
205extern const struct hid_device_id wacom_ids[]; 220extern const struct hid_device_id wacom_ids[];
206 221
207void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len); 222void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 2b0a5b8ca6e6..ccb74529bc78 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2303,7 +2303,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
2303 case HID_DG_TOOLSERIALNUMBER: 2303 case HID_DG_TOOLSERIALNUMBER:
2304 if (value) { 2304 if (value) {
2305 wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL); 2305 wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
2306 wacom_wac->serial[0] |= (__u32)value; 2306 wacom_wac->serial[0] |= wacom_s32tou(value, field->report_size);
2307 } 2307 }
2308 return; 2308 return;
2309 case HID_DG_TWIST: 2309 case HID_DG_TWIST:
@@ -2319,15 +2319,17 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
2319 return; 2319 return;
2320 case WACOM_HID_WD_SERIALHI: 2320 case WACOM_HID_WD_SERIALHI:
2321 if (value) { 2321 if (value) {
2322 __u32 raw_value = wacom_s32tou(value, field->report_size);
2323
2322 wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF); 2324 wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
2323 wacom_wac->serial[0] |= ((__u64)value) << 32; 2325 wacom_wac->serial[0] |= ((__u64)raw_value) << 32;
2324 /* 2326 /*
2325 * Non-USI EMR devices may contain additional tool type 2327 * Non-USI EMR devices may contain additional tool type
2326 * information here. See WACOM_HID_WD_TOOLTYPE case for 2328 * information here. See WACOM_HID_WD_TOOLTYPE case for
2327 * more details. 2329 * more details.
2328 */ 2330 */
2329 if (value >> 20 == 1) { 2331 if (value >> 20 == 1) {
2330 wacom_wac->id[0] |= value & 0xFFFFF; 2332 wacom_wac->id[0] |= raw_value & 0xFFFFF;
2331 } 2333 }
2332 } 2334 }
2333 return; 2335 return;
@@ -2339,7 +2341,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
2339 * bitwise OR so the complete value can be built 2341 * bitwise OR so the complete value can be built
2340 * up over time :( 2342 * up over time :(
2341 */ 2343 */
2342 wacom_wac->id[0] |= value; 2344 wacom_wac->id[0] |= wacom_s32tou(value, field->report_size);
2343 return; 2345 return;
2344 case WACOM_HID_WD_OFFSETLEFT: 2346 case WACOM_HID_WD_OFFSETLEFT:
2345 if (features->offset_left && value != features->offset_left) 2347 if (features->offset_left && value != features->offset_left)
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index fa9d34af87ac..f72803a02391 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -626,6 +626,9 @@ static void intel_th_gth_switch(struct intel_th_device *thdev,
626 if (!count) 626 if (!count)
627 dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n"); 627 dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n");
628 628
629 /* De-assert the trigger */
630 iowrite32(0, gth->base + REG_CTS_CTL);
631
629 intel_th_gth_stop(gth, output, false); 632 intel_th_gth_stop(gth, output, false);
630 intel_th_gth_start(gth, output); 633 intel_th_gth_start(gth, output);
631} 634}
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index fc9f15f36ad4..6d240dfae9d9 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -164,7 +164,7 @@ struct msc {
164}; 164};
165 165
166static LIST_HEAD(msu_buffer_list); 166static LIST_HEAD(msu_buffer_list);
167static struct mutex msu_buffer_mutex; 167static DEFINE_MUTEX(msu_buffer_mutex);
168 168
169/** 169/**
170 * struct msu_buffer_entry - internal MSU buffer bookkeeping 170 * struct msu_buffer_entry - internal MSU buffer bookkeeping
@@ -327,7 +327,7 @@ static size_t msc_win_total_sz(struct msc_window *win)
327 struct msc_block_desc *bdesc = sg_virt(sg); 327 struct msc_block_desc *bdesc = sg_virt(sg);
328 328
329 if (msc_block_wrapped(bdesc)) 329 if (msc_block_wrapped(bdesc))
330 return win->nr_blocks << PAGE_SHIFT; 330 return (size_t)win->nr_blocks << PAGE_SHIFT;
331 331
332 size += msc_total_sz(bdesc); 332 size += msc_total_sz(bdesc);
333 if (msc_block_last_written(bdesc)) 333 if (msc_block_last_written(bdesc))
@@ -1848,9 +1848,14 @@ mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
1848 len = cp - buf; 1848 len = cp - buf;
1849 1849
1850 mode = kstrndup(buf, len, GFP_KERNEL); 1850 mode = kstrndup(buf, len, GFP_KERNEL);
1851 if (!mode)
1852 return -ENOMEM;
1853
1851 i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode); 1854 i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
1852 if (i >= 0) 1855 if (i >= 0) {
1856 kfree(mode);
1853 goto found; 1857 goto found;
1858 }
1854 1859
1855 /* Buffer sinks only work with a usable IRQ */ 1860 /* Buffer sinks only work with a usable IRQ */
1856 if (!msc->do_irq) { 1861 if (!msc->do_irq) {
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 91dfeba62485..03ca5b1bef9f 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -200,6 +200,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
200 .driver_data = (kernel_ulong_t)&intel_th_2x, 200 .driver_data = (kernel_ulong_t)&intel_th_2x,
201 }, 201 },
202 { 202 {
203 /* Comet Lake PCH */
204 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x06a6),
205 .driver_data = (kernel_ulong_t)&intel_th_2x,
206 },
207 {
203 /* Ice Lake NNPI */ 208 /* Ice Lake NNPI */
204 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5), 209 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
205 .driver_data = (kernel_ulong_t)&intel_th_2x, 210 .driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -209,6 +214,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
209 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6), 214 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
210 .driver_data = (kernel_ulong_t)&intel_th_2x, 215 .driver_data = (kernel_ulong_t)&intel_th_2x,
211 }, 216 },
217 {
218 /* Jasper Lake PCH */
219 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
220 .driver_data = (kernel_ulong_t)&intel_th_2x,
221 },
212 { 0 }, 222 { 0 },
213}; 223};
214 224
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 663f8a5012d6..73aee5949b6b 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1399,7 +1399,7 @@ static int stm32_adc_dma_start(struct iio_dev *indio_dev)
1399 cookie = dmaengine_submit(desc); 1399 cookie = dmaengine_submit(desc);
1400 ret = dma_submit_error(cookie); 1400 ret = dma_submit_error(cookie);
1401 if (ret) { 1401 if (ret) {
1402 dmaengine_terminate_all(adc->dma_chan); 1402 dmaengine_terminate_sync(adc->dma_chan);
1403 return ret; 1403 return ret;
1404 } 1404 }
1405 1405
@@ -1477,7 +1477,7 @@ static void __stm32_adc_buffer_predisable(struct iio_dev *indio_dev)
1477 stm32_adc_conv_irq_disable(adc); 1477 stm32_adc_conv_irq_disable(adc);
1478 1478
1479 if (adc->dma_chan) 1479 if (adc->dma_chan)
1480 dmaengine_terminate_all(adc->dma_chan); 1480 dmaengine_terminate_sync(adc->dma_chan);
1481 1481
1482 if (stm32_adc_set_trig(indio_dev, NULL)) 1482 if (stm32_adc_set_trig(indio_dev, NULL))
1483 dev_err(&indio_dev->dev, "Can't clear trigger\n"); 1483 dev_err(&indio_dev->dev, "Can't clear trigger\n");
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index b99d73887c9f..8743b2f376e2 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -317,8 +317,11 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
317 struct adis16480 *st = iio_priv(indio_dev); 317 struct adis16480 *st = iio_priv(indio_dev);
318 unsigned int t, reg; 318 unsigned int t, reg;
319 319
320 if (val < 0 || val2 < 0)
321 return -EINVAL;
322
320 t = val * 1000 + val2 / 1000; 323 t = val * 1000 + val2 / 1000;
321 if (t <= 0) 324 if (t == 0)
322 return -EINVAL; 325 return -EINVAL;
323 326
324 /* 327 /*
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index b17f060b52fc..868281b8adb0 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -114,54 +114,63 @@ static const struct inv_mpu6050_hw hw_info[] = {
114 .name = "MPU6050", 114 .name = "MPU6050",
115 .reg = &reg_set_6050, 115 .reg = &reg_set_6050,
116 .config = &chip_config_6050, 116 .config = &chip_config_6050,
117 .fifo_size = 1024,
117 }, 118 },
118 { 119 {
119 .whoami = INV_MPU6500_WHOAMI_VALUE, 120 .whoami = INV_MPU6500_WHOAMI_VALUE,
120 .name = "MPU6500", 121 .name = "MPU6500",
121 .reg = &reg_set_6500, 122 .reg = &reg_set_6500,
122 .config = &chip_config_6050, 123 .config = &chip_config_6050,
124 .fifo_size = 512,
123 }, 125 },
124 { 126 {
125 .whoami = INV_MPU6515_WHOAMI_VALUE, 127 .whoami = INV_MPU6515_WHOAMI_VALUE,
126 .name = "MPU6515", 128 .name = "MPU6515",
127 .reg = &reg_set_6500, 129 .reg = &reg_set_6500,
128 .config = &chip_config_6050, 130 .config = &chip_config_6050,
131 .fifo_size = 512,
129 }, 132 },
130 { 133 {
131 .whoami = INV_MPU6000_WHOAMI_VALUE, 134 .whoami = INV_MPU6000_WHOAMI_VALUE,
132 .name = "MPU6000", 135 .name = "MPU6000",
133 .reg = &reg_set_6050, 136 .reg = &reg_set_6050,
134 .config = &chip_config_6050, 137 .config = &chip_config_6050,
138 .fifo_size = 1024,
135 }, 139 },
136 { 140 {
137 .whoami = INV_MPU9150_WHOAMI_VALUE, 141 .whoami = INV_MPU9150_WHOAMI_VALUE,
138 .name = "MPU9150", 142 .name = "MPU9150",
139 .reg = &reg_set_6050, 143 .reg = &reg_set_6050,
140 .config = &chip_config_6050, 144 .config = &chip_config_6050,
145 .fifo_size = 1024,
141 }, 146 },
142 { 147 {
143 .whoami = INV_MPU9250_WHOAMI_VALUE, 148 .whoami = INV_MPU9250_WHOAMI_VALUE,
144 .name = "MPU9250", 149 .name = "MPU9250",
145 .reg = &reg_set_6500, 150 .reg = &reg_set_6500,
146 .config = &chip_config_6050, 151 .config = &chip_config_6050,
152 .fifo_size = 512,
147 }, 153 },
148 { 154 {
149 .whoami = INV_MPU9255_WHOAMI_VALUE, 155 .whoami = INV_MPU9255_WHOAMI_VALUE,
150 .name = "MPU9255", 156 .name = "MPU9255",
151 .reg = &reg_set_6500, 157 .reg = &reg_set_6500,
152 .config = &chip_config_6050, 158 .config = &chip_config_6050,
159 .fifo_size = 512,
153 }, 160 },
154 { 161 {
155 .whoami = INV_ICM20608_WHOAMI_VALUE, 162 .whoami = INV_ICM20608_WHOAMI_VALUE,
156 .name = "ICM20608", 163 .name = "ICM20608",
157 .reg = &reg_set_6500, 164 .reg = &reg_set_6500,
158 .config = &chip_config_6050, 165 .config = &chip_config_6050,
166 .fifo_size = 512,
159 }, 167 },
160 { 168 {
161 .whoami = INV_ICM20602_WHOAMI_VALUE, 169 .whoami = INV_ICM20602_WHOAMI_VALUE,
162 .name = "ICM20602", 170 .name = "ICM20602",
163 .reg = &reg_set_icm20602, 171 .reg = &reg_set_icm20602,
164 .config = &chip_config_6050, 172 .config = &chip_config_6050,
173 .fifo_size = 1008,
165 }, 174 },
166}; 175};
167 176
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index db1c6904388b..51235677c534 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -100,12 +100,14 @@ struct inv_mpu6050_chip_config {
100 * @name: name of the chip. 100 * @name: name of the chip.
101 * @reg: register map of the chip. 101 * @reg: register map of the chip.
102 * @config: configuration of the chip. 102 * @config: configuration of the chip.
103 * @fifo_size: size of the FIFO in bytes.
103 */ 104 */
104struct inv_mpu6050_hw { 105struct inv_mpu6050_hw {
105 u8 whoami; 106 u8 whoami;
106 u8 *name; 107 u8 *name;
107 const struct inv_mpu6050_reg_map *reg; 108 const struct inv_mpu6050_reg_map *reg;
108 const struct inv_mpu6050_chip_config *config; 109 const struct inv_mpu6050_chip_config *config;
110 size_t fifo_size;
109}; 111};
110 112
111/* 113/*
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 5f9a5de0bab4..72d8c5790076 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -180,9 +180,6 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
180 "failed to ack interrupt\n"); 180 "failed to ack interrupt\n");
181 goto flush_fifo; 181 goto flush_fifo;
182 } 182 }
183 /* handle fifo overflow by reseting fifo */
184 if (int_status & INV_MPU6050_BIT_FIFO_OVERFLOW_INT)
185 goto flush_fifo;
186 if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT)) { 183 if (!(int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT)) {
187 dev_warn(regmap_get_device(st->map), 184 dev_warn(regmap_get_device(st->map),
188 "spurious interrupt with status 0x%x\n", int_status); 185 "spurious interrupt with status 0x%x\n", int_status);
@@ -211,6 +208,18 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
211 if (result) 208 if (result)
212 goto end_session; 209 goto end_session;
213 fifo_count = get_unaligned_be16(&data[0]); 210 fifo_count = get_unaligned_be16(&data[0]);
211
212 /*
213 * Handle fifo overflow by resetting fifo.
214 * Reset if there is only 3 data set free remaining to mitigate
215 * possible delay between reading fifo count and fifo data.
216 */
217 nb = 3 * bytes_per_datum;
218 if (fifo_count >= st->hw->fifo_size - nb) {
219 dev_warn(regmap_get_device(st->map), "fifo overflow reset\n");
220 goto flush_fifo;
221 }
222
214 /* compute and process all complete datum */ 223 /* compute and process all complete datum */
215 nb = fifo_count / bytes_per_datum; 224 nb = fifo_count / bytes_per_datum;
216 inv_mpu6050_update_period(st, pf->timestamp, nb); 225 inv_mpu6050_update_period(st, pf->timestamp, nb);
diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c
index 8b50d56b0a03..01eb8cc63076 100644
--- a/drivers/iio/proximity/srf04.c
+++ b/drivers/iio/proximity/srf04.c
@@ -110,7 +110,7 @@ static int srf04_read(struct srf04_data *data)
110 udelay(data->cfg->trigger_pulse_us); 110 udelay(data->cfg->trigger_pulse_us);
111 gpiod_set_value(data->gpiod_trig, 0); 111 gpiod_set_value(data->gpiod_trig, 0);
112 112
113 /* it cannot take more than 20 ms */ 113 /* it should not take more than 20 ms until echo is rising */
114 ret = wait_for_completion_killable_timeout(&data->rising, HZ/50); 114 ret = wait_for_completion_killable_timeout(&data->rising, HZ/50);
115 if (ret < 0) { 115 if (ret < 0) {
116 mutex_unlock(&data->lock); 116 mutex_unlock(&data->lock);
@@ -120,7 +120,8 @@ static int srf04_read(struct srf04_data *data)
120 return -ETIMEDOUT; 120 return -ETIMEDOUT;
121 } 121 }
122 122
123 ret = wait_for_completion_killable_timeout(&data->falling, HZ/50); 123 /* it cannot take more than 50 ms until echo is falling */
124 ret = wait_for_completion_killable_timeout(&data->falling, HZ/20);
124 if (ret < 0) { 125 if (ret < 0) {
125 mutex_unlock(&data->lock); 126 mutex_unlock(&data->lock);
126 return ret; 127 return ret;
@@ -135,19 +136,19 @@ static int srf04_read(struct srf04_data *data)
135 136
136 dt_ns = ktime_to_ns(ktime_dt); 137 dt_ns = ktime_to_ns(ktime_dt);
137 /* 138 /*
138 * measuring more than 3 meters is beyond the capabilities of 139 * measuring more than 6,45 meters is beyond the capabilities of
139 * the sensor 140 * the supported sensors
140 * ==> filter out invalid results for not measuring echos of 141 * ==> filter out invalid results for not measuring echos of
141 * another us sensor 142 * another us sensor
142 * 143 *
143 * formula: 144 * formula:
144 * distance 3 m 145 * distance 6,45 * 2 m
145 * time = ---------- = --------- = 9404389 ns 146 * time = ---------- = ------------ = 40438871 ns
146 * speed 319 m/s 147 * speed 319 m/s
147 * 148 *
148 * using a minimum speed at -20 °C of 319 m/s 149 * using a minimum speed at -20 °C of 319 m/s
149 */ 150 */
150 if (dt_ns > 9404389) 151 if (dt_ns > 40438871)
151 return -EIO; 152 return -EIO;
152 153
153 time_ns = dt_ns; 154 time_ns = dt_ns;
@@ -159,20 +160,20 @@ static int srf04_read(struct srf04_data *data)
159 * with Temp in °C 160 * with Temp in °C
160 * and speed in m/s 161 * and speed in m/s
161 * 162 *
162 * use 343 m/s as ultrasonic speed at 20 °C here in absence of the 163 * use 343,5 m/s as ultrasonic speed at 20 °C here in absence of the
163 * temperature 164 * temperature
164 * 165 *
165 * therefore: 166 * therefore:
166 * time 343 167 * time 343,5 time * 106
167 * distance = ------ * ----- 168 * distance = ------ * ------- = ------------
168 * 10^6 2 169 * 10^6 2 617176
169 * with time in ns 170 * with time in ns
170 * and distance in mm (one way) 171 * and distance in mm (one way)
171 * 172 *
172 * because we limit to 3 meters the multiplication with 343 just 173 * because we limit to 6,45 meters the multiplication with 106 just
173 * fits into 32 bit 174 * fits into 32 bit
174 */ 175 */
175 distance_mm = time_ns * 343 / 2000000; 176 distance_mm = time_ns * 106 / 617176;
176 177
177 return distance_mm; 178 return distance_mm;
178} 179}
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 7b971228df38..c498796adc07 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -405,8 +405,12 @@ void icc_set_tag(struct icc_path *path, u32 tag)
405 if (!path) 405 if (!path)
406 return; 406 return;
407 407
408 mutex_lock(&icc_lock);
409
408 for (i = 0; i < path->num_nodes; i++) 410 for (i = 0; i < path->num_nodes; i++)
409 path->reqs[i].tag = tag; 411 path->reqs[i].tag = tag;
412
413 mutex_unlock(&icc_lock);
410} 414}
411EXPORT_SYMBOL_GPL(icc_set_tag); 415EXPORT_SYMBOL_GPL(icc_set_tag);
412 416
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
index 910081d6ddc0..b4966d8f3348 100644
--- a/drivers/interconnect/qcom/qcs404.c
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -433,7 +433,8 @@ static int qnoc_probe(struct platform_device *pdev)
433 if (!qp) 433 if (!qp)
434 return -ENOMEM; 434 return -ENOMEM;
435 435
436 data = devm_kcalloc(dev, num_nodes, sizeof(*node), GFP_KERNEL); 436 data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
437 GFP_KERNEL);
437 if (!data) 438 if (!data)
438 return -ENOMEM; 439 return -ENOMEM;
439 440
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index 57955596bb59..502a6c22b41e 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -790,7 +790,8 @@ static int qnoc_probe(struct platform_device *pdev)
790 if (!qp) 790 if (!qp)
791 return -ENOMEM; 791 return -ENOMEM;
792 792
793 data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL); 793 data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes),
794 GFP_KERNEL);
794 if (!data) 795 if (!data)
795 return -ENOMEM; 796 return -ENOMEM;
796 797
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 480f9459b402..62f65573eb04 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2083,8 +2083,7 @@ static int bond_miimon_inspect(struct bonding *bond)
2083 ignore_updelay = !rcu_dereference(bond->curr_active_slave); 2083 ignore_updelay = !rcu_dereference(bond->curr_active_slave);
2084 2084
2085 bond_for_each_slave_rcu(bond, slave, iter) { 2085 bond_for_each_slave_rcu(bond, slave, iter) {
2086 slave->new_link = BOND_LINK_NOCHANGE; 2086 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2087 slave->link_new_state = slave->link;
2088 2087
2089 link_state = bond_check_dev_link(bond, slave->dev, 0); 2088 link_state = bond_check_dev_link(bond, slave->dev, 0);
2090 2089
@@ -2118,7 +2117,7 @@ static int bond_miimon_inspect(struct bonding *bond)
2118 } 2117 }
2119 2118
2120 if (slave->delay <= 0) { 2119 if (slave->delay <= 0) {
2121 slave->new_link = BOND_LINK_DOWN; 2120 bond_propose_link_state(slave, BOND_LINK_DOWN);
2122 commit++; 2121 commit++;
2123 continue; 2122 continue;
2124 } 2123 }
@@ -2155,7 +2154,7 @@ static int bond_miimon_inspect(struct bonding *bond)
2155 slave->delay = 0; 2154 slave->delay = 0;
2156 2155
2157 if (slave->delay <= 0) { 2156 if (slave->delay <= 0) {
2158 slave->new_link = BOND_LINK_UP; 2157 bond_propose_link_state(slave, BOND_LINK_UP);
2159 commit++; 2158 commit++;
2160 ignore_updelay = false; 2159 ignore_updelay = false;
2161 continue; 2160 continue;
@@ -2193,7 +2192,7 @@ static void bond_miimon_commit(struct bonding *bond)
2193 struct slave *slave, *primary; 2192 struct slave *slave, *primary;
2194 2193
2195 bond_for_each_slave(bond, slave, iter) { 2194 bond_for_each_slave(bond, slave, iter) {
2196 switch (slave->new_link) { 2195 switch (slave->link_new_state) {
2197 case BOND_LINK_NOCHANGE: 2196 case BOND_LINK_NOCHANGE:
2198 /* For 802.3ad mode, check current slave speed and 2197 /* For 802.3ad mode, check current slave speed and
2199 * duplex again in case its port was disabled after 2198 * duplex again in case its port was disabled after
@@ -2265,8 +2264,8 @@ static void bond_miimon_commit(struct bonding *bond)
2265 2264
2266 default: 2265 default:
2267 slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n", 2266 slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
2268 slave->new_link); 2267 slave->link_new_state);
2269 slave->new_link = BOND_LINK_NOCHANGE; 2268 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2270 2269
2271 continue; 2270 continue;
2272 } 2271 }
@@ -2674,13 +2673,13 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
2674 bond_for_each_slave_rcu(bond, slave, iter) { 2673 bond_for_each_slave_rcu(bond, slave, iter) {
2675 unsigned long trans_start = dev_trans_start(slave->dev); 2674 unsigned long trans_start = dev_trans_start(slave->dev);
2676 2675
2677 slave->new_link = BOND_LINK_NOCHANGE; 2676 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2678 2677
2679 if (slave->link != BOND_LINK_UP) { 2678 if (slave->link != BOND_LINK_UP) {
2680 if (bond_time_in_interval(bond, trans_start, 1) && 2679 if (bond_time_in_interval(bond, trans_start, 1) &&
2681 bond_time_in_interval(bond, slave->last_rx, 1)) { 2680 bond_time_in_interval(bond, slave->last_rx, 1)) {
2682 2681
2683 slave->new_link = BOND_LINK_UP; 2682 bond_propose_link_state(slave, BOND_LINK_UP);
2684 slave_state_changed = 1; 2683 slave_state_changed = 1;
2685 2684
2686 /* primary_slave has no meaning in round-robin 2685 /* primary_slave has no meaning in round-robin
@@ -2705,7 +2704,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
2705 if (!bond_time_in_interval(bond, trans_start, 2) || 2704 if (!bond_time_in_interval(bond, trans_start, 2) ||
2706 !bond_time_in_interval(bond, slave->last_rx, 2)) { 2705 !bond_time_in_interval(bond, slave->last_rx, 2)) {
2707 2706
2708 slave->new_link = BOND_LINK_DOWN; 2707 bond_propose_link_state(slave, BOND_LINK_DOWN);
2709 slave_state_changed = 1; 2708 slave_state_changed = 1;
2710 2709
2711 if (slave->link_failure_count < UINT_MAX) 2710 if (slave->link_failure_count < UINT_MAX)
@@ -2736,8 +2735,8 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
2736 goto re_arm; 2735 goto re_arm;
2737 2736
2738 bond_for_each_slave(bond, slave, iter) { 2737 bond_for_each_slave(bond, slave, iter) {
2739 if (slave->new_link != BOND_LINK_NOCHANGE) 2738 if (slave->link_new_state != BOND_LINK_NOCHANGE)
2740 slave->link = slave->new_link; 2739 slave->link = slave->link_new_state;
2741 } 2740 }
2742 2741
2743 if (slave_state_changed) { 2742 if (slave_state_changed) {
@@ -2760,9 +2759,9 @@ re_arm:
2760} 2759}
2761 2760
2762/* Called to inspect slaves for active-backup mode ARP monitor link state 2761/* Called to inspect slaves for active-backup mode ARP monitor link state
2763 * changes. Sets new_link in slaves to specify what action should take 2762 * changes. Sets proposed link state in slaves to specify what action
2764 * place for the slave. Returns 0 if no changes are found, >0 if changes 2763 * should take place for the slave. Returns 0 if no changes are found, >0
2765 * to link states must be committed. 2764 * if changes to link states must be committed.
2766 * 2765 *
2767 * Called with rcu_read_lock held. 2766 * Called with rcu_read_lock held.
2768 */ 2767 */
@@ -2774,12 +2773,12 @@ static int bond_ab_arp_inspect(struct bonding *bond)
2774 int commit = 0; 2773 int commit = 0;
2775 2774
2776 bond_for_each_slave_rcu(bond, slave, iter) { 2775 bond_for_each_slave_rcu(bond, slave, iter) {
2777 slave->new_link = BOND_LINK_NOCHANGE; 2776 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2778 last_rx = slave_last_rx(bond, slave); 2777 last_rx = slave_last_rx(bond, slave);
2779 2778
2780 if (slave->link != BOND_LINK_UP) { 2779 if (slave->link != BOND_LINK_UP) {
2781 if (bond_time_in_interval(bond, last_rx, 1)) { 2780 if (bond_time_in_interval(bond, last_rx, 1)) {
2782 slave->new_link = BOND_LINK_UP; 2781 bond_propose_link_state(slave, BOND_LINK_UP);
2783 commit++; 2782 commit++;
2784 } 2783 }
2785 continue; 2784 continue;
@@ -2807,7 +2806,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
2807 if (!bond_is_active_slave(slave) && 2806 if (!bond_is_active_slave(slave) &&
2808 !rcu_access_pointer(bond->current_arp_slave) && 2807 !rcu_access_pointer(bond->current_arp_slave) &&
2809 !bond_time_in_interval(bond, last_rx, 3)) { 2808 !bond_time_in_interval(bond, last_rx, 3)) {
2810 slave->new_link = BOND_LINK_DOWN; 2809 bond_propose_link_state(slave, BOND_LINK_DOWN);
2811 commit++; 2810 commit++;
2812 } 2811 }
2813 2812
@@ -2820,7 +2819,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
2820 if (bond_is_active_slave(slave) && 2819 if (bond_is_active_slave(slave) &&
2821 (!bond_time_in_interval(bond, trans_start, 2) || 2820 (!bond_time_in_interval(bond, trans_start, 2) ||
2822 !bond_time_in_interval(bond, last_rx, 2))) { 2821 !bond_time_in_interval(bond, last_rx, 2))) {
2823 slave->new_link = BOND_LINK_DOWN; 2822 bond_propose_link_state(slave, BOND_LINK_DOWN);
2824 commit++; 2823 commit++;
2825 } 2824 }
2826 } 2825 }
@@ -2840,7 +2839,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
2840 struct slave *slave; 2839 struct slave *slave;
2841 2840
2842 bond_for_each_slave(bond, slave, iter) { 2841 bond_for_each_slave(bond, slave, iter) {
2843 switch (slave->new_link) { 2842 switch (slave->link_new_state) {
2844 case BOND_LINK_NOCHANGE: 2843 case BOND_LINK_NOCHANGE:
2845 continue; 2844 continue;
2846 2845
@@ -2890,8 +2889,9 @@ static void bond_ab_arp_commit(struct bonding *bond)
2890 continue; 2889 continue;
2891 2890
2892 default: 2891 default:
2893 slave_err(bond->dev, slave->dev, "impossible: new_link %d on slave\n", 2892 slave_err(bond->dev, slave->dev,
2894 slave->new_link); 2893 "impossible: link_new_state %d on slave\n",
2894 slave->link_new_state);
2895 continue; 2895 continue;
2896 } 2896 }
2897 2897
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 606b7d8ffe13..8e9f5620c9a2 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -52,6 +52,7 @@
52#define CONTROL_EX_PDR BIT(8) 52#define CONTROL_EX_PDR BIT(8)
53 53
54/* control register */ 54/* control register */
55#define CONTROL_SWR BIT(15)
55#define CONTROL_TEST BIT(7) 56#define CONTROL_TEST BIT(7)
56#define CONTROL_CCE BIT(6) 57#define CONTROL_CCE BIT(6)
57#define CONTROL_DISABLE_AR BIT(5) 58#define CONTROL_DISABLE_AR BIT(5)
@@ -97,6 +98,9 @@
97#define BTR_TSEG2_SHIFT 12 98#define BTR_TSEG2_SHIFT 12
98#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT) 99#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)
99 100
101/* interrupt register */
102#define INT_STS_PENDING 0x8000
103
100/* brp extension register */ 104/* brp extension register */
101#define BRP_EXT_BRPE_MASK 0x0f 105#define BRP_EXT_BRPE_MASK 0x0f
102#define BRP_EXT_BRPE_SHIFT 0 106#define BRP_EXT_BRPE_SHIFT 0
@@ -569,6 +573,26 @@ static void c_can_configure_msg_objects(struct net_device *dev)
569 IF_MCONT_RCV_EOB); 573 IF_MCONT_RCV_EOB);
570} 574}
571 575
576static int c_can_software_reset(struct net_device *dev)
577{
578 struct c_can_priv *priv = netdev_priv(dev);
579 int retry = 0;
580
581 if (priv->type != BOSCH_D_CAN)
582 return 0;
583
584 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_SWR | CONTROL_INIT);
585 while (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_SWR) {
586 msleep(20);
587 if (retry++ > 100) {
588 netdev_err(dev, "CCTRL: software reset failed\n");
589 return -EIO;
590 }
591 }
592
593 return 0;
594}
595
572/* 596/*
573 * Configure C_CAN chip: 597 * Configure C_CAN chip:
574 * - enable/disable auto-retransmission 598 * - enable/disable auto-retransmission
@@ -578,6 +602,11 @@ static void c_can_configure_msg_objects(struct net_device *dev)
578static int c_can_chip_config(struct net_device *dev) 602static int c_can_chip_config(struct net_device *dev)
579{ 603{
580 struct c_can_priv *priv = netdev_priv(dev); 604 struct c_can_priv *priv = netdev_priv(dev);
605 int err;
606
607 err = c_can_software_reset(dev);
608 if (err)
609 return err;
581 610
582 /* enable automatic retransmission */ 611 /* enable automatic retransmission */
583 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR); 612 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
@@ -886,6 +915,9 @@ static int c_can_handle_state_change(struct net_device *dev,
886 struct can_berr_counter bec; 915 struct can_berr_counter bec;
887 916
888 switch (error_type) { 917 switch (error_type) {
918 case C_CAN_NO_ERROR:
919 priv->can.state = CAN_STATE_ERROR_ACTIVE;
920 break;
889 case C_CAN_ERROR_WARNING: 921 case C_CAN_ERROR_WARNING:
890 /* error warning state */ 922 /* error warning state */
891 priv->can.can_stats.error_warning++; 923 priv->can.can_stats.error_warning++;
@@ -916,6 +948,13 @@ static int c_can_handle_state_change(struct net_device *dev,
916 ERR_CNT_RP_SHIFT; 948 ERR_CNT_RP_SHIFT;
917 949
918 switch (error_type) { 950 switch (error_type) {
951 case C_CAN_NO_ERROR:
952 /* error warning state */
953 cf->can_id |= CAN_ERR_CRTL;
954 cf->data[1] = CAN_ERR_CRTL_ACTIVE;
955 cf->data[6] = bec.txerr;
956 cf->data[7] = bec.rxerr;
957 break;
919 case C_CAN_ERROR_WARNING: 958 case C_CAN_ERROR_WARNING:
920 /* error warning state */ 959 /* error warning state */
921 cf->can_id |= CAN_ERR_CRTL; 960 cf->can_id |= CAN_ERR_CRTL;
@@ -1029,10 +1068,16 @@ static int c_can_poll(struct napi_struct *napi, int quota)
1029 u16 curr, last = priv->last_status; 1068 u16 curr, last = priv->last_status;
1030 int work_done = 0; 1069 int work_done = 0;
1031 1070
1032 priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG); 1071 /* Only read the status register if a status interrupt was pending */
1033 /* Ack status on C_CAN. D_CAN is self clearing */ 1072 if (atomic_xchg(&priv->sie_pending, 0)) {
1034 if (priv->type != BOSCH_D_CAN) 1073 priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
1035 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); 1074 /* Ack status on C_CAN. D_CAN is self clearing */
1075 if (priv->type != BOSCH_D_CAN)
1076 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
1077 } else {
1078 /* no change detected ... */
1079 curr = last;
1080 }
1036 1081
1037 /* handle state changes */ 1082 /* handle state changes */
1038 if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) { 1083 if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
@@ -1054,11 +1099,17 @@ static int c_can_poll(struct napi_struct *napi, int quota)
1054 /* handle bus recovery events */ 1099 /* handle bus recovery events */
1055 if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) { 1100 if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
1056 netdev_dbg(dev, "left bus off state\n"); 1101 netdev_dbg(dev, "left bus off state\n");
1057 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1102 work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
1058 } 1103 }
1104
1059 if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) { 1105 if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
1060 netdev_dbg(dev, "left error passive state\n"); 1106 netdev_dbg(dev, "left error passive state\n");
1061 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1107 work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
1108 }
1109
1110 if ((!(curr & STATUS_EWARN)) && (last & STATUS_EWARN)) {
1111 netdev_dbg(dev, "left error warning state\n");
1112 work_done += c_can_handle_state_change(dev, C_CAN_NO_ERROR);
1062 } 1113 }
1063 1114
1064 /* handle lec errors on the bus */ 1115 /* handle lec errors on the bus */
@@ -1083,10 +1134,16 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
1083{ 1134{
1084 struct net_device *dev = (struct net_device *)dev_id; 1135 struct net_device *dev = (struct net_device *)dev_id;
1085 struct c_can_priv *priv = netdev_priv(dev); 1136 struct c_can_priv *priv = netdev_priv(dev);
1137 int reg_int;
1086 1138
1087 if (!priv->read_reg(priv, C_CAN_INT_REG)) 1139 reg_int = priv->read_reg(priv, C_CAN_INT_REG);
1140 if (!reg_int)
1088 return IRQ_NONE; 1141 return IRQ_NONE;
1089 1142
1143 /* save for later use */
1144 if (reg_int & INT_STS_PENDING)
1145 atomic_set(&priv->sie_pending, 1);
1146
1090 /* disable all interrupts and schedule the NAPI */ 1147 /* disable all interrupts and schedule the NAPI */
1091 c_can_irq_control(priv, false); 1148 c_can_irq_control(priv, false);
1092 napi_schedule(&priv->napi); 1149 napi_schedule(&priv->napi);
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 8acdc7fa4792..d5567a7c1c6d 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -198,6 +198,7 @@ struct c_can_priv {
198 struct net_device *dev; 198 struct net_device *dev;
199 struct device *device; 199 struct device *device;
200 atomic_t tx_active; 200 atomic_t tx_active;
201 atomic_t sie_pending;
201 unsigned long tx_dir; 202 unsigned long tx_dir;
202 int last_status; 203 int last_status;
203 u16 (*read_reg) (const struct c_can_priv *priv, enum reg index); 204 u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index ac86be52b461..1c88c361938c 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -848,6 +848,7 @@ void of_can_transceiver(struct net_device *dev)
848 return; 848 return;
849 849
850 ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max); 850 ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
851 of_node_put(dn);
851 if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max)) 852 if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
852 netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n"); 853 netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
853} 854}
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index dc5695dffc2e..57f9a2f51085 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -677,6 +677,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
677 struct can_frame *cf; 677 struct can_frame *cf;
678 bool rx_errors = false, tx_errors = false; 678 bool rx_errors = false, tx_errors = false;
679 u32 timestamp; 679 u32 timestamp;
680 int err;
680 681
681 timestamp = priv->read(&regs->timer) << 16; 682 timestamp = priv->read(&regs->timer) << 16;
682 683
@@ -725,7 +726,9 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
725 if (tx_errors) 726 if (tx_errors)
726 dev->stats.tx_errors++; 727 dev->stats.tx_errors++;
727 728
728 can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); 729 err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
730 if (err)
731 dev->stats.rx_fifo_errors++;
729} 732}
730 733
731static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) 734static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
@@ -738,6 +741,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
738 int flt; 741 int flt;
739 struct can_berr_counter bec; 742 struct can_berr_counter bec;
740 u32 timestamp; 743 u32 timestamp;
744 int err;
741 745
742 timestamp = priv->read(&regs->timer) << 16; 746 timestamp = priv->read(&regs->timer) << 16;
743 747
@@ -769,7 +773,9 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
769 if (unlikely(new_state == CAN_STATE_BUS_OFF)) 773 if (unlikely(new_state == CAN_STATE_BUS_OFF))
770 can_bus_off(dev); 774 can_bus_off(dev);
771 775
772 can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); 776 err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
777 if (err)
778 dev->stats.rx_fifo_errors++;
773} 779}
774 780
775static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) 781static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
@@ -1188,6 +1194,7 @@ static int flexcan_chip_start(struct net_device *dev)
1188 reg_mecr = priv->read(&regs->mecr); 1194 reg_mecr = priv->read(&regs->mecr);
1189 reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS; 1195 reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
1190 priv->write(reg_mecr, &regs->mecr); 1196 priv->write(reg_mecr, &regs->mecr);
1197 reg_mecr |= FLEXCAN_MECR_ECCDIS;
1191 reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK | 1198 reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
1192 FLEXCAN_MECR_FANCEI_MSK); 1199 FLEXCAN_MECR_FANCEI_MSK);
1193 priv->write(reg_mecr, &regs->mecr); 1200 priv->write(reg_mecr, &regs->mecr);
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index e6a668ee7730..84cae167e42f 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -107,37 +107,95 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
107 return cb_b->timestamp - cb_a->timestamp; 107 return cb_b->timestamp - cb_a->timestamp;
108} 108}
109 109
110static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) 110/**
111 * can_rx_offload_offload_one() - Read one CAN frame from HW
112 * @offload: pointer to rx_offload context
113 * @n: number of mailbox to read
114 *
115 * The task of this function is to read a CAN frame from mailbox @n
116 * from the device and return the mailbox's content as a struct
117 * sk_buff.
118 *
119 * If the struct can_rx_offload::skb_queue exceeds the maximal queue
120 * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
121 * allocated, the mailbox contents is discarded by reading it into an
122 * overflow buffer. This way the mailbox is marked as free by the
123 * driver.
124 *
125 * Return: A pointer to skb containing the CAN frame on success.
126 *
127 * NULL if the mailbox @n is empty.
128 *
129 * ERR_PTR() in case of an error
130 */
131static struct sk_buff *
132can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
111{ 133{
112 struct sk_buff *skb = NULL; 134 struct sk_buff *skb = NULL, *skb_error = NULL;
113 struct can_rx_offload_cb *cb; 135 struct can_rx_offload_cb *cb;
114 struct can_frame *cf; 136 struct can_frame *cf;
115 int ret; 137 int ret;
116 138
117 /* If queue is full or skb not available, read to discard mailbox */ 139 if (likely(skb_queue_len(&offload->skb_queue) <
118 if (likely(skb_queue_len(&offload->skb_queue) <= 140 offload->skb_queue_len_max)) {
119 offload->skb_queue_len_max))
120 skb = alloc_can_skb(offload->dev, &cf); 141 skb = alloc_can_skb(offload->dev, &cf);
142 if (unlikely(!skb))
143 skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */
144 } else {
145 skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */
146 }
121 147
122 if (!skb) { 148 /* If queue is full or skb not available, drop by reading into
149 * overflow buffer.
150 */
151 if (unlikely(skb_error)) {
123 struct can_frame cf_overflow; 152 struct can_frame cf_overflow;
124 u32 timestamp; 153 u32 timestamp;
125 154
126 ret = offload->mailbox_read(offload, &cf_overflow, 155 ret = offload->mailbox_read(offload, &cf_overflow,
127 &timestamp, n); 156 &timestamp, n);
128 if (ret)
129 offload->dev->stats.rx_dropped++;
130 157
131 return NULL; 158 /* Mailbox was empty. */
159 if (unlikely(!ret))
160 return NULL;
161
162 /* Mailbox has been read and we're dropping it or
163 * there was a problem reading the mailbox.
164 *
165 * Increment error counters in any case.
166 */
167 offload->dev->stats.rx_dropped++;
168 offload->dev->stats.rx_fifo_errors++;
169
170 /* There was a problem reading the mailbox, propagate
171 * error value.
172 */
173 if (unlikely(ret < 0))
174 return ERR_PTR(ret);
175
176 return skb_error;
132 } 177 }
133 178
134 cb = can_rx_offload_get_cb(skb); 179 cb = can_rx_offload_get_cb(skb);
135 ret = offload->mailbox_read(offload, cf, &cb->timestamp, n); 180 ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
136 if (!ret) { 181
182 /* Mailbox was empty. */
183 if (unlikely(!ret)) {
137 kfree_skb(skb); 184 kfree_skb(skb);
138 return NULL; 185 return NULL;
139 } 186 }
140 187
188 /* There was a problem reading the mailbox, propagate error value. */
189 if (unlikely(ret < 0)) {
190 kfree_skb(skb);
191
192 offload->dev->stats.rx_dropped++;
193 offload->dev->stats.rx_fifo_errors++;
194
195 return ERR_PTR(ret);
196 }
197
198 /* Mailbox was read. */
141 return skb; 199 return skb;
142} 200}
143 201
@@ -157,8 +215,8 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pen
157 continue; 215 continue;
158 216
159 skb = can_rx_offload_offload_one(offload, i); 217 skb = can_rx_offload_offload_one(offload, i);
160 if (!skb) 218 if (IS_ERR_OR_NULL(skb))
161 break; 219 continue;
162 220
163 __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare); 221 __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
164 } 222 }
@@ -188,7 +246,13 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
188 struct sk_buff *skb; 246 struct sk_buff *skb;
189 int received = 0; 247 int received = 0;
190 248
191 while ((skb = can_rx_offload_offload_one(offload, 0))) { 249 while (1) {
250 skb = can_rx_offload_offload_one(offload, 0);
251 if (IS_ERR(skb))
252 continue;
253 if (!skb)
254 break;
255
192 skb_queue_tail(&offload->skb_queue, skb); 256 skb_queue_tail(&offload->skb_queue, skb);
193 received++; 257 received++;
194 } 258 }
@@ -207,8 +271,10 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
207 unsigned long flags; 271 unsigned long flags;
208 272
209 if (skb_queue_len(&offload->skb_queue) > 273 if (skb_queue_len(&offload->skb_queue) >
210 offload->skb_queue_len_max) 274 offload->skb_queue_len_max) {
211 return -ENOMEM; 275 kfree_skb(skb);
276 return -ENOBUFS;
277 }
212 278
213 cb = can_rx_offload_get_cb(skb); 279 cb = can_rx_offload_get_cb(skb);
214 cb->timestamp = timestamp; 280 cb->timestamp = timestamp;
@@ -250,8 +316,10 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
250 struct sk_buff *skb) 316 struct sk_buff *skb)
251{ 317{
252 if (skb_queue_len(&offload->skb_queue) > 318 if (skb_queue_len(&offload->skb_queue) >
253 offload->skb_queue_len_max) 319 offload->skb_queue_len_max) {
254 return -ENOMEM; 320 kfree_skb(skb);
321 return -ENOBUFS;
322 }
255 323
256 skb_queue_tail(&offload->skb_queue, skb); 324 skb_queue_tail(&offload->skb_queue, skb);
257 can_rx_offload_schedule(offload); 325 can_rx_offload_schedule(offload);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index bee9f7b8dad6..bb20a9b75cc6 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -717,6 +717,7 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
717 if (priv->after_suspend) { 717 if (priv->after_suspend) {
718 mcp251x_hw_reset(spi); 718 mcp251x_hw_reset(spi);
719 mcp251x_setup(net, spi); 719 mcp251x_setup(net, spi);
720 priv->force_quit = 0;
720 if (priv->after_suspend & AFTER_SUSPEND_RESTART) { 721 if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
721 mcp251x_set_normal_mode(spi); 722 mcp251x_set_normal_mode(spi);
722 } else if (priv->after_suspend & AFTER_SUSPEND_UP) { 723 } else if (priv->after_suspend & AFTER_SUSPEND_UP) {
@@ -728,7 +729,6 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
728 mcp251x_hw_sleep(spi); 729 mcp251x_hw_sleep(spi);
729 } 730 }
730 priv->after_suspend = 0; 731 priv->after_suspend = 0;
731 priv->force_quit = 0;
732 } 732 }
733 733
734 if (priv->restart_tx) { 734 if (priv->restart_tx) {
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index f8b19eef5d26..31ad364a89bb 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -73,6 +73,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
73 */ 73 */
74#define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX) 74#define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
75#define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1) 75#define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1)
76#define HECC_RX_LAST_MBOX (HECC_MAX_TX_MBOX)
76 77
77/* TI HECC module registers */ 78/* TI HECC module registers */
78#define HECC_CANME 0x0 /* Mailbox enable */ 79#define HECC_CANME 0x0 /* Mailbox enable */
@@ -82,7 +83,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
82#define HECC_CANTA 0x10 /* Transmission acknowledge */ 83#define HECC_CANTA 0x10 /* Transmission acknowledge */
83#define HECC_CANAA 0x14 /* Abort acknowledge */ 84#define HECC_CANAA 0x14 /* Abort acknowledge */
84#define HECC_CANRMP 0x18 /* Receive message pending */ 85#define HECC_CANRMP 0x18 /* Receive message pending */
85#define HECC_CANRML 0x1C /* Remote message lost */ 86#define HECC_CANRML 0x1C /* Receive message lost */
86#define HECC_CANRFP 0x20 /* Remote frame pending */ 87#define HECC_CANRFP 0x20 /* Remote frame pending */
87#define HECC_CANGAM 0x24 /* SECC only:Global acceptance mask */ 88#define HECC_CANGAM 0x24 /* SECC only:Global acceptance mask */
88#define HECC_CANMC 0x28 /* Master control */ 89#define HECC_CANMC 0x28 /* Master control */
@@ -149,6 +150,8 @@ MODULE_VERSION(HECC_MODULE_VERSION);
149#define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\ 150#define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\
150 HECC_CANES_CRCE | HECC_CANES_SE |\ 151 HECC_CANES_CRCE | HECC_CANES_SE |\
151 HECC_CANES_ACKE) 152 HECC_CANES_ACKE)
153#define HECC_CANES_FLAGS (HECC_BUS_ERROR | HECC_CANES_BO |\
154 HECC_CANES_EP | HECC_CANES_EW)
152 155
153#define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */ 156#define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */
154 157
@@ -382,8 +385,18 @@ static void ti_hecc_start(struct net_device *ndev)
382 hecc_set_bit(priv, HECC_CANMIM, mbx_mask); 385 hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
383 } 386 }
384 387
385 /* Prevent message over-write & Enable interrupts */ 388 /* Enable tx interrupts */
386 hecc_write(priv, HECC_CANOPC, HECC_SET_REG); 389 hecc_set_bit(priv, HECC_CANMIM, BIT(HECC_MAX_TX_MBOX) - 1);
390
391 /* Prevent message over-write to create a rx fifo, but not for
392 * the lowest priority mailbox, since that allows detecting
393 * overflows instead of the hardware silently dropping the
394 * messages.
395 */
396 mbx_mask = ~BIT(HECC_RX_LAST_MBOX);
397 hecc_write(priv, HECC_CANOPC, mbx_mask);
398
399 /* Enable interrupts */
387 if (priv->use_hecc1int) { 400 if (priv->use_hecc1int) {
388 hecc_write(priv, HECC_CANMIL, HECC_SET_REG); 401 hecc_write(priv, HECC_CANMIL, HECC_SET_REG);
389 hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK | 402 hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK |
@@ -400,6 +413,9 @@ static void ti_hecc_stop(struct net_device *ndev)
400{ 413{
401 struct ti_hecc_priv *priv = netdev_priv(ndev); 414 struct ti_hecc_priv *priv = netdev_priv(ndev);
402 415
416 /* Disable the CPK; stop sending, erroring and acking */
417 hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
418
403 /* Disable interrupts and disable mailboxes */ 419 /* Disable interrupts and disable mailboxes */
404 hecc_write(priv, HECC_CANGIM, 0); 420 hecc_write(priv, HECC_CANGIM, 0);
405 hecc_write(priv, HECC_CANMIM, 0); 421 hecc_write(priv, HECC_CANMIM, 0);
@@ -508,8 +524,6 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
508 hecc_set_bit(priv, HECC_CANME, mbx_mask); 524 hecc_set_bit(priv, HECC_CANME, mbx_mask);
509 spin_unlock_irqrestore(&priv->mbx_lock, flags); 525 spin_unlock_irqrestore(&priv->mbx_lock, flags);
510 526
511 hecc_clear_bit(priv, HECC_CANMD, mbx_mask);
512 hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
513 hecc_write(priv, HECC_CANTRS, mbx_mask); 527 hecc_write(priv, HECC_CANTRS, mbx_mask);
514 528
515 return NETDEV_TX_OK; 529 return NETDEV_TX_OK;
@@ -526,8 +540,10 @@ static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
526 u32 *timestamp, unsigned int mbxno) 540 u32 *timestamp, unsigned int mbxno)
527{ 541{
528 struct ti_hecc_priv *priv = rx_offload_to_priv(offload); 542 struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
529 u32 data; 543 u32 data, mbx_mask;
544 int ret = 1;
530 545
546 mbx_mask = BIT(mbxno);
531 data = hecc_read_mbx(priv, mbxno, HECC_CANMID); 547 data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
532 if (data & HECC_CANMID_IDE) 548 if (data & HECC_CANMID_IDE)
533 cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG; 549 cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@ -548,7 +564,25 @@ static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
548 564
549 *timestamp = hecc_read_stamp(priv, mbxno); 565 *timestamp = hecc_read_stamp(priv, mbxno);
550 566
551 return 1; 567 /* Check for FIFO overrun.
568 *
569 * All but the last RX mailbox have activated overwrite
570 * protection. So skip check for overrun, if we're not
571 * handling the last RX mailbox.
572 *
573 * As the overwrite protection for the last RX mailbox is
574 * disabled, the CAN core might update while we're reading
575 * it. This means the skb might be inconsistent.
576 *
577 * Return an error to let rx-offload discard this CAN frame.
578 */
579 if (unlikely(mbxno == HECC_RX_LAST_MBOX &&
580 hecc_read(priv, HECC_CANRML) & mbx_mask))
581 ret = -ENOBUFS;
582
583 hecc_write(priv, HECC_CANRMP, mbx_mask);
584
585 return ret;
552} 586}
553 587
554static int ti_hecc_error(struct net_device *ndev, int int_status, 588static int ti_hecc_error(struct net_device *ndev, int int_status,
@@ -558,92 +592,73 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
558 struct can_frame *cf; 592 struct can_frame *cf;
559 struct sk_buff *skb; 593 struct sk_buff *skb;
560 u32 timestamp; 594 u32 timestamp;
595 int err;
561 596
562 /* propagate the error condition to the can stack */ 597 if (err_status & HECC_BUS_ERROR) {
563 skb = alloc_can_err_skb(ndev, &cf); 598 /* propagate the error condition to the can stack */
564 if (!skb) { 599 skb = alloc_can_err_skb(ndev, &cf);
565 if (printk_ratelimit()) 600 if (!skb) {
566 netdev_err(priv->ndev, 601 if (net_ratelimit())
567 "%s: alloc_can_err_skb() failed\n", 602 netdev_err(priv->ndev,
568 __func__); 603 "%s: alloc_can_err_skb() failed\n",
569 return -ENOMEM; 604 __func__);
570 } 605 return -ENOMEM;
571
572 if (int_status & HECC_CANGIF_WLIF) { /* warning level int */
573 if ((int_status & HECC_CANGIF_BOIF) == 0) {
574 priv->can.state = CAN_STATE_ERROR_WARNING;
575 ++priv->can.can_stats.error_warning;
576 cf->can_id |= CAN_ERR_CRTL;
577 if (hecc_read(priv, HECC_CANTEC) > 96)
578 cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
579 if (hecc_read(priv, HECC_CANREC) > 96)
580 cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
581 }
582 hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW);
583 netdev_dbg(priv->ndev, "Error Warning interrupt\n");
584 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
585 }
586
587 if (int_status & HECC_CANGIF_EPIF) { /* error passive int */
588 if ((int_status & HECC_CANGIF_BOIF) == 0) {
589 priv->can.state = CAN_STATE_ERROR_PASSIVE;
590 ++priv->can.can_stats.error_passive;
591 cf->can_id |= CAN_ERR_CRTL;
592 if (hecc_read(priv, HECC_CANTEC) > 127)
593 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
594 if (hecc_read(priv, HECC_CANREC) > 127)
595 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
596 } 606 }
597 hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP);
598 netdev_dbg(priv->ndev, "Error passive interrupt\n");
599 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
600 }
601
602 /* Need to check busoff condition in error status register too to
603 * ensure warning interrupts don't hog the system
604 */
605 if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) {
606 priv->can.state = CAN_STATE_BUS_OFF;
607 cf->can_id |= CAN_ERR_BUSOFF;
608 hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO);
609 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
610 /* Disable all interrupts in bus-off to avoid int hog */
611 hecc_write(priv, HECC_CANGIM, 0);
612 ++priv->can.can_stats.bus_off;
613 can_bus_off(ndev);
614 }
615 607
616 if (err_status & HECC_BUS_ERROR) {
617 ++priv->can.can_stats.bus_error; 608 ++priv->can.can_stats.bus_error;
618 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; 609 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
619 if (err_status & HECC_CANES_FE) { 610 if (err_status & HECC_CANES_FE)
620 hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
621 cf->data[2] |= CAN_ERR_PROT_FORM; 611 cf->data[2] |= CAN_ERR_PROT_FORM;
622 } 612 if (err_status & HECC_CANES_BE)
623 if (err_status & HECC_CANES_BE) {
624 hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE);
625 cf->data[2] |= CAN_ERR_PROT_BIT; 613 cf->data[2] |= CAN_ERR_PROT_BIT;
626 } 614 if (err_status & HECC_CANES_SE)
627 if (err_status & HECC_CANES_SE) {
628 hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE);
629 cf->data[2] |= CAN_ERR_PROT_STUFF; 615 cf->data[2] |= CAN_ERR_PROT_STUFF;
630 } 616 if (err_status & HECC_CANES_CRCE)
631 if (err_status & HECC_CANES_CRCE) {
632 hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
633 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; 617 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
634 } 618 if (err_status & HECC_CANES_ACKE)
635 if (err_status & HECC_CANES_ACKE) {
636 hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
637 cf->data[3] = CAN_ERR_PROT_LOC_ACK; 619 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
638 } 620
621 timestamp = hecc_read(priv, HECC_CANLNT);
622 err = can_rx_offload_queue_sorted(&priv->offload, skb,
623 timestamp);
624 if (err)
625 ndev->stats.rx_fifo_errors++;
639 } 626 }
640 627
641 timestamp = hecc_read(priv, HECC_CANLNT); 628 hecc_write(priv, HECC_CANES, HECC_CANES_FLAGS);
642 can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
643 629
644 return 0; 630 return 0;
645} 631}
646 632
633static void ti_hecc_change_state(struct net_device *ndev,
634 enum can_state rx_state,
635 enum can_state tx_state)
636{
637 struct ti_hecc_priv *priv = netdev_priv(ndev);
638 struct can_frame *cf;
639 struct sk_buff *skb;
640 u32 timestamp;
641 int err;
642
643 skb = alloc_can_err_skb(priv->ndev, &cf);
644 if (unlikely(!skb)) {
645 priv->can.state = max(tx_state, rx_state);
646 return;
647 }
648
649 can_change_state(priv->ndev, cf, tx_state, rx_state);
650
651 if (max(tx_state, rx_state) != CAN_STATE_BUS_OFF) {
652 cf->data[6] = hecc_read(priv, HECC_CANTEC);
653 cf->data[7] = hecc_read(priv, HECC_CANREC);
654 }
655
656 timestamp = hecc_read(priv, HECC_CANLNT);
657 err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
658 if (err)
659 ndev->stats.rx_fifo_errors++;
660}
661
647static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) 662static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
648{ 663{
649 struct net_device *ndev = (struct net_device *)dev_id; 664 struct net_device *ndev = (struct net_device *)dev_id;
@@ -651,6 +666,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
651 struct net_device_stats *stats = &ndev->stats; 666 struct net_device_stats *stats = &ndev->stats;
652 u32 mbxno, mbx_mask, int_status, err_status, stamp; 667 u32 mbxno, mbx_mask, int_status, err_status, stamp;
653 unsigned long flags, rx_pending; 668 unsigned long flags, rx_pending;
669 u32 handled = 0;
654 670
655 int_status = hecc_read(priv, 671 int_status = hecc_read(priv,
656 priv->use_hecc1int ? 672 priv->use_hecc1int ?
@@ -660,17 +676,66 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
660 return IRQ_NONE; 676 return IRQ_NONE;
661 677
662 err_status = hecc_read(priv, HECC_CANES); 678 err_status = hecc_read(priv, HECC_CANES);
663 if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO | 679 if (unlikely(err_status & HECC_CANES_FLAGS))
664 HECC_CANES_EP | HECC_CANES_EW))
665 ti_hecc_error(ndev, int_status, err_status); 680 ti_hecc_error(ndev, int_status, err_status);
666 681
682 if (unlikely(int_status & HECC_CANGIM_DEF_MASK)) {
683 enum can_state rx_state, tx_state;
684 u32 rec = hecc_read(priv, HECC_CANREC);
685 u32 tec = hecc_read(priv, HECC_CANTEC);
686
687 if (int_status & HECC_CANGIF_WLIF) {
688 handled |= HECC_CANGIF_WLIF;
689 rx_state = rec >= tec ? CAN_STATE_ERROR_WARNING : 0;
690 tx_state = rec <= tec ? CAN_STATE_ERROR_WARNING : 0;
691 netdev_dbg(priv->ndev, "Error Warning interrupt\n");
692 ti_hecc_change_state(ndev, rx_state, tx_state);
693 }
694
695 if (int_status & HECC_CANGIF_EPIF) {
696 handled |= HECC_CANGIF_EPIF;
697 rx_state = rec >= tec ? CAN_STATE_ERROR_PASSIVE : 0;
698 tx_state = rec <= tec ? CAN_STATE_ERROR_PASSIVE : 0;
699 netdev_dbg(priv->ndev, "Error passive interrupt\n");
700 ti_hecc_change_state(ndev, rx_state, tx_state);
701 }
702
703 if (int_status & HECC_CANGIF_BOIF) {
704 handled |= HECC_CANGIF_BOIF;
705 rx_state = CAN_STATE_BUS_OFF;
706 tx_state = CAN_STATE_BUS_OFF;
707 netdev_dbg(priv->ndev, "Bus off interrupt\n");
708
709 /* Disable all interrupts */
710 hecc_write(priv, HECC_CANGIM, 0);
711 can_bus_off(ndev);
712 ti_hecc_change_state(ndev, rx_state, tx_state);
713 }
714 } else if (unlikely(priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
715 enum can_state new_state, tx_state, rx_state;
716 u32 rec = hecc_read(priv, HECC_CANREC);
717 u32 tec = hecc_read(priv, HECC_CANTEC);
718
719 if (rec >= 128 || tec >= 128)
720 new_state = CAN_STATE_ERROR_PASSIVE;
721 else if (rec >= 96 || tec >= 96)
722 new_state = CAN_STATE_ERROR_WARNING;
723 else
724 new_state = CAN_STATE_ERROR_ACTIVE;
725
726 if (new_state < priv->can.state) {
727 rx_state = rec >= tec ? new_state : 0;
728 tx_state = rec <= tec ? new_state : 0;
729 ti_hecc_change_state(ndev, rx_state, tx_state);
730 }
731 }
732
667 if (int_status & HECC_CANGIF_GMIF) { 733 if (int_status & HECC_CANGIF_GMIF) {
668 while (priv->tx_tail - priv->tx_head > 0) { 734 while (priv->tx_tail - priv->tx_head > 0) {
669 mbxno = get_tx_tail_mb(priv); 735 mbxno = get_tx_tail_mb(priv);
670 mbx_mask = BIT(mbxno); 736 mbx_mask = BIT(mbxno);
671 if (!(mbx_mask & hecc_read(priv, HECC_CANTA))) 737 if (!(mbx_mask & hecc_read(priv, HECC_CANTA)))
672 break; 738 break;
673 hecc_clear_bit(priv, HECC_CANMIM, mbx_mask);
674 hecc_write(priv, HECC_CANTA, mbx_mask); 739 hecc_write(priv, HECC_CANTA, mbx_mask);
675 spin_lock_irqsave(&priv->mbx_lock, flags); 740 spin_lock_irqsave(&priv->mbx_lock, flags);
676 hecc_clear_bit(priv, HECC_CANME, mbx_mask); 741 hecc_clear_bit(priv, HECC_CANME, mbx_mask);
@@ -695,16 +760,15 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
695 while ((rx_pending = hecc_read(priv, HECC_CANRMP))) { 760 while ((rx_pending = hecc_read(priv, HECC_CANRMP))) {
696 can_rx_offload_irq_offload_timestamp(&priv->offload, 761 can_rx_offload_irq_offload_timestamp(&priv->offload,
697 rx_pending); 762 rx_pending);
698 hecc_write(priv, HECC_CANRMP, rx_pending);
699 } 763 }
700 } 764 }
701 765
702 /* clear all interrupt conditions - read back to avoid spurious ints */ 766 /* clear all interrupt conditions - read back to avoid spurious ints */
703 if (priv->use_hecc1int) { 767 if (priv->use_hecc1int) {
704 hecc_write(priv, HECC_CANGIF1, HECC_SET_REG); 768 hecc_write(priv, HECC_CANGIF1, handled);
705 int_status = hecc_read(priv, HECC_CANGIF1); 769 int_status = hecc_read(priv, HECC_CANGIF1);
706 } else { 770 } else {
707 hecc_write(priv, HECC_CANGIF0, HECC_SET_REG); 771 hecc_write(priv, HECC_CANGIF0, handled);
708 int_status = hecc_read(priv, HECC_CANGIF0); 772 int_status = hecc_read(priv, HECC_CANGIF0);
709 } 773 }
710 774
@@ -877,7 +941,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
877 941
878 priv->offload.mailbox_read = ti_hecc_mailbox_read; 942 priv->offload.mailbox_read = ti_hecc_mailbox_read;
879 priv->offload.mb_first = HECC_RX_FIRST_MBOX; 943 priv->offload.mb_first = HECC_RX_FIRST_MBOX;
880 priv->offload.mb_last = HECC_MAX_TX_MBOX; 944 priv->offload.mb_last = HECC_RX_LAST_MBOX;
881 err = can_rx_offload_add_timestamp(ndev, &priv->offload); 945 err = can_rx_offload_add_timestamp(ndev, &priv->offload);
882 if (err) { 946 if (err) {
883 dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n"); 947 dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index bd6eb9967630..2f74f6704c12 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -623,6 +623,7 @@ static int gs_can_open(struct net_device *netdev)
623 rc); 623 rc);
624 624
625 usb_unanchor_urb(urb); 625 usb_unanchor_urb(urb);
626 usb_free_urb(urb);
626 break; 627 break;
627 } 628 }
628 629
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 19a702ac49e4..21faa2ec4632 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -876,9 +876,8 @@ static void mcba_usb_disconnect(struct usb_interface *intf)
876 netdev_info(priv->netdev, "device disconnected\n"); 876 netdev_info(priv->netdev, "device disconnected\n");
877 877
878 unregister_candev(priv->netdev); 878 unregister_candev(priv->netdev);
879 free_candev(priv->netdev);
880
881 mcba_urb_unlink(priv); 879 mcba_urb_unlink(priv);
880 free_candev(priv->netdev);
882} 881}
883 882
884static struct usb_driver mcba_usb_driver = { 883static struct usb_driver mcba_usb_driver = {
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 617da295b6c1..d2539c95adb6 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -100,7 +100,7 @@ struct pcan_usb_msg_context {
100 u8 *end; 100 u8 *end;
101 u8 rec_cnt; 101 u8 rec_cnt;
102 u8 rec_idx; 102 u8 rec_idx;
103 u8 rec_data_idx; 103 u8 rec_ts_idx;
104 struct net_device *netdev; 104 struct net_device *netdev;
105 struct pcan_usb *pdev; 105 struct pcan_usb *pdev;
106}; 106};
@@ -436,8 +436,8 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
436 } 436 }
437 if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) { 437 if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) {
438 /* no error (back to active state) */ 438 /* no error (back to active state) */
439 mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE; 439 new_state = CAN_STATE_ERROR_ACTIVE;
440 return 0; 440 break;
441 } 441 }
442 break; 442 break;
443 443
@@ -460,9 +460,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
460 } 460 }
461 461
462 if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) { 462 if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) {
463 /* no error (back to active state) */ 463 /* no error (back to warning state) */
464 mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE; 464 new_state = CAN_STATE_ERROR_WARNING;
465 return 0; 465 break;
466 } 466 }
467 break; 467 break;
468 468
@@ -501,6 +501,11 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
501 mc->pdev->dev.can.can_stats.error_warning++; 501 mc->pdev->dev.can.can_stats.error_warning++;
502 break; 502 break;
503 503
504 case CAN_STATE_ERROR_ACTIVE:
505 cf->can_id |= CAN_ERR_CRTL;
506 cf->data[1] = CAN_ERR_CRTL_ACTIVE;
507 break;
508
504 default: 509 default:
505 /* CAN_STATE_MAX (trick to handle other errors) */ 510 /* CAN_STATE_MAX (trick to handle other errors) */
506 cf->can_id |= CAN_ERR_CRTL; 511 cf->can_id |= CAN_ERR_CRTL;
@@ -547,10 +552,15 @@ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc,
547 mc->ptr += PCAN_USB_CMD_ARGS; 552 mc->ptr += PCAN_USB_CMD_ARGS;
548 553
549 if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { 554 if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
550 int err = pcan_usb_decode_ts(mc, !mc->rec_idx); 555 int err = pcan_usb_decode_ts(mc, !mc->rec_ts_idx);
551 556
552 if (err) 557 if (err)
553 return err; 558 return err;
559
560 /* Next packet in the buffer will have a timestamp on a single
561 * byte
562 */
563 mc->rec_ts_idx++;
554 } 564 }
555 565
556 switch (f) { 566 switch (f) {
@@ -632,10 +642,13 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
632 642
633 cf->can_dlc = get_can_dlc(rec_len); 643 cf->can_dlc = get_can_dlc(rec_len);
634 644
635 /* first data packet timestamp is a word */ 645 /* Only first packet timestamp is a word */
636 if (pcan_usb_decode_ts(mc, !mc->rec_data_idx)) 646 if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx))
637 goto decode_failed; 647 goto decode_failed;
638 648
649 /* Next packet in the buffer will have a timestamp on a single byte */
650 mc->rec_ts_idx++;
651
639 /* read data */ 652 /* read data */
640 memset(cf->data, 0x0, sizeof(cf->data)); 653 memset(cf->data, 0x0, sizeof(cf->data));
641 if (status_len & PCAN_USB_STATUSLEN_RTR) { 654 if (status_len & PCAN_USB_STATUSLEN_RTR) {
@@ -688,7 +701,6 @@ static int pcan_usb_decode_msg(struct peak_usb_device *dev, u8 *ibuf, u32 lbuf)
688 /* handle normal can frames here */ 701 /* handle normal can frames here */
689 } else { 702 } else {
690 err = pcan_usb_decode_data(&mc, sl); 703 err = pcan_usb_decode_data(&mc, sl);
691 mc.rec_data_idx++;
692 } 704 }
693 } 705 }
694 706
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 65dce642b86b..0b7766b715fd 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -750,7 +750,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
750 dev = netdev_priv(netdev); 750 dev = netdev_priv(netdev);
751 751
752 /* allocate a buffer large enough to send commands */ 752 /* allocate a buffer large enough to send commands */
753 dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); 753 dev->cmd_buf = kzalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
754 if (!dev->cmd_buf) { 754 if (!dev->cmd_buf) {
755 err = -ENOMEM; 755 err = -ENOMEM;
756 goto lbl_free_candev; 756 goto lbl_free_candev;
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index d596a2ad7f78..8fa224b28218 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -996,9 +996,8 @@ static void usb_8dev_disconnect(struct usb_interface *intf)
996 netdev_info(priv->netdev, "device disconnected\n"); 996 netdev_info(priv->netdev, "device disconnected\n");
997 997
998 unregister_netdev(priv->netdev); 998 unregister_netdev(priv->netdev);
999 free_candev(priv->netdev);
1000
1001 unlink_all_urbs(priv); 999 unlink_all_urbs(priv);
1000 free_candev(priv->netdev);
1002 } 1001 }
1003 1002
1004} 1003}
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 911b34316c9d..7c482b2d78d2 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -1599,7 +1599,6 @@ static const struct xcan_devtype_data xcan_zynq_data = {
1599 1599
1600static const struct xcan_devtype_data xcan_axi_data = { 1600static const struct xcan_devtype_data xcan_axi_data = {
1601 .cantype = XAXI_CAN, 1601 .cantype = XAXI_CAN,
1602 .flags = XCAN_FLAG_TXFEMP,
1603 .bittiming_const = &xcan_bittiming_const, 1602 .bittiming_const = &xcan_bittiming_const,
1604 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, 1603 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
1605 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, 1604 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index d44651ad520c..69fc13046ac7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1215,10 +1215,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
1215 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); 1215 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1216 1216
1217 priv->wol_ports_mask = 0; 1217 priv->wol_ports_mask = 0;
1218 /* Disable interrupts */
1219 bcm_sf2_intr_disable(priv);
1218 dsa_unregister_switch(priv->dev->ds); 1220 dsa_unregister_switch(priv->dev->ds);
1219 bcm_sf2_cfp_exit(priv->dev->ds); 1221 bcm_sf2_cfp_exit(priv->dev->ds);
1220 /* Disable all ports and interrupts */
1221 bcm_sf2_sw_suspend(priv->dev->ds);
1222 bcm_sf2_mdio_unregister(priv); 1222 bcm_sf2_mdio_unregister(priv);
1223 1223
1224 return 0; 1224 return 0;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 0f138280315a..1de51811fcb4 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1996,8 +1996,6 @@ static void reset_umac(struct bcmgenet_priv *priv)
1996 1996
1997 /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */ 1997 /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
1998 bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD); 1998 bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD);
1999 udelay(2);
2000 bcmgenet_umac_writel(priv, 0, UMAC_CMD);
2001} 1999}
2002 2000
2003static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) 2001static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -2614,8 +2612,10 @@ static void bcmgenet_irq_task(struct work_struct *work)
2614 spin_unlock_irq(&priv->lock); 2612 spin_unlock_irq(&priv->lock);
2615 2613
2616 if (status & UMAC_IRQ_PHY_DET_R && 2614 if (status & UMAC_IRQ_PHY_DET_R &&
2617 priv->dev->phydev->autoneg != AUTONEG_ENABLE) 2615 priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
2618 phy_init_hw(priv->dev->phydev); 2616 phy_init_hw(priv->dev->phydev);
2617 genphy_config_aneg(priv->dev->phydev);
2618 }
2619 2619
2620 /* Link UP/DOWN event */ 2620 /* Link UP/DOWN event */
2621 if (status & UMAC_IRQ_LINK_EVENT) 2621 if (status & UMAC_IRQ_LINK_EVENT)
@@ -2879,12 +2879,6 @@ static int bcmgenet_open(struct net_device *dev)
2879 if (priv->internal_phy) 2879 if (priv->internal_phy)
2880 bcmgenet_power_up(priv, GENET_POWER_PASSIVE); 2880 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
2881 2881
2882 ret = bcmgenet_mii_connect(dev);
2883 if (ret) {
2884 netdev_err(dev, "failed to connect to PHY\n");
2885 goto err_clk_disable;
2886 }
2887
2888 /* take MAC out of reset */ 2882 /* take MAC out of reset */
2889 bcmgenet_umac_reset(priv); 2883 bcmgenet_umac_reset(priv);
2890 2884
@@ -2894,12 +2888,6 @@ static int bcmgenet_open(struct net_device *dev)
2894 reg = bcmgenet_umac_readl(priv, UMAC_CMD); 2888 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2895 priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); 2889 priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
2896 2890
2897 ret = bcmgenet_mii_config(dev, true);
2898 if (ret) {
2899 netdev_err(dev, "unsupported PHY\n");
2900 goto err_disconnect_phy;
2901 }
2902
2903 bcmgenet_set_hw_addr(priv, dev->dev_addr); 2891 bcmgenet_set_hw_addr(priv, dev->dev_addr);
2904 2892
2905 if (priv->internal_phy) { 2893 if (priv->internal_phy) {
@@ -2915,7 +2903,7 @@ static int bcmgenet_open(struct net_device *dev)
2915 ret = bcmgenet_init_dma(priv); 2903 ret = bcmgenet_init_dma(priv);
2916 if (ret) { 2904 if (ret) {
2917 netdev_err(dev, "failed to initialize DMA\n"); 2905 netdev_err(dev, "failed to initialize DMA\n");
2918 goto err_disconnect_phy; 2906 goto err_clk_disable;
2919 } 2907 }
2920 2908
2921 /* Always enable ring 16 - descriptor ring */ 2909 /* Always enable ring 16 - descriptor ring */
@@ -2938,19 +2926,25 @@ static int bcmgenet_open(struct net_device *dev)
2938 goto err_irq0; 2926 goto err_irq0;
2939 } 2927 }
2940 2928
2929 ret = bcmgenet_mii_probe(dev);
2930 if (ret) {
2931 netdev_err(dev, "failed to connect to PHY\n");
2932 goto err_irq1;
2933 }
2934
2941 bcmgenet_netif_start(dev); 2935 bcmgenet_netif_start(dev);
2942 2936
2943 netif_tx_start_all_queues(dev); 2937 netif_tx_start_all_queues(dev);
2944 2938
2945 return 0; 2939 return 0;
2946 2940
2941err_irq1:
2942 free_irq(priv->irq1, priv);
2947err_irq0: 2943err_irq0:
2948 free_irq(priv->irq0, priv); 2944 free_irq(priv->irq0, priv);
2949err_fini_dma: 2945err_fini_dma:
2950 bcmgenet_dma_teardown(priv); 2946 bcmgenet_dma_teardown(priv);
2951 bcmgenet_fini_dma(priv); 2947 bcmgenet_fini_dma(priv);
2952err_disconnect_phy:
2953 phy_disconnect(dev->phydev);
2954err_clk_disable: 2948err_clk_disable:
2955 if (priv->internal_phy) 2949 if (priv->internal_phy)
2956 bcmgenet_power_down(priv, GENET_POWER_PASSIVE); 2950 bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
@@ -3631,8 +3625,6 @@ static int bcmgenet_resume(struct device *d)
3631 if (priv->internal_phy) 3625 if (priv->internal_phy)
3632 bcmgenet_power_up(priv, GENET_POWER_PASSIVE); 3626 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
3633 3627
3634 phy_init_hw(dev->phydev);
3635
3636 bcmgenet_umac_reset(priv); 3628 bcmgenet_umac_reset(priv);
3637 3629
3638 init_umac(priv); 3630 init_umac(priv);
@@ -3641,7 +3633,10 @@ static int bcmgenet_resume(struct device *d)
3641 if (priv->wolopts) 3633 if (priv->wolopts)
3642 clk_disable_unprepare(priv->clk_wol); 3634 clk_disable_unprepare(priv->clk_wol);
3643 3635
3636 phy_init_hw(dev->phydev);
3637
3644 /* Speed settings must be restored */ 3638 /* Speed settings must be restored */
3639 genphy_config_aneg(dev->phydev);
3645 bcmgenet_mii_config(priv->dev, false); 3640 bcmgenet_mii_config(priv->dev, false);
3646 3641
3647 bcmgenet_set_hw_addr(priv, dev->dev_addr); 3642 bcmgenet_set_hw_addr(priv, dev->dev_addr);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 7fbf573d8d52..dbc69d8fa05f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -720,8 +720,8 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
720 720
721/* MDIO routines */ 721/* MDIO routines */
722int bcmgenet_mii_init(struct net_device *dev); 722int bcmgenet_mii_init(struct net_device *dev);
723int bcmgenet_mii_connect(struct net_device *dev);
724int bcmgenet_mii_config(struct net_device *dev, bool init); 723int bcmgenet_mii_config(struct net_device *dev, bool init);
724int bcmgenet_mii_probe(struct net_device *dev);
725void bcmgenet_mii_exit(struct net_device *dev); 725void bcmgenet_mii_exit(struct net_device *dev);
726void bcmgenet_phy_power_set(struct net_device *dev, bool enable); 726void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
727void bcmgenet_mii_setup(struct net_device *dev); 727void bcmgenet_mii_setup(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 17bb8d60a157..dbe18cdf6c1b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -173,46 +173,6 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
173 bcmgenet_fixed_phy_link_update); 173 bcmgenet_fixed_phy_link_update);
174} 174}
175 175
176int bcmgenet_mii_connect(struct net_device *dev)
177{
178 struct bcmgenet_priv *priv = netdev_priv(dev);
179 struct device_node *dn = priv->pdev->dev.of_node;
180 struct phy_device *phydev;
181 u32 phy_flags = 0;
182 int ret;
183
184 /* Communicate the integrated PHY revision */
185 if (priv->internal_phy)
186 phy_flags = priv->gphy_rev;
187
188 /* Initialize link state variables that bcmgenet_mii_setup() uses */
189 priv->old_link = -1;
190 priv->old_speed = -1;
191 priv->old_duplex = -1;
192 priv->old_pause = -1;
193
194 if (dn) {
195 phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
196 phy_flags, priv->phy_interface);
197 if (!phydev) {
198 pr_err("could not attach to PHY\n");
199 return -ENODEV;
200 }
201 } else {
202 phydev = dev->phydev;
203 phydev->dev_flags = phy_flags;
204
205 ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
206 priv->phy_interface);
207 if (ret) {
208 pr_err("could not attach to PHY\n");
209 return -ENODEV;
210 }
211 }
212
213 return 0;
214}
215
216int bcmgenet_mii_config(struct net_device *dev, bool init) 176int bcmgenet_mii_config(struct net_device *dev, bool init)
217{ 177{
218 struct bcmgenet_priv *priv = netdev_priv(dev); 178 struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -221,8 +181,38 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
221 const char *phy_name = NULL; 181 const char *phy_name = NULL;
222 u32 id_mode_dis = 0; 182 u32 id_mode_dis = 0;
223 u32 port_ctrl; 183 u32 port_ctrl;
184 int bmcr = -1;
185 int ret;
224 u32 reg; 186 u32 reg;
225 187
188 /* MAC clocking workaround during reset of umac state machines */
189 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
190 if (reg & CMD_SW_RESET) {
191 /* An MII PHY must be isolated to prevent TXC contention */
192 if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
193 ret = phy_read(phydev, MII_BMCR);
194 if (ret >= 0) {
195 bmcr = ret;
196 ret = phy_write(phydev, MII_BMCR,
197 bmcr | BMCR_ISOLATE);
198 }
199 if (ret) {
200 netdev_err(dev, "failed to isolate PHY\n");
201 return ret;
202 }
203 }
204 /* Switch MAC clocking to RGMII generated clock */
205 bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
206 /* Ensure 5 clks with Rx disabled
207 * followed by 5 clks with Reset asserted
208 */
209 udelay(4);
210 reg &= ~(CMD_SW_RESET | CMD_LCL_LOOP_EN);
211 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
212 /* Ensure 5 more clocks before Rx is enabled */
213 udelay(2);
214 }
215
226 priv->ext_phy = !priv->internal_phy && 216 priv->ext_phy = !priv->internal_phy &&
227 (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); 217 (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
228 218
@@ -254,6 +244,9 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
254 phy_set_max_speed(phydev, SPEED_100); 244 phy_set_max_speed(phydev, SPEED_100);
255 bcmgenet_sys_writel(priv, 245 bcmgenet_sys_writel(priv,
256 PORT_MODE_EXT_EPHY, SYS_PORT_CTRL); 246 PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
247 /* Restore the MII PHY after isolation */
248 if (bmcr >= 0)
249 phy_write(phydev, MII_BMCR, bmcr);
257 break; 250 break;
258 251
259 case PHY_INTERFACE_MODE_REVMII: 252 case PHY_INTERFACE_MODE_REVMII:
@@ -306,21 +299,71 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
306 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); 299 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
307 } 300 }
308 301
309 if (init) { 302 if (init)
310 linkmode_copy(phydev->advertising, phydev->supported); 303 dev_info(kdev, "configuring instance for %s\n", phy_name);
311 304
312 /* The internal PHY has its link interrupts routed to the 305 return 0;
313 * Ethernet MAC ISRs. On GENETv5 there is a hardware issue 306}
314 * that prevents the signaling of link UP interrupts when
315 * the link operates at 10Mbps, so fallback to polling for
316 * those versions of GENET.
317 */
318 if (priv->internal_phy && !GENET_IS_V5(priv))
319 phydev->irq = PHY_IGNORE_INTERRUPT;
320 307
321 dev_info(kdev, "configuring instance for %s\n", phy_name); 308int bcmgenet_mii_probe(struct net_device *dev)
309{
310 struct bcmgenet_priv *priv = netdev_priv(dev);
311 struct device_node *dn = priv->pdev->dev.of_node;
312 struct phy_device *phydev;
313 u32 phy_flags = 0;
314 int ret;
315
316 /* Communicate the integrated PHY revision */
317 if (priv->internal_phy)
318 phy_flags = priv->gphy_rev;
319
320 /* Initialize link state variables that bcmgenet_mii_setup() uses */
321 priv->old_link = -1;
322 priv->old_speed = -1;
323 priv->old_duplex = -1;
324 priv->old_pause = -1;
325
326 if (dn) {
327 phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
328 phy_flags, priv->phy_interface);
329 if (!phydev) {
330 pr_err("could not attach to PHY\n");
331 return -ENODEV;
332 }
333 } else {
334 phydev = dev->phydev;
335 phydev->dev_flags = phy_flags;
336
337 ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
338 priv->phy_interface);
339 if (ret) {
340 pr_err("could not attach to PHY\n");
341 return -ENODEV;
342 }
322 } 343 }
323 344
345 /* Configure port multiplexer based on what the probed PHY device since
346 * reading the 'max-speed' property determines the maximum supported
347 * PHY speed which is needed for bcmgenet_mii_config() to configure
348 * things appropriately.
349 */
350 ret = bcmgenet_mii_config(dev, true);
351 if (ret) {
352 phy_disconnect(dev->phydev);
353 return ret;
354 }
355
356 linkmode_copy(phydev->advertising, phydev->supported);
357
358 /* The internal PHY has its link interrupts routed to the
359 * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
360 * that prevents the signaling of link UP interrupts when
361 * the link operates at 10Mbps, so fallback to polling for
362 * those versions of GENET.
363 */
364 if (priv->internal_phy && !GENET_IS_V5(priv))
365 dev->phydev->irq = PHY_IGNORE_INTERRUPT;
366
324 return 0; 367 return 0;
325} 368}
326 369
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 0e5de88fd6e8..cdd7e5da4a74 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1499,7 +1499,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
1499 netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; 1499 netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
1500 1500
1501 netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM; 1501 netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
1502 netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM; 1502 netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
1503 1503
1504 mac = of_get_mac_address(pdev->dev.of_node); 1504 mac = of_get_mac_address(pdev->dev.of_node);
1505 1505
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 22c01b224baa..a9c386b63581 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3645,6 +3645,8 @@ fec_drv_remove(struct platform_device *pdev)
3645 regulator_disable(fep->reg_phy); 3645 regulator_disable(fep->reg_phy);
3646 pm_runtime_put(&pdev->dev); 3646 pm_runtime_put(&pdev->dev);
3647 pm_runtime_disable(&pdev->dev); 3647 pm_runtime_disable(&pdev->dev);
3648 clk_disable_unprepare(fep->clk_ahb);
3649 clk_disable_unprepare(fep->clk_ipg);
3648 if (of_phy_is_fixed_link(np)) 3650 if (of_phy_is_fixed_link(np))
3649 of_phy_deregister_fixed_link(np); 3651 of_phy_deregister_fixed_link(np);
3650 of_node_put(fep->phy_node); 3652 of_node_put(fep->phy_node);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 6d0457eb4faa..08339278c722 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -199,7 +199,6 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
199 199
200 ring->q = q; 200 ring->q = q;
201 ring->flags = flags; 201 ring->flags = flags;
202 spin_lock_init(&ring->lock);
203 ring->coal_param = q->handle->coal_param; 202 ring->coal_param = q->handle->coal_param;
204 assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr); 203 assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
205 204
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index e9c67c06bfd2..6ab9458302e1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -274,9 +274,6 @@ struct hnae_ring {
274 /* statistic */ 274 /* statistic */
275 struct ring_stats stats; 275 struct ring_stats stats;
276 276
277 /* ring lock for poll one */
278 spinlock_t lock;
279
280 dma_addr_t desc_dma_addr; 277 dma_addr_t desc_dma_addr;
281 u32 buf_size; /* size for hnae_desc->addr, preset by AE */ 278 u32 buf_size; /* size for hnae_desc->addr, preset by AE */
282 u16 desc_num; /* total number of desc */ 279 u16 desc_num; /* total number of desc */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index a48396dd4ebb..14ab20491fd0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -943,15 +943,6 @@ static int is_valid_clean_head(struct hnae_ring *ring, int h)
943 return u > c ? (h > c && h <= u) : (h > c || h <= u); 943 return u > c ? (h > c && h <= u) : (h > c || h <= u);
944} 944}
945 945
946/* netif_tx_lock will turn down the performance, set only when necessary */
947#ifdef CONFIG_NET_POLL_CONTROLLER
948#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock)
949#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock)
950#else
951#define NETIF_TX_LOCK(ring)
952#define NETIF_TX_UNLOCK(ring)
953#endif
954
955/* reclaim all desc in one budget 946/* reclaim all desc in one budget
956 * return error or number of desc left 947 * return error or number of desc left
957 */ 948 */
@@ -965,21 +956,16 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
965 int head; 956 int head;
966 int bytes, pkts; 957 int bytes, pkts;
967 958
968 NETIF_TX_LOCK(ring);
969
970 head = readl_relaxed(ring->io_base + RCB_REG_HEAD); 959 head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
971 rmb(); /* make sure head is ready before touch any data */ 960 rmb(); /* make sure head is ready before touch any data */
972 961
973 if (is_ring_empty(ring) || head == ring->next_to_clean) { 962 if (is_ring_empty(ring) || head == ring->next_to_clean)
974 NETIF_TX_UNLOCK(ring);
975 return 0; /* no data to poll */ 963 return 0; /* no data to poll */
976 }
977 964
978 if (!is_valid_clean_head(ring, head)) { 965 if (!is_valid_clean_head(ring, head)) {
979 netdev_err(ndev, "wrong head (%d, %d-%d)\n", head, 966 netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
980 ring->next_to_use, ring->next_to_clean); 967 ring->next_to_use, ring->next_to_clean);
981 ring->stats.io_err_cnt++; 968 ring->stats.io_err_cnt++;
982 NETIF_TX_UNLOCK(ring);
983 return -EIO; 969 return -EIO;
984 } 970 }
985 971
@@ -994,8 +980,6 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
994 ring->stats.tx_pkts += pkts; 980 ring->stats.tx_pkts += pkts;
995 ring->stats.tx_bytes += bytes; 981 ring->stats.tx_bytes += bytes;
996 982
997 NETIF_TX_UNLOCK(ring);
998
999 dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index); 983 dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
1000 netdev_tx_completed_queue(dev_queue, pkts, bytes); 984 netdev_tx_completed_queue(dev_queue, pkts, bytes);
1001 985
@@ -1055,16 +1039,12 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
1055 int head; 1039 int head;
1056 int bytes, pkts; 1040 int bytes, pkts;
1057 1041
1058 NETIF_TX_LOCK(ring);
1059
1060 head = ring->next_to_use; /* ntu :soft setted ring position*/ 1042 head = ring->next_to_use; /* ntu :soft setted ring position*/
1061 bytes = 0; 1043 bytes = 0;
1062 pkts = 0; 1044 pkts = 0;
1063 while (head != ring->next_to_clean) 1045 while (head != ring->next_to_clean)
1064 hns_nic_reclaim_one_desc(ring, &bytes, &pkts); 1046 hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
1065 1047
1066 NETIF_TX_UNLOCK(ring);
1067
1068 dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index); 1048 dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
1069 netdev_tx_reset_queue(dev_queue); 1049 netdev_tx_reset_queue(dev_queue);
1070} 1050}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 75ccc1e7076b..a0998937727d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: GPL-2.0+ */
2// Copyright (c) 2016-2017 Hisilicon Limited. 2// Copyright (c) 2016-2017 Hisilicon Limited.
3 3
4#ifndef __HNAE3_H 4#ifndef __HNAE3_H
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 2110fa3b4479..5d468ed404a6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: GPL-2.0+ */
2// Copyright (c) 2016-2017 Hisilicon Limited. 2// Copyright (c) 2016-2017 Hisilicon Limited.
3 3
4#ifndef __HNS3_ENET_H 4#ifndef __HNS3_ENET_H
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 4821fe08b5e4..1426eb5ddf3d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: GPL-2.0+ */
2// Copyright (c) 2016-2017 Hisilicon Limited. 2// Copyright (c) 2016-2017 Hisilicon Limited.
3 3
4#ifndef __HCLGE_CMD_H 4#ifndef __HCLGE_CMD_H
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
index 278f21e02736..b04702e65689 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: GPL-2.0+ */
2// Copyright (c) 2016-2017 Hisilicon Limited. 2// Copyright (c) 2016-2017 Hisilicon Limited.
3 3
4#ifndef __HCLGE_DCB_H__ 4#ifndef __HCLGE_DCB_H__
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index e02e01bd9eff..16f7d0e15b4f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3587,12 +3587,28 @@ static int hclge_set_rst_done(struct hclge_dev *hdev)
3587{ 3587{
3588 struct hclge_pf_rst_done_cmd *req; 3588 struct hclge_pf_rst_done_cmd *req;
3589 struct hclge_desc desc; 3589 struct hclge_desc desc;
3590 int ret;
3590 3591
3591 req = (struct hclge_pf_rst_done_cmd *)desc.data; 3592 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3592 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false); 3593 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3593 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; 3594 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3594 3595
3595 return hclge_cmd_send(&hdev->hw, &desc, 1); 3596 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3597 /* To be compatible with the old firmware, which does not support
3598 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3599 * return success
3600 */
3601 if (ret == -EOPNOTSUPP) {
3602 dev_warn(&hdev->pdev->dev,
3603 "current firmware does not support command(0x%x)!\n",
3604 HCLGE_OPC_PF_RST_DONE);
3605 return 0;
3606 } else if (ret) {
3607 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3608 ret);
3609 }
3610
3611 return ret;
3596} 3612}
3597 3613
3598static int hclge_reset_prepare_up(struct hclge_dev *hdev) 3614static int hclge_reset_prepare_up(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index c3d56b872ed7..59b824347ba4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: GPL-2.0+ */
2// Copyright (c) 2016-2017 Hisilicon Limited. 2// Copyright (c) 2016-2017 Hisilicon Limited.
3 3
4#ifndef __HCLGE_MAIN_H 4#ifndef __HCLGE_MAIN_H
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index ef095d9c566f..dd9a1218a7b0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: GPL-2.0+ */
2// Copyright (c) 2016-2017 Hisilicon Limited. 2// Copyright (c) 2016-2017 Hisilicon Limited.
3 3
4#ifndef __HCLGE_MDIO_H 4#ifndef __HCLGE_MDIO_H
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 818610988d34..260f22d19d81 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: GPL-2.0+ */
2// Copyright (c) 2016-2017 Hisilicon Limited. 2// Copyright (c) 2016-2017 Hisilicon Limited.
3 3
4#ifndef __HCLGE_TM_H 4#ifndef __HCLGE_TM_H
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 530613f31527..69a2daaca5c5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -20,6 +20,8 @@
20 20
21/* API version 1.7 implements additional link and PHY-specific APIs */ 21/* API version 1.7 implements additional link and PHY-specific APIs */
22#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 22#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
23/* API version 1.9 for X722 implements additional link and PHY-specific APIs */
24#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009
23/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */ 25/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
24#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006 26#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
25 27
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index d37c6e0e5f08..7560f06768e0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1876,7 +1876,8 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
1876 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) 1876 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
1877 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; 1877 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
1878 1878
1879 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 1879 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
1880 hw->mac.type != I40E_MAC_X722) {
1880 __le32 tmp; 1881 __le32 tmp;
1881 1882
1882 memcpy(&tmp, resp->link_type, sizeof(tmp)); 1883 memcpy(&tmp, resp->link_type, sizeof(tmp));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index a05dfecdd9b4..d07e1a890428 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -689,8 +689,6 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
689 i40e_xdp_ring_update_tail(xdp_ring); 689 i40e_xdp_ring_update_tail(xdp_ring);
690 690
691 xsk_umem_consume_tx_done(xdp_ring->xsk_umem); 691 xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
692 if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
693 xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
694 } 692 }
695 693
696 return !!budget && work_done; 694 return !!budget && work_done;
@@ -769,12 +767,8 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
769 i40e_update_tx_stats(tx_ring, completed_frames, total_bytes); 767 i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);
770 768
771out_xmit: 769out_xmit:
772 if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) { 770 if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
773 if (tx_ring->next_to_clean == tx_ring->next_to_use) 771 xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
774 xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
775 else
776 xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
777 }
778 772
779 xmit_done = i40e_xmit_zc(tx_ring, budget); 773 xmit_done = i40e_xmit_zc(tx_ring, budget);
780 774
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 8f310e520b06..821987da5698 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -314,7 +314,7 @@ iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
314 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); 314 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
315 q_vector->ring_mask |= BIT(r_idx); 315 q_vector->ring_mask |= BIT(r_idx);
316 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx), 316 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
317 q_vector->rx.current_itr); 317 q_vector->rx.current_itr >> 1);
318 q_vector->rx.current_itr = q_vector->rx.target_itr; 318 q_vector->rx.current_itr = q_vector->rx.target_itr;
319} 319}
320 320
@@ -340,7 +340,7 @@ iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
340 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); 340 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
341 q_vector->num_ringpairs++; 341 q_vector->num_ringpairs++;
342 wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx), 342 wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
343 q_vector->tx.target_itr); 343 q_vector->tx.target_itr >> 1);
344 q_vector->tx.current_itr = q_vector->tx.target_itr; 344 q_vector->tx.current_itr = q_vector->tx.target_itr;
345} 345}
346 346
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index fc624b73d05d..2fde9653a608 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1036,7 +1036,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
1036 struct ice_aqc_query_txsched_res_resp *buf; 1036 struct ice_aqc_query_txsched_res_resp *buf;
1037 enum ice_status status = 0; 1037 enum ice_status status = 0;
1038 __le16 max_sibl; 1038 __le16 max_sibl;
1039 u8 i; 1039 u16 i;
1040 1040
1041 if (hw->layer_info) 1041 if (hw->layer_info)
1042 return status; 1042 return status;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 9148c62d9ac5..ed7e667d7eb2 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5675,8 +5675,8 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
5675 * should have been handled by the upper layers. 5675 * should have been handled by the upper layers.
5676 */ 5676 */
5677 if (tx_ring->launchtime_enable) { 5677 if (tx_ring->launchtime_enable) {
5678 ts = ns_to_timespec64(first->skb->tstamp); 5678 ts = ktime_to_timespec64(first->skb->tstamp);
5679 first->skb->tstamp = 0; 5679 first->skb->tstamp = ktime_set(0, 0);
5680 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); 5680 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
5681 } else { 5681 } else {
5682 context_desc->seqnum_seed = 0; 5682 context_desc->seqnum_seed = 0;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 8e424dfab12e..24888676f69b 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -824,8 +824,8 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
824 * should have been handled by the upper layers. 824 * should have been handled by the upper layers.
825 */ 825 */
826 if (tx_ring->launchtime_enable) { 826 if (tx_ring->launchtime_enable) {
827 ts = ns_to_timespec64(first->skb->tstamp); 827 ts = ktime_to_timespec64(first->skb->tstamp);
828 first->skb->tstamp = 0; 828 first->skb->tstamp = ktime_set(0, 0);
829 context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32); 829 context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32);
830 } else { 830 } else {
831 context_desc->launch_time = 0; 831 context_desc->launch_time = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 100ac89b345d..d6feaacfbf89 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -622,8 +622,6 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
622 if (tx_desc) { 622 if (tx_desc) {
623 ixgbe_xdp_ring_update_tail(xdp_ring); 623 ixgbe_xdp_ring_update_tail(xdp_ring);
624 xsk_umem_consume_tx_done(xdp_ring->xsk_umem); 624 xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
625 if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
626 xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
627 } 625 }
628 626
629 return !!budget && work_done; 627 return !!budget && work_done;
@@ -691,12 +689,8 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
691 if (xsk_frames) 689 if (xsk_frames)
692 xsk_umem_complete_tx(umem, xsk_frames); 690 xsk_umem_complete_tx(umem, xsk_frames);
693 691
694 if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) { 692 if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
695 if (tx_ring->next_to_clean == tx_ring->next_to_use) 693 xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
696 xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
697 else
698 xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
699 }
700 694
701 return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); 695 return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
702} 696}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index fce9b3a24347..69bb6bb06e76 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -514,8 +514,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
514 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; 514 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
515 /* 515 /*
516 * Subtract 1 from the limit because we need to allocate a 516 * Subtract 1 from the limit because we need to allocate a
517 * spare CQE so the HCA HW can tell the difference between an 517 * spare CQE to enable resizing the CQ.
518 * empty CQ and a full CQ.
519 */ 518 */
520 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; 519 dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
521 dev->caps.reserved_cqs = dev_cap->reserved_cqs; 520 dev->caps.reserved_cqs = dev_cap->reserved_cqs;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 369499e88fe8..9004a07e457a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1079,7 +1079,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
1079 MLX5_CAP_GEN(dev, max_flow_counter_15_0); 1079 MLX5_CAP_GEN(dev, max_flow_counter_15_0);
1080 fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size); 1080 fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
1081 1081
1082 esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n", 1082 esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n",
1083 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), 1083 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
1084 max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, 1084 max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
1085 fdb_max); 1085 fdb_max);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 7879e1746297..366bda1bb1c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -183,7 +183,8 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
183 u32 port_mask, port_value; 183 u32 port_mask, port_value;
184 184
185 if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) 185 if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
186 return spec->flow_context.flow_source == MLX5_VPORT_UPLINK; 186 return spec->flow_context.flow_source ==
187 MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
187 188
188 port_mask = MLX5_GET(fte_match_param, spec->match_criteria, 189 port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
189 misc_parameters.source_port); 190 misc_parameters.source_port);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index b74b7d0f6590..004c56c2fc0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -1577,6 +1577,7 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
1577 break; 1577 break;
1578 case DR_ACTION_TYP_MODIFY_HDR: 1578 case DR_ACTION_TYP_MODIFY_HDR:
1579 mlx5dr_icm_free_chunk(action->rewrite.chunk); 1579 mlx5dr_icm_free_chunk(action->rewrite.chunk);
1580 kfree(action->rewrite.data);
1580 refcount_dec(&action->rewrite.dmn->refcount); 1581 refcount_dec(&action->rewrite.dmn->refcount);
1581 break; 1582 break;
1582 default: 1583 default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index e8b656075c6f..5dcb8baf491a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -1096,6 +1096,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
1096 if (htbl) 1096 if (htbl)
1097 mlx5dr_htbl_put(htbl); 1097 mlx5dr_htbl_put(htbl);
1098 1098
1099 kfree(hw_ste_arr);
1100
1099 return 0; 1101 return 0;
1100 1102
1101free_ste: 1103free_ste:
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 344539c0d3aa..672ea1342add 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1680,9 +1680,6 @@ static int ocelot_netdevice_port_event(struct net_device *dev,
1680 struct ocelot_port *ocelot_port = netdev_priv(dev); 1680 struct ocelot_port *ocelot_port = netdev_priv(dev);
1681 int err = 0; 1681 int err = 0;
1682 1682
1683 if (!ocelot_netdevice_dev_check(dev))
1684 return 0;
1685
1686 switch (event) { 1683 switch (event) {
1687 case NETDEV_CHANGEUPPER: 1684 case NETDEV_CHANGEUPPER:
1688 if (netif_is_bridge_master(info->upper_dev)) { 1685 if (netif_is_bridge_master(info->upper_dev)) {
@@ -1719,12 +1716,16 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
1719 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1716 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1720 int ret = 0; 1717 int ret = 0;
1721 1718
1719 if (!ocelot_netdevice_dev_check(dev))
1720 return 0;
1721
1722 if (event == NETDEV_PRECHANGEUPPER && 1722 if (event == NETDEV_PRECHANGEUPPER &&
1723 netif_is_lag_master(info->upper_dev)) { 1723 netif_is_lag_master(info->upper_dev)) {
1724 struct netdev_lag_upper_info *lag_upper_info = info->upper_info; 1724 struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
1725 struct netlink_ext_ack *extack; 1725 struct netlink_ext_ack *extack;
1726 1726
1727 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 1727 if (lag_upper_info &&
1728 lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
1728 extack = netdev_notifier_info_to_extack(&info->info); 1729 extack = netdev_notifier_info_to_extack(&info->info);
1729 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 1730 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
1730 1731
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index e40773c01a44..06ac806052bc 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -523,7 +523,7 @@ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
523#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri)) 523#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
524#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0) 524#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0)
525 525
526void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 mask, 526void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
527 u32 offset); 527 u32 offset);
528#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) 528#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
529#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi)) 529#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 8d1c208f778f..a220cc7c947a 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1208,8 +1208,16 @@ enum qede_remove_mode {
1208static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) 1208static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
1209{ 1209{
1210 struct net_device *ndev = pci_get_drvdata(pdev); 1210 struct net_device *ndev = pci_get_drvdata(pdev);
1211 struct qede_dev *edev = netdev_priv(ndev); 1211 struct qede_dev *edev;
1212 struct qed_dev *cdev = edev->cdev; 1212 struct qed_dev *cdev;
1213
1214 if (!ndev) {
1215 dev_info(&pdev->dev, "Device has already been removed\n");
1216 return;
1217 }
1218
1219 edev = netdev_priv(ndev);
1220 cdev = edev->cdev;
1213 1221
1214 DP_INFO(edev, "Starting qede_remove\n"); 1222 DP_INFO(edev, "Starting qede_remove\n");
1215 1223
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 9c54b715228e..06de59521fc4 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -57,10 +57,10 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
57 if (port->nr_rmnet_devs) 57 if (port->nr_rmnet_devs)
58 return -EINVAL; 58 return -EINVAL;
59 59
60 kfree(port);
61
62 netdev_rx_handler_unregister(real_dev); 60 netdev_rx_handler_unregister(real_dev);
63 61
62 kfree(port);
63
64 /* release reference on real_dev */ 64 /* release reference on real_dev */
65 dev_put(real_dev); 65 dev_put(real_dev);
66 66
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 5064c292b873..c4e961ea44d5 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -916,6 +916,9 @@ static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
916 916
917static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) 917static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
918{ 918{
919 if (reg == 0x1f)
920 return tp->ocp_base == OCP_STD_PHY_BASE ? 0 : tp->ocp_base >> 4;
921
919 if (tp->ocp_base != OCP_STD_PHY_BASE) 922 if (tp->ocp_base != OCP_STD_PHY_BASE)
920 reg -= 0x10; 923 reg -= 0x10;
921 924
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 5a7b0aca1d31..66e60c7e9850 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -432,7 +432,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
432 * bits used depends on the hardware configuration 432 * bits used depends on the hardware configuration
433 * selected at core configuration time. 433 * selected at core configuration time.
434 */ 434 */
435 int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 435 u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
436 ETH_ALEN)) >> (32 - mcbitslog2); 436 ETH_ALEN)) >> (32 - mcbitslog2);
437 /* The most significant bit determines the register to 437 /* The most significant bit determines the register to
438 * use (H/L) while the other 5 bits determine the bit 438 * use (H/L) while the other 5 bits determine the bit
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 5031398e612c..070bd7d1ae4c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -224,6 +224,7 @@ static void dwxgmac2_config_cbs(struct mac_device_info *hw,
224 writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue)); 224 writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));
225 225
226 value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); 226 value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
227 value &= ~XGMAC_TSA;
227 value |= XGMAC_CC | XGMAC_CBS; 228 value |= XGMAC_CC | XGMAC_CBS;
228 writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); 229 writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
229} 230}
@@ -463,7 +464,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
463 value |= XGMAC_FILTER_HMC; 464 value |= XGMAC_FILTER_HMC;
464 465
465 netdev_for_each_mc_addr(ha, dev) { 466 netdev_for_each_mc_addr(ha, dev) {
466 int nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >> 467 u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
467 (32 - mcbitslog2)); 468 (32 - mcbitslog2));
468 mc_filter[nr >> 5] |= (1 << (nr & 0x1F)); 469 mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
469 } 470 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index ae48154f933c..bd5838ce1e8a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -288,7 +288,8 @@ static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash,
288 288
289static int dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len) 289static int dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
290{ 290{
291 *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL; 291 if (le32_to_cpu(p->des3) & XGMAC_RDES3_L34T)
292 *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
292 return 0; 293 return 0;
293} 294}
294 295
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 965cbe3e6f51..f70ca5300b82 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -369,7 +369,7 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
369 dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13; 369 dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
370 dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12; 370 dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
371 dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11; 371 dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
372 dma_cap->av &= !(hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10; 372 dma_cap->av &= !((hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10);
373 dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9; 373 dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9;
374 dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8; 374 dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
375 dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7; 375 dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
@@ -470,6 +470,7 @@ static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
470static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) 470static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
471{ 471{
472 u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); 472 u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
473 u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL);
473 474
474 value &= ~XGMAC_TXQEN; 475 value &= ~XGMAC_TXQEN;
475 if (qmode != MTL_QUEUE_AVB) { 476 if (qmode != MTL_QUEUE_AVB) {
@@ -477,6 +478,7 @@ static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
477 writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel)); 478 writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
478 } else { 479 } else {
479 value |= 0x1 << XGMAC_TXQEN_SHIFT; 480 value |= 0x1 << XGMAC_TXQEN_SHIFT;
481 writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL);
480 } 482 }
481 483
482 writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); 484 writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index a223584f5f9a..252cf48c5816 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -176,6 +176,7 @@
176#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c 176#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
177#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230 177#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
178#define MMC_XGMAC_RX_FPE_FRAG 0x234 178#define MMC_XGMAC_RX_FPE_FRAG 0x234
179#define MMC_XGMAC_RX_IPC_INTR_MASK 0x25c
179 180
180static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode) 181static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
181{ 182{
@@ -333,8 +334,9 @@ static void dwxgmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
333 334
334static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr) 335static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
335{ 336{
336 writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK); 337 writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
337 writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK); 338 writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
339 writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
338} 340}
339 341
340static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest) 342static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4e9c848c67cc..f826365c979d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2996,6 +2996,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2996 stmmac_set_desc_addr(priv, first, des); 2996 stmmac_set_desc_addr(priv, first, des);
2997 tmp_pay_len = pay_len; 2997 tmp_pay_len = pay_len;
2998 des += proto_hdr_len; 2998 des += proto_hdr_len;
2999 pay_len = 0;
2999 } 3000 }
3000 3001
3001 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); 3002 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
@@ -3023,6 +3024,19 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3023 /* Only the last descriptor gets to point to the skb. */ 3024 /* Only the last descriptor gets to point to the skb. */
3024 tx_q->tx_skbuff[tx_q->cur_tx] = skb; 3025 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
3025 3026
3027 /* Manage tx mitigation */
3028 tx_q->tx_count_frames += nfrags + 1;
3029 if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3030 !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3031 priv->hwts_tx_en)) {
3032 stmmac_tx_timer_arm(priv, queue);
3033 } else {
3034 desc = &tx_q->dma_tx[tx_q->cur_tx];
3035 tx_q->tx_count_frames = 0;
3036 stmmac_set_tx_ic(priv, desc);
3037 priv->xstats.tx_set_ic_bit++;
3038 }
3039
3026 /* We've used all descriptors we need for this skb, however, 3040 /* We've used all descriptors we need for this skb, however,
3027 * advance cur_tx so that it references a fresh descriptor. 3041 * advance cur_tx so that it references a fresh descriptor.
3028 * ndo_start_xmit will fill this descriptor the next time it's 3042 * ndo_start_xmit will fill this descriptor the next time it's
@@ -3040,19 +3054,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3040 priv->xstats.tx_tso_frames++; 3054 priv->xstats.tx_tso_frames++;
3041 priv->xstats.tx_tso_nfrags += nfrags; 3055 priv->xstats.tx_tso_nfrags += nfrags;
3042 3056
3043 /* Manage tx mitigation */
3044 tx_q->tx_count_frames += nfrags + 1;
3045 if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3046 !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
3047 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3048 priv->hwts_tx_en)) {
3049 stmmac_tx_timer_arm(priv, queue);
3050 } else {
3051 tx_q->tx_count_frames = 0;
3052 stmmac_set_tx_ic(priv, desc);
3053 priv->xstats.tx_set_ic_bit++;
3054 }
3055
3056 if (priv->sarc_type) 3057 if (priv->sarc_type)
3057 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 3058 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3058 3059
@@ -3224,6 +3225,27 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3224 /* Only the last descriptor gets to point to the skb. */ 3225 /* Only the last descriptor gets to point to the skb. */
3225 tx_q->tx_skbuff[entry] = skb; 3226 tx_q->tx_skbuff[entry] = skb;
3226 3227
3228 /* According to the coalesce parameter the IC bit for the latest
3229 * segment is reset and the timer re-started to clean the tx status.
3230 * This approach takes care about the fragments: desc is the first
3231 * element in case of no SG.
3232 */
3233 tx_q->tx_count_frames += nfrags + 1;
3234 if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3235 !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3236 priv->hwts_tx_en)) {
3237 stmmac_tx_timer_arm(priv, queue);
3238 } else {
3239 if (likely(priv->extend_desc))
3240 desc = &tx_q->dma_etx[entry].basic;
3241 else
3242 desc = &tx_q->dma_tx[entry];
3243
3244 tx_q->tx_count_frames = 0;
3245 stmmac_set_tx_ic(priv, desc);
3246 priv->xstats.tx_set_ic_bit++;
3247 }
3248
3227 /* We've used all descriptors we need for this skb, however, 3249 /* We've used all descriptors we need for this skb, however,
3228 * advance cur_tx so that it references a fresh descriptor. 3250 * advance cur_tx so that it references a fresh descriptor.
3229 * ndo_start_xmit will fill this descriptor the next time it's 3251 * ndo_start_xmit will fill this descriptor the next time it's
@@ -3259,23 +3281,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3259 3281
3260 dev->stats.tx_bytes += skb->len; 3282 dev->stats.tx_bytes += skb->len;
3261 3283
3262 /* According to the coalesce parameter the IC bit for the latest
3263 * segment is reset and the timer re-started to clean the tx status.
3264 * This approach takes care about the fragments: desc is the first
3265 * element in case of no SG.
3266 */
3267 tx_q->tx_count_frames += nfrags + 1;
3268 if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3269 !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
3270 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3271 priv->hwts_tx_en)) {
3272 stmmac_tx_timer_arm(priv, queue);
3273 } else {
3274 tx_q->tx_count_frames = 0;
3275 stmmac_set_tx_ic(priv, desc);
3276 priv->xstats.tx_set_ic_bit++;
3277 }
3278
3279 if (priv->sarc_type) 3284 if (priv->sarc_type)
3280 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 3285 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3281 3286
@@ -3506,8 +3511,6 @@ read_again:
3506 if (unlikely(status & dma_own)) 3511 if (unlikely(status & dma_own))
3507 break; 3512 break;
3508 3513
3509 count++;
3510
3511 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE); 3514 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3512 next_entry = rx_q->cur_rx; 3515 next_entry = rx_q->cur_rx;
3513 3516
@@ -3534,6 +3537,7 @@ read_again:
3534 goto read_again; 3537 goto read_again;
3535 if (unlikely(error)) { 3538 if (unlikely(error)) {
3536 dev_kfree_skb(skb); 3539 dev_kfree_skb(skb);
3540 count++;
3537 continue; 3541 continue;
3538 } 3542 }
3539 3543
@@ -3573,6 +3577,7 @@ read_again:
3573 skb = napi_alloc_skb(&ch->rx_napi, len); 3577 skb = napi_alloc_skb(&ch->rx_napi, len);
3574 if (!skb) { 3578 if (!skb) {
3575 priv->dev->stats.rx_dropped++; 3579 priv->dev->stats.rx_dropped++;
3580 count++;
3576 continue; 3581 continue;
3577 } 3582 }
3578 3583
@@ -3638,6 +3643,7 @@ read_again:
3638 3643
3639 priv->dev->stats.rx_packets++; 3644 priv->dev->stats.rx_packets++;
3640 priv->dev->stats.rx_bytes += len; 3645 priv->dev->stats.rx_bytes += len;
3646 count++;
3641 } 3647 }
3642 3648
3643 if (status & rx_not_ls) { 3649 if (status & rx_not_ls) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index e4ac3c401432..ac3f658105c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -6,7 +6,9 @@
6 * Author: Jose Abreu <joabreu@synopsys.com> 6 * Author: Jose Abreu <joabreu@synopsys.com>
7 */ 7 */
8 8
9#include <linux/bitrev.h>
9#include <linux/completion.h> 10#include <linux/completion.h>
11#include <linux/crc32.h>
10#include <linux/ethtool.h> 12#include <linux/ethtool.h>
11#include <linux/ip.h> 13#include <linux/ip.h>
12#include <linux/phy.h> 14#include <linux/phy.h>
@@ -485,12 +487,48 @@ static int stmmac_filter_check(struct stmmac_priv *priv)
485 return -EOPNOTSUPP; 487 return -EOPNOTSUPP;
486} 488}
487 489
490static bool stmmac_hash_check(struct stmmac_priv *priv, unsigned char *addr)
491{
492 int mc_offset = 32 - priv->hw->mcast_bits_log2;
493 struct netdev_hw_addr *ha;
494 u32 hash, hash_nr;
495
496 /* First compute the hash for desired addr */
497 hash = bitrev32(~crc32_le(~0, addr, 6)) >> mc_offset;
498 hash_nr = hash >> 5;
499 hash = 1 << (hash & 0x1f);
500
501 /* Now, check if it collides with any existing one */
502 netdev_for_each_mc_addr(ha, priv->dev) {
503 u32 nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> mc_offset;
504 if (((nr >> 5) == hash_nr) && ((1 << (nr & 0x1f)) == hash))
505 return false;
506 }
507
508 /* No collisions, address is good to go */
509 return true;
510}
511
512static bool stmmac_perfect_check(struct stmmac_priv *priv, unsigned char *addr)
513{
514 struct netdev_hw_addr *ha;
515
516 /* Check if it collides with any existing one */
517 netdev_for_each_uc_addr(ha, priv->dev) {
518 if (!memcmp(ha->addr, addr, ETH_ALEN))
519 return false;
520 }
521
522 /* No collisions, address is good to go */
523 return true;
524}
525
488static int stmmac_test_hfilt(struct stmmac_priv *priv) 526static int stmmac_test_hfilt(struct stmmac_priv *priv)
489{ 527{
490 unsigned char gd_addr[ETH_ALEN] = {0x01, 0xee, 0xdd, 0xcc, 0xbb, 0xaa}; 528 unsigned char gd_addr[ETH_ALEN] = {0xf1, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
491 unsigned char bd_addr[ETH_ALEN] = {0x01, 0x01, 0x02, 0x03, 0x04, 0x05}; 529 unsigned char bd_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
492 struct stmmac_packet_attrs attr = { }; 530 struct stmmac_packet_attrs attr = { };
493 int ret; 531 int ret, tries = 256;
494 532
495 ret = stmmac_filter_check(priv); 533 ret = stmmac_filter_check(priv);
496 if (ret) 534 if (ret)
@@ -499,6 +537,16 @@ static int stmmac_test_hfilt(struct stmmac_priv *priv)
499 if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins) 537 if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
500 return -EOPNOTSUPP; 538 return -EOPNOTSUPP;
501 539
540 while (--tries) {
541 /* We only need to check the bd_addr for collisions */
542 bd_addr[ETH_ALEN - 1] = tries;
543 if (stmmac_hash_check(priv, bd_addr))
544 break;
545 }
546
547 if (!tries)
548 return -EOPNOTSUPP;
549
502 ret = dev_mc_add(priv->dev, gd_addr); 550 ret = dev_mc_add(priv->dev, gd_addr);
503 if (ret) 551 if (ret)
504 return ret; 552 return ret;
@@ -523,13 +571,25 @@ cleanup:
523 571
524static int stmmac_test_pfilt(struct stmmac_priv *priv) 572static int stmmac_test_pfilt(struct stmmac_priv *priv)
525{ 573{
526 unsigned char gd_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77}; 574 unsigned char gd_addr[ETH_ALEN] = {0xf0, 0x01, 0x44, 0x55, 0x66, 0x77};
527 unsigned char bd_addr[ETH_ALEN] = {0x08, 0x00, 0x22, 0x33, 0x44, 0x55}; 575 unsigned char bd_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
528 struct stmmac_packet_attrs attr = { }; 576 struct stmmac_packet_attrs attr = { };
529 int ret; 577 int ret, tries = 256;
530 578
531 if (stmmac_filter_check(priv)) 579 if (stmmac_filter_check(priv))
532 return -EOPNOTSUPP; 580 return -EOPNOTSUPP;
581 if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
582 return -EOPNOTSUPP;
583
584 while (--tries) {
585 /* We only need to check the bd_addr for collisions */
586 bd_addr[ETH_ALEN - 1] = tries;
587 if (stmmac_perfect_check(priv, bd_addr))
588 break;
589 }
590
591 if (!tries)
592 return -EOPNOTSUPP;
533 593
534 ret = dev_uc_add(priv->dev, gd_addr); 594 ret = dev_uc_add(priv->dev, gd_addr);
535 if (ret) 595 if (ret)
@@ -553,39 +613,31 @@ cleanup:
553 return ret; 613 return ret;
554} 614}
555 615
556static int stmmac_dummy_sync(struct net_device *netdev, const u8 *addr)
557{
558 return 0;
559}
560
561static void stmmac_test_set_rx_mode(struct net_device *netdev)
562{
563 /* As we are in test mode of ethtool we already own the rtnl lock
564 * so no address will change from user. We can just call the
565 * ndo_set_rx_mode() callback directly */
566 if (netdev->netdev_ops->ndo_set_rx_mode)
567 netdev->netdev_ops->ndo_set_rx_mode(netdev);
568}
569
570static int stmmac_test_mcfilt(struct stmmac_priv *priv) 616static int stmmac_test_mcfilt(struct stmmac_priv *priv)
571{ 617{
572 unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77}; 618 unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
573 unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77}; 619 unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
574 struct stmmac_packet_attrs attr = { }; 620 struct stmmac_packet_attrs attr = { };
575 int ret; 621 int ret, tries = 256;
576 622
577 if (stmmac_filter_check(priv)) 623 if (stmmac_filter_check(priv))
578 return -EOPNOTSUPP; 624 return -EOPNOTSUPP;
579 if (!priv->hw->multicast_filter_bins) 625 if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
580 return -EOPNOTSUPP; 626 return -EOPNOTSUPP;
581 627
582 /* Remove all MC addresses */ 628 while (--tries) {
583 __dev_mc_unsync(priv->dev, NULL); 629 /* We only need to check the mc_addr for collisions */
584 stmmac_test_set_rx_mode(priv->dev); 630 mc_addr[ETH_ALEN - 1] = tries;
631 if (stmmac_hash_check(priv, mc_addr))
632 break;
633 }
634
635 if (!tries)
636 return -EOPNOTSUPP;
585 637
586 ret = dev_uc_add(priv->dev, uc_addr); 638 ret = dev_uc_add(priv->dev, uc_addr);
587 if (ret) 639 if (ret)
588 goto cleanup; 640 return ret;
589 641
590 attr.dst = uc_addr; 642 attr.dst = uc_addr;
591 643
@@ -602,30 +654,34 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv)
602 654
603cleanup: 655cleanup:
604 dev_uc_del(priv->dev, uc_addr); 656 dev_uc_del(priv->dev, uc_addr);
605 __dev_mc_sync(priv->dev, stmmac_dummy_sync, NULL);
606 stmmac_test_set_rx_mode(priv->dev);
607 return ret; 657 return ret;
608} 658}
609 659
610static int stmmac_test_ucfilt(struct stmmac_priv *priv) 660static int stmmac_test_ucfilt(struct stmmac_priv *priv)
611{ 661{
612 unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77}; 662 unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
613 unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77}; 663 unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
614 struct stmmac_packet_attrs attr = { }; 664 struct stmmac_packet_attrs attr = { };
615 int ret; 665 int ret, tries = 256;
616 666
617 if (stmmac_filter_check(priv)) 667 if (stmmac_filter_check(priv))
618 return -EOPNOTSUPP; 668 return -EOPNOTSUPP;
619 if (!priv->hw->multicast_filter_bins) 669 if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
620 return -EOPNOTSUPP; 670 return -EOPNOTSUPP;
621 671
622 /* Remove all UC addresses */ 672 while (--tries) {
623 __dev_uc_unsync(priv->dev, NULL); 673 /* We only need to check the uc_addr for collisions */
624 stmmac_test_set_rx_mode(priv->dev); 674 uc_addr[ETH_ALEN - 1] = tries;
675 if (stmmac_perfect_check(priv, uc_addr))
676 break;
677 }
678
679 if (!tries)
680 return -EOPNOTSUPP;
625 681
626 ret = dev_mc_add(priv->dev, mc_addr); 682 ret = dev_mc_add(priv->dev, mc_addr);
627 if (ret) 683 if (ret)
628 goto cleanup; 684 return ret;
629 685
630 attr.dst = mc_addr; 686 attr.dst = mc_addr;
631 687
@@ -642,8 +698,6 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv)
642 698
643cleanup: 699cleanup:
644 dev_mc_del(priv->dev, mc_addr); 700 dev_mc_del(priv->dev, mc_addr);
645 __dev_uc_sync(priv->dev, stmmac_dummy_sync, NULL);
646 stmmac_test_set_rx_mode(priv->dev);
647 return ret; 701 return ret;
648} 702}
649 703
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 00cab3f43a4c..a245597a3902 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -578,8 +578,8 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
578 /* read current mtu value from device */ 578 /* read current mtu value from device */
579 err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE, 579 err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
580 USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, 580 USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
581 0, iface_no, &max_datagram_size, 2); 581 0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
582 if (err < 0) { 582 if (err < sizeof(max_datagram_size)) {
583 dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n"); 583 dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
584 goto out; 584 goto out;
585 } 585 }
@@ -590,7 +590,7 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
590 max_datagram_size = cpu_to_le16(ctx->max_datagram_size); 590 max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
591 err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE, 591 err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
592 USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, 592 USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
593 0, iface_no, &max_datagram_size, 2); 593 0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
594 if (err < 0) 594 if (err < 0)
595 dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n"); 595 dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
596 596
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 596428ec71df..56d334b9ad45 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1362,6 +1362,7 @@ static const struct usb_device_id products[] = {
1362 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ 1362 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
1363 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ 1363 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
1364 {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ 1364 {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
1365 {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
1365 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ 1366 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
1366 {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ 1367 {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
1367 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ 1368 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index 1cd113c8d7cb..ad0abb1f0bae 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -259,7 +259,7 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev,
259 *fw_vsc_cfg, len); 259 *fw_vsc_cfg, len);
260 260
261 if (r) { 261 if (r) {
262 devm_kfree(dev, fw_vsc_cfg); 262 devm_kfree(dev, *fw_vsc_cfg);
263 goto vsc_read_err; 263 goto vsc_read_err;
264 } 264 }
265 } else { 265 } else {
diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c
index f9ac176cf257..2ce17932a073 100644
--- a/drivers/nfc/st21nfca/core.c
+++ b/drivers/nfc/st21nfca/core.c
@@ -708,6 +708,7 @@ static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
708 NFC_PROTO_FELICA_MASK; 708 NFC_PROTO_FELICA_MASK;
709 } else { 709 } else {
710 kfree_skb(nfcid_skb); 710 kfree_skb(nfcid_skb);
711 nfcid_skb = NULL;
711 /* P2P in type A */ 712 /* P2P in type A */
712 r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE, 713 r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE,
713 ST21NFCA_RF_READER_F_NFCID1, 714 ST21NFCA_RF_READER_F_NFCID1,
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index fc99a40c1ec4..e0f064dcbd02 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -158,9 +158,11 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
158 struct nvme_ns *ns; 158 struct nvme_ns *ns;
159 159
160 mutex_lock(&ctrl->scan_lock); 160 mutex_lock(&ctrl->scan_lock);
161 down_read(&ctrl->namespaces_rwsem);
161 list_for_each_entry(ns, &ctrl->namespaces, list) 162 list_for_each_entry(ns, &ctrl->namespaces, list)
162 if (nvme_mpath_clear_current_path(ns)) 163 if (nvme_mpath_clear_current_path(ns))
163 kblockd_schedule_work(&ns->head->requeue_work); 164 kblockd_schedule_work(&ns->head->requeue_work);
165 up_read(&ctrl->namespaces_rwsem);
164 mutex_unlock(&ctrl->scan_lock); 166 mutex_unlock(&ctrl->scan_lock);
165} 167}
166 168
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f19a28b4e997..cb4c3000a57e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2133,8 +2133,16 @@ err_unreg_client:
2133 2133
2134static void __exit nvme_rdma_cleanup_module(void) 2134static void __exit nvme_rdma_cleanup_module(void)
2135{ 2135{
2136 struct nvme_rdma_ctrl *ctrl;
2137
2136 nvmf_unregister_transport(&nvme_rdma_transport); 2138 nvmf_unregister_transport(&nvme_rdma_transport);
2137 ib_unregister_client(&nvme_rdma_ib_client); 2139 ib_unregister_client(&nvme_rdma_ib_client);
2140
2141 mutex_lock(&nvme_rdma_ctrl_mutex);
2142 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
2143 nvme_delete_ctrl(&ctrl->ctrl);
2144 mutex_unlock(&nvme_rdma_ctrl_mutex);
2145 flush_workqueue(nvme_delete_wq);
2138} 2146}
2139 2147
2140module_init(nvme_rdma_init_module); 2148module_init(nvme_rdma_init_module);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index c6251eac8946..2c419fa5d1c1 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -147,6 +147,7 @@ struct chv_pin_context {
147 * @pctldesc: Pin controller description 147 * @pctldesc: Pin controller description
148 * @pctldev: Pointer to the pin controller device 148 * @pctldev: Pointer to the pin controller device
149 * @chip: GPIO chip in this pin controller 149 * @chip: GPIO chip in this pin controller
150 * @irqchip: IRQ chip in this pin controller
150 * @regs: MMIO registers 151 * @regs: MMIO registers
151 * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO 152 * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO
152 * offset (in GPIO number space) 153 * offset (in GPIO number space)
@@ -162,6 +163,7 @@ struct chv_pinctrl {
162 struct pinctrl_desc pctldesc; 163 struct pinctrl_desc pctldesc;
163 struct pinctrl_dev *pctldev; 164 struct pinctrl_dev *pctldev;
164 struct gpio_chip chip; 165 struct gpio_chip chip;
166 struct irq_chip irqchip;
165 void __iomem *regs; 167 void __iomem *regs;
166 unsigned intr_lines[16]; 168 unsigned intr_lines[16];
167 const struct chv_community *community; 169 const struct chv_community *community;
@@ -1466,16 +1468,6 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
1466 return 0; 1468 return 0;
1467} 1469}
1468 1470
1469static struct irq_chip chv_gpio_irqchip = {
1470 .name = "chv-gpio",
1471 .irq_startup = chv_gpio_irq_startup,
1472 .irq_ack = chv_gpio_irq_ack,
1473 .irq_mask = chv_gpio_irq_mask,
1474 .irq_unmask = chv_gpio_irq_unmask,
1475 .irq_set_type = chv_gpio_irq_type,
1476 .flags = IRQCHIP_SKIP_SET_WAKE,
1477};
1478
1479static void chv_gpio_irq_handler(struct irq_desc *desc) 1471static void chv_gpio_irq_handler(struct irq_desc *desc)
1480{ 1472{
1481 struct gpio_chip *gc = irq_desc_get_handler_data(desc); 1473 struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -1559,7 +1551,7 @@ static void chv_init_irq_valid_mask(struct gpio_chip *chip,
1559 intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; 1551 intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
1560 1552
1561 if (intsel >= community->nirqs) 1553 if (intsel >= community->nirqs)
1562 clear_bit(i, valid_mask); 1554 clear_bit(desc->number, valid_mask);
1563 } 1555 }
1564} 1556}
1565 1557
@@ -1625,7 +1617,15 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1625 } 1617 }
1626 } 1618 }
1627 1619
1628 ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, 1620 pctrl->irqchip.name = "chv-gpio";
1621 pctrl->irqchip.irq_startup = chv_gpio_irq_startup;
1622 pctrl->irqchip.irq_ack = chv_gpio_irq_ack;
1623 pctrl->irqchip.irq_mask = chv_gpio_irq_mask;
1624 pctrl->irqchip.irq_unmask = chv_gpio_irq_unmask;
1625 pctrl->irqchip.irq_set_type = chv_gpio_irq_type;
1626 pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE;
1627
1628 ret = gpiochip_irqchip_add(chip, &pctrl->irqchip, 0,
1629 handle_bad_irq, IRQ_TYPE_NONE); 1629 handle_bad_irq, IRQ_TYPE_NONE);
1630 if (ret) { 1630 if (ret) {
1631 dev_err(pctrl->dev, "failed to add IRQ chip\n"); 1631 dev_err(pctrl->dev, "failed to add IRQ chip\n");
@@ -1642,7 +1642,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1642 } 1642 }
1643 } 1643 }
1644 1644
1645 gpiochip_set_chained_irqchip(chip, &chv_gpio_irqchip, irq, 1645 gpiochip_set_chained_irqchip(chip, &pctrl->irqchip, irq,
1646 chv_gpio_irq_handler); 1646 chv_gpio_irq_handler);
1647 return 0; 1647 return 0;
1648} 1648}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index bc013599a9a3..83981ad66a71 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -52,6 +52,7 @@
52#define PADCFG0_GPIROUTNMI BIT(17) 52#define PADCFG0_GPIROUTNMI BIT(17)
53#define PADCFG0_PMODE_SHIFT 10 53#define PADCFG0_PMODE_SHIFT 10
54#define PADCFG0_PMODE_MASK GENMASK(13, 10) 54#define PADCFG0_PMODE_MASK GENMASK(13, 10)
55#define PADCFG0_PMODE_GPIO 0
55#define PADCFG0_GPIORXDIS BIT(9) 56#define PADCFG0_GPIORXDIS BIT(9)
56#define PADCFG0_GPIOTXDIS BIT(8) 57#define PADCFG0_GPIOTXDIS BIT(8)
57#define PADCFG0_GPIORXSTATE BIT(1) 58#define PADCFG0_GPIORXSTATE BIT(1)
@@ -332,7 +333,7 @@ static void intel_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
332 cfg1 = readl(intel_get_padcfg(pctrl, pin, PADCFG1)); 333 cfg1 = readl(intel_get_padcfg(pctrl, pin, PADCFG1));
333 334
334 mode = (cfg0 & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT; 335 mode = (cfg0 & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT;
335 if (!mode) 336 if (mode == PADCFG0_PMODE_GPIO)
336 seq_puts(s, "GPIO "); 337 seq_puts(s, "GPIO ");
337 else 338 else
338 seq_printf(s, "mode %d ", mode); 339 seq_printf(s, "mode %d ", mode);
@@ -458,6 +459,11 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
458 writel(value, padcfg0); 459 writel(value, padcfg0);
459} 460}
460 461
462static int intel_gpio_get_gpio_mode(void __iomem *padcfg0)
463{
464 return (readl(padcfg0) & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT;
465}
466
461static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) 467static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
462{ 468{
463 u32 value; 469 u32 value;
@@ -491,7 +497,20 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
491 } 497 }
492 498
493 padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); 499 padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
500
501 /*
502 * If pin is already configured in GPIO mode, we assume that
503 * firmware provides correct settings. In such case we avoid
504 * potential glitches on the pin. Otherwise, for the pin in
505 * alternative mode, consumer has to supply respective flags.
506 */
507 if (intel_gpio_get_gpio_mode(padcfg0) == PADCFG0_PMODE_GPIO) {
508 raw_spin_unlock_irqrestore(&pctrl->lock, flags);
509 return 0;
510 }
511
494 intel_gpio_set_gpio_mode(padcfg0); 512 intel_gpio_set_gpio_mode(padcfg0);
513
495 /* Disable TX buffer and enable RX (this will be input) */ 514 /* Disable TX buffer and enable RX (this will be input) */
496 __intel_gpio_set_direction(padcfg0, true); 515 __intel_gpio_set_direction(padcfg0, true);
497 516
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index 564660028fcc..ccdf0bb21414 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -585,19 +585,6 @@ static int stmfx_pinctrl_gpio_function_enable(struct stmfx_pinctrl *pctl)
585 return stmfx_function_enable(pctl->stmfx, func); 585 return stmfx_function_enable(pctl->stmfx, func);
586} 586}
587 587
588static int stmfx_pinctrl_gpio_init_valid_mask(struct gpio_chip *gc,
589 unsigned long *valid_mask,
590 unsigned int ngpios)
591{
592 struct stmfx_pinctrl *pctl = gpiochip_get_data(gc);
593 u32 n;
594
595 for_each_clear_bit(n, &pctl->gpio_valid_mask, ngpios)
596 clear_bit(n, valid_mask);
597
598 return 0;
599}
600
601static int stmfx_pinctrl_probe(struct platform_device *pdev) 588static int stmfx_pinctrl_probe(struct platform_device *pdev)
602{ 589{
603 struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent); 590 struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
@@ -660,7 +647,6 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
660 pctl->gpio_chip.ngpio = pctl->pctl_desc.npins; 647 pctl->gpio_chip.ngpio = pctl->pctl_desc.npins;
661 pctl->gpio_chip.can_sleep = true; 648 pctl->gpio_chip.can_sleep = true;
662 pctl->gpio_chip.of_node = np; 649 pctl->gpio_chip.of_node = np;
663 pctl->gpio_chip.init_valid_mask = stmfx_pinctrl_gpio_init_valid_mask;
664 650
665 ret = devm_gpiochip_add_data(pctl->dev, &pctl->gpio_chip, pctl); 651 ret = devm_gpiochip_add_data(pctl->dev, &pctl->gpio_chip, pctl);
666 if (ret) { 652 if (ret) {
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
index 56c38cfae92c..1f829edd8ee7 100644
--- a/drivers/pwm/pwm-bcm-iproc.c
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -187,6 +187,7 @@ static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
187static const struct pwm_ops iproc_pwm_ops = { 187static const struct pwm_ops iproc_pwm_ops = {
188 .apply = iproc_pwmc_apply, 188 .apply = iproc_pwmc_apply,
189 .get_state = iproc_pwmc_get_state, 189 .get_state = iproc_pwmc_get_state,
190 .owner = THIS_MODULE,
190}; 191};
191 192
192static int iproc_pwmc_probe(struct platform_device *pdev) 193static int iproc_pwmc_probe(struct platform_device *pdev)
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 213ff40dda11..3c9a64c1b7a8 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -76,7 +76,6 @@ static const char *rcdev_name(struct reset_controller_dev *rcdev)
76 * of_reset_simple_xlate - translate reset_spec to the reset line number 76 * of_reset_simple_xlate - translate reset_spec to the reset line number
77 * @rcdev: a pointer to the reset controller device 77 * @rcdev: a pointer to the reset controller device
78 * @reset_spec: reset line specifier as found in the device tree 78 * @reset_spec: reset line specifier as found in the device tree
79 * @flags: a flags pointer to fill in (optional)
80 * 79 *
81 * This simple translation function should be used for reset controllers 80 * This simple translation function should be used for reset controllers
82 * with 1:1 mapping, where reset lines can be indexed by number without gaps. 81 * with 1:1 mapping, where reset lines can be indexed by number without gaps.
@@ -748,6 +747,7 @@ static void reset_control_array_put(struct reset_control_array *resets)
748 for (i = 0; i < resets->num_rstcs; i++) 747 for (i = 0; i < resets->num_rstcs; i++)
749 __reset_control_put_internal(resets->rstc[i]); 748 __reset_control_put_internal(resets->rstc[i]);
750 mutex_unlock(&reset_list_mutex); 749 mutex_unlock(&reset_list_mutex);
750 kfree(resets);
751} 751}
752 752
753/** 753/**
@@ -825,9 +825,10 @@ int __device_reset(struct device *dev, bool optional)
825} 825}
826EXPORT_SYMBOL_GPL(__device_reset); 826EXPORT_SYMBOL_GPL(__device_reset);
827 827
828/** 828/*
829 * APIs to manage an array of reset controls. 829 * APIs to manage an array of reset controls.
830 */ 830 */
831
831/** 832/**
832 * of_reset_control_get_count - Count number of resets available with a device 833 * of_reset_control_get_count - Count number of resets available with a device
833 * 834 *
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 6afad68e5ba2..238240984bc1 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -76,9 +76,11 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
76 * ensures no active vp_list traversal while the vport is removed 76 * ensures no active vp_list traversal while the vport is removed
77 * from the queue) 77 * from the queue)
78 */ 78 */
79 for (i = 0; i < 10 && atomic_read(&vha->vref_count); i++) 79 for (i = 0; i < 10; i++) {
80 wait_event_timeout(vha->vref_waitq, 80 if (wait_event_timeout(vha->vref_waitq,
81 atomic_read(&vha->vref_count), HZ); 81 !atomic_read(&vha->vref_count), HZ) > 0)
82 break;
83 }
82 84
83 spin_lock_irqsave(&ha->vport_slock, flags); 85 spin_lock_irqsave(&ha->vport_slock, flags);
84 if (atomic_read(&vha->vref_count)) { 86 if (atomic_read(&vha->vref_count)) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 337162ac3a77..726ad4cbf4a6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1119,9 +1119,11 @@ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
1119 1119
1120 qla2x00_mark_all_devices_lost(vha, 0); 1120 qla2x00_mark_all_devices_lost(vha, 0);
1121 1121
1122 for (i = 0; i < 10; i++) 1122 for (i = 0; i < 10; i++) {
1123 wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), 1123 if (wait_event_timeout(vha->fcport_waitQ,
1124 HZ); 1124 test_fcport_count(vha), HZ) > 0)
1125 break;
1126 }
1125 1127
1126 flush_workqueue(vha->hw->wq); 1128 flush_workqueue(vha->hw->wq);
1127} 1129}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 5447738906ac..91c007d26c1e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1883,7 +1883,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
1883{ 1883{
1884 unsigned int cmd_size, sgl_size; 1884 unsigned int cmd_size, sgl_size;
1885 1885
1886 sgl_size = scsi_mq_inline_sgl_size(shost); 1886 sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
1887 scsi_mq_inline_sgl_size(shost));
1887 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; 1888 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
1888 if (scsi_host_get_prot(shost)) 1889 if (scsi_host_get_prot(shost))
1889 cmd_size += sizeof(struct scsi_data_buffer) + 1890 cmd_size += sizeof(struct scsi_data_buffer) +
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index de4019dc0f0b..1efc69e194f8 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -263,25 +263,16 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
263 int result = cmd->result; 263 int result = cmd->result;
264 struct request *rq = cmd->request; 264 struct request *rq = cmd->request;
265 265
266 switch (req_op(rq)) { 266 if (req_op(rq) == REQ_OP_ZONE_RESET &&
267 case REQ_OP_ZONE_RESET: 267 result &&
268 case REQ_OP_ZONE_RESET_ALL: 268 sshdr->sense_key == ILLEGAL_REQUEST &&
269 269 sshdr->asc == 0x24) {
270 if (result && 270 /*
271 sshdr->sense_key == ILLEGAL_REQUEST && 271 * INVALID FIELD IN CDB error: reset of a conventional
272 sshdr->asc == 0x24) 272 * zone was attempted. Nothing to worry about, so be
273 /* 273 * quiet about the error.
274 * INVALID FIELD IN CDB error: reset of a conventional 274 */
275 * zone was attempted. Nothing to worry about, so be 275 rq->rq_flags |= RQF_QUIET;
276 * quiet about the error.
277 */
278 rq->rq_flags |= RQF_QUIET;
279 break;
280
281 case REQ_OP_WRITE:
282 case REQ_OP_WRITE_ZEROES:
283 case REQ_OP_WRITE_SAME:
284 break;
285 } 276 }
286} 277}
287 278
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index d9231bd3c691..98b9d9a902ae 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -249,13 +249,13 @@ static struct genpd_power_state imx6_pm_domain_pu_state = {
249}; 249};
250 250
251static struct imx_pm_domain imx_gpc_domains[] = { 251static struct imx_pm_domain imx_gpc_domains[] = {
252 [GPC_PGC_DOMAIN_ARM] { 252 [GPC_PGC_DOMAIN_ARM] = {
253 .base = { 253 .base = {
254 .name = "ARM", 254 .name = "ARM",
255 .flags = GENPD_FLAG_ALWAYS_ON, 255 .flags = GENPD_FLAG_ALWAYS_ON,
256 }, 256 },
257 }, 257 },
258 [GPC_PGC_DOMAIN_PU] { 258 [GPC_PGC_DOMAIN_PU] = {
259 .base = { 259 .base = {
260 .name = "PU", 260 .name = "PU",
261 .power_off = imx6_pm_domain_power_off, 261 .power_off = imx6_pm_domain_power_off,
@@ -266,7 +266,7 @@ static struct imx_pm_domain imx_gpc_domains[] = {
266 .reg_offs = 0x260, 266 .reg_offs = 0x260,
267 .cntr_pdn_bit = 0, 267 .cntr_pdn_bit = 0,
268 }, 268 },
269 [GPC_PGC_DOMAIN_DISPLAY] { 269 [GPC_PGC_DOMAIN_DISPLAY] = {
270 .base = { 270 .base = {
271 .name = "DISPLAY", 271 .name = "DISPLAY",
272 .power_off = imx6_pm_domain_power_off, 272 .power_off = imx6_pm_domain_power_off,
@@ -275,7 +275,7 @@ static struct imx_pm_domain imx_gpc_domains[] = {
275 .reg_offs = 0x240, 275 .reg_offs = 0x240,
276 .cntr_pdn_bit = 4, 276 .cntr_pdn_bit = 4,
277 }, 277 },
278 [GPC_PGC_DOMAIN_PCI] { 278 [GPC_PGC_DOMAIN_PCI] = {
279 .base = { 279 .base = {
280 .name = "PCI", 280 .name = "PCI",
281 .power_off = imx6_pm_domain_power_off, 281 .power_off = imx6_pm_domain_power_off,
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index f518273cfbe3..c8c80df090d1 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -5,6 +5,7 @@
5 5
6menuconfig SOUNDWIRE 6menuconfig SOUNDWIRE
7 tristate "SoundWire support" 7 tristate "SoundWire support"
8 depends on ACPI || OF
8 help 9 help
9 SoundWire is a 2-Pin interface with data and clock line ratified 10 SoundWire is a 2-Pin interface with data and clock line ratified
10 by the MIPI Alliance. SoundWire is used for transporting data 11 by the MIPI Alliance. SoundWire is used for transporting data
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index f1e38a293967..13c54eac0cc3 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -900,7 +900,7 @@ static int intel_register_dai(struct sdw_intel *sdw)
900 /* Create PCM DAIs */ 900 /* Create PCM DAIs */
901 stream = &cdns->pcm; 901 stream = &cdns->pcm;
902 902
903 ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, stream->num_in, 903 ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
904 off, stream->num_ch_in, true); 904 off, stream->num_ch_in, true);
905 if (ret) 905 if (ret)
906 return ret; 906 return ret;
@@ -931,7 +931,7 @@ static int intel_register_dai(struct sdw_intel *sdw)
931 if (ret) 931 if (ret)
932 return ret; 932 return ret;
933 933
934 off += cdns->pdm.num_bd; 934 off += cdns->pdm.num_out;
935 ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pdm.num_bd, 935 ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pdm.num_bd,
936 off, stream->num_ch_bd, false); 936 off, stream->num_ch_bd, false);
937 if (ret) 937 if (ret)
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index 48a63ca130d2..6473fa602f82 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -128,7 +128,8 @@ int sdw_of_find_slaves(struct sdw_bus *bus)
128 struct device_node *node; 128 struct device_node *node;
129 129
130 for_each_child_of_node(bus->dev->of_node, node) { 130 for_each_child_of_node(bus->dev->of_node, node) {
131 int link_id, sdw_version, ret, len; 131 int link_id, ret, len;
132 unsigned int sdw_version;
132 const char *compat = NULL; 133 const char *compat = NULL;
133 struct sdw_slave_id id; 134 struct sdw_slave_id id;
134 const __be32 *addr; 135 const __be32 *addr;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 6f1fa4c849a1..927d29eb92c6 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -125,4 +125,6 @@ source "drivers/staging/exfat/Kconfig"
125 125
126source "drivers/staging/qlge/Kconfig" 126source "drivers/staging/qlge/Kconfig"
127 127
128source "drivers/staging/vboxsf/Kconfig"
129
128endif # STAGING 130endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index a90f9b308c8d..f01f04199073 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -53,3 +53,4 @@ obj-$(CONFIG_UWB) += uwb/
53obj-$(CONFIG_USB_WUSB) += wusbcore/ 53obj-$(CONFIG_USB_WUSB) += wusbcore/
54obj-$(CONFIG_EXFAT_FS) += exfat/ 54obj-$(CONFIG_EXFAT_FS) += exfat/
55obj-$(CONFIG_QLGE) += qlge/ 55obj-$(CONFIG_QLGE) += qlge/
56obj-$(CONFIG_VBOXSF_FS) += vboxsf/
diff --git a/drivers/staging/vboxsf/Kconfig b/drivers/staging/vboxsf/Kconfig
new file mode 100644
index 000000000000..b84586ae08b3
--- /dev/null
+++ b/drivers/staging/vboxsf/Kconfig
@@ -0,0 +1,10 @@
1config VBOXSF_FS
2 tristate "VirtualBox guest shared folder (vboxsf) support"
3 depends on X86 && VBOXGUEST
4 select NLS
5 help
6 VirtualBox hosts can share folders with guests, this driver
7 implements the Linux-guest side of this allowing folders exported
8 by the host to be mounted under Linux.
9
10 If you want to use shared folders in VirtualBox guests, answer Y or M.
diff --git a/drivers/staging/vboxsf/Makefile b/drivers/staging/vboxsf/Makefile
new file mode 100644
index 000000000000..9e4328e79623
--- /dev/null
+++ b/drivers/staging/vboxsf/Makefile
@@ -0,0 +1,5 @@
1# SPDX-License-Identifier: MIT
2
3obj-$(CONFIG_VBOXSF_FS) += vboxsf.o
4
5vboxsf-y := dir.o file.o utils.o vboxsf_wrappers.o super.o
diff --git a/drivers/staging/vboxsf/TODO b/drivers/staging/vboxsf/TODO
new file mode 100644
index 000000000000..8b9193d0d4f0
--- /dev/null
+++ b/drivers/staging/vboxsf/TODO
@@ -0,0 +1,7 @@
1TODO:
2- Find a file-system developer to review this and give their Reviewed-By
3- Address any items coming up during review
4- Move to fs/vboxfs
5
6Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>
7and Hans de Goede <hdegoede@redhat.com>
diff --git a/drivers/staging/vboxsf/dir.c b/drivers/staging/vboxsf/dir.c
new file mode 100644
index 000000000000..f260b5cc1646
--- /dev/null
+++ b/drivers/staging/vboxsf/dir.c
@@ -0,0 +1,418 @@
1// SPDX-License-Identifier: MIT
2/*
3 * VirtualBox Guest Shared Folders support: Directory inode and file operations
4 *
5 * Copyright (C) 2006-2018 Oracle Corporation
6 */
7
8#include <linux/namei.h>
9#include <linux/vbox_utils.h>
10#include "vfsmod.h"
11
12static int vboxsf_dir_open(struct inode *inode, struct file *file)
13{
14 struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
15 struct shfl_createparms params = {};
16 struct vboxsf_dir_info *sf_d;
17 int err;
18
19 sf_d = vboxsf_dir_info_alloc();
20 if (!sf_d)
21 return -ENOMEM;
22
23 params.handle = SHFL_HANDLE_NIL;
24 params.create_flags = SHFL_CF_DIRECTORY | SHFL_CF_ACT_OPEN_IF_EXISTS |
25 SHFL_CF_ACT_FAIL_IF_NEW | SHFL_CF_ACCESS_READ;
26
27 err = vboxsf_create_at_dentry(file_dentry(file), &params);
28 if (err)
29 goto err_free_dir_info;
30
31 if (params.result != SHFL_FILE_EXISTS) {
32 err = -ENOENT;
33 goto err_close;
34 }
35
36 err = vboxsf_dir_read_all(sbi, sf_d, params.handle);
37 if (err)
38 goto err_close;
39
40 vboxsf_close(sbi->root, params.handle);
41 file->private_data = sf_d;
42 return 0;
43
44err_close:
45 vboxsf_close(sbi->root, params.handle);
46err_free_dir_info:
47 vboxsf_dir_info_free(sf_d);
48 return err;
49}
50
51static int vboxsf_dir_release(struct inode *inode, struct file *file)
52{
53 if (file->private_data)
54 vboxsf_dir_info_free(file->private_data);
55
56 return 0;
57}
58
59static unsigned int vboxsf_get_d_type(u32 mode)
60{
61 unsigned int d_type;
62
63 switch (mode & SHFL_TYPE_MASK) {
64 case SHFL_TYPE_FIFO:
65 d_type = DT_FIFO;
66 break;
67 case SHFL_TYPE_DEV_CHAR:
68 d_type = DT_CHR;
69 break;
70 case SHFL_TYPE_DIRECTORY:
71 d_type = DT_DIR;
72 break;
73 case SHFL_TYPE_DEV_BLOCK:
74 d_type = DT_BLK;
75 break;
76 case SHFL_TYPE_FILE:
77 d_type = DT_REG;
78 break;
79 case SHFL_TYPE_SYMLINK:
80 d_type = DT_LNK;
81 break;
82 case SHFL_TYPE_SOCKET:
83 d_type = DT_SOCK;
84 break;
85 case SHFL_TYPE_WHITEOUT:
86 d_type = DT_WHT;
87 break;
88 default:
89 d_type = DT_UNKNOWN;
90 break;
91 }
92 return d_type;
93}
94
/*
 * Emit the single directory entry at ctx->pos from the buffers read at
 * open time. Returns false when there are no more entries to emit (or
 * on fake-inode overflow); returns dir_emit()'s result otherwise.
 */
static bool vboxsf_dir_emit(struct file *dir, struct dir_context *ctx)
{
	struct vboxsf_sbi *sbi = VBOXSF_SBI(file_inode(dir)->i_sb);
	struct vboxsf_dir_info *sf_d = dir->private_data;
	struct shfl_dirinfo *info;
	struct vboxsf_dir_buf *b;
	unsigned int d_type;
	loff_t i, cur = 0;
	ino_t fake_ino;
	size_t size;
	int err;

	list_for_each_entry(b, &sf_d->info_list, head) {
try_next_entry:
		/* Skip whole buffers that lie entirely before ctx->pos. */
		if (ctx->pos >= cur + b->entries) {
			cur += b->entries;
			continue;
		}

		/*
		 * Note the vboxsf_dir_info objects we are iterating over here
		 * are variable sized, so the info pointer may end up being
		 * unaligned. This is how we get the data from the host.
		 * Since vboxsf is only supported on x86 machines this is not
		 * a problem.
		 */
		for (i = 0, info = b->buf; i < ctx->pos - cur; i++) {
			size = offsetof(struct shfl_dirinfo, name.string) +
			       info->name.size;
			info = (struct shfl_dirinfo *)((uintptr_t)info + size);
		}

		/* Info now points to the right entry, emit it. */
		d_type = vboxsf_get_d_type(info->info.attr.mode);

		/*
		 * On 32 bit systems pos is 64-bit signed, while ino is 32-bit
		 * unsigned, so fake_ino may overflow; check for this.
		 */
		if ((ino_t)(ctx->pos + 1) != (u64)(ctx->pos + 1)) {
			vbg_err("vboxsf: fake ino overflow, truncating dir\n");
			return false;
		}
		fake_ino = ctx->pos + 1;

		if (sbi->nls) {
			/* Convert the host's utf8 name to the mount's NLS. */
			char d_name[NAME_MAX];

			err = vboxsf_nlscpy(sbi, d_name, NAME_MAX,
					    info->name.string.utf8,
					    info->name.length);
			if (err) {
				/* skip erroneous entry and proceed */
				ctx->pos += 1;
				goto try_next_entry;
			}

			return dir_emit(ctx, d_name, strlen(d_name),
					fake_ino, d_type);
		}

		return dir_emit(ctx, info->name.string.utf8, info->name.length,
				fake_ino, d_type);
	}

	return false;
}
162
163static int vboxsf_dir_iterate(struct file *dir, struct dir_context *ctx)
164{
165 bool keep_iterating;
166
167 for (keep_iterating = true; keep_iterating; ctx->pos += 1)
168 keep_iterating = vboxsf_dir_emit(dir, ctx);
169
170 return 0;
171}
172
/* Directory file operations; the entry list is snapshotted at open time. */
const struct file_operations vboxsf_dir_fops = {
	.open = vboxsf_dir_open,
	.iterate = vboxsf_dir_iterate,
	.release = vboxsf_dir_release,
	.read = generic_read_dir,
	.llseek = generic_file_llseek,
};
180
181/*
182 * This is called during name resolution/lookup to check if the @dentry in
183 * the cache is still valid. the job is handled by vboxsf_inode_revalidate.
184 */
185static int vboxsf_dentry_revalidate(struct dentry *dentry, unsigned int flags)
186{
187 if (flags & LOOKUP_RCU)
188 return -ECHILD;
189
190 if (d_really_is_positive(dentry))
191 return vboxsf_inode_revalidate(dentry) == 0;
192 else
193 return vboxsf_stat_dentry(dentry, NULL) == -ENOENT;
194}
195
/* Dentry operations: only cache revalidation is needed. */
const struct dentry_operations vboxsf_dentry_ops = {
	.d_revalidate = vboxsf_dentry_revalidate
};
199
200/* iops */
201
202static struct dentry *vboxsf_dir_lookup(struct inode *parent,
203 struct dentry *dentry,
204 unsigned int flags)
205{
206 struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
207 struct shfl_fsobjinfo fsinfo;
208 struct inode *inode;
209 int err;
210
211 dentry->d_time = jiffies;
212
213 err = vboxsf_stat_dentry(dentry, &fsinfo);
214 if (err) {
215 inode = (err == -ENOENT) ? NULL : ERR_PTR(err);
216 } else {
217 inode = vboxsf_new_inode(parent->i_sb);
218 if (!IS_ERR(inode))
219 vboxsf_init_inode(sbi, inode, &fsinfo);
220 }
221
222 return d_splice_alias(inode, dentry);
223}
224
225static int vboxsf_dir_instantiate(struct inode *parent, struct dentry *dentry,
226 struct shfl_fsobjinfo *info)
227{
228 struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
229 struct vboxsf_inode *sf_i;
230 struct inode *inode;
231
232 inode = vboxsf_new_inode(parent->i_sb);
233 if (IS_ERR(inode))
234 return PTR_ERR(inode);
235
236 sf_i = VBOXSF_I(inode);
237 /* The host may have given us different attr then requested */
238 sf_i->force_restat = 1;
239 vboxsf_init_inode(sbi, inode, info);
240
241 d_instantiate(dentry, inode);
242
243 return 0;
244}
245
246static int vboxsf_dir_create(struct inode *parent, struct dentry *dentry,
247 umode_t mode, int is_dir)
248{
249 struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
250 struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
251 struct shfl_createparms params = {};
252 int err;
253
254 params.handle = SHFL_HANDLE_NIL;
255 params.create_flags = SHFL_CF_ACT_CREATE_IF_NEW |
256 SHFL_CF_ACT_FAIL_IF_EXISTS |
257 SHFL_CF_ACCESS_READWRITE |
258 (is_dir ? SHFL_CF_DIRECTORY : 0);
259 params.info.attr.mode = (mode & 0777) |
260 (is_dir ? SHFL_TYPE_DIRECTORY : SHFL_TYPE_FILE);
261 params.info.attr.additional = SHFLFSOBJATTRADD_NOTHING;
262
263 err = vboxsf_create_at_dentry(dentry, &params);
264 if (err)
265 return err;
266
267 if (params.result != SHFL_FILE_CREATED)
268 return -EPERM;
269
270 vboxsf_close(sbi->root, params.handle);
271
272 err = vboxsf_dir_instantiate(parent, dentry, &params.info);
273 if (err)
274 return err;
275
276 /* parent directory access/change time changed */
277 sf_parent_i->force_restat = 1;
278
279 return 0;
280}
281
/* ->create: create a regular file; the excl flag is unused here. */
static int vboxsf_dir_mkfile(struct inode *parent, struct dentry *dentry,
			     umode_t mode, bool excl)
{
	return vboxsf_dir_create(parent, dentry, mode, 0);
}
287
/* ->mkdir: create a directory via the shared create worker. */
static int vboxsf_dir_mkdir(struct inode *parent, struct dentry *dentry,
			    umode_t mode)
{
	return vboxsf_dir_create(parent, dentry, mode, 1);
}
293
294static int vboxsf_dir_unlink(struct inode *parent, struct dentry *dentry)
295{
296 struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
297 struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
298 struct inode *inode = d_inode(dentry);
299 struct shfl_string *path;
300 u32 flags;
301 int err;
302
303 if (S_ISDIR(inode->i_mode))
304 flags = SHFL_REMOVE_DIR;
305 else
306 flags = SHFL_REMOVE_FILE;
307
308 if (S_ISLNK(inode->i_mode))
309 flags |= SHFL_REMOVE_SYMLINK;
310
311 path = vboxsf_path_from_dentry(sbi, dentry);
312 if (IS_ERR(path))
313 return PTR_ERR(path);
314
315 err = vboxsf_remove(sbi->root, path, flags);
316 __putname(path);
317 if (err)
318 return err;
319
320 /* parent directory access/change time changed */
321 sf_parent_i->force_restat = 1;
322
323 return 0;
324}
325
326static int vboxsf_dir_rename(struct inode *old_parent,
327 struct dentry *old_dentry,
328 struct inode *new_parent,
329 struct dentry *new_dentry,
330 unsigned int flags)
331{
332 struct vboxsf_sbi *sbi = VBOXSF_SBI(old_parent->i_sb);
333 struct vboxsf_inode *sf_old_parent_i = VBOXSF_I(old_parent);
334 struct vboxsf_inode *sf_new_parent_i = VBOXSF_I(new_parent);
335 u32 shfl_flags = SHFL_RENAME_FILE | SHFL_RENAME_REPLACE_IF_EXISTS;
336 struct shfl_string *old_path, *new_path;
337 int err;
338
339 if (flags)
340 return -EINVAL;
341
342 old_path = vboxsf_path_from_dentry(sbi, old_dentry);
343 if (IS_ERR(old_path))
344 return PTR_ERR(old_path);
345
346 new_path = vboxsf_path_from_dentry(sbi, new_dentry);
347 if (IS_ERR(new_path)) {
348 err = PTR_ERR(new_path);
349 goto err_put_old_path;
350 }
351
352 if (d_inode(old_dentry)->i_mode & S_IFDIR)
353 shfl_flags = 0;
354
355 err = vboxsf_rename(sbi->root, old_path, new_path, shfl_flags);
356 if (err == 0) {
357 /* parent directories access/change time changed */
358 sf_new_parent_i->force_restat = 1;
359 sf_old_parent_i->force_restat = 1;
360 }
361
362 __putname(new_path);
363err_put_old_path:
364 __putname(old_path);
365 return err;
366}
367
368static int vboxsf_dir_symlink(struct inode *parent, struct dentry *dentry,
369 const char *symname)
370{
371 struct vboxsf_inode *sf_parent_i = VBOXSF_I(parent);
372 struct vboxsf_sbi *sbi = VBOXSF_SBI(parent->i_sb);
373 int symname_size = strlen(symname) + 1;
374 struct shfl_string *path, *ssymname;
375 struct shfl_fsobjinfo info;
376 int err;
377
378 path = vboxsf_path_from_dentry(sbi, dentry);
379 if (IS_ERR(path))
380 return PTR_ERR(path);
381
382 ssymname = kmalloc(SHFLSTRING_HEADER_SIZE + symname_size, GFP_KERNEL);
383 if (!ssymname) {
384 __putname(path);
385 return -ENOMEM;
386 }
387 ssymname->length = symname_size - 1;
388 ssymname->size = symname_size;
389 memcpy(ssymname->string.utf8, symname, symname_size);
390
391 err = vboxsf_symlink(sbi->root, path, ssymname, &info);
392 kfree(ssymname);
393 __putname(path);
394 if (err) {
395 /* -EROFS means symlinks are note support -> -EPERM */
396 return (err == -EROFS) ? -EPERM : err;
397 }
398
399 err = vboxsf_dir_instantiate(parent, dentry, &info);
400 if (err)
401 return err;
402
403 /* parent directory access/change time changed */
404 sf_parent_i->force_restat = 1;
405 return 0;
406}
407
/* Directory inode operations; rmdir and unlink share one implementation. */
const struct inode_operations vboxsf_dir_iops = {
	.lookup = vboxsf_dir_lookup,
	.create = vboxsf_dir_mkfile,
	.mkdir = vboxsf_dir_mkdir,
	.rmdir = vboxsf_dir_unlink,
	.unlink = vboxsf_dir_unlink,
	.rename = vboxsf_dir_rename,
	.symlink = vboxsf_dir_symlink,
	.getattr = vboxsf_getattr,
	.setattr = vboxsf_setattr,
};
diff --git a/drivers/staging/vboxsf/file.c b/drivers/staging/vboxsf/file.c
new file mode 100644
index 000000000000..4b61ccf83fca
--- /dev/null
+++ b/drivers/staging/vboxsf/file.c
@@ -0,0 +1,370 @@
1// SPDX-License-Identifier: MIT
2/*
3 * VirtualBox Guest Shared Folders support: Regular file inode and file ops.
4 *
5 * Copyright (C) 2006-2018 Oracle Corporation
6 */
7
8#include <linux/mm.h>
9#include <linux/page-flags.h>
10#include <linux/pagemap.h>
11#include <linux/highmem.h>
12#include <linux/sizes.h>
13#include "vfsmod.h"
14
/*
 * One host-side open handle, refcounted and kept on the owning inode's
 * handle_list so writeback can find a writable handle for the inode.
 */
struct vboxsf_handle {
	u64 handle;		/* host handle from SHFL_FN_CREATE */
	u32 root;		/* root handle of the mapping */
	u32 access_flags;	/* SHFL_CF_ACCESS_* this handle was opened with */
	struct kref refcount;
	struct list_head head;	/* entry in vboxsf_inode.handle_list */
};
22
23static int vboxsf_file_open(struct inode *inode, struct file *file)
24{
25 struct vboxsf_inode *sf_i = VBOXSF_I(inode);
26 struct shfl_createparms params = {};
27 struct vboxsf_handle *sf_handle;
28 u32 access_flags = 0;
29 int err;
30
31 sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL);
32 if (!sf_handle)
33 return -ENOMEM;
34
35 /*
36 * We check the value of params.handle afterwards to find out if
37 * the call succeeded or failed, as the API does not seem to cleanly
38 * distinguish error and informational messages.
39 *
40 * Furthermore, we must set params.handle to SHFL_HANDLE_NIL to
41 * make the shared folders host service use our mode parameter.
42 */
43 params.handle = SHFL_HANDLE_NIL;
44 if (file->f_flags & O_CREAT) {
45 params.create_flags |= SHFL_CF_ACT_CREATE_IF_NEW;
46 /*
47 * We ignore O_EXCL, as the Linux kernel seems to call create
48 * beforehand itself, so O_EXCL should always fail.
49 */
50 if (file->f_flags & O_TRUNC)
51 params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
52 else
53 params.create_flags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
54 } else {
55 params.create_flags |= SHFL_CF_ACT_FAIL_IF_NEW;
56 if (file->f_flags & O_TRUNC)
57 params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
58 }
59
60 switch (file->f_flags & O_ACCMODE) {
61 case O_RDONLY:
62 access_flags |= SHFL_CF_ACCESS_READ;
63 break;
64
65 case O_WRONLY:
66 access_flags |= SHFL_CF_ACCESS_WRITE;
67 break;
68
69 case O_RDWR:
70 access_flags |= SHFL_CF_ACCESS_READWRITE;
71 break;
72
73 default:
74 WARN_ON(1);
75 }
76
77 if (file->f_flags & O_APPEND)
78 access_flags |= SHFL_CF_ACCESS_APPEND;
79
80 params.create_flags |= access_flags;
81 params.info.attr.mode = inode->i_mode;
82
83 err = vboxsf_create_at_dentry(file_dentry(file), &params);
84 if (err == 0 && params.handle == SHFL_HANDLE_NIL)
85 err = (params.result == SHFL_FILE_EXISTS) ? -EEXIST : -ENOENT;
86 if (err) {
87 kfree(sf_handle);
88 return err;
89 }
90
91 /* the host may have given us different attr then requested */
92 sf_i->force_restat = 1;
93
94 /* init our handle struct and add it to the inode's handles list */
95 sf_handle->handle = params.handle;
96 sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
97 sf_handle->access_flags = access_flags;
98 kref_init(&sf_handle->refcount);
99
100 mutex_lock(&sf_i->handle_list_mutex);
101 list_add(&sf_handle->head, &sf_i->handle_list);
102 mutex_unlock(&sf_i->handle_list_mutex);
103
104 file->private_data = sf_handle;
105 return 0;
106}
107
108static void vboxsf_handle_release(struct kref *refcount)
109{
110 struct vboxsf_handle *sf_handle =
111 container_of(refcount, struct vboxsf_handle, refcount);
112
113 vboxsf_close(sf_handle->root, sf_handle->handle);
114 kfree(sf_handle);
115}
116
117static int vboxsf_file_release(struct inode *inode, struct file *file)
118{
119 struct vboxsf_inode *sf_i = VBOXSF_I(inode);
120 struct vboxsf_handle *sf_handle = file->private_data;
121
122 /*
123 * When a file is closed on our (the guest) side, we want any subsequent
124 * accesses done on the host side to see all changes done from our side.
125 */
126 filemap_write_and_wait(inode->i_mapping);
127
128 mutex_lock(&sf_i->handle_list_mutex);
129 list_del(&sf_handle->head);
130 mutex_unlock(&sf_i->handle_list_mutex);
131
132 kref_put(&sf_handle->refcount, vboxsf_handle_release);
133 return 0;
134}
135
/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later.
 */
static void vboxsf_vma_close(struct vm_area_struct *vma)
{
	filemap_write_and_wait(vma->vm_file->f_mapping);
}
144
/* VM operations for shared mmaps; faults are served from the page cache. */
static const struct vm_operations_struct vboxsf_file_vm_ops = {
	.close = vboxsf_vma_close,
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
};
150
151static int vboxsf_file_mmap(struct file *file, struct vm_area_struct *vma)
152{
153 int err;
154
155 err = generic_file_mmap(file, vma);
156 if (!err)
157 vma->vm_ops = &vboxsf_file_vm_ops;
158
159 return err;
160}
161
/*
 * Note that since we are accessing files on the host's filesystem, files
 * may always be changed underneath us by the host!
 *
 * The vboxsf API between the guest and the host does not offer any functions
 * to deal with this. There is no inode-generation to check for changes, no
 * events / callback on changes and no way to lock files.
 *
 * To avoid returning stale data when a file gets *opened* on our (the guest)
 * side, we do a "stat" on the host side, then compare the mtime with the
 * last known mtime and invalidate the page-cache if they differ.
 * This is done from vboxsf_inode_revalidate().
 *
 * When reads are done through the read_iter fop, it is possible to do
 * further cache revalidation then; there are 3 options to deal with this:
 *
 * 1) Rely solely on the revalidation done at open time
 * 2) Do another "stat" and compare mtime again. Unfortunately the vboxsf
 *    host API does not allow stat on handles, so we would need to use
 *    file->f_path.dentry and the stat will then fail if the file was unlinked
 *    or renamed (and there is no thing like NFS' silly-rename). So we get:
 * 2a) "stat" and compare mtime, on stat failure invalidate the cache
 * 2b) "stat" and compare mtime, on stat failure do nothing
 * 3) Simply always call invalidate_inode_pages2_range on the range of the read
 *
 * Currently we are keeping things KISS and using option 1. This allows
 * directly using generic_file_read_iter without wrapping it.
 *
 * This means that only data written on the host side before open() on
 * the guest side is guaranteed to be seen by the guest. If necessary
 * we may provide other read-cache strategies in the future and make this
 * configurable through a mount option.
 */
const struct file_operations vboxsf_reg_fops = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.mmap = vboxsf_file_mmap,
	.open = vboxsf_file_open,
	.release = vboxsf_file_release,
	.fsync = noop_fsync,
	.splice_read = generic_file_splice_read,
};
205
/* Regular-file inode operations. */
const struct inode_operations vboxsf_reg_iops = {
	.getattr = vboxsf_getattr,
	.setattr = vboxsf_setattr
};
210
211static int vboxsf_readpage(struct file *file, struct page *page)
212{
213 struct vboxsf_handle *sf_handle = file->private_data;
214 loff_t off = page_offset(page);
215 u32 nread = PAGE_SIZE;
216 u8 *buf;
217 int err;
218
219 buf = kmap(page);
220
221 err = vboxsf_read(sf_handle->root, sf_handle->handle, off, &nread, buf);
222 if (err == 0) {
223 memset(&buf[nread], 0, PAGE_SIZE - nread);
224 flush_dcache_page(page);
225 SetPageUptodate(page);
226 } else {
227 SetPageError(page);
228 }
229
230 kunmap(page);
231 unlock_page(page);
232 return err;
233}
234
235static struct vboxsf_handle *vboxsf_get_write_handle(struct vboxsf_inode *sf_i)
236{
237 struct vboxsf_handle *h, *sf_handle = NULL;
238
239 mutex_lock(&sf_i->handle_list_mutex);
240 list_for_each_entry(h, &sf_i->handle_list, head) {
241 if (h->access_flags == SHFL_CF_ACCESS_WRITE ||
242 h->access_flags == SHFL_CF_ACCESS_READWRITE) {
243 kref_get(&h->refcount);
244 sf_handle = h;
245 break;
246 }
247 }
248 mutex_unlock(&sf_i->handle_list_mutex);
249
250 return sf_handle;
251}
252
253static int vboxsf_writepage(struct page *page, struct writeback_control *wbc)
254{
255 struct inode *inode = page->mapping->host;
256 struct vboxsf_inode *sf_i = VBOXSF_I(inode);
257 struct vboxsf_handle *sf_handle;
258 loff_t off = page_offset(page);
259 loff_t size = i_size_read(inode);
260 u32 nwrite = PAGE_SIZE;
261 u8 *buf;
262 int err;
263
264 if (off + PAGE_SIZE > size)
265 nwrite = size & ~PAGE_MASK;
266
267 sf_handle = vboxsf_get_write_handle(sf_i);
268 if (!sf_handle)
269 return -EBADF;
270
271 buf = kmap(page);
272 err = vboxsf_write(sf_handle->root, sf_handle->handle,
273 off, &nwrite, buf);
274 kunmap(page);
275
276 kref_put(&sf_handle->refcount, vboxsf_handle_release);
277
278 if (err == 0) {
279 ClearPageError(page);
280 /* mtime changed */
281 sf_i->force_restat = 1;
282 } else {
283 ClearPageUptodate(page);
284 }
285
286 unlock_page(page);
287 return err;
288}
289
/*
 * ->write_end: send the bytes just copied into the page straight to the
 * host. Returns the number of bytes actually written (0 on error), per
 * the write_end contract.
 */
static int vboxsf_write_end(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned int len, unsigned int copied,
			    struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct vboxsf_handle *sf_handle = file->private_data;
	unsigned int from = pos & ~PAGE_MASK;
	u32 nwritten = len;
	u8 *buf;
	int err;

	buf = kmap(page);
	err = vboxsf_write(sf_handle->root, sf_handle->handle,
			   pos, &nwritten, buf + from);
	kunmap(page);

	if (err) {
		nwritten = 0;
		goto out;
	}

	/* mtime changed */
	VBOXSF_I(inode)->force_restat = 1;

	/* A full-page write makes the whole page up to date. */
	if (!PageUptodate(page) && nwritten == PAGE_SIZE)
		SetPageUptodate(page);

	/* Extend the cached file size if we wrote past the old EOF. */
	pos += nwritten;
	if (pos > inode->i_size)
		i_size_write(inode, pos);

out:
	unlock_page(page);
	put_page(page);

	return nwritten;
}
327
/* Page-cache operations; write_begin is the generic simple variant. */
const struct address_space_operations vboxsf_reg_aops = {
	.readpage = vboxsf_readpage,
	.writepage = vboxsf_writepage,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.write_begin = simple_write_begin,
	.write_end = vboxsf_write_end,
};
335
336static const char *vboxsf_get_link(struct dentry *dentry, struct inode *inode,
337 struct delayed_call *done)
338{
339 struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
340 struct shfl_string *path;
341 char *link;
342 int err;
343
344 if (!dentry)
345 return ERR_PTR(-ECHILD);
346
347 path = vboxsf_path_from_dentry(sbi, dentry);
348 if (IS_ERR(path))
349 return (char *)path;
350
351 link = kzalloc(PATH_MAX, GFP_KERNEL);
352 if (!link) {
353 __putname(path);
354 return ERR_PTR(-ENOMEM);
355 }
356
357 err = vboxsf_readlink(sbi->root, path, PATH_MAX, link);
358 __putname(path);
359 if (err) {
360 kfree(link);
361 return ERR_PTR(err);
362 }
363
364 set_delayed_call(done, kfree_link, link);
365 return link;
366}
367
/* Symlink inode operations. */
const struct inode_operations vboxsf_lnk_iops = {
	.get_link = vboxsf_get_link
};
diff --git a/drivers/staging/vboxsf/shfl_hostintf.h b/drivers/staging/vboxsf/shfl_hostintf.h
new file mode 100644
index 000000000000..aca829062c12
--- /dev/null
+++ b/drivers/staging/vboxsf/shfl_hostintf.h
@@ -0,0 +1,901 @@
1/* SPDX-License-Identifier: MIT */
2/*
3 * VirtualBox Shared Folders: host interface definition.
4 *
5 * Copyright (C) 2006-2018 Oracle Corporation
6 */
7
8#ifndef SHFL_HOSTINTF_H
9#define SHFL_HOSTINTF_H
10
11#include <linux/vbox_vmmdev_types.h>
12
13/* The max in/out buffer size for a FN_READ or FN_WRITE call */
14#define SHFL_MAX_RW_COUNT (16 * SZ_1M)
15
16/*
17 * Structures shared between guest and the service
18 * can be relocated and use offsets to point to variable
19 * length parts.
20 *
21 * Shared folders protocol works with handles.
22 * Before doing any action on a file system object,
23 * one have to obtain the object handle via a SHFL_FN_CREATE
24 * request. A handle must be closed with SHFL_FN_CLOSE.
25 */
26
/* Function numbers for the shared-folders host service requests. */
enum {
	SHFL_FN_QUERY_MAPPINGS = 1,	/* Query mappings changes. */
	SHFL_FN_QUERY_MAP_NAME = 2,	/* Query map name. */
	SHFL_FN_CREATE = 3,		/* Open/create object. */
	SHFL_FN_CLOSE = 4,		/* Close object handle. */
	SHFL_FN_READ = 5,		/* Read object content. */
	SHFL_FN_WRITE = 6,		/* Write new object content. */
	SHFL_FN_LOCK = 7,		/* Lock/unlock a range in the object. */
	SHFL_FN_LIST = 8,		/* List object content. */
	SHFL_FN_INFORMATION = 9,	/* Query/set object information. */
	/* Note function number 10 is not used! */
	SHFL_FN_REMOVE = 11,		/* Remove object */
	SHFL_FN_MAP_FOLDER_OLD = 12,	/* Map folder (legacy) */
	SHFL_FN_UNMAP_FOLDER = 13,	/* Unmap folder */
	SHFL_FN_RENAME = 14,		/* Rename object */
	SHFL_FN_FLUSH = 15,		/* Flush file */
	SHFL_FN_SET_UTF8 = 16,		/* Select UTF8 filename encoding */
	SHFL_FN_MAP_FOLDER = 17,	/* Map folder */
	SHFL_FN_READLINK = 18,		/* Read symlink dest (as of VBox 4.0) */
	SHFL_FN_SYMLINK = 19,		/* Create symlink (as of VBox 4.0) */
	SHFL_FN_SET_SYMLINKS = 20,	/* Ask host to show symlinks (4.0+) */
};
49
50/* Root handles for a mapping are of type u32, Root handles are unique. */
51#define SHFL_ROOT_NIL UINT_MAX
52
53/* Shared folders handle for an opened object are of type u64. */
54#define SHFL_HANDLE_NIL ULLONG_MAX
55
56/* Hardcoded maximum length (in chars) of a shared folder name. */
57#define SHFL_MAX_LEN (256)
58/* Hardcoded maximum number of shared folder mapping available to the guest. */
59#define SHFL_MAX_MAPPINGS (64)
60
/** Shared folder string buffer structure. */
struct shfl_string {
	/** Allocated size of the string member in bytes. */
	u16 size;

	/** Length of string without trailing nul in bytes. */
	u16 length;

	/** UTF-8 or UTF-16 string. Nul terminated. */
	union {
		u8 utf8[2];
		u16 utf16[1];
		u16 ucs2[1]; /* misnomer, use utf16. */
	} string;
};
/* Layout is shared with the host; size must not change. */
VMMDEV_ASSERT_SIZE(shfl_string, 6);
77
78/* The size of shfl_string w/o the string part. */
79#define SHFLSTRING_HEADER_SIZE 4
80
81/* Calculate size of the string. */
82static inline u32 shfl_string_buf_size(const struct shfl_string *string)
83{
84 return string ? SHFLSTRING_HEADER_SIZE + string->size : 0;
85}
86
87/* Set user id on execution (S_ISUID). */
88#define SHFL_UNIX_ISUID 0004000U
89/* Set group id on execution (S_ISGID). */
90#define SHFL_UNIX_ISGID 0002000U
91/* Sticky bit (S_ISVTX / S_ISTXT). */
92#define SHFL_UNIX_ISTXT 0001000U
93
94/* Owner readable (S_IRUSR). */
95#define SHFL_UNIX_IRUSR 0000400U
96/* Owner writable (S_IWUSR). */
97#define SHFL_UNIX_IWUSR 0000200U
98/* Owner executable (S_IXUSR). */
99#define SHFL_UNIX_IXUSR 0000100U
100
101/* Group readable (S_IRGRP). */
102#define SHFL_UNIX_IRGRP 0000040U
103/* Group writable (S_IWGRP). */
104#define SHFL_UNIX_IWGRP 0000020U
105/* Group executable (S_IXGRP). */
106#define SHFL_UNIX_IXGRP 0000010U
107
108/* Other readable (S_IROTH). */
109#define SHFL_UNIX_IROTH 0000004U
110/* Other writable (S_IWOTH). */
111#define SHFL_UNIX_IWOTH 0000002U
112/* Other executable (S_IXOTH). */
113#define SHFL_UNIX_IXOTH 0000001U
114
115/* Named pipe (fifo) (S_IFIFO). */
116#define SHFL_TYPE_FIFO 0010000U
117/* Character device (S_IFCHR). */
118#define SHFL_TYPE_DEV_CHAR 0020000U
119/* Directory (S_IFDIR). */
120#define SHFL_TYPE_DIRECTORY 0040000U
121/* Block device (S_IFBLK). */
122#define SHFL_TYPE_DEV_BLOCK 0060000U
123/* Regular file (S_IFREG). */
124#define SHFL_TYPE_FILE 0100000U
125/* Symbolic link (S_IFLNK). */
126#define SHFL_TYPE_SYMLINK 0120000U
127/* Socket (S_IFSOCK). */
128#define SHFL_TYPE_SOCKET 0140000U
129/* Whiteout (S_IFWHT). */
130#define SHFL_TYPE_WHITEOUT 0160000U
131/* Type mask (S_IFMT). */
132#define SHFL_TYPE_MASK 0170000U
133
134/* Checks the mode flags indicate a directory (S_ISDIR). */
135#define SHFL_IS_DIRECTORY(m) (((m) & SHFL_TYPE_MASK) == SHFL_TYPE_DIRECTORY)
136/* Checks the mode flags indicate a symbolic link (S_ISLNK). */
137#define SHFL_IS_SYMLINK(m) (((m) & SHFL_TYPE_MASK) == SHFL_TYPE_SYMLINK)
138
/** The available additional information in a shfl_fsobjattr object. */
enum shfl_fsobjattr_add {
	/** No additional information is available / requested. */
	SHFLFSOBJATTRADD_NOTHING = 1,
	/**
	 * The additional unix attributes (shfl_fsobjattr::u::unix_attr) are
	 * available / requested.
	 */
	SHFLFSOBJATTRADD_UNIX,
	/**
	 * The additional extended attribute size (shfl_fsobjattr::u::size) is
	 * available / requested.
	 */
	SHFLFSOBJATTRADD_EASIZE,
	/**
	 * The last valid item (inclusive).
	 * The valid range is SHFLFSOBJATTRADD_NOTHING thru
	 * SHFLFSOBJATTRADD_LAST.
	 */
	SHFLFSOBJATTRADD_LAST = SHFLFSOBJATTRADD_EASIZE,

	/** The usual 32-bit hack (forces a 32-bit wide enum). */
	SHFLFSOBJATTRADD_32BIT_SIZE_HACK = 0x7fffffff
};
163
164/**
165 * Additional unix Attributes, these are available when
166 * shfl_fsobjattr.additional == SHFLFSOBJATTRADD_UNIX.
167 */
168struct shfl_fsobjattr_unix {
169 /**
170 * The user owning the filesystem object (st_uid).
171 * This field is ~0U if not supported.
172 */
173 u32 uid;
174
175 /**
176 * The group the filesystem object is assigned (st_gid).
177 * This field is ~0U if not supported.
178 */
179 u32 gid;
180
181 /**
182 * Number of hard links to this filesystem object (st_nlink).
183 * This field is 1 if the filesystem doesn't support hardlinking or
184 * the information isn't available.
185 */
186 u32 hardlinks;
187
188 /**
189 * The device number of the device which this filesystem object resides
190 * on (st_dev). This field is 0 if this information is not available.
191 */
192 u32 inode_id_device;
193
194 /**
195 * The unique identifier (within the filesystem) of this filesystem
196 * object (st_ino). Together with inode_id_device, this field can be
197 * used as a OS wide unique id, when both their values are not 0.
198 * This field is 0 if the information is not available.
199 */
200 u64 inode_id;
201
202 /**
203 * User flags (st_flags).
204 * This field is 0 if this information is not available.
205 */
206 u32 flags;
207
208 /**
209 * The current generation number (st_gen).
210 * This field is 0 if this information is not available.
211 */
212 u32 generation_id;
213
214 /**
215 * The device number of a char. or block device type object (st_rdev).
216 * This field is 0 if the file isn't a char. or block device or when
217 * the OS doesn't use the major+minor device idenfication scheme.
218 */
219 u32 device;
220} __packed;
221
/** Extended attribute size (SHFLFSOBJATTRADD_EASIZE payload). */
struct shfl_fsobjattr_easize {
	/** Size of EAs. */
	s64 cb;
} __packed;
227
/** Shared folder filesystem object attributes. */
struct shfl_fsobjattr {
	/** Mode flags (st_mode). SHFL_UNIX_*, SHFL_TYPE_*, and SHFL_DOS_*. */
	u32 mode;

	/** Selects which member of @u below is valid. */
	enum shfl_fsobjattr_add additional;

	/**
	 * Additional attributes.
	 *
	 * Unless explicitly specified to an API, the API can provide
	 * additional data as it is provided by the underlying OS.
	 */
	union {
		struct shfl_fsobjattr_unix unix_attr;
		struct shfl_fsobjattr_easize size;
	} __packed u;
} __packed;
/* Layout is shared with the host; size must not change. */
VMMDEV_ASSERT_SIZE(shfl_fsobjattr, 44);
248
/* Timestamp exchanged with the host: nanoseconds since the Unix epoch. */
struct shfl_timespec {
	s64 ns_relative_to_unix_epoch;
};
252
253/** Filesystem object information structure. */
254struct shfl_fsobjinfo {
255 /**
256 * Logical size (st_size).
257 * For normal files this is the size of the file.
258 * For symbolic links, this is the length of the path name contained
259 * in the symbolic link.
260 * For other objects this fields needs to be specified.
261 */
262 s64 size;
263
264 /** Disk allocation size (st_blocks * DEV_BSIZE). */
265 s64 allocated;
266
267 /** Time of last access (st_atime). */
268 struct shfl_timespec access_time;
269
270 /** Time of last data modification (st_mtime). */
271 struct shfl_timespec modification_time;
272
273 /**
274 * Time of last status change (st_ctime).
275 * If not available this is set to modification_time.
276 */
277 struct shfl_timespec change_time;
278
279 /**
280 * Time of file birth (st_birthtime).
281 * If not available this is set to change_time.
282 */
283 struct shfl_timespec birth_time;
284
285 /** Attributes. */
286 struct shfl_fsobjattr attr;
287
288} __packed;
289VMMDEV_ASSERT_SIZE(shfl_fsobjinfo, 92);
290
/**
 * Result of an open/create request.
 * Along with the handle value, the result code identifies what has
 * happened while trying to open the object.
 */
enum shfl_create_result {
	SHFL_NO_RESULT,
	/** Specified path does not exist. */
	SHFL_PATH_NOT_FOUND,
	/** Path to file exists, but the last component does not. */
	SHFL_FILE_NOT_FOUND,
	/** File already exists and either has been opened or not. */
	SHFL_FILE_EXISTS,
	/** New file was created. */
	SHFL_FILE_CREATED,
	/** Existing file was replaced or overwritten. */
	SHFL_FILE_REPLACED
};
310
/* No flags. Initialization value. */
#define SHFL_CF_NONE			(0x00000000)

/*
 * Only lookup the object, do not return a handle. When this is set all other
 * flags are ignored.
 */
#define SHFL_CF_LOOKUP			(0x00000001)

/*
 * Open parent directory of specified object.
 * Useful for the corresponding Windows FSD flag
 * and for opening paths like \\dir\\*.* to search the 'dir'.
 */
#define SHFL_CF_OPEN_TARGET_DIRECTORY	(0x00000002)

/* Create/open a directory. */
#define SHFL_CF_DIRECTORY		(0x00000004)

/*
 * Open/create action to do if object exists
 * and if the object does not exist.
 * REPLACE file means atomically DELETE and CREATE.
 * OVERWRITE file means truncating the file to 0 and
 * setting new size.
 * When opening an existing directory REPLACE and OVERWRITE
 * actions are considered invalid, and cause returning
 * FILE_EXISTS with NIL handle.
 */
#define SHFL_CF_ACT_MASK_IF_EXISTS	(0x000000f0)
#define SHFL_CF_ACT_MASK_IF_NEW		(0x00000f00)

/* What to do if object exists. */
#define SHFL_CF_ACT_OPEN_IF_EXISTS	(0x00000000)
#define SHFL_CF_ACT_FAIL_IF_EXISTS	(0x00000010)
#define SHFL_CF_ACT_REPLACE_IF_EXISTS	(0x00000020)
#define SHFL_CF_ACT_OVERWRITE_IF_EXISTS	(0x00000030)

/* What to do if object does not exist. */
#define SHFL_CF_ACT_CREATE_IF_NEW	(0x00000000)
#define SHFL_CF_ACT_FAIL_IF_NEW		(0x00000100)

/* Read/write requested access for the object. */
#define SHFL_CF_ACCESS_MASK_RW		(0x00003000)

/* No access requested. */
#define SHFL_CF_ACCESS_NONE		(0x00000000)
/* Read access requested. */
#define SHFL_CF_ACCESS_READ		(0x00001000)
/* Write access requested. */
#define SHFL_CF_ACCESS_WRITE		(0x00002000)
/* Read/Write access requested. */
#define SHFL_CF_ACCESS_READWRITE	(0x00003000)

/* Requested share access for the object. */
#define SHFL_CF_ACCESS_MASK_DENY	(0x0000c000)

/* Allow any access. */
#define SHFL_CF_ACCESS_DENYNONE		(0x00000000)
/* Do not allow read. */
#define SHFL_CF_ACCESS_DENYREAD		(0x00004000)
/* Do not allow write. */
#define SHFL_CF_ACCESS_DENYWRITE	(0x00008000)
/* Do not allow access. */
#define SHFL_CF_ACCESS_DENYALL		(0x0000c000)

/* Requested access to attributes of the object. */
#define SHFL_CF_ACCESS_MASK_ATTR	(0x00030000)

/* No access requested. */
#define SHFL_CF_ACCESS_ATTR_NONE	(0x00000000)
/* Read access requested. */
#define SHFL_CF_ACCESS_ATTR_READ	(0x00010000)
/* Write access requested. */
#define SHFL_CF_ACCESS_ATTR_WRITE	(0x00020000)
/* Read/Write access requested. */
#define SHFL_CF_ACCESS_ATTR_READWRITE	(0x00030000)

/*
 * The file is opened in append mode.
 * Ignored if SHFL_CF_ACCESS_WRITE is not set.
 */
#define SHFL_CF_ACCESS_APPEND		(0x00040000)
394
/** Create parameters buffer struct for SHFL_FN_CREATE call */
struct shfl_createparms {
	/** Returned handle of opened object. */
	u64 handle;

	/** Returned result of the operation, see enum shfl_create_result. */
	enum shfl_create_result result;

	/** In: requested open/create disposition, SHFL_CF_* flags. */
	u32 create_flags;

	/**
	 * Attributes of object to create and
	 * returned actual attributes of opened/created object.
	 */
	struct shfl_fsobjinfo info;
} __packed;
412
/** Shared Folder directory entry, as returned by SHFL_FN_LIST. */
struct shfl_dirinfo {
	/** Full information about the object. */
	struct shfl_fsobjinfo info;
	/**
	 * The length of the short field (number of UTF16 chars).
	 * It is 16-bit for reasons of alignment.
	 */
	u16 short_name_len;
	/**
	 * The short name for 8.3 compatibility.
	 * Empty string if not available.
	 */
	u16 short_name[14];
	/** Variable-length full object name; must be the last member. */
	struct shfl_string name;
};
429
/** Shared folder filesystem properties. */
struct shfl_fsproperties {
	/**
	 * The maximum size of a filesystem object name.
	 * This does not include the '\\0'.
	 */
	u32 max_component_len;

	/**
	 * True if the filesystem is remote.
	 * False if the filesystem is local.
	 */
	bool remote;

	/**
	 * True if the filesystem is case sensitive.
	 * False if the filesystem is case insensitive.
	 */
	bool case_sensitive;

	/**
	 * True if the filesystem is mounted read only.
	 * False if the filesystem is mounted read write.
	 */
	bool read_only;

	/**
	 * True if the filesystem can encode unicode object names.
	 * False if it can't.
	 */
	bool supports_unicode;

	/**
	 * True if the filesystem is compressed.
	 * False if it isn't or we don't know.
	 */
	bool compressed;

	/**
	 * True if the filesystem supports compression of individual files.
	 * False if it doesn't or we don't know.
	 */
	bool file_compression;
};
VMMDEV_ASSERT_SIZE(shfl_fsproperties, 12);
475
/** Volume information, returned by SHFL_FN_INFORMATION + SHFL_INFO_VOLUME. */
struct shfl_volinfo {
	s64 total_allocation_bytes;
	s64 available_allocation_bytes;
	u32 bytes_per_allocation_unit;
	u32 bytes_per_sector;
	u32 serial;
	struct shfl_fsproperties properties;
};
484
485
/** SHFL_FN_MAP_FOLDER Parameters structure. */
struct shfl_map_folder {
	/**
	 * pointer, in:
	 * Points to struct shfl_string buffer.
	 */
	struct vmmdev_hgcm_function_parameter path;

	/**
	 * pointer, out: SHFLROOT (u32)
	 * Root handle of the mapping which name is queried.
	 */
	struct vmmdev_hgcm_function_parameter root;

	/**
	 * pointer, in: UTF16
	 * Path delimiter
	 */
	struct vmmdev_hgcm_function_parameter delimiter;

	/**
	 * pointer, in: SHFLROOT (u32)
	 * Case sensitive flag
	 */
	struct vmmdev_hgcm_function_parameter case_sensitive;

};

/* Number of parameters */
#define SHFL_CPARMS_MAP_FOLDER (4)
516
517
/** SHFL_FN_UNMAP_FOLDER Parameters structure. */
struct shfl_unmap_folder {
	/**
	 * pointer, in: SHFLROOT (u32)
	 * Root handle of the mapped folder to unmap.
	 */
	struct vmmdev_hgcm_function_parameter root;

};

/* Number of parameters */
#define SHFL_CPARMS_UNMAP_FOLDER (1)
530
531
/** SHFL_FN_CREATE Parameters structure. */
struct shfl_create {
	/**
	 * pointer, in: SHFLROOT (u32)
	 * Root handle of the mapped folder to operate on.
	 */
	struct vmmdev_hgcm_function_parameter root;

	/**
	 * pointer, in:
	 * Points to struct shfl_string buffer.
	 */
	struct vmmdev_hgcm_function_parameter path;

	/**
	 * pointer, in/out:
	 * Points to struct shfl_createparms buffer.
	 */
	struct vmmdev_hgcm_function_parameter parms;

};

/* Number of parameters */
#define SHFL_CPARMS_CREATE (3)
556
557
/** SHFL_FN_CLOSE Parameters structure. */
struct shfl_close {
	/**
	 * pointer, in: SHFLROOT (u32)
	 * Root handle of the mapped folder to operate on.
	 */
	struct vmmdev_hgcm_function_parameter root;

	/**
	 * value64, in:
	 * SHFLHANDLE (u64) of object to close.
	 */
	struct vmmdev_hgcm_function_parameter handle;

};

/* Number of parameters */
#define SHFL_CPARMS_CLOSE (2)
576
577
/** SHFL_FN_READ Parameters structure. */
struct shfl_read {
	/**
	 * pointer, in: SHFLROOT (u32)
	 * Root handle of the mapped folder to operate on.
	 */
	struct vmmdev_hgcm_function_parameter root;

	/**
	 * value64, in:
	 * SHFLHANDLE (u64) of object to read from.
	 */
	struct vmmdev_hgcm_function_parameter handle;

	/**
	 * value64, in:
	 * Offset to read from.
	 */
	struct vmmdev_hgcm_function_parameter offset;

	/**
	 * value64, in/out:
	 * Bytes to read/How many were read.
	 */
	struct vmmdev_hgcm_function_parameter cb;

	/**
	 * pointer, out:
	 * Buffer to place data to.
	 */
	struct vmmdev_hgcm_function_parameter buffer;

};

/* Number of parameters */
#define SHFL_CPARMS_READ (5)
614
615
/** SHFL_FN_WRITE Parameters structure. */
struct shfl_write {
	/**
	 * pointer, in: SHFLROOT (u32)
	 * Root handle of the mapped folder to operate on.
	 */
	struct vmmdev_hgcm_function_parameter root;

	/**
	 * value64, in:
	 * SHFLHANDLE (u64) of object to write to.
	 */
	struct vmmdev_hgcm_function_parameter handle;

	/**
	 * value64, in:
	 * Offset to write to.
	 */
	struct vmmdev_hgcm_function_parameter offset;

	/**
	 * value64, in/out:
	 * Bytes to write/How many were written.
	 */
	struct vmmdev_hgcm_function_parameter cb;

	/**
	 * pointer, in:
	 * Data to write.
	 */
	struct vmmdev_hgcm_function_parameter buffer;

};

/* Number of parameters */
#define SHFL_CPARMS_WRITE (5)
652
653
/*
 * SHFL_FN_LIST
 * Listing information includes variable length RTDIRENTRY[EX] structures.
 */

/* List flags for struct shfl_list::flags. */
#define SHFL_LIST_NONE			0
#define SHFL_LIST_RETURN_ONE		1

/** SHFL_FN_LIST Parameters structure. */
struct shfl_list {
	/**
	 * pointer, in: SHFLROOT (u32)
	 * Root handle of the mapped folder to operate on.
	 */
	struct vmmdev_hgcm_function_parameter root;

	/**
	 * value64, in:
	 * SHFLHANDLE (u64) of object to be listed.
	 */
	struct vmmdev_hgcm_function_parameter handle;

	/**
	 * value32, in:
	 * List flags SHFL_LIST_*.
	 */
	struct vmmdev_hgcm_function_parameter flags;

	/**
	 * value32, in/out:
	 * Bytes to be used for listing information/How many bytes were used.
	 */
	struct vmmdev_hgcm_function_parameter cb;

	/**
	 * pointer, in/optional
	 * Points to struct shfl_string buffer that specifies a search path.
	 */
	struct vmmdev_hgcm_function_parameter path;

	/**
	 * pointer, out:
	 * Buffer to place listing information to. (struct shfl_dirinfo)
	 */
	struct vmmdev_hgcm_function_parameter buffer;

	/**
	 * value32, in/out:
	 * Indicates a key where the listing must be resumed.
	 * in: 0 means start from begin of object.
	 * out: 0 means listing completed.
	 */
	struct vmmdev_hgcm_function_parameter resume_point;

	/**
	 * pointer, out:
	 * Number of files returned
	 */
	struct vmmdev_hgcm_function_parameter file_count;
};

/* Number of parameters */
#define SHFL_CPARMS_LIST (8)
717
718
/** SHFL_FN_READLINK Parameters structure. */
struct shfl_readLink {
	/**
	 * pointer, in: SHFLROOT (u32)
	 * Root handle of the mapped folder to operate on.
	 */
	struct vmmdev_hgcm_function_parameter root;

	/**
	 * pointer, in:
	 * Points to struct shfl_string buffer.
	 */
	struct vmmdev_hgcm_function_parameter path;

	/**
	 * pointer, out:
	 * Buffer to place the symlink target into.
	 */
	struct vmmdev_hgcm_function_parameter buffer;

};

/* Number of parameters */
#define SHFL_CPARMS_READLINK (3)
743
744
/* SHFL_FN_INFORMATION */

/* Mask of Set/Get bit. */
#define SHFL_INFO_MODE_MASK	(0x1)
/* Get information */
#define SHFL_INFO_GET		(0x0)
/* Set information */
#define SHFL_INFO_SET		(0x1)

/* Get name of the object. */
#define SHFL_INFO_NAME		(0x2)
/* Set size of object (extend/truncate); only applies to file objects */
#define SHFL_INFO_SIZE		(0x4)
/* Get/Set file object info. */
#define SHFL_INFO_FILE		(0x8)
/* Get volume information. */
#define SHFL_INFO_VOLUME	(0x10)

/** SHFL_FN_INFORMATION Parameters structure. */
struct shfl_information {
	/**
	 * pointer, in: SHFLROOT (u32)
	 * Root handle of the mapped folder to operate on.
	 */
	struct vmmdev_hgcm_function_parameter root;

	/**
	 * value64, in:
	 * SHFLHANDLE (u64) of object to be queried/modified.
	 */
	struct vmmdev_hgcm_function_parameter handle;

	/**
	 * value32, in:
	 * SHFL_INFO_*
	 */
	struct vmmdev_hgcm_function_parameter flags;

	/**
	 * value32, in/out:
	 * Bytes to be used for information/How many bytes were used.
	 */
	struct vmmdev_hgcm_function_parameter cb;

	/**
	 * pointer, in/out:
	 * Information to be set/get (shfl_fsobjinfo or shfl_string). Do not
	 * forget to set the shfl_fsobjinfo::attr::additional for a get
	 * operation as well.
	 */
	struct vmmdev_hgcm_function_parameter info;

};

/* Number of parameters */
#define SHFL_CPARMS_INFORMATION (5)
801
802
/* SHFL_FN_REMOVE */

/* Object-type flags for struct shfl_remove::flags. */
#define SHFL_REMOVE_FILE	(0x1)
#define SHFL_REMOVE_DIR		(0x2)
#define SHFL_REMOVE_SYMLINK	(0x4)

/** SHFL_FN_REMOVE Parameters structure. */
struct shfl_remove {
	/**
	 * pointer, in: SHFLROOT (u32)
	 * Root handle of the mapped folder to operate on.
	 */
	struct vmmdev_hgcm_function_parameter root;

	/**
	 * pointer, in:
	 * Points to struct shfl_string buffer.
	 */
	struct vmmdev_hgcm_function_parameter path;

	/**
	 * value32, in:
	 * remove flags (file/directory)
	 */
	struct vmmdev_hgcm_function_parameter flags;

};

#define SHFL_CPARMS_REMOVE (3)
832
833
/* SHFL_FN_RENAME */

/* Flags for struct shfl_rename::flags. */
#define SHFL_RENAME_FILE		(0x1)
#define SHFL_RENAME_DIR			(0x2)
#define SHFL_RENAME_REPLACE_IF_EXISTS	(0x4)

/** SHFL_FN_RENAME Parameters structure. */
struct shfl_rename {
	/**
	 * pointer, in: SHFLROOT (u32)
	 * Root handle of the mapped folder to operate on.
	 */
	struct vmmdev_hgcm_function_parameter root;

	/**
	 * pointer, in:
	 * Points to struct shfl_string src.
	 */
	struct vmmdev_hgcm_function_parameter src;

	/**
	 * pointer, in:
	 * Points to struct shfl_string dest.
	 */
	struct vmmdev_hgcm_function_parameter dest;

	/**
	 * value32, in:
	 * rename flags (file/directory)
	 */
	struct vmmdev_hgcm_function_parameter flags;

};

#define SHFL_CPARMS_RENAME (4)
869
870
/** SHFL_FN_SYMLINK Parameters structure. */
struct shfl_symlink {
	/**
	 * pointer, in: SHFLROOT (u32)
	 * Root handle of the mapped folder to operate on.
	 */
	struct vmmdev_hgcm_function_parameter root;

	/**
	 * pointer, in:
	 * Points to struct shfl_string of path for the new symlink.
	 */
	struct vmmdev_hgcm_function_parameter new_path;

	/**
	 * pointer, in:
	 * Points to struct shfl_string of destination for symlink.
	 */
	struct vmmdev_hgcm_function_parameter old_path;

	/**
	 * pointer, out:
	 * Information about created symlink.
	 */
	struct vmmdev_hgcm_function_parameter info;

};

#define SHFL_CPARMS_SYMLINK (4)
900
901#endif
diff --git a/drivers/staging/vboxsf/super.c b/drivers/staging/vboxsf/super.c
new file mode 100644
index 000000000000..0bf4d724aefd
--- /dev/null
+++ b/drivers/staging/vboxsf/super.c
@@ -0,0 +1,501 @@
1// SPDX-License-Identifier: MIT
2/*
3 * VirtualBox Guest Shared Folders support: Virtual File System.
4 *
5 * Module initialization/finalization
6 * File system registration/deregistration
7 * Superblock reading
8 * Few utility functions
9 *
10 * Copyright (C) 2006-2018 Oracle Corporation
11 */
12
13#include <linux/idr.h>
14#include <linux/fs_parser.h>
15#include <linux/magic.h>
16#include <linux/module.h>
17#include <linux/nls.h>
18#include <linux/statfs.h>
19#include <linux/vbox_utils.h>
20#include "vfsmod.h"
21
#define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */

/*
 * Signature bytes the obsolete mount.vboxsf helper prepended to its binary
 * mount-data blob; used only to detect and reject such mounts.
 */
#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')
#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')
#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')
#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')

/* Module option: when set, the host resolves symlinks instead of the guest. */
static int follow_symlinks;
module_param(follow_symlinks, int, 0444);
MODULE_PARM_DESC(follow_symlinks,
		 "Let host resolve symlinks rather than showing them");

/* Allocates a unique backing-dev-info id per mount. */
static DEFINE_IDA(vboxsf_bdi_ida);
/* Serializes the one-time host-connection setup done in vboxsf_setup(). */
static DEFINE_MUTEX(vboxsf_setup_mutex);
static bool vboxsf_setup_done;
static struct super_operations vboxsf_super_ops; /* forward declaration */
static struct kmem_cache *vboxsf_inode_cachep;

static char * const vboxsf_default_nls = CONFIG_NLS_DEFAULT;

/* Mount-option tokens, indices into vboxsf_param_specs[]. */
enum { opt_nls, opt_uid, opt_gid, opt_ttl, opt_dmode, opt_fmode,
       opt_dmask, opt_fmask };

static const struct fs_parameter_spec vboxsf_param_specs[] = {
	fsparam_string	("nls",		opt_nls),
	fsparam_u32	("uid",		opt_uid),
	fsparam_u32	("gid",		opt_gid),
	fsparam_u32	("ttl",		opt_ttl),
	fsparam_u32oct	("dmode",	opt_dmode),
	fsparam_u32oct	("fmode",	opt_fmode),
	fsparam_u32oct	("dmask",	opt_dmask),
	fsparam_u32oct	("fmask",	opt_fmask),
	{}
};

static const struct fs_parameter_description vboxsf_fs_parameters = {
	.name  = "vboxsf",
	.specs = vboxsf_param_specs,
};
61
/*
 * Parse one mount option (new mount-API callback).
 *
 * Parsed values are stored in the per-fs-context options struct and applied
 * to the superblock later, in vboxsf_fill_super() / vboxsf_reconfigure().
 * Returns 0 on success or a negative errno.
 */
static int vboxsf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct vboxsf_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	kuid_t uid;
	kgid_t gid;
	int opt;

	opt = fs_parse(fc, &vboxsf_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case opt_nls:
		/* The nls table is only loaded at mount time */
		if (fc->purpose != FS_CONTEXT_FOR_MOUNT) {
			vbg_err("vboxsf: Cannot reconfigure nls option\n");
			return -EINVAL;
		}
		/* Take ownership of the string; freed in vboxsf_free_fc() */
		ctx->nls_name = param->string;
		param->string = NULL;
		break;
	case opt_uid:
		uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(uid))
			return -EINVAL;
		ctx->o.uid = uid;
		break;
	case opt_gid:
		gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(gid))
			return -EINVAL;
		ctx->o.gid = gid;
		break;
	case opt_ttl:
		ctx->o.ttl = msecs_to_jiffies(result.uint_32);
		break;
	case opt_dmode:
		/* Forced modes carry permission bits only, no suid/sticky */
		if (result.uint_32 & ~0777)
			return -EINVAL;
		ctx->o.dmode = result.uint_32;
		ctx->o.dmode_set = true;
		break;
	case opt_fmode:
		if (result.uint_32 & ~0777)
			return -EINVAL;
		ctx->o.fmode = result.uint_32;
		ctx->o.fmode_set = true;
		break;
	case opt_dmask:
		if (result.uint_32 & ~07777)
			return -EINVAL;
		ctx->o.dmask = result.uint_32;
		break;
	case opt_fmask:
		if (result.uint_32 & ~07777)
			return -EINVAL;
		ctx->o.fmask = result.uint_32;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
126
127static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
128{
129 struct vboxsf_fs_context *ctx = fc->fs_private;
130 struct shfl_string *folder_name, root_path;
131 struct vboxsf_sbi *sbi;
132 struct dentry *droot;
133 struct inode *iroot;
134 char *nls_name;
135 size_t size;
136 int err;
137
138 if (!fc->source)
139 return -EINVAL;
140
141 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
142 if (!sbi)
143 return -ENOMEM;
144
145 sbi->o = ctx->o;
146 idr_init(&sbi->ino_idr);
147 spin_lock_init(&sbi->ino_idr_lock);
148 sbi->next_generation = 1;
149 sbi->bdi_id = -1;
150
151 /* Load nls if not utf8 */
152 nls_name = ctx->nls_name ? ctx->nls_name : vboxsf_default_nls;
153 if (strcmp(nls_name, "utf8") != 0) {
154 if (nls_name == vboxsf_default_nls)
155 sbi->nls = load_nls_default();
156 else
157 sbi->nls = load_nls(nls_name);
158
159 if (!sbi->nls) {
160 vbg_err("vboxsf: Count not load '%s' nls\n", nls_name);
161 err = -EINVAL;
162 goto fail_free;
163 }
164 }
165
166 sbi->bdi_id = ida_simple_get(&vboxsf_bdi_ida, 0, 0, GFP_KERNEL);
167 if (sbi->bdi_id < 0) {
168 err = sbi->bdi_id;
169 goto fail_free;
170 }
171
172 err = super_setup_bdi_name(sb, "vboxsf-%s.%d", fc->source, sbi->bdi_id);
173 if (err)
174 goto fail_free;
175
176 /* Turn source into a shfl_string and map the folder */
177 size = strlen(fc->source) + 1;
178 folder_name = kmalloc(SHFLSTRING_HEADER_SIZE + size, GFP_KERNEL);
179 if (!folder_name) {
180 err = -ENOMEM;
181 goto fail_free;
182 }
183 folder_name->size = size;
184 folder_name->length = size - 1;
185 strlcpy(folder_name->string.utf8, fc->source, size);
186 err = vboxsf_map_folder(folder_name, &sbi->root);
187 kfree(folder_name);
188 if (err) {
189 vbg_err("vboxsf: Host rejected mount of '%s' with error %d\n",
190 fc->source, err);
191 goto fail_free;
192 }
193
194 root_path.length = 1;
195 root_path.size = 2;
196 root_path.string.utf8[0] = '/';
197 root_path.string.utf8[1] = 0;
198 err = vboxsf_stat(sbi, &root_path, &sbi->root_info);
199 if (err)
200 goto fail_unmap;
201
202 sb->s_magic = VBOXSF_SUPER_MAGIC;
203 sb->s_blocksize = 1024;
204 sb->s_maxbytes = MAX_LFS_FILESIZE;
205 sb->s_op = &vboxsf_super_ops;
206 sb->s_d_op = &vboxsf_dentry_ops;
207
208 iroot = iget_locked(sb, 0);
209 if (!iroot) {
210 err = -ENOMEM;
211 goto fail_unmap;
212 }
213 vboxsf_init_inode(sbi, iroot, &sbi->root_info);
214 unlock_new_inode(iroot);
215
216 droot = d_make_root(iroot);
217 if (!droot) {
218 err = -ENOMEM;
219 goto fail_unmap;
220 }
221
222 sb->s_root = droot;
223 sb->s_fs_info = sbi;
224 return 0;
225
226fail_unmap:
227 vboxsf_unmap_folder(sbi->root);
228fail_free:
229 if (sbi->bdi_id >= 0)
230 ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
231 if (sbi->nls)
232 unload_nls(sbi->nls);
233 idr_destroy(&sbi->ino_idr);
234 kfree(sbi);
235 return err;
236}
237
238static void vboxsf_inode_init_once(void *data)
239{
240 struct vboxsf_inode *sf_i = data;
241
242 mutex_init(&sf_i->handle_list_mutex);
243 inode_init_once(&sf_i->vfs_inode);
244}
245
246static struct inode *vboxsf_alloc_inode(struct super_block *sb)
247{
248 struct vboxsf_inode *sf_i;
249
250 sf_i = kmem_cache_alloc(vboxsf_inode_cachep, GFP_NOFS);
251 if (!sf_i)
252 return NULL;
253
254 sf_i->force_restat = 0;
255 INIT_LIST_HEAD(&sf_i->handle_list);
256
257 return &sf_i->vfs_inode;
258}
259
260static void vboxsf_free_inode(struct inode *inode)
261{
262 struct vboxsf_sbi *sbi = VBOXSF_SBI(inode->i_sb);
263 unsigned long flags;
264
265 spin_lock_irqsave(&sbi->ino_idr_lock, flags);
266 idr_remove(&sbi->ino_idr, inode->i_ino);
267 spin_unlock_irqrestore(&sbi->ino_idr_lock, flags);
268 kmem_cache_free(vboxsf_inode_cachep, VBOXSF_I(inode));
269}
270
/*
 * super_operations.put_super: unmap the shared folder on the host and free
 * all per-superblock resources. Teardown order matters: the idr may still be
 * touched by RCU-delayed vboxsf_free_inode() calls, so rcu_barrier() must run
 * before idr_destroy().
 */
static void vboxsf_put_super(struct super_block *sb)
{
	struct vboxsf_sbi *sbi = VBOXSF_SBI(sb);

	vboxsf_unmap_folder(sbi->root);
	if (sbi->bdi_id >= 0)
		ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
	if (sbi->nls)
		unload_nls(sbi->nls);

	/*
	 * vboxsf_free_inode uses the idr, make sure all delayed rcu free
	 * inodes are flushed.
	 */
	rcu_barrier();
	idr_destroy(&sbi->ino_idr);
	kfree(sbi);
}
289
/*
 * super_operations.statfs: query volume info from the host and convert it
 * to struct kstatfs. Note do_div() divides its first argument in place, so
 * the shfl_volinfo byte counts are turned into allocation-unit counts before
 * being copied out.
 */
static int vboxsf_statfs(struct dentry *dentry, struct kstatfs *stat)
{
	struct super_block *sb = dentry->d_sb;
	struct shfl_volinfo shfl_volinfo;
	struct vboxsf_sbi *sbi;
	u32 buf_len;
	int err;

	sbi = VBOXSF_SBI(sb);
	buf_len = sizeof(shfl_volinfo);
	err = vboxsf_fsinfo(sbi->root, 0, SHFL_INFO_GET | SHFL_INFO_VOLUME,
			    &buf_len, &shfl_volinfo);
	if (err)
		return err;

	stat->f_type = VBOXSF_SUPER_MAGIC;
	stat->f_bsize = shfl_volinfo.bytes_per_allocation_unit;

	/* bytes -> blocks, in place */
	do_div(shfl_volinfo.total_allocation_bytes,
	       shfl_volinfo.bytes_per_allocation_unit);
	stat->f_blocks = shfl_volinfo.total_allocation_bytes;

	do_div(shfl_volinfo.available_allocation_bytes,
	       shfl_volinfo.bytes_per_allocation_unit);
	stat->f_bfree  = shfl_volinfo.available_allocation_bytes;
	stat->f_bavail = shfl_volinfo.available_allocation_bytes;

	/* The host does not report inode counts; fake plausible values */
	stat->f_files = 1000;
	/*
	 * Don't return 0 here since the guest may then think that it is not
	 * possible to create any more files.
	 */
	stat->f_ffree = 1000000;
	stat->f_fsid.val[0] = 0;
	stat->f_fsid.val[1] = 0;
	stat->f_namelen = 255;
	return 0;
}
328
/* Non-const because it is forward-declared at the top of this file. */
static struct super_operations vboxsf_super_ops = {
	.alloc_inode	= vboxsf_alloc_inode,
	.free_inode	= vboxsf_free_inode,
	.put_super	= vboxsf_put_super,
	.statfs		= vboxsf_statfs,
};
335
/*
 * One-time lazy initialization, performed on the first mount rather than at
 * module load: create the inode cache, connect to the vboxguest PCI device
 * and configure utf8/symlink handling on the host. Protected by
 * vboxsf_setup_mutex; subsequent calls return 0 immediately.
 */
static int vboxsf_setup(void)
{
	int err;

	mutex_lock(&vboxsf_setup_mutex);

	if (vboxsf_setup_done)
		goto success;

	vboxsf_inode_cachep =
		kmem_cache_create("vboxsf_inode_cache",
				  sizeof(struct vboxsf_inode), 0,
				  (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD |
				   SLAB_ACCOUNT),
				  vboxsf_inode_init_once);
	if (!vboxsf_inode_cachep) {
		err = -ENOMEM;
		goto fail_nomem;
	}

	err = vboxsf_connect();
	if (err) {
		vbg_err("vboxsf: err %d connecting to guest PCI-device\n", err);
		vbg_err("vboxsf: make sure you are inside a VirtualBox VM\n");
		vbg_err("vboxsf: and check dmesg for vboxguest errors\n");
		goto fail_free_cache;
	}

	err = vboxsf_set_utf8();
	if (err) {
		vbg_err("vboxsf_setutf8 error %d\n", err);
		goto fail_disconnect;
	}

	/* Showing symlinks is best-effort; failure only logs a warning */
	if (!follow_symlinks) {
		err = vboxsf_set_symlinks();
		if (err)
			vbg_warn("vboxsf: Unable to show symlinks: %d\n", err);
	}

	vboxsf_setup_done = true;
success:
	mutex_unlock(&vboxsf_setup_mutex);
	return 0;

fail_disconnect:
	vboxsf_disconnect();
fail_free_cache:
	kmem_cache_destroy(vboxsf_inode_cachep);
fail_nomem:
	mutex_unlock(&vboxsf_setup_mutex);
	return err;
}
389
/*
 * Reject the binary mount-data blob produced by the obsolete mount.vboxsf
 * helper (recognized by its 4-byte signature), then fall back to normal
 * comma-separated option parsing.
 *
 * NOTE(review): options[1..3] are read whenever options[0] == '\000'.
 * This presumably relies on mount(2) data being copied as a full page,
 * making the 4-byte read always in-bounds — confirm against copy_mount_options().
 */
static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
{
	char *options = data;

	if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&
		       options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&
		       options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&
		       options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {
		vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");
		return -EINVAL;
	}

	return generic_parse_monolithic(fc, data);
}
404
405static int vboxsf_get_tree(struct fs_context *fc)
406{
407 int err;
408
409 err = vboxsf_setup();
410 if (err)
411 return err;
412
413 return vfs_get_super(fc, vfs_get_independent_super, vboxsf_fill_super);
414}
415
416static int vboxsf_reconfigure(struct fs_context *fc)
417{
418 struct vboxsf_sbi *sbi = VBOXSF_SBI(fc->root->d_sb);
419 struct vboxsf_fs_context *ctx = fc->fs_private;
420 struct inode *iroot;
421
422 iroot = ilookup(fc->root->d_sb, 0);
423 if (!iroot)
424 return -ENOENT;
425
426 /* Apply changed options to the root inode */
427 sbi->o = ctx->o;
428 vboxsf_init_inode(sbi, iroot, &sbi->root_info);
429
430 return 0;
431}
432
433static void vboxsf_free_fc(struct fs_context *fc)
434{
435 struct vboxsf_fs_context *ctx = fc->fs_private;
436
437 kfree(ctx->nls_name);
438 kfree(ctx);
439}
440
/* New mount-API callbacks for the vboxsf fs_context. */
static const struct fs_context_operations vboxsf_context_ops = {
	.free			= vboxsf_free_fc,
	.parse_param		= vboxsf_parse_param,
	.parse_monolithic	= vboxsf_parse_monolithic,
	.get_tree		= vboxsf_get_tree,
	.reconfigure		= vboxsf_reconfigure,
};
448
449static int vboxsf_init_fs_context(struct fs_context *fc)
450{
451 struct vboxsf_fs_context *ctx;
452
453 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
454 if (!ctx)
455 return -ENOMEM;
456
457 current_uid_gid(&ctx->o.uid, &ctx->o.gid);
458
459 fc->fs_private = ctx;
460 fc->ops = &vboxsf_context_ops;
461 return 0;
462}
463
/* Filesystem registration record for "vboxsf". */
static struct file_system_type vboxsf_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "vboxsf",
	.init_fs_context	= vboxsf_init_fs_context,
	.parameters		= &vboxsf_fs_parameters,
	.kill_sb		= kill_anon_super
};
471
/* Module initialization/finalization handlers */
/* Host connection + inode cache are created lazily on first mount instead. */
static int __init vboxsf_init(void)
{
	return register_filesystem(&vboxsf_fs_type);
}
477
478static void __exit vboxsf_fini(void)
479{
480 unregister_filesystem(&vboxsf_fs_type);
481
482 mutex_lock(&vboxsf_setup_mutex);
483 if (vboxsf_setup_done) {
484 vboxsf_disconnect();
485 /*
486 * Make sure all delayed rcu free inodes are flushed
487 * before we destroy the cache.
488 */
489 rcu_barrier();
490 kmem_cache_destroy(vboxsf_inode_cachep);
491 }
492 mutex_unlock(&vboxsf_setup_mutex);
493}
494
495module_init(vboxsf_init);
496module_exit(vboxsf_fini);
497
498MODULE_DESCRIPTION("Oracle VM VirtualBox Module for Host File System Access");
499MODULE_AUTHOR("Oracle Corporation");
500MODULE_LICENSE("GPL v2");
501MODULE_ALIAS_FS("vboxsf");
diff --git a/drivers/staging/vboxsf/utils.c b/drivers/staging/vboxsf/utils.c
new file mode 100644
index 000000000000..34a49e6f74fc
--- /dev/null
+++ b/drivers/staging/vboxsf/utils.c
@@ -0,0 +1,551 @@
1// SPDX-License-Identifier: MIT
2/*
3 * VirtualBox Guest Shared Folders support: Utility functions.
4 * Mainly conversion from/to VirtualBox/Linux data structures.
5 *
6 * Copyright (C) 2006-2018 Oracle Corporation
7 */
8
9#include <linux/namei.h>
10#include <linux/nls.h>
11#include <linux/sizes.h>
12#include <linux/vfs.h>
13#include "vfsmod.h"
14
/*
 * Allocate a new inode and assign it a unique inode number via a cyclic idr
 * allocation. Whenever the cursor wraps around (the new id is below the
 * previous cursor position), the superblock-wide generation counter is
 * bumped so reused inode numbers still yield distinct (ino, generation)
 * pairs. Returns the inode or an ERR_PTR().
 */
struct inode *vboxsf_new_inode(struct super_block *sb)
{
	struct vboxsf_sbi *sbi = VBOXSF_SBI(sb);
	struct inode *inode;
	unsigned long flags;
	int cursor, ret;
	u32 gen;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	/* Preload outside the spinlock; GFP_ATOMIC inside it */
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&sbi->ino_idr_lock, flags);
	cursor = idr_get_cursor(&sbi->ino_idr);
	ret = idr_alloc_cyclic(&sbi->ino_idr, inode, 1, 0, GFP_ATOMIC);
	if (ret >= 0 && ret < cursor)
		sbi->next_generation++;	/* idr wrapped around */
	gen = sbi->next_generation;
	spin_unlock_irqrestore(&sbi->ino_idr_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		iput(inode);
		return ERR_PTR(ret);
	}

	inode->i_ino = ret;
	inode->i_generation = gen;
	return inode;
}
46
/* set [inode] attributes based on [info], uid/gid based on [sbi] */
void vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
		       const struct shfl_fsobjinfo *info)
{
	const struct shfl_fsobjattr *attr;
	s64 allocated;
	int mode;

	attr = &info->attr;

	/* Map one SHFL_UNIX_* permission bit to its S_* counterpart */
#define mode_set(r) ((attr->mode & (SHFL_UNIX_##r)) ? (S_##r) : 0)

	mode = mode_set(IRUSR);
	mode |= mode_set(IWUSR);
	mode |= mode_set(IXUSR);

	mode |= mode_set(IRGRP);
	mode |= mode_set(IWGRP);
	mode |= mode_set(IXGRP);

	mode |= mode_set(IROTH);
	mode |= mode_set(IWOTH);
	mode |= mode_set(IXOTH);

#undef mode_set

	/* We use the host-side values for these */
	inode->i_flags |= S_NOATIME | S_NOCMTIME;
	inode->i_mapping->a_ops = &vboxsf_reg_aops;

	/* Forced dmode/fmode mount options override the host permissions */
	if (SHFL_IS_DIRECTORY(attr->mode)) {
		inode->i_mode = sbi->o.dmode_set ? sbi->o.dmode : mode;
		inode->i_mode &= ~sbi->o.dmask;
		inode->i_mode |= S_IFDIR;
		inode->i_op = &vboxsf_dir_iops;
		inode->i_fop = &vboxsf_dir_fops;
		/*
		 * XXX: this probably should be set to the number of entries
		 * in the directory plus two (. ..)
		 */
		set_nlink(inode, 1);
	} else if (SHFL_IS_SYMLINK(attr->mode)) {
		inode->i_mode = sbi->o.fmode_set ? sbi->o.fmode : mode;
		inode->i_mode &= ~sbi->o.fmask;
		inode->i_mode |= S_IFLNK;
		inode->i_op = &vboxsf_lnk_iops;
		set_nlink(inode, 1);
	} else {
		inode->i_mode = sbi->o.fmode_set ? sbi->o.fmode : mode;
		inode->i_mode &= ~sbi->o.fmask;
		inode->i_mode |= S_IFREG;
		inode->i_op = &vboxsf_reg_iops;
		inode->i_fop = &vboxsf_reg_fops;
		set_nlink(inode, 1);
	}

	inode->i_uid = sbi->o.uid;
	inode->i_gid = sbi->o.gid;

	inode->i_size = info->size;
	inode->i_blkbits = 12;
	/* i_blocks always in units of 512 bytes! */
	allocated = info->allocated + 511;
	do_div(allocated, 512);
	inode->i_blocks = allocated;

	inode->i_atime = ns_to_timespec64(
				      info->access_time.ns_relative_to_unix_epoch);
	inode->i_ctime = ns_to_timespec64(
				      info->change_time.ns_relative_to_unix_epoch);
	inode->i_mtime = ns_to_timespec64(
				info->modification_time.ns_relative_to_unix_epoch);
}
120
121int vboxsf_create_at_dentry(struct dentry *dentry,
122 struct shfl_createparms *params)
123{
124 struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb);
125 struct shfl_string *path;
126 int err;
127
128 path = vboxsf_path_from_dentry(sbi, dentry);
129 if (IS_ERR(path))
130 return PTR_ERR(path);
131
132 err = vboxsf_create(sbi->root, path, params);
133 __putname(path);
134
135 return err;
136}
137
138int vboxsf_stat(struct vboxsf_sbi *sbi, struct shfl_string *path,
139 struct shfl_fsobjinfo *info)
140{
141 struct shfl_createparms params = {};
142 int err;
143
144 params.handle = SHFL_HANDLE_NIL;
145 params.create_flags = SHFL_CF_LOOKUP | SHFL_CF_ACT_FAIL_IF_NEW;
146
147 err = vboxsf_create(sbi->root, path, &params);
148 if (err)
149 return err;
150
151 if (params.result != SHFL_FILE_EXISTS)
152 return -ENOENT;
153
154 if (info)
155 *info = params.info;
156
157 return 0;
158}
159
160int vboxsf_stat_dentry(struct dentry *dentry, struct shfl_fsobjinfo *info)
161{
162 struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb);
163 struct shfl_string *path;
164 int err;
165
166 path = vboxsf_path_from_dentry(sbi, dentry);
167 if (IS_ERR(path))
168 return PTR_ERR(path);
169
170 err = vboxsf_stat(sbi, path, info);
171 __putname(path);
172 return err;
173}
174
/*
 * Re-fetch the host's metadata for @dentry's inode and update the inode
 * from it, unless the cached attributes are still within their ttl window
 * and no restat was forced. Returns 0 on success or a negative errno value.
 */
int vboxsf_inode_revalidate(struct dentry *dentry)
{
	struct vboxsf_sbi *sbi;
	struct vboxsf_inode *sf_i;
	struct shfl_fsobjinfo info;
	struct timespec64 prev_mtime;
	struct inode *inode;
	int err;

	if (!dentry || !d_really_is_positive(dentry))
		return -EINVAL;

	inode = d_inode(dentry);
	/* Remember the old mtime so host-side changes can be detected below */
	prev_mtime = inode->i_mtime;
	sf_i = VBOXSF_I(inode);
	sbi = VBOXSF_SBI(dentry->d_sb);
	/* Inside the ttl window the cached attributes are trusted as-is */
	if (!sf_i->force_restat) {
		if (time_before(jiffies, dentry->d_time + sbi->o.ttl))
			return 0;
	}

	err = vboxsf_stat_dentry(dentry, &info);
	if (err)
		return err;

	/* Fresh data: restart the ttl window and clear any forced restat */
	dentry->d_time = jiffies;
	sf_i->force_restat = 0;
	vboxsf_init_inode(sbi, inode, &info);

	/*
	 * If the file was changed on the host side we need to invalidate the
	 * page-cache for it. Note this also gets triggered by our own writes,
	 * this is unavoidable.
	 */
	if (timespec64_compare(&inode->i_mtime, &prev_mtime) > 0)
		invalidate_inode_pages2(inode->i_mapping);

	return 0;
}
214
/*
 * Implements ->getattr(): optionally revalidate the inode against the host,
 * honoring the AT_STATX_SYNC_TYPE hints in @flags, then fill @kstat from
 * the (possibly refreshed) inode.
 */
int vboxsf_getattr(const struct path *path, struct kstat *kstat,
		   u32 request_mask, unsigned int flags)
{
	int err;
	struct dentry *dentry = path->dentry;
	struct inode *inode = d_inode(dentry);
	struct vboxsf_inode *sf_i = VBOXSF_I(inode);

	switch (flags & AT_STATX_SYNC_TYPE) {
	case AT_STATX_DONT_SYNC:
		/* Caller accepts possibly stale cached attributes */
		err = 0;
		break;
	case AT_STATX_FORCE_SYNC:
		/* Force a host restat, bypassing the ttl-based caching */
		sf_i->force_restat = 1;
		/* fall-through */
	default:
		err = vboxsf_inode_revalidate(dentry);
	}
	if (err)
		return err;

	generic_fillattr(d_inode(dentry), kstat);
	return 0;
}
239
/*
 * Implements ->setattr(): push mode/atime/mtime and/or size changes for
 * @dentry to the host. The host interface requires the file to be opened
 * first; attribute changes and size changes are two separate host calls.
 * Returns 0 on success or a negative errno value.
 */
int vboxsf_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct vboxsf_inode *sf_i = VBOXSF_I(d_inode(dentry));
	struct vboxsf_sbi *sbi = VBOXSF_SBI(dentry->d_sb);
	struct shfl_createparms params = {};
	struct shfl_fsobjinfo info = {};
	u32 buf_len;
	int err;

	params.handle = SHFL_HANDLE_NIL;
	params.create_flags = SHFL_CF_ACT_OPEN_IF_EXISTS |
			      SHFL_CF_ACT_FAIL_IF_NEW |
			      SHFL_CF_ACCESS_ATTR_WRITE;

	/* this is at least required for Posix hosts */
	if (iattr->ia_valid & ATTR_SIZE)
		params.create_flags |= SHFL_CF_ACCESS_WRITE;

	err = vboxsf_create_at_dentry(dentry, &params);
	if (err || params.result != SHFL_FILE_EXISTS)
		return err ? err : -ENOENT;

/* Map an S_I* permission bit in ia_mode to its SHFL_UNIX_* counterpart */
#define mode_set(r) ((iattr->ia_mode & (S_##r)) ? SHFL_UNIX_##r : 0)

	/*
	 * Setting the file size and setting the other attributes has to
	 * be handled separately.
	 */
	if (iattr->ia_valid & (ATTR_MODE | ATTR_ATIME | ATTR_MTIME)) {
		if (iattr->ia_valid & ATTR_MODE) {
			info.attr.mode = mode_set(IRUSR);
			info.attr.mode |= mode_set(IWUSR);
			info.attr.mode |= mode_set(IXUSR);
			info.attr.mode |= mode_set(IRGRP);
			info.attr.mode |= mode_set(IWGRP);
			info.attr.mode |= mode_set(IXGRP);
			info.attr.mode |= mode_set(IROTH);
			info.attr.mode |= mode_set(IWOTH);
			info.attr.mode |= mode_set(IXOTH);

			/* The host also wants the file type in the mode */
			if (iattr->ia_mode & S_IFDIR)
				info.attr.mode |= SHFL_TYPE_DIRECTORY;
			else
				info.attr.mode |= SHFL_TYPE_FILE;
		}

		if (iattr->ia_valid & ATTR_ATIME)
			info.access_time.ns_relative_to_unix_epoch =
					    timespec64_to_ns(&iattr->ia_atime);

		if (iattr->ia_valid & ATTR_MTIME)
			info.modification_time.ns_relative_to_unix_epoch =
					    timespec64_to_ns(&iattr->ia_mtime);

		/*
		 * Ignore ctime (inode change time) as it can't be set
		 * from userland anyway.
		 */

		buf_len = sizeof(info);
		err = vboxsf_fsinfo(sbi->root, params.handle,
				    SHFL_INFO_SET | SHFL_INFO_FILE, &buf_len,
				    &info);
		if (err) {
			vboxsf_close(sbi->root, params.handle);
			return err;
		}

		/* the host may have given us different attr than requested */
		sf_i->force_restat = 1;
	}

#undef mode_set

	if (iattr->ia_valid & ATTR_SIZE) {
		memset(&info, 0, sizeof(info));
		info.size = iattr->ia_size;
		buf_len = sizeof(info);
		err = vboxsf_fsinfo(sbi->root, params.handle,
				    SHFL_INFO_SET | SHFL_INFO_SIZE, &buf_len,
				    &info);
		if (err) {
			vboxsf_close(sbi->root, params.handle);
			return err;
		}

		/* the host may have given us different attr than requested */
		sf_i->force_restat = 1;
	}

	vboxsf_close(sbi->root, params.handle);

	/* Update the inode with what the host has actually given us. */
	if (sf_i->force_restat)
		vboxsf_inode_revalidate(dentry);

	return 0;
}
338
339/*
340 * [dentry] contains string encoded in coding system that corresponds
341 * to [sbi]->nls, we must convert it to UTF8 here.
342 * Returns a shfl_string allocated through __getname (must be freed using
343 * __putname), or an ERR_PTR on error.
344 */
345struct shfl_string *vboxsf_path_from_dentry(struct vboxsf_sbi *sbi,
346 struct dentry *dentry)
347{
348 struct shfl_string *shfl_path;
349 int path_len, out_len, nb;
350 char *buf, *path;
351 wchar_t uni;
352 u8 *out;
353
354 buf = __getname();
355 if (!buf)
356 return ERR_PTR(-ENOMEM);
357
358 path = dentry_path_raw(dentry, buf, PATH_MAX);
359 if (IS_ERR(path)) {
360 __putname(buf);
361 return (struct shfl_string *)path;
362 }
363 path_len = strlen(path);
364
365 if (sbi->nls) {
366 shfl_path = __getname();
367 if (!shfl_path) {
368 __putname(buf);
369 return ERR_PTR(-ENOMEM);
370 }
371
372 out = shfl_path->string.utf8;
373 out_len = PATH_MAX - SHFLSTRING_HEADER_SIZE - 1;
374
375 while (path_len) {
376 nb = sbi->nls->char2uni(path, path_len, &uni);
377 if (nb < 0) {
378 __putname(shfl_path);
379 __putname(buf);
380 return ERR_PTR(-EINVAL);
381 }
382 path += nb;
383 path_len -= nb;
384
385 nb = utf32_to_utf8(uni, out, out_len);
386 if (nb < 0) {
387 __putname(shfl_path);
388 __putname(buf);
389 return ERR_PTR(-ENAMETOOLONG);
390 }
391 out += nb;
392 out_len -= nb;
393 }
394 *out = 0;
395 shfl_path->length = out - shfl_path->string.utf8;
396 shfl_path->size = shfl_path->length + 1;
397 __putname(buf);
398 } else {
399 if ((SHFLSTRING_HEADER_SIZE + path_len + 1) > PATH_MAX) {
400 __putname(buf);
401 return ERR_PTR(-ENAMETOOLONG);
402 }
403 /*
404 * dentry_path stores the name at the end of buf, but the
405 * shfl_string string we return must be properly aligned.
406 */
407 shfl_path = (struct shfl_string *)buf;
408 memmove(shfl_path->string.utf8, path, path_len);
409 shfl_path->string.utf8[path_len] = 0;
410 shfl_path->length = path_len;
411 shfl_path->size = path_len + 1;
412 }
413
414 return shfl_path;
415}
416
417int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
418 const unsigned char *utf8_name, size_t utf8_len)
419{
420 const char *in;
421 char *out;
422 size_t out_len;
423 size_t out_bound_len;
424 size_t in_bound_len;
425
426 in = utf8_name;
427 in_bound_len = utf8_len;
428
429 out = name;
430 out_len = 0;
431 /* Reserve space for terminating 0 */
432 out_bound_len = name_bound_len - 1;
433
434 while (in_bound_len) {
435 int nb;
436 unicode_t uni;
437
438 nb = utf8_to_utf32(in, in_bound_len, &uni);
439 if (nb < 0)
440 return -EINVAL;
441
442 in += nb;
443 in_bound_len -= nb;
444
445 nb = sbi->nls->uni2char(uni, out, out_bound_len);
446 if (nb < 0)
447 return nb;
448
449 out += nb;
450 out_bound_len -= nb;
451 out_len += nb;
452 }
453
454 *out = 0;
455
456 return 0;
457}
458
459static struct vboxsf_dir_buf *vboxsf_dir_buf_alloc(struct list_head *list)
460{
461 struct vboxsf_dir_buf *b;
462
463 b = kmalloc(sizeof(*b), GFP_KERNEL);
464 if (!b)
465 return NULL;
466
467 b->buf = kmalloc(DIR_BUFFER_SIZE, GFP_KERNEL);
468 if (!b->buf) {
469 kfree(b);
470 return NULL;
471 }
472
473 b->entries = 0;
474 b->used = 0;
475 b->free = DIR_BUFFER_SIZE;
476 list_add(&b->head, list);
477
478 return b;
479}
480
481static void vboxsf_dir_buf_free(struct vboxsf_dir_buf *b)
482{
483 list_del(&b->head);
484 kfree(b->buf);
485 kfree(b);
486}
487
488struct vboxsf_dir_info *vboxsf_dir_info_alloc(void)
489{
490 struct vboxsf_dir_info *p;
491
492 p = kmalloc(sizeof(*p), GFP_KERNEL);
493 if (!p)
494 return NULL;
495
496 INIT_LIST_HEAD(&p->info_list);
497 return p;
498}
499
500void vboxsf_dir_info_free(struct vboxsf_dir_info *p)
501{
502 struct list_head *list, *pos, *tmp;
503
504 list = &p->info_list;
505 list_for_each_safe(pos, tmp, list) {
506 struct vboxsf_dir_buf *b;
507
508 b = list_entry(pos, struct vboxsf_dir_buf, head);
509 vboxsf_dir_buf_free(b);
510 }
511 kfree(p);
512}
513
514int vboxsf_dir_read_all(struct vboxsf_sbi *sbi, struct vboxsf_dir_info *sf_d,
515 u64 handle)
516{
517 struct vboxsf_dir_buf *b;
518 u32 entries, size;
519 int err = 0;
520 void *buf;
521
522 /* vboxsf_dirinfo returns 1 on end of dir */
523 while (err == 0) {
524 b = vboxsf_dir_buf_alloc(&sf_d->info_list);
525 if (!b) {
526 err = -ENOMEM;
527 break;
528 }
529
530 buf = b->buf;
531 size = b->free;
532
533 err = vboxsf_dirinfo(sbi->root, handle, NULL, 0, 0,
534 &size, buf, &entries);
535 if (err < 0)
536 break;
537
538 b->entries += entries;
539 b->free -= size;
540 b->used += size;
541 }
542
543 if (b && b->used == 0)
544 vboxsf_dir_buf_free(b);
545
546 /* -EILSEQ means the host could not translate a filename, ignore */
547 if (err > 0 || err == -EILSEQ)
548 err = 0;
549
550 return err;
551}
diff --git a/drivers/staging/vboxsf/vboxsf_wrappers.c b/drivers/staging/vboxsf/vboxsf_wrappers.c
new file mode 100644
index 000000000000..bfc78a097dae
--- /dev/null
+++ b/drivers/staging/vboxsf/vboxsf_wrappers.c
@@ -0,0 +1,371 @@
1// SPDX-License-Identifier: MIT
2/*
3 * Wrapper functions for the shfl host calls.
4 *
5 * Copyright (C) 2006-2018 Oracle Corporation
6 */
7
8#include <linux/mm.h>
9#include <linux/slab.h>
10#include <linux/vbox_err.h>
11#include <linux/vbox_utils.h>
12#include "vfsmod.h"
13
/*
 * Requestor info sent with every HGCM call: a kernel-mode driver with
 * unknown console association and no trust level given.
 */
#define SHFL_REQUEST \
	(VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV_OTHER | \
	 VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)

/* HGCM client id of our connection to the shared-folders host service */
static u32 vboxsf_client_id;
19
20int vboxsf_connect(void)
21{
22 struct vbg_dev *gdev;
23 struct vmmdev_hgcm_service_location loc;
24 int err, vbox_status;
25
26 loc.type = VMMDEV_HGCM_LOC_LOCALHOST_EXISTING;
27 strcpy(loc.u.localhost.service_name, "VBoxSharedFolders");
28
29 gdev = vbg_get_gdev();
30 if (IS_ERR(gdev))
31 return -ENODEV; /* No guest-device */
32
33 err = vbg_hgcm_connect(gdev, SHFL_REQUEST, &loc,
34 &vboxsf_client_id, &vbox_status);
35 vbg_put_gdev(gdev);
36
37 return err ? err : vbg_status_code_to_errno(vbox_status);
38}
39
40void vboxsf_disconnect(void)
41{
42 struct vbg_dev *gdev;
43 int vbox_status;
44
45 gdev = vbg_get_gdev();
46 if (IS_ERR(gdev))
47 return; /* guest-device is gone, already disconnected */
48
49 vbg_hgcm_disconnect(gdev, SHFL_REQUEST, vboxsf_client_id, &vbox_status);
50 vbg_put_gdev(gdev);
51}
52
/*
 * Perform a shared-folders HGCM call on the connection made by
 * vboxsf_connect().
 *
 * Returns a negative errno when the HGCM transport itself fails; *@status
 * is left untouched in that case. Otherwise the raw VBox status code is
 * stored in *@status (when @status is non-NULL) and its errno translation
 * (0 on success) is returned.
 */
static int vboxsf_call(u32 function, void *parms, u32 parm_count, int *status)
{
	struct vbg_dev *gdev;
	int err, vbox_status;

	gdev = vbg_get_gdev();
	if (IS_ERR(gdev))
		return -ESHUTDOWN; /* guest-dev removed underneath us */

	err = vbg_hgcm_call(gdev, SHFL_REQUEST, vboxsf_client_id, function,
			    U32_MAX, parms, parm_count, &vbox_status);
	vbg_put_gdev(gdev);

	if (err < 0)
		return err;

	if (status)
		*status = vbox_status;

	return vbg_status_code_to_errno(vbox_status);
}
74
/*
 * Map the shared folder @folder_name, storing the host-assigned root id
 * in *@root.
 *
 * Note *@root is written unconditionally, it is only meaningful when 0 is
 * returned. A VERR_NOT_IMPLEMENTED reply means the host is too old to
 * support this call.
 */
int vboxsf_map_folder(struct shfl_string *folder_name, u32 *root)
{
	struct shfl_map_folder parms;
	int err, status;

	parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
	parms.path.u.pointer.size = shfl_string_buf_size(folder_name);
	parms.path.u.pointer.u.linear_addr = (uintptr_t)folder_name;

	/* out parameter, receives the root id */
	parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
	parms.root.u.value32 = 0;

	parms.delimiter.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
	parms.delimiter.u.value32 = '/';

	parms.case_sensitive.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
	parms.case_sensitive.u.value32 = 1;

	err = vboxsf_call(SHFL_FN_MAP_FOLDER, &parms, SHFL_CPARMS_MAP_FOLDER,
			  &status);
	if (err == -ENOSYS && status == VERR_NOT_IMPLEMENTED)
		vbg_err("%s: Error host is too old\n", __func__);

	*root = parms.root.u.value32;
	return err;
}
101
102int vboxsf_unmap_folder(u32 root)
103{
104 struct shfl_unmap_folder parms;
105
106 parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
107 parms.root.u.value32 = root;
108
109 return vboxsf_call(SHFL_FN_UNMAP_FOLDER, &parms,
110 SHFL_CPARMS_UNMAP_FOLDER, NULL);
111}
112
113/**
114 * vboxsf_create - Create a new file or folder
115 * @root: Root of the shared folder in which to create the file
116 * @parsed_path: The path of the file or folder relative to the shared folder
117 * @param: create_parms Parameters for file/folder creation.
118 *
119 * Create a new file or folder or open an existing one in a shared folder.
120 * Note this function always returns 0 / success unless an exceptional condition
121 * occurs - out of memory, invalid arguments, etc. If the file or folder could
122 * not be opened or created, create_parms->handle will be set to
123 * SHFL_HANDLE_NIL on return. In this case the value in create_parms->result
124 * provides information as to why (e.g. SHFL_FILE_EXISTS), create_parms->result
125 * is also set on success as additional information.
126 *
127 * Returns:
128 * 0 or negative errno value.
129 */
130int vboxsf_create(u32 root, struct shfl_string *parsed_path,
131 struct shfl_createparms *create_parms)
132{
133 struct shfl_create parms;
134
135 parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
136 parms.root.u.value32 = root;
137
138 parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
139 parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
140 parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
141
142 parms.parms.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
143 parms.parms.u.pointer.size = sizeof(struct shfl_createparms);
144 parms.parms.u.pointer.u.linear_addr = (uintptr_t)create_parms;
145
146 return vboxsf_call(SHFL_FN_CREATE, &parms, SHFL_CPARMS_CREATE, NULL);
147}
148
149int vboxsf_close(u32 root, u64 handle)
150{
151 struct shfl_close parms;
152
153 parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
154 parms.root.u.value32 = root;
155
156 parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
157 parms.handle.u.value64 = handle;
158
159 return vboxsf_call(SHFL_FN_CLOSE, &parms, SHFL_CPARMS_CLOSE, NULL);
160}
161
162int vboxsf_remove(u32 root, struct shfl_string *parsed_path, u32 flags)
163{
164 struct shfl_remove parms;
165
166 parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
167 parms.root.u.value32 = root;
168
169 parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
170 parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
171 parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
172
173 parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
174 parms.flags.u.value32 = flags;
175
176 return vboxsf_call(SHFL_FN_REMOVE, &parms, SHFL_CPARMS_REMOVE, NULL);
177}
178
179int vboxsf_rename(u32 root, struct shfl_string *src_path,
180 struct shfl_string *dest_path, u32 flags)
181{
182 struct shfl_rename parms;
183
184 parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
185 parms.root.u.value32 = root;
186
187 parms.src.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
188 parms.src.u.pointer.size = shfl_string_buf_size(src_path);
189 parms.src.u.pointer.u.linear_addr = (uintptr_t)src_path;
190
191 parms.dest.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
192 parms.dest.u.pointer.size = shfl_string_buf_size(dest_path);
193 parms.dest.u.pointer.u.linear_addr = (uintptr_t)dest_path;
194
195 parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
196 parms.flags.u.value32 = flags;
197
198 return vboxsf_call(SHFL_FN_RENAME, &parms, SHFL_CPARMS_RENAME, NULL);
199}
200
/*
 * Read from the open host handle @handle at @offset.
 * @buf_len: in: number of bytes to read; out: bytes actually read.
 * *@buf_len is updated from the host's reply even on error.
 * Returns 0 or a negative errno value.
 */
int vboxsf_read(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf)
{
	struct shfl_read parms;
	int err;

	parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
	parms.root.u.value32 = root;

	parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
	parms.handle.u.value64 = handle;
	parms.offset.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
	parms.offset.u.value64 = offset;
	parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
	parms.cb.u.value32 = *buf_len;
	parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
	parms.buffer.u.pointer.size = *buf_len;
	parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;

	err = vboxsf_call(SHFL_FN_READ, &parms, SHFL_CPARMS_READ, NULL);

	/* cb is updated by the host to the number of bytes read */
	*buf_len = parms.cb.u.value32;
	return err;
}
224
/*
 * Write to the open host handle @handle at @offset.
 * @buf_len: in: number of bytes to write; out: bytes actually written.
 * *@buf_len is updated from the host's reply even on error.
 * Returns 0 or a negative errno value.
 */
int vboxsf_write(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf)
{
	struct shfl_write parms;
	int err;

	parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
	parms.root.u.value32 = root;

	parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
	parms.handle.u.value64 = handle;
	parms.offset.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
	parms.offset.u.value64 = offset;
	parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
	parms.cb.u.value32 = *buf_len;
	parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
	parms.buffer.u.pointer.size = *buf_len;
	parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;

	err = vboxsf_call(SHFL_FN_WRITE, &parms, SHFL_CPARMS_WRITE, NULL);

	/* cb is updated by the host to the number of bytes written */
	*buf_len = parms.cb.u.value32;
	return err;
}
248
/*
 * List entries of the directory open as @handle on the host.
 *
 * @parsed_path may be NULL; when given it restricts the listing
 * (NOTE(review): exact pattern semantics are host-defined — verify against
 * the shared-folders protocol documentation).
 * @buf_len: in: size of @buf; out: bytes of @buf filled by the host.
 * @file_count: out: number of shfl_dirinfo entries stored in @buf.
 *
 * Returns 0 on success, 1 on end-of-dir, negative errno otherwise
 */
int vboxsf_dirinfo(u32 root, u64 handle,
		   struct shfl_string *parsed_path, u32 flags, u32 index,
		   u32 *buf_len, struct shfl_dirinfo *buf, u32 *file_count)
{
	struct shfl_list parms;
	int err, status;

	parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
	parms.root.u.value32 = root;

	parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
	parms.handle.u.value64 = handle;
	parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
	parms.flags.u.value32 = flags;
	parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
	parms.cb.u.value32 = *buf_len;
	if (parsed_path) {
		parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
		parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
		parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
	} else {
		/* No path filter: pass an empty user-addr parameter */
		parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_IN;
		parms.path.u.pointer.size = 0;
		parms.path.u.pointer.u.linear_addr = 0;
	}

	parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
	parms.buffer.u.pointer.size = *buf_len;
	parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;

	parms.resume_point.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
	parms.resume_point.u.value32 = index;
	parms.file_count.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
	parms.file_count.u.value32 = 0; /* out parameter only */

	err = vboxsf_call(SHFL_FN_LIST, &parms, SHFL_CPARMS_LIST, &status);
	/* VERR_NO_MORE_FILES is the normal end-of-dir signal, map it to 1 */
	if (err == -ENODATA && status == VERR_NO_MORE_FILES)
		err = 1;

	*buf_len = parms.cb.u.value32;
	*file_count = parms.file_count.u.value32;
	return err;
}
293
/*
 * Get or set information about the object open as @handle on the host.
 * @flags selects the operation, e.g. SHFL_INFO_SET | SHFL_INFO_FILE or
 * SHFL_INFO_SET | SHFL_INFO_SIZE (see the callers in utils.c).
 * @buf_len: in: size of @buf; out: size reported back by the host.
 * Returns 0 or a negative errno value.
 */
int vboxsf_fsinfo(u32 root, u64 handle, u32 flags,
		  u32 *buf_len, void *buf)
{
	struct shfl_information parms;
	int err;

	parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
	parms.root.u.value32 = root;

	parms.handle.type = VMMDEV_HGCM_PARM_TYPE_64BIT;
	parms.handle.u.value64 = handle;
	parms.flags.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
	parms.flags.u.value32 = flags;
	parms.cb.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
	parms.cb.u.value32 = *buf_len;
	/* in/out buffer: plain LINADDR_KERNEL (both directions) */
	parms.info.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL;
	parms.info.u.pointer.size = *buf_len;
	parms.info.u.pointer.u.linear_addr = (uintptr_t)buf;

	err = vboxsf_call(SHFL_FN_INFORMATION, &parms, SHFL_CPARMS_INFORMATION,
			  NULL);

	*buf_len = parms.cb.u.value32;
	return err;
}
319
320int vboxsf_readlink(u32 root, struct shfl_string *parsed_path,
321 u32 buf_len, u8 *buf)
322{
323 struct shfl_readLink parms;
324
325 parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
326 parms.root.u.value32 = root;
327
328 parms.path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
329 parms.path.u.pointer.size = shfl_string_buf_size(parsed_path);
330 parms.path.u.pointer.u.linear_addr = (uintptr_t)parsed_path;
331
332 parms.buffer.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
333 parms.buffer.u.pointer.size = buf_len;
334 parms.buffer.u.pointer.u.linear_addr = (uintptr_t)buf;
335
336 return vboxsf_call(SHFL_FN_READLINK, &parms, SHFL_CPARMS_READLINK,
337 NULL);
338}
339
340int vboxsf_symlink(u32 root, struct shfl_string *new_path,
341 struct shfl_string *old_path, struct shfl_fsobjinfo *buf)
342{
343 struct shfl_symlink parms;
344
345 parms.root.type = VMMDEV_HGCM_PARM_TYPE_32BIT;
346 parms.root.u.value32 = root;
347
348 parms.new_path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
349 parms.new_path.u.pointer.size = shfl_string_buf_size(new_path);
350 parms.new_path.u.pointer.u.linear_addr = (uintptr_t)new_path;
351
352 parms.old_path.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
353 parms.old_path.u.pointer.size = shfl_string_buf_size(old_path);
354 parms.old_path.u.pointer.u.linear_addr = (uintptr_t)old_path;
355
356 parms.info.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
357 parms.info.u.pointer.size = sizeof(struct shfl_fsobjinfo);
358 parms.info.u.pointer.u.linear_addr = (uintptr_t)buf;
359
360 return vboxsf_call(SHFL_FN_SYMLINK, &parms, SHFL_CPARMS_SYMLINK, NULL);
361}
362
/* Tell the host we want to exchange filenames in UTF-8. */
int vboxsf_set_utf8(void)
{
	return vboxsf_call(SHFL_FN_SET_UTF8, NULL, 0, NULL);
}
367
/*
 * Tell the host this guest supports symlinks (presumably enabling symlink
 * reporting/creation — verify against the shared-folders protocol docs).
 */
int vboxsf_set_symlinks(void)
{
	return vboxsf_call(SHFL_FN_SET_SYMLINKS, NULL, 0, NULL);
}
diff --git a/drivers/staging/vboxsf/vfsmod.h b/drivers/staging/vboxsf/vfsmod.h
new file mode 100644
index 000000000000..18f95b00fc33
--- /dev/null
+++ b/drivers/staging/vboxsf/vfsmod.h
@@ -0,0 +1,137 @@
1/* SPDX-License-Identifier: MIT */
2/*
3 * VirtualBox Guest Shared Folders support: module header.
4 *
5 * Copyright (C) 2006-2018 Oracle Corporation
6 */
7
#ifndef VFSMOD_H
#define VFSMOD_H

#include <linux/backing-dev.h>
#include <linux/idr.h>
#include "shfl_hostintf.h"

/* Size of each buffer used when reading directory listings from the host */
#define DIR_BUFFER_SIZE SZ_16K

/* The cast is to prevent assignment of void * to pointers of arbitrary type */
#define VBOXSF_SBI(sb) ((struct vboxsf_sbi *)(sb)->s_fs_info)
#define VBOXSF_I(i) container_of(i, struct vboxsf_inode, vfs_inode)

/* Mount options, shared by the fs-context and the superblock info */
struct vboxsf_options {
	unsigned long ttl;	/* attribute/dentry cache lifetime, in jiffies */
	kuid_t uid;		/* uid assigned to all inodes */
	kgid_t gid;		/* gid assigned to all inodes */
	bool dmode_set;		/* presumably: use dmode for dirs — verify */
	bool fmode_set;		/* when set, fmode overrides the host file mode */
	umode_t dmode;		/* forced mode for directories (if dmode_set) */
	umode_t fmode;		/* forced mode for regular files (if fmode_set) */
	umode_t dmask;		/* presumably a umask for directories — verify */
	umode_t fmask;		/* mode bits cleared on regular files */
};

/* State carried while parsing and applying mount options */
struct vboxsf_fs_context {
	struct vboxsf_options o;
	char *nls_name;		/* name of the nls table to translate with */
};

/* per-shared folder information */
struct vboxsf_sbi {
	struct vboxsf_options o;
	struct shfl_fsobjinfo root_info;	/* host info for the root dir */
	struct idr ino_idr;	/* presumably allocates inode numbers — verify */
	spinlock_t ino_idr_lock; /* This protects ino_idr */
	struct nls_table *nls;	/* local encoding; NULL means no translation */
	u32 next_generation;
	u32 root;		/* host root id of the mapped shared folder */
	int bdi_id;
};

/* per-inode information */
struct vboxsf_inode {
	/* some information was changed, update data on next revalidate */
	int force_restat;
	/* list of open handles for this inode + lock protecting it */
	struct list_head handle_list;
	/* This mutex protects handle_list accesses */
	struct mutex handle_list_mutex;
	/* The VFS inode struct */
	struct inode vfs_inode;
};

/* A directory listing: a list of vboxsf_dir_buf buffers */
struct vboxsf_dir_info {
	struct list_head info_list;
};

/* One buffer's worth of raw shfl_dirinfo entries read from the host */
struct vboxsf_dir_buf {
	size_t entries;		/* number of directory entries in buf */
	size_t free;		/* bytes of buf still unused */
	size_t used;		/* bytes of buf filled with entry data */
	void *buf;		/* DIR_BUFFER_SIZE bytes of entry data */
	struct list_head head;	/* entry in vboxsf_dir_info.info_list */
};

/* globals */
extern const struct inode_operations vboxsf_dir_iops;
extern const struct inode_operations vboxsf_lnk_iops;
extern const struct inode_operations vboxsf_reg_iops;
extern const struct file_operations vboxsf_dir_fops;
extern const struct file_operations vboxsf_reg_fops;
extern const struct address_space_operations vboxsf_reg_aops;
extern const struct dentry_operations vboxsf_dentry_ops;

/* from utils.c */
struct inode *vboxsf_new_inode(struct super_block *sb);
void vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
		       const struct shfl_fsobjinfo *info);
int vboxsf_create_at_dentry(struct dentry *dentry,
			    struct shfl_createparms *params);
int vboxsf_stat(struct vboxsf_sbi *sbi, struct shfl_string *path,
		struct shfl_fsobjinfo *info);
int vboxsf_stat_dentry(struct dentry *dentry, struct shfl_fsobjinfo *info);
int vboxsf_inode_revalidate(struct dentry *dentry);
int vboxsf_getattr(const struct path *path, struct kstat *kstat,
		   u32 request_mask, unsigned int query_flags);
int vboxsf_setattr(struct dentry *dentry, struct iattr *iattr);
struct shfl_string *vboxsf_path_from_dentry(struct vboxsf_sbi *sbi,
					    struct dentry *dentry);
int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
		  const unsigned char *utf8_name, size_t utf8_len);
struct vboxsf_dir_info *vboxsf_dir_info_alloc(void);
void vboxsf_dir_info_free(struct vboxsf_dir_info *p);
int vboxsf_dir_read_all(struct vboxsf_sbi *sbi, struct vboxsf_dir_info *sf_d,
			u64 handle);

/* from vboxsf_wrappers.c */
int vboxsf_connect(void);
void vboxsf_disconnect(void);

int vboxsf_create(u32 root, struct shfl_string *parsed_path,
		  struct shfl_createparms *create_parms);

int vboxsf_close(u32 root, u64 handle);
int vboxsf_remove(u32 root, struct shfl_string *parsed_path, u32 flags);
int vboxsf_rename(u32 root, struct shfl_string *src_path,
		  struct shfl_string *dest_path, u32 flags);

int vboxsf_read(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf);
int vboxsf_write(u32 root, u64 handle, u64 offset, u32 *buf_len, u8 *buf);

int vboxsf_dirinfo(u32 root, u64 handle,
		   struct shfl_string *parsed_path, u32 flags, u32 index,
		   u32 *buf_len, struct shfl_dirinfo *buf, u32 *file_count);
int vboxsf_fsinfo(u32 root, u64 handle, u32 flags,
		  u32 *buf_len, void *buf);

int vboxsf_map_folder(struct shfl_string *folder_name, u32 *root);
int vboxsf_unmap_folder(u32 root);

int vboxsf_readlink(u32 root, struct shfl_string *parsed_path,
		    u32 buf_len, u8 *buf);
int vboxsf_symlink(u32 root, struct shfl_string *new_path,
		   struct shfl_string *old_path, struct shfl_fsobjinfo *buf);

int vboxsf_set_utf8(void);
int vboxsf_set_symlinks(void);

#endif
diff --git a/drivers/thunderbolt/nhi_ops.c b/drivers/thunderbolt/nhi_ops.c
index 61cd09cef943..6795851aac95 100644
--- a/drivers/thunderbolt/nhi_ops.c
+++ b/drivers/thunderbolt/nhi_ops.c
@@ -80,7 +80,6 @@ static void icl_nhi_lc_mailbox_cmd(struct tb_nhi *nhi, enum icl_lc_mailbox_cmd c
80{ 80{
81 u32 data; 81 u32 data;
82 82
83 pci_read_config_dword(nhi->pdev, VS_CAP_19, &data);
84 data = (cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK; 83 data = (cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK;
85 pci_write_config_dword(nhi->pdev, VS_CAP_19, data | VS_CAP_19_VALID); 84 pci_write_config_dword(nhi->pdev, VS_CAP_19, data | VS_CAP_19_VALID);
86} 85}
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 410bf1bceeee..5ea8db667e83 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -896,12 +896,13 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
896 */ 896 */
897bool tb_dp_port_is_enabled(struct tb_port *port) 897bool tb_dp_port_is_enabled(struct tb_port *port)
898{ 898{
899 u32 data; 899 u32 data[2];
900 900
901 if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1)) 901 if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
902 ARRAY_SIZE(data)))
902 return false; 903 return false;
903 904
904 return !!(data & (TB_DP_VIDEO_EN | TB_DP_AUX_EN)); 905 return !!(data[0] & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
905} 906}
906 907
907/** 908/**
@@ -914,19 +915,21 @@ bool tb_dp_port_is_enabled(struct tb_port *port)
914 */ 915 */
915int tb_dp_port_enable(struct tb_port *port, bool enable) 916int tb_dp_port_enable(struct tb_port *port, bool enable)
916{ 917{
917 u32 data; 918 u32 data[2];
918 int ret; 919 int ret;
919 920
920 ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1); 921 ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
922 ARRAY_SIZE(data));
921 if (ret) 923 if (ret)
922 return ret; 924 return ret;
923 925
924 if (enable) 926 if (enable)
925 data |= TB_DP_VIDEO_EN | TB_DP_AUX_EN; 927 data[0] |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
926 else 928 else
927 data &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN); 929 data[0] &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
928 930
929 return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap, 1); 931 return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
932 ARRAY_SIZE(data));
930} 933}
931 934
932/* switch utility functions */ 935/* switch utility functions */
@@ -1031,13 +1034,6 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1031 if (sw->authorized) 1034 if (sw->authorized)
1032 goto unlock; 1035 goto unlock;
1033 1036
1034 /*
1035 * Make sure there is no PCIe rescan ongoing when a new PCIe
1036 * tunnel is created. Otherwise the PCIe rescan code might find
1037 * the new tunnel too early.
1038 */
1039 pci_lock_rescan_remove();
1040
1041 switch (val) { 1037 switch (val) {
1042 /* Approve switch */ 1038 /* Approve switch */
1043 case 1: 1039 case 1:
@@ -1057,8 +1053,6 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1057 break; 1053 break;
1058 } 1054 }
1059 1055
1060 pci_unlock_rescan_remove();
1061
1062 if (!ret) { 1056 if (!ret) {
1063 sw->authorized = val; 1057 sw->authorized = val;
1064 /* Notify status change to the userspace */ 1058 /* Notify status change to the userspace */
diff --git a/drivers/video/fbdev/c2p_core.h b/drivers/video/fbdev/c2p_core.h
index e1035a865fb9..45a6d895a7d7 100644
--- a/drivers/video/fbdev/c2p_core.h
+++ b/drivers/video/fbdev/c2p_core.h
@@ -29,7 +29,7 @@ static inline void _transp(u32 d[], unsigned int i1, unsigned int i2,
29 29
30extern void c2p_unsupported(void); 30extern void c2p_unsupported(void);
31 31
32static inline u32 get_mask(unsigned int n) 32static __always_inline u32 get_mask(unsigned int n)
33{ 33{
34 switch (n) { 34 switch (n) {
35 case 1: 35 case 1:
@@ -57,7 +57,7 @@ static inline u32 get_mask(unsigned int n)
57 * Transpose operations on 8 32-bit words 57 * Transpose operations on 8 32-bit words
58 */ 58 */
59 59
60static inline void transp8(u32 d[], unsigned int n, unsigned int m) 60static __always_inline void transp8(u32 d[], unsigned int n, unsigned int m)
61{ 61{
62 u32 mask = get_mask(n); 62 u32 mask = get_mask(n);
63 63
@@ -99,7 +99,7 @@ static inline void transp8(u32 d[], unsigned int n, unsigned int m)
99 * Transpose operations on 4 32-bit words 99 * Transpose operations on 4 32-bit words
100 */ 100 */
101 101
102static inline void transp4(u32 d[], unsigned int n, unsigned int m) 102static __always_inline void transp4(u32 d[], unsigned int n, unsigned int m)
103{ 103{
104 u32 mask = get_mask(n); 104 u32 mask = get_mask(n);
105 105
@@ -126,7 +126,7 @@ static inline void transp4(u32 d[], unsigned int n, unsigned int m)
126 * Transpose operations on 4 32-bit words (reverse order) 126 * Transpose operations on 4 32-bit words (reverse order)
127 */ 127 */
128 128
129static inline void transp4x(u32 d[], unsigned int n, unsigned int m) 129static __always_inline void transp4x(u32 d[], unsigned int n, unsigned int m)
130{ 130{
131 u32 mask = get_mask(n); 131 u32 mask = get_mask(n);
132 132
diff --git a/drivers/watchdog/bd70528_wdt.c b/drivers/watchdog/bd70528_wdt.c
index b0152fef4fc7..bc60e036627a 100644
--- a/drivers/watchdog/bd70528_wdt.c
+++ b/drivers/watchdog/bd70528_wdt.c
@@ -288,3 +288,4 @@ module_platform_driver(bd70528_wdt);
288MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>"); 288MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
289MODULE_DESCRIPTION("BD70528 watchdog driver"); 289MODULE_DESCRIPTION("BD70528 watchdog driver");
290MODULE_LICENSE("GPL"); 290MODULE_LICENSE("GPL");
291MODULE_ALIAS("platform:bd70528-wdt");
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 9393be584e72..808eeb4779e4 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -26,6 +26,7 @@
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/ioport.h> 27#include <linux/ioport.h>
28#include <linux/timer.h> 28#include <linux/timer.h>
29#include <linux/compat.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
30#include <linux/mutex.h> 31#include <linux/mutex.h>
31#include <linux/io.h> 32#include <linux/io.h>
@@ -473,6 +474,11 @@ static long cpwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
473 return 0; 474 return 0;
474} 475}
475 476
477static long cpwd_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
478{
479 return cpwd_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
480}
481
476static ssize_t cpwd_write(struct file *file, const char __user *buf, 482static ssize_t cpwd_write(struct file *file, const char __user *buf,
477 size_t count, loff_t *ppos) 483 size_t count, loff_t *ppos)
478{ 484{
@@ -497,7 +503,7 @@ static ssize_t cpwd_read(struct file *file, char __user *buffer,
497static const struct file_operations cpwd_fops = { 503static const struct file_operations cpwd_fops = {
498 .owner = THIS_MODULE, 504 .owner = THIS_MODULE,
499 .unlocked_ioctl = cpwd_ioctl, 505 .unlocked_ioctl = cpwd_ioctl,
500 .compat_ioctl = compat_ptr_ioctl, 506 .compat_ioctl = cpwd_compat_ioctl,
501 .open = cpwd_open, 507 .open = cpwd_open,
502 .write = cpwd_write, 508 .write = cpwd_write,
503 .read = cpwd_read, 509 .read = cpwd_read,
diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c
index 7ea5cf54e94a..8ed89f032ebf 100644
--- a/drivers/watchdog/imx_sc_wdt.c
+++ b/drivers/watchdog/imx_sc_wdt.c
@@ -99,8 +99,14 @@ static int imx_sc_wdt_set_pretimeout(struct watchdog_device *wdog,
99{ 99{
100 struct arm_smccc_res res; 100 struct arm_smccc_res res;
101 101
102 /*
103 * SCU firmware calculates pretimeout based on current time
104 * stamp instead of watchdog timeout stamp, need to convert
105 * the pretimeout to SCU firmware's timeout value.
106 */
102 arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_SET_PRETIME_WDOG, 107 arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_SET_PRETIME_WDOG,
103 pretimeout * 1000, 0, 0, 0, 0, 0, &res); 108 (wdog->timeout - pretimeout) * 1000, 0, 0, 0,
109 0, 0, &res);
104 if (res.a0) 110 if (res.a0)
105 return -EACCES; 111 return -EACCES;
106 112
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
index d17c1a6ed723..5a9ca10fbcfa 100644
--- a/drivers/watchdog/meson_gxbb_wdt.c
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -89,8 +89,8 @@ static unsigned int meson_gxbb_wdt_get_timeleft(struct watchdog_device *wdt_dev)
89 89
90 reg = readl(data->reg_base + GXBB_WDT_TCNT_REG); 90 reg = readl(data->reg_base + GXBB_WDT_TCNT_REG);
91 91
92 return ((reg >> GXBB_WDT_TCNT_CNT_SHIFT) - 92 return ((reg & GXBB_WDT_TCNT_SETUP_MASK) -
93 (reg & GXBB_WDT_TCNT_SETUP_MASK)) / 1000; 93 (reg >> GXBB_WDT_TCNT_CNT_SHIFT)) / 1000;
94} 94}
95 95
96static const struct watchdog_ops meson_gxbb_wdt_ops = { 96static const struct watchdog_ops meson_gxbb_wdt_ops = {
diff --git a/drivers/watchdog/pm8916_wdt.c b/drivers/watchdog/pm8916_wdt.c
index 2d3652004e39..1213179f863c 100644
--- a/drivers/watchdog/pm8916_wdt.c
+++ b/drivers/watchdog/pm8916_wdt.c
@@ -163,9 +163,17 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
163 163
164 irq = platform_get_irq(pdev, 0); 164 irq = platform_get_irq(pdev, 0);
165 if (irq > 0) { 165 if (irq > 0) {
166 if (devm_request_irq(dev, irq, pm8916_wdt_isr, 0, "pm8916_wdt", 166 err = devm_request_irq(dev, irq, pm8916_wdt_isr, 0,
167 wdt)) 167 "pm8916_wdt", wdt);
168 irq = 0; 168 if (err)
169 return err;
170
171 wdt->wdev.info = &pm8916_wdt_pt_ident;
172 } else {
173 if (irq == -EPROBE_DEFER)
174 return -EPROBE_DEFER;
175
176 wdt->wdev.info = &pm8916_wdt_ident;
169 } 177 }
170 178
171 /* Configure watchdog to hard-reset mode */ 179 /* Configure watchdog to hard-reset mode */
@@ -177,7 +185,6 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
177 return err; 185 return err;
178 } 186 }
179 187
180 wdt->wdev.info = (irq > 0) ? &pm8916_wdt_pt_ident : &pm8916_wdt_ident,
181 wdt->wdev.ops = &pm8916_wdt_ops, 188 wdt->wdev.ops = &pm8916_wdt_ops,
182 wdt->wdev.parent = dev; 189 wdt->wdev.parent = dev;
183 wdt->wdev.min_timeout = PM8916_WDT_MIN_TIMEOUT; 190 wdt->wdev.min_timeout = PM8916_WDT_MIN_TIMEOUT;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c3f386b7cc0b..c6dc4dd16cf7 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -474,6 +474,7 @@ static noinline int compress_file_range(struct async_chunk *async_chunk)
474 u64 start = async_chunk->start; 474 u64 start = async_chunk->start;
475 u64 end = async_chunk->end; 475 u64 end = async_chunk->end;
476 u64 actual_end; 476 u64 actual_end;
477 u64 i_size;
477 int ret = 0; 478 int ret = 0;
478 struct page **pages = NULL; 479 struct page **pages = NULL;
479 unsigned long nr_pages; 480 unsigned long nr_pages;
@@ -488,7 +489,19 @@ static noinline int compress_file_range(struct async_chunk *async_chunk)
488 inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1, 489 inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
489 SZ_16K); 490 SZ_16K);
490 491
491 actual_end = min_t(u64, i_size_read(inode), end + 1); 492 /*
493 * We need to save i_size before now because it could change in between
494 * us evaluating the size and assigning it. This is because we lock and
495 * unlock the page in truncate and fallocate, and then modify the i_size
496 * later on.
497 *
498 * The barriers are to emulate READ_ONCE, remove that once i_size_read
499 * does that for us.
500 */
501 barrier();
502 i_size = i_size_read(inode);
503 barrier();
504 actual_end = min_t(u64, i_size, end + 1);
492again: 505again:
493 will_compress = 0; 506 will_compress = 0;
494 nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; 507 nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 7c145a41decd..23272d9154f3 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4195,9 +4195,6 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
4195 u64 transid; 4195 u64 transid;
4196 int ret; 4196 int ret;
4197 4197
4198 btrfs_warn(root->fs_info,
4199 "START_SYNC ioctl is deprecated and will be removed in kernel 5.7");
4200
4201 trans = btrfs_attach_transaction_barrier(root); 4198 trans = btrfs_attach_transaction_barrier(root);
4202 if (IS_ERR(trans)) { 4199 if (IS_ERR(trans)) {
4203 if (PTR_ERR(trans) != -ENOENT) 4200 if (PTR_ERR(trans) != -ENOENT)
@@ -4225,9 +4222,6 @@ static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
4225{ 4222{
4226 u64 transid; 4223 u64 transid;
4227 4224
4228 btrfs_warn(fs_info,
4229 "WAIT_SYNC ioctl is deprecated and will be removed in kernel 5.7");
4230
4231 if (argp) { 4225 if (argp) {
4232 if (copy_from_user(&transid, argp, sizeof(transid))) 4226 if (copy_from_user(&transid, argp, sizeof(transid)))
4233 return -EFAULT; 4227 return -EFAULT;
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 98dc092a905e..e8a4b0ebe97f 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -893,6 +893,15 @@ static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
893 while (ticket->bytes > 0 && ticket->error == 0) { 893 while (ticket->bytes > 0 && ticket->error == 0) {
894 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE); 894 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
895 if (ret) { 895 if (ret) {
896 /*
897 * Delete us from the list. After we unlock the space
898 * info, we don't want the async reclaim job to reserve
899 * space for this ticket. If that would happen, then the
900 * ticket's task would not known that space was reserved
901 * despite getting an error, resulting in a space leak
902 * (bytes_may_use counter of our space_info).
903 */
904 list_del_init(&ticket->list);
896 ticket->error = -EINTR; 905 ticket->error = -EINTR;
897 break; 906 break;
898 } 907 }
@@ -945,12 +954,24 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
945 spin_lock(&space_info->lock); 954 spin_lock(&space_info->lock);
946 ret = ticket->error; 955 ret = ticket->error;
947 if (ticket->bytes || ticket->error) { 956 if (ticket->bytes || ticket->error) {
957 /*
958 * Need to delete here for priority tickets. For regular tickets
959 * either the async reclaim job deletes the ticket from the list
960 * or we delete it ourselves at wait_reserve_ticket().
961 */
948 list_del_init(&ticket->list); 962 list_del_init(&ticket->list);
949 if (!ret) 963 if (!ret)
950 ret = -ENOSPC; 964 ret = -ENOSPC;
951 } 965 }
952 spin_unlock(&space_info->lock); 966 spin_unlock(&space_info->lock);
953 ASSERT(list_empty(&ticket->list)); 967 ASSERT(list_empty(&ticket->list));
968 /*
969 * Check that we can't have an error set if the reservation succeeded,
970 * as that would confuse tasks and lead them to error out without
971 * releasing reserved space (if an error happens the expectation is that
972 * space wasn't reserved at all).
973 */
974 ASSERT(!(ticket->bytes == 0 && ticket->error));
954 return ret; 975 return ret;
955} 976}
956 977
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 43e488f5d063..076d5b8014fb 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -686,9 +686,7 @@ static void dev_item_err(const struct extent_buffer *eb, int slot,
686static int check_dev_item(struct extent_buffer *leaf, 686static int check_dev_item(struct extent_buffer *leaf,
687 struct btrfs_key *key, int slot) 687 struct btrfs_key *key, int slot)
688{ 688{
689 struct btrfs_fs_info *fs_info = leaf->fs_info;
690 struct btrfs_dev_item *ditem; 689 struct btrfs_dev_item *ditem;
691 u64 max_devid = max(BTRFS_MAX_DEVS(fs_info), BTRFS_MAX_DEVS_SYS_CHUNK);
692 690
693 if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) { 691 if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) {
694 dev_item_err(leaf, slot, 692 dev_item_err(leaf, slot,
@@ -696,12 +694,6 @@ static int check_dev_item(struct extent_buffer *leaf,
696 key->objectid, BTRFS_DEV_ITEMS_OBJECTID); 694 key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
697 return -EUCLEAN; 695 return -EUCLEAN;
698 } 696 }
699 if (key->offset > max_devid) {
700 dev_item_err(leaf, slot,
701 "invalid devid: has=%llu expect=[0, %llu]",
702 key->offset, max_devid);
703 return -EUCLEAN;
704 }
705 ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item); 697 ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
706 if (btrfs_device_id(leaf, ditem) != key->offset) { 698 if (btrfs_device_id(leaf, ditem) != key->offset) {
707 dev_item_err(leaf, slot, 699 dev_item_err(leaf, slot,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index bdfe4493e43a..e04409f85063 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4967,6 +4967,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4967 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 4967 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4968 max_stripe_size = SZ_32M; 4968 max_stripe_size = SZ_32M;
4969 max_chunk_size = 2 * max_stripe_size; 4969 max_chunk_size = 2 * max_stripe_size;
4970 devs_max = min_t(int, devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
4970 } else { 4971 } else {
4971 btrfs_err(info, "invalid chunk type 0x%llx requested", 4972 btrfs_err(info, "invalid chunk type 0x%llx requested",
4972 type); 4973 type);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index d3b9c9d5c1bd..f5a38910a82b 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1058,6 +1058,11 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
1058 1058
1059 dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); 1059 dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
1060 1060
1061 /* remove from inode's cap rbtree, and clear auth cap */
1062 rb_erase(&cap->ci_node, &ci->i_caps);
1063 if (ci->i_auth_cap == cap)
1064 ci->i_auth_cap = NULL;
1065
1061 /* remove from session list */ 1066 /* remove from session list */
1062 spin_lock(&session->s_cap_lock); 1067 spin_lock(&session->s_cap_lock);
1063 if (session->s_cap_iterator == cap) { 1068 if (session->s_cap_iterator == cap) {
@@ -1091,11 +1096,6 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
1091 1096
1092 spin_unlock(&session->s_cap_lock); 1097 spin_unlock(&session->s_cap_lock);
1093 1098
1094 /* remove from inode list */
1095 rb_erase(&cap->ci_node, &ci->i_caps);
1096 if (ci->i_auth_cap == cap)
1097 ci->i_auth_cap = NULL;
1098
1099 if (removed) 1099 if (removed)
1100 ceph_put_cap(mdsc, cap); 1100 ceph_put_cap(mdsc, cap);
1101 1101
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 4ca0b8ff9a72..d17a789fd856 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1553,36 +1553,37 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
1553{ 1553{
1554 int valid = 0; 1554 int valid = 0;
1555 struct dentry *parent; 1555 struct dentry *parent;
1556 struct inode *dir; 1556 struct inode *dir, *inode;
1557 1557
1558 if (flags & LOOKUP_RCU) { 1558 if (flags & LOOKUP_RCU) {
1559 parent = READ_ONCE(dentry->d_parent); 1559 parent = READ_ONCE(dentry->d_parent);
1560 dir = d_inode_rcu(parent); 1560 dir = d_inode_rcu(parent);
1561 if (!dir) 1561 if (!dir)
1562 return -ECHILD; 1562 return -ECHILD;
1563 inode = d_inode_rcu(dentry);
1563 } else { 1564 } else {
1564 parent = dget_parent(dentry); 1565 parent = dget_parent(dentry);
1565 dir = d_inode(parent); 1566 dir = d_inode(parent);
1567 inode = d_inode(dentry);
1566 } 1568 }
1567 1569
1568 dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry, 1570 dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
1569 dentry, d_inode(dentry), ceph_dentry(dentry)->offset); 1571 dentry, inode, ceph_dentry(dentry)->offset);
1570 1572
1571 /* always trust cached snapped dentries, snapdir dentry */ 1573 /* always trust cached snapped dentries, snapdir dentry */
1572 if (ceph_snap(dir) != CEPH_NOSNAP) { 1574 if (ceph_snap(dir) != CEPH_NOSNAP) {
1573 dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry, 1575 dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
1574 dentry, d_inode(dentry)); 1576 dentry, inode);
1575 valid = 1; 1577 valid = 1;
1576 } else if (d_really_is_positive(dentry) && 1578 } else if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
1577 ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
1578 valid = 1; 1579 valid = 1;
1579 } else { 1580 } else {
1580 valid = dentry_lease_is_valid(dentry, flags); 1581 valid = dentry_lease_is_valid(dentry, flags);
1581 if (valid == -ECHILD) 1582 if (valid == -ECHILD)
1582 return valid; 1583 return valid;
1583 if (valid || dir_lease_is_valid(dir, dentry)) { 1584 if (valid || dir_lease_is_valid(dir, dentry)) {
1584 if (d_really_is_positive(dentry)) 1585 if (inode)
1585 valid = ceph_is_any_caps(d_inode(dentry)); 1586 valid = ceph_is_any_caps(inode);
1586 else 1587 else
1587 valid = 1; 1588 valid = 1;
1588 } 1589 }
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index d277f71abe0b..bd77adb64bfd 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -462,6 +462,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
462 err = ceph_security_init_secctx(dentry, mode, &as_ctx); 462 err = ceph_security_init_secctx(dentry, mode, &as_ctx);
463 if (err < 0) 463 if (err < 0)
464 goto out_ctx; 464 goto out_ctx;
465 } else if (!d_in_lookup(dentry)) {
466 /* If it's not being looked up, it's negative */
467 return -ENOENT;
465 } 468 }
466 469
467 /* do the open */ 470 /* do the open */
@@ -1956,10 +1959,18 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
1956 if (ceph_test_mount_opt(src_fsc, NOCOPYFROM)) 1959 if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
1957 return -EOPNOTSUPP; 1960 return -EOPNOTSUPP;
1958 1961
1962 /*
1963 * Striped file layouts require that we copy partial objects, but the
1964 * OSD copy-from operation only supports full-object copies. Limit
1965 * this to non-striped file layouts for now.
1966 */
1959 if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) || 1967 if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
1960 (src_ci->i_layout.stripe_count != dst_ci->i_layout.stripe_count) || 1968 (src_ci->i_layout.stripe_count != 1) ||
1961 (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) 1969 (dst_ci->i_layout.stripe_count != 1) ||
1970 (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
1971 dout("Invalid src/dst files layout\n");
1962 return -EOPNOTSUPP; 1972 return -EOPNOTSUPP;
1973 }
1963 1974
1964 if (len < src_ci->i_layout.object_size) 1975 if (len < src_ci->i_layout.object_size)
1965 return -EOPNOTSUPP; /* no remote copy will be done */ 1976 return -EOPNOTSUPP; /* no remote copy will be done */
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 9f135624ae47..c07407586ce8 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1434,6 +1434,7 @@ retry_lookup:
1434 dout(" final dn %p\n", dn); 1434 dout(" final dn %p\n", dn);
1435 } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP || 1435 } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1436 req->r_op == CEPH_MDS_OP_MKSNAP) && 1436 req->r_op == CEPH_MDS_OP_MKSNAP) &&
1437 test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1437 !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { 1438 !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1438 struct inode *dir = req->r_parent; 1439 struct inode *dir = req->r_parent;
1439 1440
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index edfd643a8205..b47f43fc2d68 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -268,6 +268,7 @@ static int parse_fsopt_token(char *c, void *private)
268 } 268 }
269 break; 269 break;
270 case Opt_fscache_uniq: 270 case Opt_fscache_uniq:
271#ifdef CONFIG_CEPH_FSCACHE
271 kfree(fsopt->fscache_uniq); 272 kfree(fsopt->fscache_uniq);
272 fsopt->fscache_uniq = kstrndup(argstr[0].from, 273 fsopt->fscache_uniq = kstrndup(argstr[0].from,
273 argstr[0].to-argstr[0].from, 274 argstr[0].to-argstr[0].from,
@@ -276,7 +277,10 @@ static int parse_fsopt_token(char *c, void *private)
276 return -ENOMEM; 277 return -ENOMEM;
277 fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE; 278 fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
278 break; 279 break;
279 /* misc */ 280#else
281 pr_err("fscache support is disabled\n");
282 return -EINVAL;
283#endif
280 case Opt_wsize: 284 case Opt_wsize:
281 if (intval < (int)PAGE_SIZE || intval > CEPH_MAX_WRITE_SIZE) 285 if (intval < (int)PAGE_SIZE || intval > CEPH_MAX_WRITE_SIZE)
282 return -EINVAL; 286 return -EINVAL;
@@ -353,10 +357,15 @@ static int parse_fsopt_token(char *c, void *private)
353 fsopt->flags &= ~CEPH_MOUNT_OPT_INO32; 357 fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
354 break; 358 break;
355 case Opt_fscache: 359 case Opt_fscache:
360#ifdef CONFIG_CEPH_FSCACHE
356 fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE; 361 fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
357 kfree(fsopt->fscache_uniq); 362 kfree(fsopt->fscache_uniq);
358 fsopt->fscache_uniq = NULL; 363 fsopt->fscache_uniq = NULL;
359 break; 364 break;
365#else
366 pr_err("fscache support is disabled\n");
367 return -EINVAL;
368#endif
360 case Opt_nofscache: 369 case Opt_nofscache:
361 fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE; 370 fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
362 kfree(fsopt->fscache_uniq); 371 kfree(fsopt->fscache_uniq);
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index ea735d59c36e..0abfde6d0b05 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -838,6 +838,7 @@ struct create_durable_handle_reconnect_v2 {
838 struct create_context ccontext; 838 struct create_context ccontext;
839 __u8 Name[8]; 839 __u8 Name[8];
840 struct durable_reconnect_context_v2 dcontext; 840 struct durable_reconnect_context_v2 dcontext;
841 __u8 Pad[4];
841} __packed; 842} __packed;
842 843
843/* See MS-SMB2 2.2.13.2.5 */ 844/* See MS-SMB2 2.2.13.2.5 */
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index dc5dbf6a81d7..cb61467478ca 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -101,7 +101,7 @@ static int create_link(struct config_item *parent_item,
101 } 101 }
102 target_sd->s_links++; 102 target_sd->s_links++;
103 spin_unlock(&configfs_dirent_lock); 103 spin_unlock(&configfs_dirent_lock);
104 ret = configfs_get_target_path(item, item, body); 104 ret = configfs_get_target_path(parent_item, item, body);
105 if (!ret) 105 if (!ret)
106 ret = configfs_create_link(target_sd, parent_item->ci_dentry, 106 ret = configfs_create_link(target_sd, parent_item->ci_dentry,
107 dentry, body); 107 dentry, body);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 8461a6322039..335607b8c5c0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -576,10 +576,13 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
576 spin_unlock(&inode->i_lock); 576 spin_unlock(&inode->i_lock);
577 577
578 /* 578 /*
579 * A dying wb indicates that the memcg-blkcg mapping has changed 579 * A dying wb indicates that either the blkcg associated with the
580 * and a new wb is already serving the memcg. Switch immediately. 580 * memcg changed or the associated memcg is dying. In the first
581 * case, a replacement wb should already be available and we should
582 * refresh the wb immediately. In the second case, trying to
583 * refresh will keep failing.
581 */ 584 */
582 if (unlikely(wb_dying(wbc->wb))) 585 if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
583 inode_switch_wbs(inode, wbc->wb_id); 586 inode_switch_wbs(inode, wbc->wb_id);
584} 587}
585EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode); 588EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 53939bf9d7d2..9876db52913a 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2098,53 +2098,89 @@ static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
2098 return 0; 2098 return 0;
2099} 2099}
2100 2100
2101static int ocfs2_prepare_inode_for_refcount(struct inode *inode, 2101static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
2102 struct file *file, 2102 struct buffer_head **di_bh,
2103 loff_t pos, size_t count, 2103 int meta_level,
2104 int *meta_level) 2104 int overwrite_io,
2105 int write_sem,
2106 int wait)
2105{ 2107{
2106 int ret; 2108 int ret = 0;
2107 struct buffer_head *di_bh = NULL;
2108 u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
2109 u32 clusters =
2110 ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
2111 2109
2112 ret = ocfs2_inode_lock(inode, &di_bh, 1); 2110 if (wait)
2113 if (ret) { 2111 ret = ocfs2_inode_lock(inode, NULL, meta_level);
2114 mlog_errno(ret); 2112 else
2113 ret = ocfs2_try_inode_lock(inode,
2114 overwrite_io ? NULL : di_bh, meta_level);
2115 if (ret < 0)
2115 goto out; 2116 goto out;
2117
2118 if (wait) {
2119 if (write_sem)
2120 down_write(&OCFS2_I(inode)->ip_alloc_sem);
2121 else
2122 down_read(&OCFS2_I(inode)->ip_alloc_sem);
2123 } else {
2124 if (write_sem)
2125 ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
2126 else
2127 ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);
2128
2129 if (!ret) {
2130 ret = -EAGAIN;
2131 goto out_unlock;
2132 }
2116 } 2133 }
2117 2134
2118 *meta_level = 1; 2135 return ret;
2119 2136
2120 ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX); 2137out_unlock:
2121 if (ret) 2138 brelse(*di_bh);
2122 mlog_errno(ret); 2139 ocfs2_inode_unlock(inode, meta_level);
2123out: 2140out:
2124 brelse(di_bh);
2125 return ret; 2141 return ret;
2126} 2142}
2127 2143
2144static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
2145 struct buffer_head **di_bh,
2146 int meta_level,
2147 int write_sem)
2148{
2149 if (write_sem)
2150 up_write(&OCFS2_I(inode)->ip_alloc_sem);
2151 else
2152 up_read(&OCFS2_I(inode)->ip_alloc_sem);
2153
2154 brelse(*di_bh);
2155 *di_bh = NULL;
2156
2157 if (meta_level >= 0)
2158 ocfs2_inode_unlock(inode, meta_level);
2159}
2160
2128static int ocfs2_prepare_inode_for_write(struct file *file, 2161static int ocfs2_prepare_inode_for_write(struct file *file,
2129 loff_t pos, size_t count, int wait) 2162 loff_t pos, size_t count, int wait)
2130{ 2163{
2131 int ret = 0, meta_level = 0, overwrite_io = 0; 2164 int ret = 0, meta_level = 0, overwrite_io = 0;
2165 int write_sem = 0;
2132 struct dentry *dentry = file->f_path.dentry; 2166 struct dentry *dentry = file->f_path.dentry;
2133 struct inode *inode = d_inode(dentry); 2167 struct inode *inode = d_inode(dentry);
2134 struct buffer_head *di_bh = NULL; 2168 struct buffer_head *di_bh = NULL;
2169 u32 cpos;
2170 u32 clusters;
2135 2171
2136 /* 2172 /*
2137 * We start with a read level meta lock and only jump to an ex 2173 * We start with a read level meta lock and only jump to an ex
2138 * if we need to make modifications here. 2174 * if we need to make modifications here.
2139 */ 2175 */
2140 for(;;) { 2176 for(;;) {
2141 if (wait) 2177 ret = ocfs2_inode_lock_for_extent_tree(inode,
2142 ret = ocfs2_inode_lock(inode, NULL, meta_level); 2178 &di_bh,
2143 else 2179 meta_level,
2144 ret = ocfs2_try_inode_lock(inode, 2180 overwrite_io,
2145 overwrite_io ? NULL : &di_bh, meta_level); 2181 write_sem,
2182 wait);
2146 if (ret < 0) { 2183 if (ret < 0) {
2147 meta_level = -1;
2148 if (ret != -EAGAIN) 2184 if (ret != -EAGAIN)
2149 mlog_errno(ret); 2185 mlog_errno(ret);
2150 goto out; 2186 goto out;
@@ -2156,15 +2192,8 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
2156 */ 2192 */
2157 if (!wait && !overwrite_io) { 2193 if (!wait && !overwrite_io) {
2158 overwrite_io = 1; 2194 overwrite_io = 1;
2159 if (!down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem)) {
2160 ret = -EAGAIN;
2161 goto out_unlock;
2162 }
2163 2195
2164 ret = ocfs2_overwrite_io(inode, di_bh, pos, count); 2196 ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
2165 brelse(di_bh);
2166 di_bh = NULL;
2167 up_read(&OCFS2_I(inode)->ip_alloc_sem);
2168 if (ret < 0) { 2197 if (ret < 0) {
2169 if (ret != -EAGAIN) 2198 if (ret != -EAGAIN)
2170 mlog_errno(ret); 2199 mlog_errno(ret);
@@ -2183,7 +2212,10 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
2183 * set inode->i_size at the end of a write. */ 2212 * set inode->i_size at the end of a write. */
2184 if (should_remove_suid(dentry)) { 2213 if (should_remove_suid(dentry)) {
2185 if (meta_level == 0) { 2214 if (meta_level == 0) {
2186 ocfs2_inode_unlock(inode, meta_level); 2215 ocfs2_inode_unlock_for_extent_tree(inode,
2216 &di_bh,
2217 meta_level,
2218 write_sem);
2187 meta_level = 1; 2219 meta_level = 1;
2188 continue; 2220 continue;
2189 } 2221 }
@@ -2197,18 +2229,32 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
2197 2229
2198 ret = ocfs2_check_range_for_refcount(inode, pos, count); 2230 ret = ocfs2_check_range_for_refcount(inode, pos, count);
2199 if (ret == 1) { 2231 if (ret == 1) {
2200 ocfs2_inode_unlock(inode, meta_level); 2232 ocfs2_inode_unlock_for_extent_tree(inode,
2201 meta_level = -1; 2233 &di_bh,
2202 2234 meta_level,
2203 ret = ocfs2_prepare_inode_for_refcount(inode, 2235 write_sem);
2204 file, 2236 ret = ocfs2_inode_lock_for_extent_tree(inode,
2205 pos, 2237 &di_bh,
2206 count, 2238 meta_level,
2207 &meta_level); 2239 overwrite_io,
2240 1,
2241 wait);
2242 write_sem = 1;
2243 if (ret < 0) {
2244 if (ret != -EAGAIN)
2245 mlog_errno(ret);
2246 goto out;
2247 }
2248
2249 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
2250 clusters =
2251 ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
2252 ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
2208 } 2253 }
2209 2254
2210 if (ret < 0) { 2255 if (ret < 0) {
2211 mlog_errno(ret); 2256 if (ret != -EAGAIN)
2257 mlog_errno(ret);
2212 goto out_unlock; 2258 goto out_unlock;
2213 } 2259 }
2214 2260
@@ -2219,10 +2265,10 @@ out_unlock:
2219 trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno, 2265 trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
2220 pos, count, wait); 2266 pos, count, wait);
2221 2267
2222 brelse(di_bh); 2268 ocfs2_inode_unlock_for_extent_tree(inode,
2223 2269 &di_bh,
2224 if (meta_level >= 0) 2270 meta_level,
2225 ocfs2_inode_unlock(inode, meta_level); 2271 write_sem);
2226 2272
2227out: 2273out:
2228 return ret; 2274 return ret;
diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
index e94b19782c92..ce4103208619 100644
--- a/include/asm-generic/vdso/vsyscall.h
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -25,13 +25,6 @@ static __always_inline int __arch_get_clock_mode(struct timekeeper *tk)
25} 25}
26#endif /* __arch_get_clock_mode */ 26#endif /* __arch_get_clock_mode */
27 27
28#ifndef __arch_use_vsyscall
29static __always_inline int __arch_use_vsyscall(struct vdso_data *vdata)
30{
31 return 1;
32}
33#endif /* __arch_use_vsyscall */
34
35#ifndef __arch_update_vsyscall 28#ifndef __arch_update_vsyscall
36static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata, 29static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata,
37 struct timekeeper *tk) 30 struct timekeeper *tk)
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 01f514521687..7865e6b5d36c 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -44,7 +44,20 @@ struct drm_gem_shmem_object {
44 */ 44 */
45 unsigned int pages_use_count; 45 unsigned int pages_use_count;
46 46
47 /**
48 * @madv: State for madvise
49 *
50 * 0 is active/inuse.
51 * A negative value is the object is purged.
52 * Positive values are driver specific and not used by the helpers.
53 */
47 int madv; 54 int madv;
55
56 /**
57 * @madv_list: List entry for madvise tracking
58 *
59 * Typically used by drivers to track purgeable objects
60 */
48 struct list_head madv_list; 61 struct list_head madv_list;
49 62
50 /** 63 /**
diff --git a/include/drm/drm_self_refresh_helper.h b/include/drm/drm_self_refresh_helper.h
index 5b79d253fb46..520235c20708 100644
--- a/include/drm/drm_self_refresh_helper.h
+++ b/include/drm/drm_self_refresh_helper.h
@@ -13,7 +13,8 @@ struct drm_crtc;
13 13
14void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state); 14void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state);
15void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state, 15void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state,
16 unsigned int commit_time_ms); 16 unsigned int commit_time_ms,
17 unsigned int new_self_refresh_mask);
17 18
18int drm_self_refresh_helper_init(struct drm_crtc *crtc); 19int drm_self_refresh_helper_init(struct drm_crtc *crtc);
19void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc); 20void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5b9d22338606..3bf3835d0e86 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -656,11 +656,11 @@ void bpf_map_put_with_uref(struct bpf_map *map);
656void bpf_map_put(struct bpf_map *map); 656void bpf_map_put(struct bpf_map *map);
657int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); 657int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
658void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); 658void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
659int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size); 659int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
660void bpf_map_charge_finish(struct bpf_map_memory *mem); 660void bpf_map_charge_finish(struct bpf_map_memory *mem);
661void bpf_map_charge_move(struct bpf_map_memory *dst, 661void bpf_map_charge_move(struct bpf_map_memory *dst,
662 struct bpf_map_memory *src); 662 struct bpf_map_memory *src);
663void *bpf_map_area_alloc(size_t size, int numa_node); 663void *bpf_map_area_alloc(u64 size, int numa_node);
664void bpf_map_area_free(void *base); 664void bpf_map_area_free(void *base);
665void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); 665void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
666 666
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 4ec8986e5dfb..ac6e946b6767 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -185,7 +185,7 @@ static inline void idr_preload_end(void)
185 * is convenient for a "not found" value. 185 * is convenient for a "not found" value.
186 */ 186 */
187#define idr_for_each_entry(idr, entry, id) \ 187#define idr_for_each_entry(idr, entry, id) \
188 for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id) 188 for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)
189 189
190/** 190/**
191 * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type. 191 * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
diff --git a/include/linux/mm.h b/include/linux/mm.h
index cc292273e6ba..a2adf95b3f9c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -695,11 +695,6 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
695 695
696extern void kvfree(const void *addr); 696extern void kvfree(const void *addr);
697 697
698static inline atomic_t *compound_mapcount_ptr(struct page *page)
699{
700 return &page[1].compound_mapcount;
701}
702
703static inline int compound_mapcount(struct page *page) 698static inline int compound_mapcount(struct page *page)
704{ 699{
705 VM_BUG_ON_PAGE(!PageCompound(page), page); 700 VM_BUG_ON_PAGE(!PageCompound(page), page);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 2222fa795284..270aa8fd2800 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -221,6 +221,11 @@ struct page {
221#endif 221#endif
222} _struct_page_alignment; 222} _struct_page_alignment;
223 223
224static inline atomic_t *compound_mapcount_ptr(struct page *page)
225{
226 return &page[1].compound_mapcount;
227}
228
224/* 229/*
225 * Used for sizing the vmemmap region on some architectures 230 * Used for sizing the vmemmap region on some architectures
226 */ 231 */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f91cb8898ff0..1bf83c8fcaa7 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -622,12 +622,28 @@ static inline int PageTransCompound(struct page *page)
622 * 622 *
623 * Unlike PageTransCompound, this is safe to be called only while 623 * Unlike PageTransCompound, this is safe to be called only while
624 * split_huge_pmd() cannot run from under us, like if protected by the 624 * split_huge_pmd() cannot run from under us, like if protected by the
625 * MMU notifier, otherwise it may result in page->_mapcount < 0 false 625 * MMU notifier, otherwise it may result in page->_mapcount check false
626 * positives. 626 * positives.
627 *
628 * We have to treat page cache THP differently since every subpage of it
629 * would get _mapcount inc'ed once it is PMD mapped. But, it may be PTE
630 * mapped in the current process so comparing subpage's _mapcount to
631 * compound_mapcount to filter out PTE mapped case.
627 */ 632 */
628static inline int PageTransCompoundMap(struct page *page) 633static inline int PageTransCompoundMap(struct page *page)
629{ 634{
630 return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0; 635 struct page *head;
636
637 if (!PageTransCompound(page))
638 return 0;
639
640 if (PageAnon(page))
641 return atomic_read(&page->_mapcount) < 0;
642
643 head = compound_head(page);
644 /* File THP is PMD mapped and not PTE mapped */
645 return atomic_read(&page->_mapcount) ==
646 atomic_read(compound_mapcount_ptr(head));
631} 647}
632 648
633/* 649/*
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index b5116013f27e..63e62372443a 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -316,24 +316,6 @@ radix_tree_iter_lookup(const struct radix_tree_root *root,
316} 316}
317 317
318/** 318/**
319 * radix_tree_iter_find - find a present entry
320 * @root: radix tree root
321 * @iter: iterator state
322 * @index: start location
323 *
324 * This function returns the slot containing the entry with the lowest index
325 * which is at least @index. If @index is larger than any present entry, this
326 * function returns NULL. The @iter is updated to describe the entry found.
327 */
328static inline void __rcu **
329radix_tree_iter_find(const struct radix_tree_root *root,
330 struct radix_tree_iter *iter, unsigned long index)
331{
332 radix_tree_iter_init(iter, index);
333 return radix_tree_next_chunk(root, iter, 0);
334}
335
336/**
337 * radix_tree_iter_retry - retry this chunk of the iteration 319 * radix_tree_iter_retry - retry this chunk of the iteration
338 * @iter: iterator state 320 * @iter: iterator state
339 * 321 *
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index 9326d671b6e6..eaae6b4e9f24 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -7,7 +7,7 @@
7struct reset_controller_dev; 7struct reset_controller_dev;
8 8
9/** 9/**
10 * struct reset_control_ops 10 * struct reset_control_ops - reset controller driver callbacks
11 * 11 *
12 * @reset: for self-deasserting resets, does all necessary 12 * @reset: for self-deasserting resets, does all necessary
13 * things to reset the device 13 * things to reset the device
@@ -33,7 +33,7 @@ struct of_phandle_args;
33 * @provider: name of the reset controller device controlling this reset line 33 * @provider: name of the reset controller device controlling this reset line
34 * @index: ID of the reset controller in the reset controller device 34 * @index: ID of the reset controller in the reset controller device
35 * @dev_id: name of the device associated with this reset line 35 * @dev_id: name of the device associated with this reset line
36 * @con_id name of the reset line (can be NULL) 36 * @con_id: name of the reset line (can be NULL)
37 */ 37 */
38struct reset_control_lookup { 38struct reset_control_lookup {
39 struct list_head list; 39 struct list_head list;
diff --git a/include/linux/reset.h b/include/linux/reset.h
index e7793fc0fa93..eb597e8aa430 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -143,7 +143,7 @@ static inline int device_reset_optional(struct device *dev)
143 * If this function is called more than once for the same reset_control it will 143 * If this function is called more than once for the same reset_control it will
144 * return -EBUSY. 144 * return -EBUSY.
145 * 145 *
146 * See reset_control_get_shared for details on shared references to 146 * See reset_control_get_shared() for details on shared references to
147 * reset-controls. 147 * reset-controls.
148 * 148 *
149 * Use of id names is optional. 149 * Use of id names is optional.
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index e4b3fb4bb77c..ce7055259877 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -139,6 +139,11 @@ static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
139 } 139 }
140} 140}
141 141
142static inline u32 sk_msg_iter_dist(u32 start, u32 end)
143{
144 return end >= start ? end - start : end + (MAX_MSG_FRAGS - start);
145}
146
142#define sk_msg_iter_var_prev(var) \ 147#define sk_msg_iter_var_prev(var) \
143 do { \ 148 do { \
144 if (var == 0) \ 149 if (var == 0) \
@@ -198,9 +203,7 @@ static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
198 if (sk_msg_full(msg)) 203 if (sk_msg_full(msg))
199 return MAX_MSG_FRAGS; 204 return MAX_MSG_FRAGS;
200 205
201 return msg->sg.end >= msg->sg.start ? 206 return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
202 msg->sg.end - msg->sg.start :
203 msg->sg.end + (MAX_MSG_FRAGS - msg->sg.start);
204} 207}
205 208
206static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which) 209static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 1afc125014da..3d56b026bb9e 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -159,7 +159,6 @@ struct slave {
159 unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS]; 159 unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
160 s8 link; /* one of BOND_LINK_XXXX */ 160 s8 link; /* one of BOND_LINK_XXXX */
161 s8 link_new_state; /* one of BOND_LINK_XXXX */ 161 s8 link_new_state; /* one of BOND_LINK_XXXX */
162 s8 new_link;
163 u8 backup:1, /* indicates backup slave. Value corresponds with 162 u8 backup:1, /* indicates backup slave. Value corresponds with
164 BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ 163 BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
165 inactive:1, /* indicates inactive slave */ 164 inactive:1, /* indicates inactive slave */
@@ -549,7 +548,7 @@ static inline void bond_propose_link_state(struct slave *slave, int state)
549 548
550static inline void bond_commit_link_state(struct slave *slave, bool notify) 549static inline void bond_commit_link_state(struct slave *slave, bool notify)
551{ 550{
552 if (slave->link == slave->link_new_state) 551 if (slave->link_new_state == BOND_LINK_NOCHANGE)
553 return; 552 return;
554 553
555 slave->link = slave->link_new_state; 554 slave->link = slave->link_new_state;
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index 107c0d700ed6..38a9a3d1222b 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -313,7 +313,7 @@ static int fq_init(struct fq *fq, int flows_cnt)
313 fq->limit = 8192; 313 fq->limit = 8192;
314 fq->memory_limit = 16 << 20; /* 16 MBytes */ 314 fq->memory_limit = 16 << 20; /* 16 MBytes */
315 315
316 fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL); 316 fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
317 if (!fq->flows) 317 if (!fq->flows)
318 return -ENOMEM; 318 return -ENOMEM;
319 319
@@ -331,7 +331,7 @@ static void fq_reset(struct fq *fq,
331 for (i = 0; i < fq->flows_cnt; i++) 331 for (i = 0; i < fq->flows_cnt; i++)
332 fq_flow_reset(fq, &fq->flows[i], free_func); 332 fq_flow_reset(fq, &fq->flows[i], free_func);
333 333
334 kfree(fq->flows); 334 kvfree(fq->flows);
335 fq->flows = NULL; 335 fq->flows = NULL;
336} 336}
337 337
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 50a67bd6a434..b8452cc0e059 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -439,8 +439,8 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
439{ 439{
440 unsigned long now = jiffies; 440 unsigned long now = jiffies;
441 441
442 if (neigh->used != now) 442 if (READ_ONCE(neigh->used) != now)
443 neigh->used = now; 443 WRITE_ONCE(neigh->used, now);
444 if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) 444 if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
445 return __neigh_event_send(neigh, skb); 445 return __neigh_event_send(neigh, skb);
446 return 0; 446 return 0;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 001d294edf57..2d0275f13bbf 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -820,7 +820,8 @@ struct nft_expr_ops {
820 */ 820 */
821struct nft_expr { 821struct nft_expr {
822 const struct nft_expr_ops *ops; 822 const struct nft_expr_ops *ops;
823 unsigned char data[]; 823 unsigned char data[]
824 __attribute__((aligned(__alignof__(u64))));
824}; 825};
825 826
826static inline void *nft_expr_priv(const struct nft_expr *expr) 827static inline void *nft_expr_priv(const struct nft_expr *expr)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 637548d54b3e..d80acda231ae 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -15,6 +15,7 @@
15#include <linux/mutex.h> 15#include <linux/mutex.h>
16#include <linux/rwsem.h> 16#include <linux/rwsem.h>
17#include <linux/atomic.h> 17#include <linux/atomic.h>
18#include <linux/hashtable.h>
18#include <net/gen_stats.h> 19#include <net/gen_stats.h>
19#include <net/rtnetlink.h> 20#include <net/rtnetlink.h>
20#include <net/flow_offload.h> 21#include <net/flow_offload.h>
@@ -362,6 +363,7 @@ struct tcf_proto {
362 bool deleting; 363 bool deleting;
363 refcount_t refcnt; 364 refcount_t refcnt;
364 struct rcu_head rcu; 365 struct rcu_head rcu;
366 struct hlist_node destroy_ht_node;
365}; 367};
366 368
367struct qdisc_skb_cb { 369struct qdisc_skb_cb {
@@ -414,6 +416,8 @@ struct tcf_block {
414 struct list_head filter_chain_list; 416 struct list_head filter_chain_list;
415 } chain0; 417 } chain0;
416 struct rcu_head rcu; 418 struct rcu_head rcu;
419 DECLARE_HASHTABLE(proto_destroy_ht, 7);
420 struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
417}; 421};
418 422
419#ifdef CONFIG_PROVE_LOCKING 423#ifdef CONFIG_PROVE_LOCKING
diff --git a/include/net/sock.h b/include/net/sock.h
index 8f9adcfac41b..718e62fbe869 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2342,7 +2342,7 @@ static inline ktime_t sock_read_timestamp(struct sock *sk)
2342 2342
2343 return kt; 2343 return kt;
2344#else 2344#else
2345 return sk->sk_stamp; 2345 return READ_ONCE(sk->sk_stamp);
2346#endif 2346#endif
2347} 2347}
2348 2348
@@ -2353,7 +2353,7 @@ static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
2353 sk->sk_stamp = kt; 2353 sk->sk_stamp = kt;
2354 write_sequnlock(&sk->sk_stamp_seq); 2354 write_sequnlock(&sk->sk_stamp_seq);
2355#else 2355#else
2356 sk->sk_stamp = kt; 2356 WRITE_ONCE(sk->sk_stamp, kt);
2357#endif 2357#endif
2358} 2358}
2359 2359
diff --git a/include/net/tls.h b/include/net/tls.h
index c664e6dba0d1..794e297483ea 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -40,6 +40,7 @@
40#include <linux/socket.h> 40#include <linux/socket.h>
41#include <linux/tcp.h> 41#include <linux/tcp.h>
42#include <linux/skmsg.h> 42#include <linux/skmsg.h>
43#include <linux/mutex.h>
43#include <linux/netdevice.h> 44#include <linux/netdevice.h>
44#include <linux/rcupdate.h> 45#include <linux/rcupdate.h>
45 46
@@ -269,6 +270,10 @@ struct tls_context {
269 270
270 bool in_tcp_sendpages; 271 bool in_tcp_sendpages;
271 bool pending_open_record_frags; 272 bool pending_open_record_frags;
273
274 struct mutex tx_lock; /* protects partially_sent_* fields and
275 * per-type TX fields
276 */
272 unsigned long flags; 277 unsigned long flags;
273 278
274 /* cache cold stuff */ 279 /* cache cold stuff */
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index 1e988fdeba34..6a6d2c7655ff 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
2/* 2/*
3 * linux/can.h 3 * linux/can.h
4 * 4 *
diff --git a/include/uapi/linux/can/bcm.h b/include/uapi/linux/can/bcm.h
index 0fb328d93148..dd2b925b09ac 100644
--- a/include/uapi/linux/can/bcm.h
+++ b/include/uapi/linux/can/bcm.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
2/* 2/*
3 * linux/can/bcm.h 3 * linux/can/bcm.h
4 * 4 *
diff --git a/include/uapi/linux/can/error.h b/include/uapi/linux/can/error.h
index bfc4b5d22a5e..34633283de64 100644
--- a/include/uapi/linux/can/error.h
+++ b/include/uapi/linux/can/error.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
2/* 2/*
3 * linux/can/error.h 3 * linux/can/error.h
4 * 4 *
diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h
index 3aea5388c8e4..c2190bbe21d8 100644
--- a/include/uapi/linux/can/gw.h
+++ b/include/uapi/linux/can/gw.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
2/* 2/*
3 * linux/can/gw.h 3 * linux/can/gw.h
4 * 4 *
diff --git a/include/uapi/linux/can/j1939.h b/include/uapi/linux/can/j1939.h
index c32325342d30..df6e821075c1 100644
--- a/include/uapi/linux/can/j1939.h
+++ b/include/uapi/linux/can/j1939.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2/* 2/*
3 * j1939.h 3 * j1939.h
4 * 4 *
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 1bc70d3a4d39..6f598b73839e 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2/* 2/*
3 * linux/can/netlink.h 3 * linux/can/netlink.h
4 * 4 *
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index be3b36e7ff61..6a11d308eb5c 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
2/* 2/*
3 * linux/can/raw.h 3 * linux/can/raw.h
4 * 4 *
diff --git a/include/uapi/linux/can/vxcan.h b/include/uapi/linux/can/vxcan.h
index 066812d118a2..4fa9d8777a07 100644
--- a/include/uapi/linux/can/vxcan.h
+++ b/include/uapi/linux/can/vxcan.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2#ifndef _UAPI_CAN_VXCAN_H 2#ifndef _UAPI_CAN_VXCAN_H
3#define _UAPI_CAN_VXCAN_H 3#define _UAPI_CAN_VXCAN_H
4 4
diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h
index e168dc59e9a0..d99b5a772698 100644
--- a/include/uapi/linux/nvme_ioctl.h
+++ b/include/uapi/linux/nvme_ioctl.h
@@ -63,6 +63,7 @@ struct nvme_passthru_cmd64 {
63 __u32 cdw14; 63 __u32 cdw14;
64 __u32 cdw15; 64 __u32 cdw15;
65 __u32 timeout_ms; 65 __u32 timeout_ms;
66 __u32 rsvd2;
66 __u64 result; 67 __u64 result;
67}; 68};
68 69
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 99335e1f4a27..25b4fa00bad1 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -51,6 +51,10 @@
51 * sent when the child exits. 51 * sent when the child exits.
52 * @stack: Specify the location of the stack for the 52 * @stack: Specify the location of the stack for the
53 * child process. 53 * child process.
54 * Note, @stack is expected to point to the
55 * lowest address. The stack direction will be
56 * determined by the kernel and set up
57 * appropriately based on @stack_size.
54 * @stack_size: The size of the stack for the child process. 58 * @stack_size: The size of the stack for the child process.
55 * @tls: If CLONE_SETTLS is set, the tls descriptor 59 * @tls: If CLONE_SETTLS is set, the tls descriptor
56 * is set to tls. 60 * is set to tls.
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index ddd8addcdb5c..a3eaf08e7dd3 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -1311,12 +1311,12 @@ static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
1311 return false; 1311 return false;
1312 1312
1313 switch (off) { 1313 switch (off) {
1314 case offsetof(struct bpf_sysctl, write): 1314 case bpf_ctx_range(struct bpf_sysctl, write):
1315 if (type != BPF_READ) 1315 if (type != BPF_READ)
1316 return false; 1316 return false;
1317 bpf_ctx_record_field_size(info, size_default); 1317 bpf_ctx_record_field_size(info, size_default);
1318 return bpf_ctx_narrow_access_ok(off, size, size_default); 1318 return bpf_ctx_narrow_access_ok(off, size, size_default);
1319 case offsetof(struct bpf_sysctl, file_pos): 1319 case bpf_ctx_range(struct bpf_sysctl, file_pos):
1320 if (type == BPF_READ) { 1320 if (type == BPF_READ) {
1321 bpf_ctx_record_field_size(info, size_default); 1321 bpf_ctx_record_field_size(info, size_default);
1322 return bpf_ctx_narrow_access_ok(off, size, size_default); 1322 return bpf_ctx_narrow_access_ok(off, size, size_default);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0937719b87e2..ace1cfaa24b6 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -126,7 +126,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
126 return map; 126 return map;
127} 127}
128 128
129void *bpf_map_area_alloc(size_t size, int numa_node) 129void *bpf_map_area_alloc(u64 size, int numa_node)
130{ 130{
131 /* We really just want to fail instead of triggering OOM killer 131 /* We really just want to fail instead of triggering OOM killer
132 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc, 132 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
@@ -141,6 +141,9 @@ void *bpf_map_area_alloc(size_t size, int numa_node)
141 const gfp_t flags = __GFP_NOWARN | __GFP_ZERO; 141 const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
142 void *area; 142 void *area;
143 143
144 if (size >= SIZE_MAX)
145 return NULL;
146
144 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { 147 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
145 area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags, 148 area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
146 numa_node); 149 numa_node);
@@ -197,7 +200,7 @@ static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
197 atomic_long_sub(pages, &user->locked_vm); 200 atomic_long_sub(pages, &user->locked_vm);
198} 201}
199 202
200int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size) 203int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
201{ 204{
202 u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT; 205 u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
203 struct user_struct *user; 206 struct user_struct *user;
diff --git a/kernel/fork.c b/kernel/fork.c
index bcdf53125210..55af6931c6ec 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2561,7 +2561,35 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
2561 return 0; 2561 return 0;
2562} 2562}
2563 2563
2564static bool clone3_args_valid(const struct kernel_clone_args *kargs) 2564/**
2565 * clone3_stack_valid - check and prepare stack
2566 * @kargs: kernel clone args
2567 *
2568 * Verify that the stack arguments userspace gave us are sane.
2569 * In addition, set the stack direction for userspace since it's easy for us to
2570 * determine.
2571 */
2572static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
2573{
2574 if (kargs->stack == 0) {
2575 if (kargs->stack_size > 0)
2576 return false;
2577 } else {
2578 if (kargs->stack_size == 0)
2579 return false;
2580
2581 if (!access_ok((void __user *)kargs->stack, kargs->stack_size))
2582 return false;
2583
2584#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_IA64)
2585 kargs->stack += kargs->stack_size;
2586#endif
2587 }
2588
2589 return true;
2590}
2591
2592static bool clone3_args_valid(struct kernel_clone_args *kargs)
2565{ 2593{
2566 /* 2594 /*
2567 * All lower bits of the flag word are taken. 2595 * All lower bits of the flag word are taken.
@@ -2581,6 +2609,9 @@ static bool clone3_args_valid(const struct kernel_clone_args *kargs)
2581 kargs->exit_signal) 2609 kargs->exit_signal)
2582 return false; 2610 return false;
2583 2611
2612 if (!clone3_stack_valid(kargs))
2613 return false;
2614
2584 return true; 2615 return true;
2585} 2616}
2586 2617
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 132672b74e4b..dd822fd8a7d5 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
51 * @type: Type of irqchip_fwnode. See linux/irqdomain.h 51 * @type: Type of irqchip_fwnode. See linux/irqdomain.h
52 * @name: Optional user provided domain name 52 * @name: Optional user provided domain name
53 * @id: Optional user provided id if name != NULL 53 * @id: Optional user provided id if name != NULL
54 * @data: Optional user-provided data 54 * @pa: Optional user-provided physical address
55 * 55 *
56 * Allocate a struct irqchip_fwid, and return a poiner to the embedded 56 * Allocate a struct irqchip_fwid, and return a poiner to the embedded
57 * fwnode_handle (or NULL on failure). 57 * fwnode_handle (or NULL on failure).
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index dd05a378631a..0f2eb3629070 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1073,6 +1073,7 @@ uclamp_update_active(struct task_struct *p, enum uclamp_id clamp_id)
1073 task_rq_unlock(rq, p, &rf); 1073 task_rq_unlock(rq, p, &rf);
1074} 1074}
1075 1075
1076#ifdef CONFIG_UCLAMP_TASK_GROUP
1076static inline void 1077static inline void
1077uclamp_update_active_tasks(struct cgroup_subsys_state *css, 1078uclamp_update_active_tasks(struct cgroup_subsys_state *css,
1078 unsigned int clamps) 1079 unsigned int clamps)
@@ -1091,7 +1092,6 @@ uclamp_update_active_tasks(struct cgroup_subsys_state *css,
1091 css_task_iter_end(&it); 1092 css_task_iter_end(&it);
1092} 1093}
1093 1094
1094#ifdef CONFIG_UCLAMP_TASK_GROUP
1095static void cpu_util_update_eff(struct cgroup_subsys_state *css); 1095static void cpu_util_update_eff(struct cgroup_subsys_state *css);
1096static void uclamp_update_root_tg(void) 1096static void uclamp_update_root_tg(void)
1097{ 1097{
@@ -3929,13 +3929,22 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
3929 } 3929 }
3930 3930
3931restart: 3931restart:
3932#ifdef CONFIG_SMP
3932 /* 3933 /*
3933 * Ensure that we put DL/RT tasks before the pick loop, such that they 3934 * We must do the balancing pass before put_next_task(), such
3934 * can PULL higher prio tasks when we lower the RQ 'priority'. 3935 * that when we release the rq->lock the task is in the same
3936 * state as before we took rq->lock.
3937 *
3938 * We can terminate the balance pass as soon as we know there is
3939 * a runnable task of @class priority or higher.
3935 */ 3940 */
3936 prev->sched_class->put_prev_task(rq, prev, rf); 3941 for_class_range(class, prev->sched_class, &idle_sched_class) {
3937 if (!rq->nr_running) 3942 if (class->balance(rq, prev, rf))
3938 newidle_balance(rq, rf); 3943 break;
3944 }
3945#endif
3946
3947 put_prev_task(rq, prev);
3939 3948
3940 for_each_class(class) { 3949 for_each_class(class) {
3941 p = class->pick_next_task(rq, NULL, NULL); 3950 p = class->pick_next_task(rq, NULL, NULL);
@@ -6201,7 +6210,7 @@ static struct task_struct *__pick_migrate_task(struct rq *rq)
6201 for_each_class(class) { 6210 for_each_class(class) {
6202 next = class->pick_next_task(rq, NULL, NULL); 6211 next = class->pick_next_task(rq, NULL, NULL);
6203 if (next) { 6212 if (next) {
6204 next->sched_class->put_prev_task(rq, next, NULL); 6213 next->sched_class->put_prev_task(rq, next);
6205 return next; 6214 return next;
6206 } 6215 }
6207 } 6216 }
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 2dc48720f189..a8a08030a8f7 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1691,6 +1691,22 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1691 resched_curr(rq); 1691 resched_curr(rq);
1692} 1692}
1693 1693
1694static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1695{
1696 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1697 /*
1698 * This is OK, because current is on_cpu, which avoids it being
1699 * picked for load-balance and preemption/IRQs are still
1700 * disabled avoiding further scheduler activity on it and we've
1701 * not yet started the picking loop.
1702 */
1703 rq_unpin_lock(rq, rf);
1704 pull_dl_task(rq);
1705 rq_repin_lock(rq, rf);
1706 }
1707
1708 return sched_stop_runnable(rq) || sched_dl_runnable(rq);
1709}
1694#endif /* CONFIG_SMP */ 1710#endif /* CONFIG_SMP */
1695 1711
1696/* 1712/*
@@ -1758,45 +1774,28 @@ static struct task_struct *
1758pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 1774pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1759{ 1775{
1760 struct sched_dl_entity *dl_se; 1776 struct sched_dl_entity *dl_se;
1777 struct dl_rq *dl_rq = &rq->dl;
1761 struct task_struct *p; 1778 struct task_struct *p;
1762 struct dl_rq *dl_rq;
1763 1779
1764 WARN_ON_ONCE(prev || rf); 1780 WARN_ON_ONCE(prev || rf);
1765 1781
1766 dl_rq = &rq->dl; 1782 if (!sched_dl_runnable(rq))
1767
1768 if (unlikely(!dl_rq->dl_nr_running))
1769 return NULL; 1783 return NULL;
1770 1784
1771 dl_se = pick_next_dl_entity(rq, dl_rq); 1785 dl_se = pick_next_dl_entity(rq, dl_rq);
1772 BUG_ON(!dl_se); 1786 BUG_ON(!dl_se);
1773
1774 p = dl_task_of(dl_se); 1787 p = dl_task_of(dl_se);
1775
1776 set_next_task_dl(rq, p); 1788 set_next_task_dl(rq, p);
1777
1778 return p; 1789 return p;
1779} 1790}
1780 1791
1781static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 1792static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1782{ 1793{
1783 update_curr_dl(rq); 1794 update_curr_dl(rq);
1784 1795
1785 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); 1796 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1786 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) 1797 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1787 enqueue_pushable_dl_task(rq, p); 1798 enqueue_pushable_dl_task(rq, p);
1788
1789 if (rf && !on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1790 /*
1791 * This is OK, because current is on_cpu, which avoids it being
1792 * picked for load-balance and preemption/IRQs are still
1793 * disabled avoiding further scheduler activity on it and we've
1794 * not yet started the picking loop.
1795 */
1796 rq_unpin_lock(rq, rf);
1797 pull_dl_task(rq);
1798 rq_repin_lock(rq, rf);
1799 }
1800} 1799}
1801 1800
1802/* 1801/*
@@ -2442,6 +2441,7 @@ const struct sched_class dl_sched_class = {
2442 .set_next_task = set_next_task_dl, 2441 .set_next_task = set_next_task_dl,
2443 2442
2444#ifdef CONFIG_SMP 2443#ifdef CONFIG_SMP
2444 .balance = balance_dl,
2445 .select_task_rq = select_task_rq_dl, 2445 .select_task_rq = select_task_rq_dl,
2446 .migrate_task_rq = migrate_task_rq_dl, 2446 .migrate_task_rq = migrate_task_rq_dl,
2447 .set_cpus_allowed = set_cpus_allowed_dl, 2447 .set_cpus_allowed = set_cpus_allowed_dl,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 682a754ea3e1..22a2fed29054 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6570,6 +6570,15 @@ static void task_dead_fair(struct task_struct *p)
6570{ 6570{
6571 remove_entity_load_avg(&p->se); 6571 remove_entity_load_avg(&p->se);
6572} 6572}
6573
6574static int
6575balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
6576{
6577 if (rq->nr_running)
6578 return 1;
6579
6580 return newidle_balance(rq, rf) != 0;
6581}
6573#endif /* CONFIG_SMP */ 6582#endif /* CONFIG_SMP */
6574 6583
6575static unsigned long wakeup_gran(struct sched_entity *se) 6584static unsigned long wakeup_gran(struct sched_entity *se)
@@ -6746,7 +6755,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
6746 int new_tasks; 6755 int new_tasks;
6747 6756
6748again: 6757again:
6749 if (!cfs_rq->nr_running) 6758 if (!sched_fair_runnable(rq))
6750 goto idle; 6759 goto idle;
6751 6760
6752#ifdef CONFIG_FAIR_GROUP_SCHED 6761#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -6884,7 +6893,7 @@ idle:
6884/* 6893/*
6885 * Account for a descheduled task: 6894 * Account for a descheduled task:
6886 */ 6895 */
6887static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 6896static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
6888{ 6897{
6889 struct sched_entity *se = &prev->se; 6898 struct sched_entity *se = &prev->se;
6890 struct cfs_rq *cfs_rq; 6899 struct cfs_rq *cfs_rq;
@@ -10414,11 +10423,11 @@ const struct sched_class fair_sched_class = {
10414 .check_preempt_curr = check_preempt_wakeup, 10423 .check_preempt_curr = check_preempt_wakeup,
10415 10424
10416 .pick_next_task = pick_next_task_fair, 10425 .pick_next_task = pick_next_task_fair,
10417
10418 .put_prev_task = put_prev_task_fair, 10426 .put_prev_task = put_prev_task_fair,
10419 .set_next_task = set_next_task_fair, 10427 .set_next_task = set_next_task_fair,
10420 10428
10421#ifdef CONFIG_SMP 10429#ifdef CONFIG_SMP
10430 .balance = balance_fair,
10422 .select_task_rq = select_task_rq_fair, 10431 .select_task_rq = select_task_rq_fair,
10423 .migrate_task_rq = migrate_task_rq_fair, 10432 .migrate_task_rq = migrate_task_rq_fair,
10424 10433
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 8dad5aa600ea..f65ef1e2f204 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -365,6 +365,12 @@ select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
365{ 365{
366 return task_cpu(p); /* IDLE tasks as never migrated */ 366 return task_cpu(p); /* IDLE tasks as never migrated */
367} 367}
368
369static int
370balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
371{
372 return WARN_ON_ONCE(1);
373}
368#endif 374#endif
369 375
370/* 376/*
@@ -375,7 +381,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
375 resched_curr(rq); 381 resched_curr(rq);
376} 382}
377 383
378static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 384static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
379{ 385{
380} 386}
381 387
@@ -460,6 +466,7 @@ const struct sched_class idle_sched_class = {
460 .set_next_task = set_next_task_idle, 466 .set_next_task = set_next_task_idle,
461 467
462#ifdef CONFIG_SMP 468#ifdef CONFIG_SMP
469 .balance = balance_idle,
463 .select_task_rq = select_task_rq_idle, 470 .select_task_rq = select_task_rq_idle,
464 .set_cpus_allowed = set_cpus_allowed_common, 471 .set_cpus_allowed = set_cpus_allowed_common,
465#endif 472#endif
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ebaa4e619684..9b8adc01be3d 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1469,6 +1469,22 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1469 resched_curr(rq); 1469 resched_curr(rq);
1470} 1470}
1471 1471
1472static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1473{
1474 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1475 /*
1476 * This is OK, because current is on_cpu, which avoids it being
1477 * picked for load-balance and preemption/IRQs are still
1478 * disabled avoiding further scheduler activity on it and we've
1479 * not yet started the picking loop.
1480 */
1481 rq_unpin_lock(rq, rf);
1482 pull_rt_task(rq);
1483 rq_repin_lock(rq, rf);
1484 }
1485
1486 return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
1487}
1472#endif /* CONFIG_SMP */ 1488#endif /* CONFIG_SMP */
1473 1489
1474/* 1490/*
@@ -1552,21 +1568,18 @@ static struct task_struct *
1552pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 1568pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1553{ 1569{
1554 struct task_struct *p; 1570 struct task_struct *p;
1555 struct rt_rq *rt_rq = &rq->rt;
1556 1571
1557 WARN_ON_ONCE(prev || rf); 1572 WARN_ON_ONCE(prev || rf);
1558 1573
1559 if (!rt_rq->rt_queued) 1574 if (!sched_rt_runnable(rq))
1560 return NULL; 1575 return NULL;
1561 1576
1562 p = _pick_next_task_rt(rq); 1577 p = _pick_next_task_rt(rq);
1563
1564 set_next_task_rt(rq, p); 1578 set_next_task_rt(rq, p);
1565
1566 return p; 1579 return p;
1567} 1580}
1568 1581
1569static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 1582static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1570{ 1583{
1571 update_curr_rt(rq); 1584 update_curr_rt(rq);
1572 1585
@@ -1578,18 +1591,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct rq_fla
1578 */ 1591 */
1579 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) 1592 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1580 enqueue_pushable_task(rq, p); 1593 enqueue_pushable_task(rq, p);
1581
1582 if (rf && !on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1583 /*
1584 * This is OK, because current is on_cpu, which avoids it being
1585 * picked for load-balance and preemption/IRQs are still
1586 * disabled avoiding further scheduler activity on it and we've
1587 * not yet started the picking loop.
1588 */
1589 rq_unpin_lock(rq, rf);
1590 pull_rt_task(rq);
1591 rq_repin_lock(rq, rf);
1592 }
1593} 1594}
1594 1595
1595#ifdef CONFIG_SMP 1596#ifdef CONFIG_SMP
@@ -2366,8 +2367,8 @@ const struct sched_class rt_sched_class = {
2366 .set_next_task = set_next_task_rt, 2367 .set_next_task = set_next_task_rt,
2367 2368
2368#ifdef CONFIG_SMP 2369#ifdef CONFIG_SMP
2370 .balance = balance_rt,
2369 .select_task_rq = select_task_rq_rt, 2371 .select_task_rq = select_task_rq_rt,
2370
2371 .set_cpus_allowed = set_cpus_allowed_common, 2372 .set_cpus_allowed = set_cpus_allowed_common,
2372 .rq_online = rq_online_rt, 2373 .rq_online = rq_online_rt,
2373 .rq_offline = rq_offline_rt, 2374 .rq_offline = rq_offline_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0db2c1b3361e..c8870c5bd7df 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1727,10 +1727,11 @@ struct sched_class {
1727 struct task_struct * (*pick_next_task)(struct rq *rq, 1727 struct task_struct * (*pick_next_task)(struct rq *rq,
1728 struct task_struct *prev, 1728 struct task_struct *prev,
1729 struct rq_flags *rf); 1729 struct rq_flags *rf);
1730 void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct rq_flags *rf); 1730 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
1731 void (*set_next_task)(struct rq *rq, struct task_struct *p); 1731 void (*set_next_task)(struct rq *rq, struct task_struct *p);
1732 1732
1733#ifdef CONFIG_SMP 1733#ifdef CONFIG_SMP
1734 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
1734 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); 1735 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
1735 void (*migrate_task_rq)(struct task_struct *p, int new_cpu); 1736 void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
1736 1737
@@ -1773,7 +1774,7 @@ struct sched_class {
1773static inline void put_prev_task(struct rq *rq, struct task_struct *prev) 1774static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
1774{ 1775{
1775 WARN_ON_ONCE(rq->curr != prev); 1776 WARN_ON_ONCE(rq->curr != prev);
1776 prev->sched_class->put_prev_task(rq, prev, NULL); 1777 prev->sched_class->put_prev_task(rq, prev);
1777} 1778}
1778 1779
1779static inline void set_next_task(struct rq *rq, struct task_struct *next) 1780static inline void set_next_task(struct rq *rq, struct task_struct *next)
@@ -1787,8 +1788,12 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
1787#else 1788#else
1788#define sched_class_highest (&dl_sched_class) 1789#define sched_class_highest (&dl_sched_class)
1789#endif 1790#endif
1791
1792#define for_class_range(class, _from, _to) \
1793 for (class = (_from); class != (_to); class = class->next)
1794
1790#define for_each_class(class) \ 1795#define for_each_class(class) \
1791 for (class = sched_class_highest; class; class = class->next) 1796 for_class_range(class, sched_class_highest, NULL)
1792 1797
1793extern const struct sched_class stop_sched_class; 1798extern const struct sched_class stop_sched_class;
1794extern const struct sched_class dl_sched_class; 1799extern const struct sched_class dl_sched_class;
@@ -1796,6 +1801,25 @@ extern const struct sched_class rt_sched_class;
1796extern const struct sched_class fair_sched_class; 1801extern const struct sched_class fair_sched_class;
1797extern const struct sched_class idle_sched_class; 1802extern const struct sched_class idle_sched_class;
1798 1803
1804static inline bool sched_stop_runnable(struct rq *rq)
1805{
1806 return rq->stop && task_on_rq_queued(rq->stop);
1807}
1808
1809static inline bool sched_dl_runnable(struct rq *rq)
1810{
1811 return rq->dl.dl_nr_running > 0;
1812}
1813
1814static inline bool sched_rt_runnable(struct rq *rq)
1815{
1816 return rq->rt.rt_queued > 0;
1817}
1818
1819static inline bool sched_fair_runnable(struct rq *rq)
1820{
1821 return rq->cfs.nr_running > 0;
1822}
1799 1823
1800#ifdef CONFIG_SMP 1824#ifdef CONFIG_SMP
1801 1825
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 7e1cee4e65b2..c0640739e05e 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -15,6 +15,12 @@ select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
15{ 15{
16 return task_cpu(p); /* stop tasks as never migrate */ 16 return task_cpu(p); /* stop tasks as never migrate */
17} 17}
18
19static int
20balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
21{
22 return sched_stop_runnable(rq);
23}
18#endif /* CONFIG_SMP */ 24#endif /* CONFIG_SMP */
19 25
20static void 26static void
@@ -31,16 +37,13 @@ static void set_next_task_stop(struct rq *rq, struct task_struct *stop)
31static struct task_struct * 37static struct task_struct *
32pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 38pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
33{ 39{
34 struct task_struct *stop = rq->stop;
35
36 WARN_ON_ONCE(prev || rf); 40 WARN_ON_ONCE(prev || rf);
37 41
38 if (!stop || !task_on_rq_queued(stop)) 42 if (!sched_stop_runnable(rq))
39 return NULL; 43 return NULL;
40 44
41 set_next_task_stop(rq, stop); 45 set_next_task_stop(rq, rq->stop);
42 46 return rq->stop;
43 return stop;
44} 47}
45 48
46static void 49static void
@@ -60,7 +63,7 @@ static void yield_task_stop(struct rq *rq)
60 BUG(); /* the stop task should never yield, its pointless. */ 63 BUG(); /* the stop task should never yield, its pointless. */
61} 64}
62 65
63static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) 66static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
64{ 67{
65 struct task_struct *curr = rq->curr; 68 struct task_struct *curr = rq->curr;
66 u64 delta_exec; 69 u64 delta_exec;
@@ -129,6 +132,7 @@ const struct sched_class stop_sched_class = {
129 .set_next_task = set_next_task_stop, 132 .set_next_task = set_next_task_stop,
130 133
131#ifdef CONFIG_SMP 134#ifdef CONFIG_SMP
135 .balance = balance_stop,
132 .select_task_rq = select_task_rq_stop, 136 .select_task_rq = select_task_rq_stop,
133 .set_cpus_allowed = set_cpus_allowed_common, 137 .set_cpus_allowed = set_cpus_allowed_common,
134#endif 138#endif
diff --git a/kernel/signal.c b/kernel/signal.c
index c4da1ef56fdf..bcd46f547db3 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2205,8 +2205,8 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t
2205 */ 2205 */
2206 preempt_disable(); 2206 preempt_disable();
2207 read_unlock(&tasklist_lock); 2207 read_unlock(&tasklist_lock);
2208 preempt_enable_no_resched();
2209 cgroup_enter_frozen(); 2208 cgroup_enter_frozen();
2209 preempt_enable_no_resched();
2210 freezable_schedule(); 2210 freezable_schedule();
2211 cgroup_leave_frozen(true); 2211 cgroup_leave_frozen(true);
2212 } else { 2212 } else {
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index 6d1f68b7e528..c9ea7eb2cb1a 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -141,7 +141,8 @@ unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,
141 struct stacktrace_cookie c = { 141 struct stacktrace_cookie c = {
142 .store = store, 142 .store = store,
143 .size = size, 143 .size = size,
144 .skip = skipnr + 1, 144 /* skip this function if they are tracing us */
145 .skip = skipnr + !!(current == tsk),
145 }; 146 };
146 147
147 if (!try_get_task_stack(tsk)) 148 if (!try_get_task_stack(tsk))
@@ -298,7 +299,8 @@ unsigned int stack_trace_save_tsk(struct task_struct *task,
298 struct stack_trace trace = { 299 struct stack_trace trace = {
299 .entries = store, 300 .entries = store,
300 .max_entries = size, 301 .max_entries = size,
301 .skip = skipnr + 1, 302 /* skip this function if they are tracing us */
303 .skip = skipnr + !!(current == task),
302 }; 304 };
303 305
304 save_stack_trace_tsk(task, &trace); 306 save_stack_trace_tsk(task, &trace);
diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c
index 4bc37ac3bb05..5ee0f7709410 100644
--- a/kernel/time/vsyscall.c
+++ b/kernel/time/vsyscall.c
@@ -110,8 +110,7 @@ void update_vsyscall(struct timekeeper *tk)
110 nsec = nsec + tk->wall_to_monotonic.tv_nsec; 110 nsec = nsec + tk->wall_to_monotonic.tv_nsec;
111 vdso_ts->sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec); 111 vdso_ts->sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);
112 112
113 if (__arch_use_vsyscall(vdata)) 113 update_vdso_data(vdata, tk);
114 update_vdso_data(vdata, tk);
115 114
116 __arch_update_vsyscall(vdata, tk); 115 __arch_update_vsyscall(vdata, tk);
117 116
@@ -124,10 +123,8 @@ void update_vsyscall_tz(void)
124{ 123{
125 struct vdso_data *vdata = __arch_get_k_vdso_data(); 124 struct vdso_data *vdata = __arch_get_k_vdso_data();
126 125
127 if (__arch_use_vsyscall(vdata)) { 126 vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
128 vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest; 127 vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;
129 vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;
130 }
131 128
132 __arch_sync_vdso_data(vdata); 129 __arch_sync_vdso_data(vdata);
133} 130}
diff --git a/lib/Kconfig b/lib/Kconfig
index 183f92a297ca..3321d04dfa5a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -447,7 +447,6 @@ config ASSOCIATIVE_ARRAY
447config HAS_IOMEM 447config HAS_IOMEM
448 bool 448 bool
449 depends on !NO_IOMEM 449 depends on !NO_IOMEM
450 select GENERIC_IO
451 default y 450 default y
452 451
453config HAS_IOPORT_MAP 452config HAS_IOPORT_MAP
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 5cff72f18c4a..33ffbf308853 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -106,7 +106,12 @@ retry:
106 was_locked = 1; 106 was_locked = 1;
107 } else { 107 } else {
108 local_irq_restore(flags); 108 local_irq_restore(flags);
109 cpu_relax(); 109 /*
110 * Wait for the lock to release before jumping to
111 * atomic_cmpxchg() in order to mitigate the thundering herd
112 * problem.
113 */
114 do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
110 goto retry; 115 goto retry;
111 } 116 }
112 117
diff --git a/lib/idr.c b/lib/idr.c
index 66a374892482..c2cf2c52bbde 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -215,7 +215,7 @@ int idr_for_each(const struct idr *idr,
215EXPORT_SYMBOL(idr_for_each); 215EXPORT_SYMBOL(idr_for_each);
216 216
217/** 217/**
218 * idr_get_next() - Find next populated entry. 218 * idr_get_next_ul() - Find next populated entry.
219 * @idr: IDR handle. 219 * @idr: IDR handle.
220 * @nextid: Pointer to an ID. 220 * @nextid: Pointer to an ID.
221 * 221 *
@@ -224,7 +224,7 @@ EXPORT_SYMBOL(idr_for_each);
224 * to the ID of the found value. To use in a loop, the value pointed to by 224 * to the ID of the found value. To use in a loop, the value pointed to by
225 * nextid must be incremented by the user. 225 * nextid must be incremented by the user.
226 */ 226 */
227void *idr_get_next(struct idr *idr, int *nextid) 227void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
228{ 228{
229 struct radix_tree_iter iter; 229 struct radix_tree_iter iter;
230 void __rcu **slot; 230 void __rcu **slot;
@@ -245,18 +245,14 @@ void *idr_get_next(struct idr *idr, int *nextid)
245 } 245 }
246 if (!slot) 246 if (!slot)
247 return NULL; 247 return NULL;
248 id = iter.index + base;
249
250 if (WARN_ON_ONCE(id > INT_MAX))
251 return NULL;
252 248
253 *nextid = id; 249 *nextid = iter.index + base;
254 return entry; 250 return entry;
255} 251}
256EXPORT_SYMBOL(idr_get_next); 252EXPORT_SYMBOL(idr_get_next_ul);
257 253
258/** 254/**
259 * idr_get_next_ul() - Find next populated entry. 255 * idr_get_next() - Find next populated entry.
260 * @idr: IDR handle. 256 * @idr: IDR handle.
261 * @nextid: Pointer to an ID. 257 * @nextid: Pointer to an ID.
262 * 258 *
@@ -265,22 +261,17 @@ EXPORT_SYMBOL(idr_get_next);
265 * to the ID of the found value. To use in a loop, the value pointed to by 261 * to the ID of the found value. To use in a loop, the value pointed to by
266 * nextid must be incremented by the user. 262 * nextid must be incremented by the user.
267 */ 263 */
268void *idr_get_next_ul(struct idr *idr, unsigned long *nextid) 264void *idr_get_next(struct idr *idr, int *nextid)
269{ 265{
270 struct radix_tree_iter iter;
271 void __rcu **slot;
272 unsigned long base = idr->idr_base;
273 unsigned long id = *nextid; 266 unsigned long id = *nextid;
267 void *entry = idr_get_next_ul(idr, &id);
274 268
275 id = (id < base) ? 0 : id - base; 269 if (WARN_ON_ONCE(id > INT_MAX))
276 slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
277 if (!slot)
278 return NULL; 270 return NULL;
279 271 *nextid = id;
280 *nextid = iter.index + base; 272 return entry;
281 return rcu_dereference_raw(*slot);
282} 273}
283EXPORT_SYMBOL(idr_get_next_ul); 274EXPORT_SYMBOL(idr_get_next);
284 275
285/** 276/**
286 * idr_replace() - replace pointer for given ID. 277 * idr_replace() - replace pointer for given ID.
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 18c1dfbb1765..c8fa1d274530 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1529,7 +1529,7 @@ void __rcu **idr_get_free(struct radix_tree_root *root,
1529 offset = radix_tree_find_next_bit(node, IDR_FREE, 1529 offset = radix_tree_find_next_bit(node, IDR_FREE,
1530 offset + 1); 1530 offset + 1);
1531 start = next_index(start, node, offset); 1531 start = next_index(start, node, offset);
1532 if (start > max) 1532 if (start > max || start == 0)
1533 return ERR_PTR(-ENOSPC); 1533 return ERR_PTR(-ENOSPC);
1534 while (offset == RADIX_TREE_MAP_SIZE) { 1534 while (offset == RADIX_TREE_MAP_SIZE) {
1535 offset = node->offset + 1; 1535 offset = node->offset + 1;
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 9d631a7b6a70..7df4f7f395bf 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -1110,6 +1110,28 @@ static noinline void check_find_entry(struct xarray *xa)
1110 XA_BUG_ON(xa, !xa_empty(xa)); 1110 XA_BUG_ON(xa, !xa_empty(xa));
1111} 1111}
1112 1112
1113static noinline void check_move_tiny(struct xarray *xa)
1114{
1115 XA_STATE(xas, xa, 0);
1116
1117 XA_BUG_ON(xa, !xa_empty(xa));
1118 rcu_read_lock();
1119 XA_BUG_ON(xa, xas_next(&xas) != NULL);
1120 XA_BUG_ON(xa, xas_next(&xas) != NULL);
1121 rcu_read_unlock();
1122 xa_store_index(xa, 0, GFP_KERNEL);
1123 rcu_read_lock();
1124 xas_set(&xas, 0);
1125 XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
1126 XA_BUG_ON(xa, xas_next(&xas) != NULL);
1127 xas_set(&xas, 0);
1128 XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
1129 XA_BUG_ON(xa, xas_prev(&xas) != NULL);
1130 rcu_read_unlock();
1131 xa_erase_index(xa, 0);
1132 XA_BUG_ON(xa, !xa_empty(xa));
1133}
1134
1113static noinline void check_move_small(struct xarray *xa, unsigned long idx) 1135static noinline void check_move_small(struct xarray *xa, unsigned long idx)
1114{ 1136{
1115 XA_STATE(xas, xa, 0); 1137 XA_STATE(xas, xa, 0);
@@ -1217,6 +1239,8 @@ static noinline void check_move(struct xarray *xa)
1217 1239
1218 xa_destroy(xa); 1240 xa_destroy(xa);
1219 1241
1242 check_move_tiny(xa);
1243
1220 for (i = 0; i < 16; i++) 1244 for (i = 0; i < 16; i++)
1221 check_move_small(xa, 1UL << i); 1245 check_move_small(xa, 1UL << i);
1222 1246
diff --git a/lib/xarray.c b/lib/xarray.c
index 446b956c9188..1237c213f52b 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -994,6 +994,8 @@ void *__xas_prev(struct xa_state *xas)
994 994
995 if (!xas_frozen(xas->xa_node)) 995 if (!xas_frozen(xas->xa_node))
996 xas->xa_index--; 996 xas->xa_index--;
997 if (!xas->xa_node)
998 return set_bounds(xas);
997 if (xas_not_node(xas->xa_node)) 999 if (xas_not_node(xas->xa_node))
998 return xas_load(xas); 1000 return xas_load(xas);
999 1001
@@ -1031,6 +1033,8 @@ void *__xas_next(struct xa_state *xas)
1031 1033
1032 if (!xas_frozen(xas->xa_node)) 1034 if (!xas_frozen(xas->xa_node))
1033 xas->xa_index++; 1035 xas->xa_index++;
1036 if (!xas->xa_node)
1037 return set_bounds(xas);
1034 if (xas_not_node(xas->xa_node)) 1038 if (xas_not_node(xas->xa_node))
1035 return xas_load(xas); 1039 return xas_load(xas);
1036 1040
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 0a1b4b484ac5..f05d27b7183d 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1028,12 +1028,13 @@ static void collapse_huge_page(struct mm_struct *mm,
1028 1028
1029 anon_vma_lock_write(vma->anon_vma); 1029 anon_vma_lock_write(vma->anon_vma);
1030 1030
1031 pte = pte_offset_map(pmd, address);
1032 pte_ptl = pte_lockptr(mm, pmd);
1033
1034 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, 1031 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1035 address, address + HPAGE_PMD_SIZE); 1032 address, address + HPAGE_PMD_SIZE);
1036 mmu_notifier_invalidate_range_start(&range); 1033 mmu_notifier_invalidate_range_start(&range);
1034
1035 pte = pte_offset_map(pmd, address);
1036 pte_ptl = pte_lockptr(mm, pmd);
1037
1037 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ 1038 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1038 /* 1039 /*
1039 * After this gup_fast can't run anymore. This also removes 1040 * After this gup_fast can't run anymore. This also removes
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 363106578876..37592dd7ae32 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -484,7 +484,7 @@ ino_t page_cgroup_ino(struct page *page)
484 unsigned long ino = 0; 484 unsigned long ino = 0;
485 485
486 rcu_read_lock(); 486 rcu_read_lock();
487 if (PageHead(page) && PageSlab(page)) 487 if (PageSlab(page) && !PageTail(page))
488 memcg = memcg_from_slab_page(page); 488 memcg = memcg_from_slab_page(page);
489 else 489 else
490 memcg = READ_ONCE(page->mem_cgroup); 490 memcg = READ_ONCE(page->mem_cgroup);
@@ -2535,6 +2535,15 @@ retry:
2535 } 2535 }
2536 2536
2537 /* 2537 /*
2538 * Memcg doesn't have a dedicated reserve for atomic
2539 * allocations. But like the global atomic pool, we need to
2540 * put the burden of reclaim on regular allocation requests
2541 * and let these go through as privileged allocations.
2542 */
2543 if (gfp_mask & __GFP_ATOMIC)
2544 goto force;
2545
2546 /*
2538 * Unlike in global OOM situations, memcg is not in a physical 2547 * Unlike in global OOM situations, memcg is not in a physical
2539 * memory shortage. Allow dying and OOM-killed tasks to 2548 * memory shortage. Allow dying and OOM-killed tasks to
2540 * bypass the last charges so that they can exit quickly and 2549 * bypass the last charges so that they can exit quickly and
@@ -5014,12 +5023,6 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
5014{ 5023{
5015 int node; 5024 int node;
5016 5025
5017 /*
5018 * Flush percpu vmstats and vmevents to guarantee the value correctness
5019 * on parent's and all ancestor levels.
5020 */
5021 memcg_flush_percpu_vmstats(memcg, false);
5022 memcg_flush_percpu_vmevents(memcg);
5023 for_each_node(node) 5026 for_each_node(node)
5024 free_mem_cgroup_per_node_info(memcg, node); 5027 free_mem_cgroup_per_node_info(memcg, node);
5025 free_percpu(memcg->vmstats_percpu); 5028 free_percpu(memcg->vmstats_percpu);
@@ -5030,6 +5033,12 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
5030static void mem_cgroup_free(struct mem_cgroup *memcg) 5033static void mem_cgroup_free(struct mem_cgroup *memcg)
5031{ 5034{
5032 memcg_wb_domain_exit(memcg); 5035 memcg_wb_domain_exit(memcg);
5036 /*
5037 * Flush percpu vmstats and vmevents to guarantee the value correctness
5038 * on parent's and all ancestor levels.
5039 */
5040 memcg_flush_percpu_vmstats(memcg, false);
5041 memcg_flush_percpu_vmevents(memcg);
5033 __mem_cgroup_free(memcg); 5042 __mem_cgroup_free(memcg);
5034} 5043}
5035 5044
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index df570e5c71cc..07e5c67f48a8 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -447,6 +447,14 @@ static void update_pgdat_span(struct pglist_data *pgdat)
447 zone->spanned_pages; 447 zone->spanned_pages;
448 448
449 /* No need to lock the zones, they can't change. */ 449 /* No need to lock the zones, they can't change. */
450 if (!zone->spanned_pages)
451 continue;
452 if (!node_end_pfn) {
453 node_start_pfn = zone->zone_start_pfn;
454 node_end_pfn = zone_end_pfn;
455 continue;
456 }
457
450 if (zone_end_pfn > node_end_pfn) 458 if (zone_end_pfn > node_end_pfn)
451 node_end_pfn = zone_end_pfn; 459 node_end_pfn = zone_end_pfn;
452 if (zone->zone_start_pfn < node_start_pfn) 460 if (zone->zone_start_pfn < node_start_pfn)
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 7fde88695f35..9a889e456168 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -180,7 +180,7 @@ int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
180 mn->ops->invalidate_range_start, _ret, 180 mn->ops->invalidate_range_start, _ret,
181 !mmu_notifier_range_blockable(range) ? "non-" : ""); 181 !mmu_notifier_range_blockable(range) ? "non-" : "");
182 WARN_ON(mmu_notifier_range_blockable(range) || 182 WARN_ON(mmu_notifier_range_blockable(range) ||
183 ret != -EAGAIN); 183 _ret != -EAGAIN);
184 ret = _ret; 184 ret = _ret;
185 } 185 }
186 } 186 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ecc3dbad606b..f391c0c4ed1d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1948,6 +1948,14 @@ void __init page_alloc_init_late(void)
1948 wait_for_completion(&pgdat_init_all_done_comp); 1948 wait_for_completion(&pgdat_init_all_done_comp);
1949 1949
1950 /* 1950 /*
1951 * The number of managed pages has changed due to the initialisation
1952 * so the pcpu batch and high limits needs to be updated or the limits
1953 * will be artificially small.
1954 */
1955 for_each_populated_zone(zone)
1956 zone_pcp_update(zone);
1957
1958 /*
1951 * We initialized the rest of the deferred pages. Permanently disable 1959 * We initialized the rest of the deferred pages. Permanently disable
1952 * on-demand struct page initialization. 1960 * on-demand struct page initialization.
1953 */ 1961 */
@@ -3720,10 +3728,6 @@ try_this_zone:
3720static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3728static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3721{ 3729{
3722 unsigned int filter = SHOW_MEM_FILTER_NODES; 3730 unsigned int filter = SHOW_MEM_FILTER_NODES;
3723 static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
3724
3725 if (!__ratelimit(&show_mem_rs))
3726 return;
3727 3731
3728 /* 3732 /*
3729 * This documents exceptions given to allocations in certain 3733 * This documents exceptions given to allocations in certain
@@ -3744,8 +3748,7 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3744{ 3748{
3745 struct va_format vaf; 3749 struct va_format vaf;
3746 va_list args; 3750 va_list args;
3747 static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL, 3751 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
3748 DEFAULT_RATELIMIT_BURST);
3749 3752
3750 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs)) 3753 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
3751 return; 3754 return;
@@ -8514,7 +8517,6 @@ void free_contig_range(unsigned long pfn, unsigned int nr_pages)
8514 WARN(count != 0, "%d pages are still in use!\n", count); 8517 WARN(count != 0, "%d pages are still in use!\n", count);
8515} 8518}
8516 8519
8517#ifdef CONFIG_MEMORY_HOTPLUG
8518/* 8520/*
8519 * The zone indicated has a new number of managed_pages; batch sizes and percpu 8521 * The zone indicated has a new number of managed_pages; batch sizes and percpu
8520 * page high values need to be recalulated. 8522 * page high values need to be recalulated.
@@ -8528,7 +8530,6 @@ void __meminit zone_pcp_update(struct zone *zone)
8528 per_cpu_ptr(zone->pageset, cpu)); 8530 per_cpu_ptr(zone->pageset, cpu));
8529 mutex_unlock(&pcp_batch_high_lock); 8531 mutex_unlock(&pcp_batch_high_lock);
8530} 8532}
8531#endif
8532 8533
8533void zone_pcp_reset(struct zone *zone) 8534void zone_pcp_reset(struct zone *zone)
8534{ 8535{
diff --git a/mm/slab.h b/mm/slab.h
index 68e455f2b698..b2b01694dc43 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -323,8 +323,8 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
323 * Expects a pointer to a slab page. Please note, that PageSlab() check 323 * Expects a pointer to a slab page. Please note, that PageSlab() check
324 * isn't sufficient, as it returns true also for tail compound slab pages, 324 * isn't sufficient, as it returns true also for tail compound slab pages,
325 * which do not have slab_cache pointer set. 325 * which do not have slab_cache pointer set.
326 * So this function assumes that the page can pass PageHead() and PageSlab() 326 * So this function assumes that the page can pass PageSlab() && !PageTail()
327 * checks. 327 * check.
328 * 328 *
329 * The kmem_cache can be reparented asynchronously. The caller must ensure 329 * The kmem_cache can be reparented asynchronously. The caller must ensure
330 * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex. 330 * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6afc892a148a..a8222041bd44 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1383,12 +1383,29 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
1383 unsigned long freecount = 0; 1383 unsigned long freecount = 0;
1384 struct free_area *area; 1384 struct free_area *area;
1385 struct list_head *curr; 1385 struct list_head *curr;
1386 bool overflow = false;
1386 1387
1387 area = &(zone->free_area[order]); 1388 area = &(zone->free_area[order]);
1388 1389
1389 list_for_each(curr, &area->free_list[mtype]) 1390 list_for_each(curr, &area->free_list[mtype]) {
1390 freecount++; 1391 /*
1391 seq_printf(m, "%6lu ", freecount); 1392 * Cap the free_list iteration because it might
1393 * be really large and we are under a spinlock
1394 * so a long time spent here could trigger a
1395 * hard lockup detector. Anyway this is a
1396 * debugging tool so knowing there is a handful
1397 * of pages of this order should be more than
1398 * sufficient.
1399 */
1400 if (++freecount >= 100000) {
1401 overflow = true;
1402 break;
1403 }
1404 }
1405 seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1406 spin_unlock_irq(&zone->lock);
1407 cond_resched();
1408 spin_lock_irq(&zone->lock);
1392 } 1409 }
1393 seq_putc(m, '\n'); 1410 seq_putc(m, '\n');
1394 } 1411 }
@@ -1972,7 +1989,7 @@ void __init init_mm_internals(void)
1972#endif 1989#endif
1973#ifdef CONFIG_PROC_FS 1990#ifdef CONFIG_PROC_FS
1974 proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op); 1991 proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
1975 proc_create_seq("pagetypeinfo", 0444, NULL, &pagetypeinfo_op); 1992 proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
1976 proc_create_seq("vmstat", 0444, NULL, &vmstat_op); 1993 proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
1977 proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op); 1994 proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
1978#endif 1995#endif
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
index ed91ea31978a..12a4f4d93681 100644
--- a/net/bridge/netfilter/ebt_dnat.c
+++ b/net/bridge/netfilter/ebt_dnat.c
@@ -20,7 +20,6 @@ static unsigned int
20ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par) 20ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
21{ 21{
22 const struct ebt_nat_info *info = par->targinfo; 22 const struct ebt_nat_info *info = par->targinfo;
23 struct net_device *dev;
24 23
25 if (skb_ensure_writable(skb, ETH_ALEN)) 24 if (skb_ensure_writable(skb, ETH_ALEN))
26 return EBT_DROP; 25 return EBT_DROP;
@@ -33,10 +32,22 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
33 else 32 else
34 skb->pkt_type = PACKET_MULTICAST; 33 skb->pkt_type = PACKET_MULTICAST;
35 } else { 34 } else {
36 if (xt_hooknum(par) != NF_BR_BROUTING) 35 const struct net_device *dev;
37 dev = br_port_get_rcu(xt_in(par))->br->dev; 36
38 else 37 switch (xt_hooknum(par)) {
38 case NF_BR_BROUTING:
39 dev = xt_in(par); 39 dev = xt_in(par);
40 break;
41 case NF_BR_PRE_ROUTING:
42 dev = br_port_get_rcu(xt_in(par))->br->dev;
43 break;
44 default:
45 dev = NULL;
46 break;
47 }
48
49 if (!dev) /* NF_BR_LOCAL_OUT */
50 return info->target;
40 51
41 if (ether_addr_equal(info->mac, dev->dev_addr)) 52 if (ether_addr_equal(info->mac, dev->dev_addr))
42 skb->pkt_type = PACKET_HOST; 53 skb->pkt_type = PACKET_HOST;
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 37c1040bcb9c..4d8ba701e15d 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -580,6 +580,7 @@ static int j1939_sk_release(struct socket *sock)
580 j1939_netdev_stop(priv); 580 j1939_netdev_stop(priv);
581 } 581 }
582 582
583 kfree(jsk->filters);
583 sock_orphan(sk); 584 sock_orphan(sk);
584 sock->sk = NULL; 585 sock->sk = NULL;
585 586
@@ -909,8 +910,10 @@ void j1939_sk_errqueue(struct j1939_session *session,
909 memset(serr, 0, sizeof(*serr)); 910 memset(serr, 0, sizeof(*serr));
910 switch (type) { 911 switch (type) {
911 case J1939_ERRQUEUE_ACK: 912 case J1939_ERRQUEUE_ACK:
912 if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)) 913 if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)) {
914 kfree_skb(skb);
913 return; 915 return;
916 }
914 917
915 serr->ee.ee_errno = ENOMSG; 918 serr->ee.ee_errno = ENOMSG;
916 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 919 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
@@ -918,8 +921,10 @@ void j1939_sk_errqueue(struct j1939_session *session,
918 state = "ACK"; 921 state = "ACK";
919 break; 922 break;
920 case J1939_ERRQUEUE_SCHED: 923 case J1939_ERRQUEUE_SCHED:
921 if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED)) 924 if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED)) {
925 kfree_skb(skb);
922 return; 926 return;
927 }
923 928
924 serr->ee.ee_errno = ENOMSG; 929 serr->ee.ee_errno = ENOMSG;
925 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 930 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index fe000ea757ea..e5f1a56994c6 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1273,9 +1273,27 @@ j1939_xtp_rx_abort(struct j1939_priv *priv, struct sk_buff *skb,
1273static void 1273static void
1274j1939_xtp_rx_eoma_one(struct j1939_session *session, struct sk_buff *skb) 1274j1939_xtp_rx_eoma_one(struct j1939_session *session, struct sk_buff *skb)
1275{ 1275{
1276 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
1277 const u8 *dat;
1278 int len;
1279
1276 if (j1939_xtp_rx_cmd_bad_pgn(session, skb)) 1280 if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
1277 return; 1281 return;
1278 1282
1283 dat = skb->data;
1284
1285 if (skcb->addr.type == J1939_ETP)
1286 len = j1939_etp_ctl_to_size(dat);
1287 else
1288 len = j1939_tp_ctl_to_size(dat);
1289
1290 if (session->total_message_size != len) {
1291 netdev_warn_once(session->priv->ndev,
1292 "%s: 0x%p: Incorrect size. Expected: %i; got: %i.\n",
1293 __func__, session, session->total_message_size,
1294 len);
1295 }
1296
1279 netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); 1297 netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
1280 1298
1281 session->pkt.tx_acked = session->pkt.total; 1299 session->pkt.tx_acked = session->pkt.total;
@@ -1432,7 +1450,7 @@ j1939_session *j1939_session_fresh_new(struct j1939_priv *priv,
1432 skcb = j1939_skb_to_cb(skb); 1450 skcb = j1939_skb_to_cb(skb);
1433 memcpy(skcb, rel_skcb, sizeof(*skcb)); 1451 memcpy(skcb, rel_skcb, sizeof(*skcb));
1434 1452
1435 session = j1939_session_new(priv, skb, skb->len); 1453 session = j1939_session_new(priv, skb, size);
1436 if (!session) { 1454 if (!session) {
1437 kfree_skb(skb); 1455 kfree_skb(skb);
1438 return NULL; 1456 return NULL;
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index cf390e0aa73d..ad31e4e53d0a 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -270,18 +270,28 @@ void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
270 270
271 msg->sg.data[i].length -= trim; 271 msg->sg.data[i].length -= trim;
272 sk_mem_uncharge(sk, trim); 272 sk_mem_uncharge(sk, trim);
273 /* Adjust copybreak if it falls into the trimmed part of last buf */
274 if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
275 msg->sg.copybreak = msg->sg.data[i].length;
273out: 276out:
274 /* If we trim data before curr pointer update copybreak and current 277 sk_msg_iter_var_next(i);
275 * so that any future copy operations start at new copy location. 278 msg->sg.end = i;
279
280 /* If we trim data a full sg elem before curr pointer update
281 * copybreak and current so that any future copy operations
282 * start at new copy location.
276 * However trimed data that has not yet been used in a copy op 283 * However trimed data that has not yet been used in a copy op
277 * does not require an update. 284 * does not require an update.
278 */ 285 */
279 if (msg->sg.curr >= i) { 286 if (!msg->sg.size) {
287 msg->sg.curr = msg->sg.start;
288 msg->sg.copybreak = 0;
289 } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
290 sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
291 sk_msg_iter_var_prev(i);
280 msg->sg.curr = i; 292 msg->sg.curr = i;
281 msg->sg.copybreak = msg->sg.data[i].length; 293 msg->sg.copybreak = msg->sg.data[i].length;
282 } 294 }
283 sk_msg_iter_var_next(i);
284 msg->sg.end = i;
285} 295}
286EXPORT_SYMBOL_GPL(sk_msg_trim); 296EXPORT_SYMBOL_GPL(sk_msg_trim);
287 297
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 0d8f782c25cc..d19557c6d04b 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -416,7 +416,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
416 RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt)); 416 RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
417 newinet->mc_index = inet_iif(skb); 417 newinet->mc_index = inet_iif(skb);
418 newinet->mc_ttl = ip_hdr(skb)->ttl; 418 newinet->mc_ttl = ip_hdr(skb)->ttl;
419 newinet->inet_id = jiffies; 419 newinet->inet_id = prandom_u32();
420 420
421 if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL) 421 if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
422 goto put_and_exit; 422 goto put_and_exit;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 0913a090b2bf..f1888c683426 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1814,8 +1814,8 @@ int fib_sync_down_addr(struct net_device *dev, __be32 local)
1814 int ret = 0; 1814 int ret = 0;
1815 unsigned int hash = fib_laddr_hashfn(local); 1815 unsigned int hash = fib_laddr_hashfn(local);
1816 struct hlist_head *head = &fib_info_laddrhash[hash]; 1816 struct hlist_head *head = &fib_info_laddrhash[hash];
1817 int tb_id = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
1817 struct net *net = dev_net(dev); 1818 struct net *net = dev_net(dev);
1818 int tb_id = l3mdev_fib_table(dev);
1819 struct fib_info *fi; 1819 struct fib_info *fi;
1820 1820
1821 if (!fib_info_laddrhash || local == 0) 1821 if (!fib_info_laddrhash || local == 0)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a63ff85fe141..e60bf8e7dd1a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -621,6 +621,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
621{ 621{
622 struct __rt6_probe_work *work = NULL; 622 struct __rt6_probe_work *work = NULL;
623 const struct in6_addr *nh_gw; 623 const struct in6_addr *nh_gw;
624 unsigned long last_probe;
624 struct neighbour *neigh; 625 struct neighbour *neigh;
625 struct net_device *dev; 626 struct net_device *dev;
626 struct inet6_dev *idev; 627 struct inet6_dev *idev;
@@ -639,6 +640,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
639 nh_gw = &fib6_nh->fib_nh_gw6; 640 nh_gw = &fib6_nh->fib_nh_gw6;
640 dev = fib6_nh->fib_nh_dev; 641 dev = fib6_nh->fib_nh_dev;
641 rcu_read_lock_bh(); 642 rcu_read_lock_bh();
643 last_probe = READ_ONCE(fib6_nh->last_probe);
642 idev = __in6_dev_get(dev); 644 idev = __in6_dev_get(dev);
643 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw); 645 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
644 if (neigh) { 646 if (neigh) {
@@ -654,13 +656,15 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
654 __neigh_set_probe_once(neigh); 656 __neigh_set_probe_once(neigh);
655 } 657 }
656 write_unlock(&neigh->lock); 658 write_unlock(&neigh->lock);
657 } else if (time_after(jiffies, fib6_nh->last_probe + 659 } else if (time_after(jiffies, last_probe +
658 idev->cnf.rtr_probe_interval)) { 660 idev->cnf.rtr_probe_interval)) {
659 work = kmalloc(sizeof(*work), GFP_ATOMIC); 661 work = kmalloc(sizeof(*work), GFP_ATOMIC);
660 } 662 }
661 663
662 if (work) { 664 if (!work || cmpxchg(&fib6_nh->last_probe,
663 fib6_nh->last_probe = jiffies; 665 last_probe, jiffies) != last_probe) {
666 kfree(work);
667 } else {
664 INIT_WORK(&work->work, rt6_probe_deferred); 668 INIT_WORK(&work->work, rt6_probe_deferred);
665 work->target = *nh_gw; 669 work->target = *nh_gw;
666 dev_hold(dev); 670 dev_hold(dev);
@@ -3383,6 +3387,9 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3383 int err; 3387 int err;
3384 3388
3385 fib6_nh->fib_nh_family = AF_INET6; 3389 fib6_nh->fib_nh_family = AF_INET6;
3390#ifdef CONFIG_IPV6_ROUTER_PREF
3391 fib6_nh->last_probe = jiffies;
3392#endif
3386 3393
3387 err = -ENODEV; 3394 err = -ENODEV;
3388 if (cfg->fc_ifindex) { 3395 if (cfg->fc_ifindex) {
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index aba094b4ccfc..2d05c4cfaf6d 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1292,8 +1292,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1292 ieee80211_remove_interfaces(local); 1292 ieee80211_remove_interfaces(local);
1293 fail_rate: 1293 fail_rate:
1294 rtnl_unlock(); 1294 rtnl_unlock();
1295 ieee80211_led_exit(local);
1296 fail_flows: 1295 fail_flows:
1296 ieee80211_led_exit(local);
1297 destroy_workqueue(local->workqueue); 1297 destroy_workqueue(local->workqueue);
1298 fail_workqueue: 1298 fail_workqueue:
1299 wiphy_unregister(local->hw.wiphy); 1299 wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index bd11fef2139f..8d3a2389b055 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2457,7 +2457,8 @@ unsigned long ieee80211_sta_last_active(struct sta_info *sta)
2457{ 2457{
2458 struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta); 2458 struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);
2459 2459
2460 if (time_after(stats->last_rx, sta->status_stats.last_ack)) 2460 if (!sta->status_stats.last_ack ||
2461 time_after(stats->last_rx, sta->status_stats.last_ack))
2461 return stats->last_rx; 2462 return stats->last_rx;
2462 return sta->status_stats.last_ack; 2463 return sta->status_stats.last_ack;
2463} 2464}
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index e64d5f9a89dd..d73d1828216a 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -296,7 +296,8 @@ ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr)
296 296
297 if (unlikely(!flag_nested(nla))) 297 if (unlikely(!flag_nested(nla)))
298 return -IPSET_ERR_PROTOCOL; 298 return -IPSET_ERR_PROTOCOL;
299 if (nla_parse_nested_deprecated(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy, NULL)) 299 if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
300 ipaddr_policy, NULL))
300 return -IPSET_ERR_PROTOCOL; 301 return -IPSET_ERR_PROTOCOL;
301 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4))) 302 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))
302 return -IPSET_ERR_PROTOCOL; 303 return -IPSET_ERR_PROTOCOL;
@@ -314,7 +315,8 @@ ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
314 if (unlikely(!flag_nested(nla))) 315 if (unlikely(!flag_nested(nla)))
315 return -IPSET_ERR_PROTOCOL; 316 return -IPSET_ERR_PROTOCOL;
316 317
317 if (nla_parse_nested_deprecated(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy, NULL)) 318 if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
319 ipaddr_policy, NULL))
318 return -IPSET_ERR_PROTOCOL; 320 return -IPSET_ERR_PROTOCOL;
319 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6))) 321 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))
320 return -IPSET_ERR_PROTOCOL; 322 return -IPSET_ERR_PROTOCOL;
@@ -934,7 +936,8 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
934 936
935 /* Without holding any locks, create private part. */ 937 /* Without holding any locks, create private part. */
936 if (attr[IPSET_ATTR_DATA] && 938 if (attr[IPSET_ATTR_DATA] &&
937 nla_parse_nested_deprecated(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA], set->type->create_policy, NULL)) { 939 nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
940 set->type->create_policy, NULL)) {
938 ret = -IPSET_ERR_PROTOCOL; 941 ret = -IPSET_ERR_PROTOCOL;
939 goto put_out; 942 goto put_out;
940 } 943 }
@@ -1281,6 +1284,14 @@ dump_attrs(struct nlmsghdr *nlh)
1281 } 1284 }
1282} 1285}
1283 1286
1287static const struct nla_policy
1288ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = {
1289 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
1290 [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
1291 .len = IPSET_MAXNAMELEN - 1 },
1292 [IPSET_ATTR_FLAGS] = { .type = NLA_U32 },
1293};
1294
1284static int 1295static int
1285dump_init(struct netlink_callback *cb, struct ip_set_net *inst) 1296dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
1286{ 1297{
@@ -1292,9 +1303,9 @@ dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
1292 ip_set_id_t index; 1303 ip_set_id_t index;
1293 int ret; 1304 int ret;
1294 1305
1295 ret = nla_parse_deprecated(cda, IPSET_ATTR_CMD_MAX, attr, 1306 ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr,
1296 nlh->nlmsg_len - min_len, 1307 nlh->nlmsg_len - min_len,
1297 ip_set_setname_policy, NULL); 1308 ip_set_dump_policy, NULL);
1298 if (ret) 1309 if (ret)
1299 return ret; 1310 return ret;
1300 1311
@@ -1543,9 +1554,9 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
1543 memcpy(&errmsg->msg, nlh, nlh->nlmsg_len); 1554 memcpy(&errmsg->msg, nlh, nlh->nlmsg_len);
1544 cmdattr = (void *)&errmsg->msg + min_len; 1555 cmdattr = (void *)&errmsg->msg + min_len;
1545 1556
1546 ret = nla_parse_deprecated(cda, IPSET_ATTR_CMD_MAX, cmdattr, 1557 ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, cmdattr,
1547 nlh->nlmsg_len - min_len, 1558 nlh->nlmsg_len - min_len, ip_set_adt_policy,
1548 ip_set_adt_policy, NULL); 1559 NULL);
1549 1560
1550 if (ret) { 1561 if (ret) {
1551 nlmsg_free(skb2); 1562 nlmsg_free(skb2);
@@ -1596,7 +1607,9 @@ static int ip_set_ad(struct net *net, struct sock *ctnl,
1596 1607
1597 use_lineno = !!attr[IPSET_ATTR_LINENO]; 1608 use_lineno = !!attr[IPSET_ATTR_LINENO];
1598 if (attr[IPSET_ATTR_DATA]) { 1609 if (attr[IPSET_ATTR_DATA]) {
1599 if (nla_parse_nested_deprecated(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], set->type->adt_policy, NULL)) 1610 if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
1611 attr[IPSET_ATTR_DATA],
1612 set->type->adt_policy, NULL))
1600 return -IPSET_ERR_PROTOCOL; 1613 return -IPSET_ERR_PROTOCOL;
1601 ret = call_ad(ctnl, skb, set, tb, adt, flags, 1614 ret = call_ad(ctnl, skb, set, tb, adt, flags,
1602 use_lineno); 1615 use_lineno);
@@ -1606,7 +1619,8 @@ static int ip_set_ad(struct net *net, struct sock *ctnl,
1606 nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) { 1619 nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
1607 if (nla_type(nla) != IPSET_ATTR_DATA || 1620 if (nla_type(nla) != IPSET_ATTR_DATA ||
1608 !flag_nested(nla) || 1621 !flag_nested(nla) ||
1609 nla_parse_nested_deprecated(tb, IPSET_ATTR_ADT_MAX, nla, set->type->adt_policy, NULL)) 1622 nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
1623 set->type->adt_policy, NULL))
1610 return -IPSET_ERR_PROTOCOL; 1624 return -IPSET_ERR_PROTOCOL;
1611 ret = call_ad(ctnl, skb, set, tb, adt, 1625 ret = call_ad(ctnl, skb, set, tb, adt,
1612 flags, use_lineno); 1626 flags, use_lineno);
@@ -1655,7 +1669,8 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1655 if (!set) 1669 if (!set)
1656 return -ENOENT; 1670 return -ENOENT;
1657 1671
1658 if (nla_parse_nested_deprecated(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], set->type->adt_policy, NULL)) 1672 if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
1673 set->type->adt_policy, NULL))
1659 return -IPSET_ERR_PROTOCOL; 1674 return -IPSET_ERR_PROTOCOL;
1660 1675
1661 rcu_read_lock_bh(); 1676 rcu_read_lock_bh();
@@ -1961,7 +1976,7 @@ static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
1961 [IPSET_CMD_LIST] = { 1976 [IPSET_CMD_LIST] = {
1962 .call = ip_set_dump, 1977 .call = ip_set_dump,
1963 .attr_count = IPSET_ATTR_CMD_MAX, 1978 .attr_count = IPSET_ATTR_CMD_MAX,
1964 .policy = ip_set_setname_policy, 1979 .policy = ip_set_dump_policy,
1965 }, 1980 },
1966 [IPSET_CMD_SAVE] = { 1981 [IPSET_CMD_SAVE] = {
1967 .call = ip_set_dump, 1982 .call = ip_set_dump,
@@ -2069,8 +2084,9 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
2069 } 2084 }
2070 2085
2071 req_version->version = IPSET_PROTOCOL; 2086 req_version->version = IPSET_PROTOCOL;
2072 ret = copy_to_user(user, req_version, 2087 if (copy_to_user(user, req_version,
2073 sizeof(struct ip_set_req_version)); 2088 sizeof(struct ip_set_req_version)))
2089 ret = -EFAULT;
2074 goto done; 2090 goto done;
2075 } 2091 }
2076 case IP_SET_OP_GET_BYNAME: { 2092 case IP_SET_OP_GET_BYNAME: {
@@ -2129,7 +2145,8 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
2129 } /* end of switch(op) */ 2145 } /* end of switch(op) */
2130 2146
2131copy: 2147copy:
2132 ret = copy_to_user(user, data, copylen); 2148 if (copy_to_user(user, data, copylen))
2149 ret = -EFAULT;
2133 2150
2134done: 2151done:
2135 vfree(data); 2152 vfree(data);
diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
index 24d8f4df4230..4ce563eb927d 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
@@ -209,7 +209,7 @@ hash_ipmac6_kadt(struct ip_set *set, const struct sk_buff *skb,
209 (skb_mac_header(skb) + ETH_HLEN) > skb->data) 209 (skb_mac_header(skb) + ETH_HLEN) > skb->data)
210 return -EINVAL; 210 return -EINVAL;
211 211
212 if (opt->flags & IPSET_DIM_ONE_SRC) 212 if (opt->flags & IPSET_DIM_TWO_SRC)
213 ether_addr_copy(e.ether, eth_hdr(skb)->h_source); 213 ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
214 else 214 else
215 ether_addr_copy(e.ether, eth_hdr(skb)->h_dest); 215 ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index c259cbc3ef45..3d932de0ad29 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -368,6 +368,7 @@ static struct ip_set_type hash_net_type __read_mostly = {
368 [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, 368 [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
369 [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, 369 [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
370 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, 370 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
371 [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
371 [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, 372 [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
372 [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, 373 [IPSET_ATTR_BYTES] = { .type = NLA_U64 },
373 [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, 374 [IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index a3ae69bfee66..4398322fad59 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -476,6 +476,7 @@ static struct ip_set_type hash_netnet_type __read_mostly = {
476 [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, 476 [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
477 [IPSET_ATTR_CIDR2] = { .type = NLA_U8 }, 477 [IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
478 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, 478 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
479 [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
479 [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, 480 [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
480 [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, 481 [IPSET_ATTR_BYTES] = { .type = NLA_U64 },
481 [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, 482 [IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index d481f9baca2f..712a428509ad 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1922,6 +1922,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
1922 if (nlh->nlmsg_flags & NLM_F_REPLACE) 1922 if (nlh->nlmsg_flags & NLM_F_REPLACE)
1923 return -EOPNOTSUPP; 1923 return -EOPNOTSUPP;
1924 1924
1925 flags |= chain->flags & NFT_BASE_CHAIN;
1925 return nf_tables_updchain(&ctx, genmask, policy, flags); 1926 return nf_tables_updchain(&ctx, genmask, policy, flags);
1926 } 1927 }
1927 1928
@@ -5143,9 +5144,6 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
5143 struct nft_trans *trans; 5144 struct nft_trans *trans;
5144 int err; 5145 int err;
5145 5146
5146 if (!obj->ops->update)
5147 return -EOPNOTSUPP;
5148
5149 trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ, 5147 trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ,
5150 sizeof(struct nft_trans_obj)); 5148 sizeof(struct nft_trans_obj));
5151 if (!trans) 5149 if (!trans)
@@ -6499,7 +6497,8 @@ static void nft_obj_commit_update(struct nft_trans *trans)
6499 obj = nft_trans_obj(trans); 6497 obj = nft_trans_obj(trans);
6500 newobj = nft_trans_obj_newobj(trans); 6498 newobj = nft_trans_obj_newobj(trans);
6501 6499
6502 obj->ops->update(obj, newobj); 6500 if (obj->ops->update)
6501 obj->ops->update(obj, newobj);
6503 6502
6504 kfree(newobj); 6503 kfree(newobj);
6505} 6504}
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index ad783f4840ef..e25dab8128db 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -334,7 +334,8 @@ int nft_flow_rule_offload_commit(struct net *net)
334 334
335 switch (trans->msg_type) { 335 switch (trans->msg_type) {
336 case NFT_MSG_NEWCHAIN: 336 case NFT_MSG_NEWCHAIN:
337 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) 337 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
338 nft_trans_chain_update(trans))
338 continue; 339 continue;
339 340
340 policy = nft_trans_chain_policy(trans); 341 policy = nft_trans_chain_policy(trans);
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index 974300178fa9..02afa752dd2e 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -134,12 +134,13 @@ static int nft_bitwise_offload(struct nft_offload_ctx *ctx,
134 const struct nft_expr *expr) 134 const struct nft_expr *expr)
135{ 135{
136 const struct nft_bitwise *priv = nft_expr_priv(expr); 136 const struct nft_bitwise *priv = nft_expr_priv(expr);
137 struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
137 138
138 if (memcmp(&priv->xor, &zero, sizeof(priv->xor)) || 139 if (memcmp(&priv->xor, &zero, sizeof(priv->xor)) ||
139 priv->sreg != priv->dreg) 140 priv->sreg != priv->dreg || priv->len != reg->len)
140 return -EOPNOTSUPP; 141 return -EOPNOTSUPP;
141 142
142 memcpy(&ctx->regs[priv->dreg].mask, &priv->mask, sizeof(priv->mask)); 143 memcpy(&reg->mask, &priv->mask, sizeof(priv->mask));
143 144
144 return 0; 145 return 0;
145} 146}
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index bd173b1824c6..0744b2bb46da 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -116,7 +116,7 @@ static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
116 u8 *mask = (u8 *)&flow->match.mask; 116 u8 *mask = (u8 *)&flow->match.mask;
117 u8 *key = (u8 *)&flow->match.key; 117 u8 *key = (u8 *)&flow->match.key;
118 118
119 if (priv->op != NFT_CMP_EQ) 119 if (priv->op != NFT_CMP_EQ || reg->len != priv->len)
120 return -EOPNOTSUPP; 120 return -EOPNOTSUPP;
121 121
122 memcpy(key + reg->offset, &priv->data, priv->len); 122 memcpy(key + reg->offset, &priv->data, priv->len);
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 17e6ca62f1be..afde0d763039 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -1099,7 +1099,6 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
1099 1099
1100 local = nfc_llcp_find_local(dev); 1100 local = nfc_llcp_find_local(dev);
1101 if (!local) { 1101 if (!local) {
1102 nfc_put_device(dev);
1103 rc = -ENODEV; 1102 rc = -ENODEV;
1104 goto exit; 1103 goto exit;
1105 } 1104 }
@@ -1159,7 +1158,6 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
1159 1158
1160 local = nfc_llcp_find_local(dev); 1159 local = nfc_llcp_find_local(dev);
1161 if (!local) { 1160 if (!local) {
1162 nfc_put_device(dev);
1163 rc = -ENODEV; 1161 rc = -ENODEV;
1164 goto exit; 1162 goto exit;
1165 } 1163 }
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 8717c0b26c90..20d60b8fcb70 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -21,6 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/idr.h> 22#include <linux/idr.h>
23#include <linux/rhashtable.h> 23#include <linux/rhashtable.h>
24#include <linux/jhash.h>
24#include <net/net_namespace.h> 25#include <net/net_namespace.h>
25#include <net/sock.h> 26#include <net/sock.h>
26#include <net/netlink.h> 27#include <net/netlink.h>
@@ -47,6 +48,62 @@ static LIST_HEAD(tcf_proto_base);
47/* Protects list of registered TC modules. It is pure SMP lock. */ 48/* Protects list of registered TC modules. It is pure SMP lock. */
48static DEFINE_RWLOCK(cls_mod_lock); 49static DEFINE_RWLOCK(cls_mod_lock);
49 50
51static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
52{
53 return jhash_3words(tp->chain->index, tp->prio,
54 (__force __u32)tp->protocol, 0);
55}
56
57static void tcf_proto_signal_destroying(struct tcf_chain *chain,
58 struct tcf_proto *tp)
59{
60 struct tcf_block *block = chain->block;
61
62 mutex_lock(&block->proto_destroy_lock);
63 hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
64 destroy_obj_hashfn(tp));
65 mutex_unlock(&block->proto_destroy_lock);
66}
67
68static bool tcf_proto_cmp(const struct tcf_proto *tp1,
69 const struct tcf_proto *tp2)
70{
71 return tp1->chain->index == tp2->chain->index &&
72 tp1->prio == tp2->prio &&
73 tp1->protocol == tp2->protocol;
74}
75
76static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
77 struct tcf_proto *tp)
78{
79 u32 hash = destroy_obj_hashfn(tp);
80 struct tcf_proto *iter;
81 bool found = false;
82
83 rcu_read_lock();
84 hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
85 destroy_ht_node, hash) {
86 if (tcf_proto_cmp(tp, iter)) {
87 found = true;
88 break;
89 }
90 }
91 rcu_read_unlock();
92
93 return found;
94}
95
96static void
97tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
98{
99 struct tcf_block *block = chain->block;
100
101 mutex_lock(&block->proto_destroy_lock);
102 if (hash_hashed(&tp->destroy_ht_node))
103 hash_del_rcu(&tp->destroy_ht_node);
104 mutex_unlock(&block->proto_destroy_lock);
105}
106
50/* Find classifier type by string name */ 107/* Find classifier type by string name */
51 108
52static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind) 109static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
@@ -234,9 +291,11 @@ static void tcf_proto_get(struct tcf_proto *tp)
234static void tcf_chain_put(struct tcf_chain *chain); 291static void tcf_chain_put(struct tcf_chain *chain);
235 292
236static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held, 293static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
237 struct netlink_ext_ack *extack) 294 bool sig_destroy, struct netlink_ext_ack *extack)
238{ 295{
239 tp->ops->destroy(tp, rtnl_held, extack); 296 tp->ops->destroy(tp, rtnl_held, extack);
297 if (sig_destroy)
298 tcf_proto_signal_destroyed(tp->chain, tp);
240 tcf_chain_put(tp->chain); 299 tcf_chain_put(tp->chain);
241 module_put(tp->ops->owner); 300 module_put(tp->ops->owner);
242 kfree_rcu(tp, rcu); 301 kfree_rcu(tp, rcu);
@@ -246,7 +305,7 @@ static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
246 struct netlink_ext_ack *extack) 305 struct netlink_ext_ack *extack)
247{ 306{
248 if (refcount_dec_and_test(&tp->refcnt)) 307 if (refcount_dec_and_test(&tp->refcnt))
249 tcf_proto_destroy(tp, rtnl_held, extack); 308 tcf_proto_destroy(tp, rtnl_held, true, extack);
250} 309}
251 310
252static int walker_check_empty(struct tcf_proto *tp, void *fh, 311static int walker_check_empty(struct tcf_proto *tp, void *fh,
@@ -370,6 +429,7 @@ static bool tcf_chain_detach(struct tcf_chain *chain)
370static void tcf_block_destroy(struct tcf_block *block) 429static void tcf_block_destroy(struct tcf_block *block)
371{ 430{
372 mutex_destroy(&block->lock); 431 mutex_destroy(&block->lock);
432 mutex_destroy(&block->proto_destroy_lock);
373 kfree_rcu(block, rcu); 433 kfree_rcu(block, rcu);
374} 434}
375 435
@@ -545,6 +605,12 @@ static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
545 605
546 mutex_lock(&chain->filter_chain_lock); 606 mutex_lock(&chain->filter_chain_lock);
547 tp = tcf_chain_dereference(chain->filter_chain, chain); 607 tp = tcf_chain_dereference(chain->filter_chain, chain);
608 while (tp) {
609 tp_next = rcu_dereference_protected(tp->next, 1);
610 tcf_proto_signal_destroying(chain, tp);
611 tp = tp_next;
612 }
613 tp = tcf_chain_dereference(chain->filter_chain, chain);
548 RCU_INIT_POINTER(chain->filter_chain, NULL); 614 RCU_INIT_POINTER(chain->filter_chain, NULL);
549 tcf_chain0_head_change(chain, NULL); 615 tcf_chain0_head_change(chain, NULL);
550 chain->flushing = true; 616 chain->flushing = true;
@@ -844,6 +910,7 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
844 return ERR_PTR(-ENOMEM); 910 return ERR_PTR(-ENOMEM);
845 } 911 }
846 mutex_init(&block->lock); 912 mutex_init(&block->lock);
913 mutex_init(&block->proto_destroy_lock);
847 init_rwsem(&block->cb_lock); 914 init_rwsem(&block->cb_lock);
848 flow_block_init(&block->flow_block); 915 flow_block_init(&block->flow_block);
849 INIT_LIST_HEAD(&block->chain_list); 916 INIT_LIST_HEAD(&block->chain_list);
@@ -1621,6 +1688,12 @@ static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1621 1688
1622 mutex_lock(&chain->filter_chain_lock); 1689 mutex_lock(&chain->filter_chain_lock);
1623 1690
1691 if (tcf_proto_exists_destroying(chain, tp_new)) {
1692 mutex_unlock(&chain->filter_chain_lock);
1693 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1694 return ERR_PTR(-EAGAIN);
1695 }
1696
1624 tp = tcf_chain_tp_find(chain, &chain_info, 1697 tp = tcf_chain_tp_find(chain, &chain_info,
1625 protocol, prio, false); 1698 protocol, prio, false);
1626 if (!tp) 1699 if (!tp)
@@ -1628,10 +1701,10 @@ static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1628 mutex_unlock(&chain->filter_chain_lock); 1701 mutex_unlock(&chain->filter_chain_lock);
1629 1702
1630 if (tp) { 1703 if (tp) {
1631 tcf_proto_destroy(tp_new, rtnl_held, NULL); 1704 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1632 tp_new = tp; 1705 tp_new = tp;
1633 } else if (err) { 1706 } else if (err) {
1634 tcf_proto_destroy(tp_new, rtnl_held, NULL); 1707 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1635 tp_new = ERR_PTR(err); 1708 tp_new = ERR_PTR(err);
1636 } 1709 }
1637 1710
@@ -1669,6 +1742,7 @@ static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1669 return; 1742 return;
1670 } 1743 }
1671 1744
1745 tcf_proto_signal_destroying(chain, tp);
1672 next = tcf_chain_dereference(chain_info.next, chain); 1746 next = tcf_chain_dereference(chain_info.next, chain);
1673 if (tp == chain->filter_chain) 1747 if (tp == chain->filter_chain)
1674 tcf_chain0_head_change(chain, next); 1748 tcf_chain0_head_change(chain, next);
@@ -2188,6 +2262,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2188 err = -EINVAL; 2262 err = -EINVAL;
2189 goto errout_locked; 2263 goto errout_locked;
2190 } else if (t->tcm_handle == 0) { 2264 } else if (t->tcm_handle == 0) {
2265 tcf_proto_signal_destroying(chain, tp);
2191 tcf_chain_tp_remove(chain, &chain_info, tp); 2266 tcf_chain_tp_remove(chain, &chain_info, tp);
2192 mutex_unlock(&chain->filter_chain_lock); 2267 mutex_unlock(&chain->filter_chain_lock);
2193 2268
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 2121187229cd..7cd68628c637 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1224,8 +1224,6 @@ static int taprio_enable_offload(struct net_device *dev,
1224 goto done; 1224 goto done;
1225 } 1225 }
1226 1226
1227 taprio_offload_config_changed(q);
1228
1229done: 1227done:
1230 taprio_offload_free(offload); 1228 taprio_offload_free(offload);
1231 1229
@@ -1505,6 +1503,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
1505 call_rcu(&admin->rcu, taprio_free_sched_cb); 1503 call_rcu(&admin->rcu, taprio_free_sched_cb);
1506 1504
1507 spin_unlock_irqrestore(&q->current_entry_lock, flags); 1505 spin_unlock_irqrestore(&q->current_entry_lock, flags);
1506
1507 if (FULL_OFFLOAD_IS_ENABLED(taprio_flags))
1508 taprio_offload_config_changed(q);
1508 } 1509 }
1509 1510
1510 new_admin = NULL; 1511 new_admin = NULL;
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 2920b006f65c..571e6d84da3b 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -376,8 +376,6 @@ static int smc_pnet_fill_entry(struct net *net,
376 return 0; 376 return 0;
377 377
378error: 378error:
379 if (pnetelem->ndev)
380 dev_put(pnetelem->ndev);
381 return rc; 379 return rc;
382} 380}
383 381
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index f959487c5cd1..683d00837693 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -523,8 +523,10 @@ last_record:
523int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 523int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
524{ 524{
525 unsigned char record_type = TLS_RECORD_TYPE_DATA; 525 unsigned char record_type = TLS_RECORD_TYPE_DATA;
526 struct tls_context *tls_ctx = tls_get_ctx(sk);
526 int rc; 527 int rc;
527 528
529 mutex_lock(&tls_ctx->tx_lock);
528 lock_sock(sk); 530 lock_sock(sk);
529 531
530 if (unlikely(msg->msg_controllen)) { 532 if (unlikely(msg->msg_controllen)) {
@@ -538,12 +540,14 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
538 540
539out: 541out:
540 release_sock(sk); 542 release_sock(sk);
543 mutex_unlock(&tls_ctx->tx_lock);
541 return rc; 544 return rc;
542} 545}
543 546
544int tls_device_sendpage(struct sock *sk, struct page *page, 547int tls_device_sendpage(struct sock *sk, struct page *page,
545 int offset, size_t size, int flags) 548 int offset, size_t size, int flags)
546{ 549{
550 struct tls_context *tls_ctx = tls_get_ctx(sk);
547 struct iov_iter msg_iter; 551 struct iov_iter msg_iter;
548 char *kaddr = kmap(page); 552 char *kaddr = kmap(page);
549 struct kvec iov; 553 struct kvec iov;
@@ -552,6 +556,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
552 if (flags & MSG_SENDPAGE_NOTLAST) 556 if (flags & MSG_SENDPAGE_NOTLAST)
553 flags |= MSG_MORE; 557 flags |= MSG_MORE;
554 558
559 mutex_lock(&tls_ctx->tx_lock);
555 lock_sock(sk); 560 lock_sock(sk);
556 561
557 if (flags & MSG_OOB) { 562 if (flags & MSG_OOB) {
@@ -568,6 +573,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
568 573
569out: 574out:
570 release_sock(sk); 575 release_sock(sk);
576 mutex_unlock(&tls_ctx->tx_lock);
571 return rc; 577 return rc;
572} 578}
573 579
@@ -623,9 +629,11 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
623 629
624void tls_device_write_space(struct sock *sk, struct tls_context *ctx) 630void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
625{ 631{
626 if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) { 632 if (tls_is_partially_sent_record(ctx)) {
627 gfp_t sk_allocation = sk->sk_allocation; 633 gfp_t sk_allocation = sk->sk_allocation;
628 634
635 WARN_ON_ONCE(sk->sk_write_pending);
636
629 sk->sk_allocation = GFP_ATOMIC; 637 sk->sk_allocation = GFP_ATOMIC;
630 tls_push_partial_record(sk, ctx, 638 tls_push_partial_record(sk, ctx,
631 MSG_DONTWAIT | MSG_NOSIGNAL | 639 MSG_DONTWAIT | MSG_NOSIGNAL |
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index ac88877dcade..0775ae40fcfb 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -267,6 +267,7 @@ void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
267 267
268 memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send)); 268 memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
269 memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv)); 269 memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
270 mutex_destroy(&ctx->tx_lock);
270 271
271 if (sk) 272 if (sk)
272 kfree_rcu(ctx, rcu); 273 kfree_rcu(ctx, rcu);
@@ -612,6 +613,7 @@ static struct tls_context *create_ctx(struct sock *sk)
612 if (!ctx) 613 if (!ctx)
613 return NULL; 614 return NULL;
614 615
616 mutex_init(&ctx->tx_lock);
615 rcu_assign_pointer(icsk->icsk_ulp_data, ctx); 617 rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
616 ctx->sk_proto = sk->sk_prot; 618 ctx->sk_proto = sk->sk_prot;
617 return ctx; 619 return ctx;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index c2b5e0d2ba1a..446f23c1f3ce 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -897,15 +897,9 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
897 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL)) 897 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
898 return -ENOTSUPP; 898 return -ENOTSUPP;
899 899
900 mutex_lock(&tls_ctx->tx_lock);
900 lock_sock(sk); 901 lock_sock(sk);
901 902
902 /* Wait till there is any pending write on socket */
903 if (unlikely(sk->sk_write_pending)) {
904 ret = wait_on_pending_writer(sk, &timeo);
905 if (unlikely(ret))
906 goto send_end;
907 }
908
909 if (unlikely(msg->msg_controllen)) { 903 if (unlikely(msg->msg_controllen)) {
910 ret = tls_proccess_cmsg(sk, msg, &record_type); 904 ret = tls_proccess_cmsg(sk, msg, &record_type);
911 if (ret) { 905 if (ret) {
@@ -1091,6 +1085,7 @@ send_end:
1091 ret = sk_stream_error(sk, msg->msg_flags, ret); 1085 ret = sk_stream_error(sk, msg->msg_flags, ret);
1092 1086
1093 release_sock(sk); 1087 release_sock(sk);
1088 mutex_unlock(&tls_ctx->tx_lock);
1094 return copied ? copied : ret; 1089 return copied ? copied : ret;
1095} 1090}
1096 1091
@@ -1114,13 +1109,6 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
1114 eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST)); 1109 eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
1115 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1110 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1116 1111
1117 /* Wait till there is any pending write on socket */
1118 if (unlikely(sk->sk_write_pending)) {
1119 ret = wait_on_pending_writer(sk, &timeo);
1120 if (unlikely(ret))
1121 goto sendpage_end;
1122 }
1123
1124 /* Call the sk_stream functions to manage the sndbuf mem. */ 1112 /* Call the sk_stream functions to manage the sndbuf mem. */
1125 while (size > 0) { 1113 while (size > 0) {
1126 size_t copy, required_size; 1114 size_t copy, required_size;
@@ -1219,15 +1207,18 @@ sendpage_end:
1219int tls_sw_sendpage(struct sock *sk, struct page *page, 1207int tls_sw_sendpage(struct sock *sk, struct page *page,
1220 int offset, size_t size, int flags) 1208 int offset, size_t size, int flags)
1221{ 1209{
1210 struct tls_context *tls_ctx = tls_get_ctx(sk);
1222 int ret; 1211 int ret;
1223 1212
1224 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | 1213 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1225 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY)) 1214 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
1226 return -ENOTSUPP; 1215 return -ENOTSUPP;
1227 1216
1217 mutex_lock(&tls_ctx->tx_lock);
1228 lock_sock(sk); 1218 lock_sock(sk);
1229 ret = tls_sw_do_sendpage(sk, page, offset, size, flags); 1219 ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
1230 release_sock(sk); 1220 release_sock(sk);
1221 mutex_unlock(&tls_ctx->tx_lock);
1231 return ret; 1222 return ret;
1232} 1223}
1233 1224
@@ -2170,9 +2161,11 @@ static void tx_work_handler(struct work_struct *work)
2170 2161
2171 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) 2162 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2172 return; 2163 return;
2164 mutex_lock(&tls_ctx->tx_lock);
2173 lock_sock(sk); 2165 lock_sock(sk);
2174 tls_tx_records(sk, -1); 2166 tls_tx_records(sk, -1);
2175 release_sock(sk); 2167 release_sock(sk);
2168 mutex_unlock(&tls_ctx->tx_lock);
2176} 2169}
2177 2170
2178void tls_sw_write_space(struct sock *sk, struct tls_context *ctx) 2171void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
@@ -2180,12 +2173,9 @@ void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2180 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx); 2173 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2181 2174
2182 /* Schedule the transmission if tx list is ready */ 2175 /* Schedule the transmission if tx list is ready */
2183 if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) { 2176 if (is_tx_ready(tx_ctx) &&
2184 /* Schedule the transmission */ 2177 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2185 if (!test_and_set_bit(BIT_TX_SCHEDULED, 2178 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2186 &tx_ctx->tx_bitmask))
2187 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2188 }
2189} 2179}
2190 2180
2191void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx) 2181void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 481f7f8a1655..fb2060dffb0a 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -947,9 +947,11 @@ virtio_transport_recv_connected(struct sock *sk,
947 if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND) 947 if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
948 vsk->peer_shutdown |= SEND_SHUTDOWN; 948 vsk->peer_shutdown |= SEND_SHUTDOWN;
949 if (vsk->peer_shutdown == SHUTDOWN_MASK && 949 if (vsk->peer_shutdown == SHUTDOWN_MASK &&
950 vsock_stream_has_data(vsk) <= 0) { 950 vsock_stream_has_data(vsk) <= 0 &&
951 sock_set_flag(sk, SOCK_DONE); 951 !sock_flag(sk, SOCK_DONE)) {
952 sk->sk_state = TCP_CLOSING; 952 (void)virtio_transport_reset(vsk, NULL);
953
954 virtio_transport_do_close(vsk, true);
953 } 955 }
954 if (le32_to_cpu(pkt->hdr.flags)) 956 if (le32_to_cpu(pkt->hdr.flags))
955 sk->sk_state_change(sk); 957 sk->sk_state_change(sk);
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 1d9be26b4edd..42b571cde177 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -176,6 +176,7 @@ KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/bpf/
176KBUILD_HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/ 176KBUILD_HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
177KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include 177KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include
178KBUILD_HOSTCFLAGS += -I$(srctree)/tools/perf 178KBUILD_HOSTCFLAGS += -I$(srctree)/tools/perf
179KBUILD_HOSTCFLAGS += -DHAVE_ATTR_TEST=0
179 180
180HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable 181HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
181 182
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
index 7b7c2fafbc68..be984aa29b75 100644
--- a/scripts/gdb/linux/symbols.py
+++ b/scripts/gdb/linux/symbols.py
@@ -99,7 +99,8 @@ lx-symbols command."""
99 attrs[n]['name'].string(): attrs[n]['address'] 99 attrs[n]['name'].string(): attrs[n]['address']
100 for n in range(int(sect_attrs['nsections']))} 100 for n in range(int(sect_attrs['nsections']))}
101 args = [] 101 args = []
102 for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]: 102 for section_name in [".data", ".data..read_mostly", ".rodata", ".bss",
103 ".text", ".text.hot", ".text.unlikely"]:
103 address = section_name_to_address.get(section_name) 104 address = section_name_to_address.get(section_name)
104 if address: 105 if address:
105 args.append(" -s {name} {addr}".format( 106 args.append(" -s {name} {addr}".format(
diff --git a/scripts/nsdeps b/scripts/nsdeps
index dda6fbac016e..04cea0921673 100644
--- a/scripts/nsdeps
+++ b/scripts/nsdeps
@@ -31,12 +31,12 @@ generate_deps() {
31 local mod_file=`echo $@ | sed -e 's/\.ko/\.mod/'` 31 local mod_file=`echo $@ | sed -e 's/\.ko/\.mod/'`
32 local ns_deps_file=`echo $@ | sed -e 's/\.ko/\.ns_deps/'` 32 local ns_deps_file=`echo $@ | sed -e 's/\.ko/\.ns_deps/'`
33 if [ ! -f "$ns_deps_file" ]; then return; fi 33 if [ ! -f "$ns_deps_file" ]; then return; fi
34 local mod_source_files=`cat $mod_file | sed -n 1p \ 34 local mod_source_files="`cat $mod_file | sed -n 1p \
35 | sed -e 's/\.o/\.c/g' \ 35 | sed -e 's/\.o/\.c/g' \
36 | sed "s|[^ ]* *|${srctree}/&|g"` 36 | sed "s|[^ ]* *|${srctree}/&|g"`"
37 for ns in `cat $ns_deps_file`; do 37 for ns in `cat $ns_deps_file`; do
38 echo "Adding namespace $ns to module $mod_name (if needed)." 38 echo "Adding namespace $ns to module $mod_name (if needed)."
39 generate_deps_for_ns $ns $mod_source_files 39 generate_deps_for_ns $ns "$mod_source_files"
40 # sort the imports 40 # sort the imports
41 for source_file in $mod_source_files; do 41 for source_file in $mod_source_files; do
42 sed '/MODULE_IMPORT_NS/Q' $source_file > ${source_file}.tmp 42 sed '/MODULE_IMPORT_NS/Q' $source_file > ${source_file}.tmp
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 41905afada63..f34ce564d92c 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -528,7 +528,7 @@ static int snd_compress_check_input(struct snd_compr_params *params)
528{ 528{
529 /* first let's check the buffer parameter's */ 529 /* first let's check the buffer parameter's */
530 if (params->buffer.fragment_size == 0 || 530 if (params->buffer.fragment_size == 0 ||
531 params->buffer.fragments > INT_MAX / params->buffer.fragment_size || 531 params->buffer.fragments > U32_MAX / params->buffer.fragment_size ||
532 params->buffer.fragments == 0) 532 params->buffer.fragments == 0)
533 return -EINVAL; 533 return -EINVAL;
534 534
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 6b724d2ee2de..59ae21b0bb93 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -284,11 +284,11 @@ int snd_timer_open(struct snd_timer_instance **ti,
284 goto unlock; 284 goto unlock;
285 } 285 }
286 if (!list_empty(&timer->open_list_head)) { 286 if (!list_empty(&timer->open_list_head)) {
287 timeri = list_entry(timer->open_list_head.next, 287 struct snd_timer_instance *t =
288 list_entry(timer->open_list_head.next,
288 struct snd_timer_instance, open_list); 289 struct snd_timer_instance, open_list);
289 if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) { 290 if (t->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
290 err = -EBUSY; 291 err = -EBUSY;
291 timeri = NULL;
292 goto unlock; 292 goto unlock;
293 } 293 }
294 } 294 }
diff --git a/sound/firewire/bebob/bebob_focusrite.c b/sound/firewire/bebob/bebob_focusrite.c
index 32b864bee25f..06d6a37cd853 100644
--- a/sound/firewire/bebob/bebob_focusrite.c
+++ b/sound/firewire/bebob/bebob_focusrite.c
@@ -27,6 +27,8 @@
27#define SAFFIRE_CLOCK_SOURCE_SPDIF 1 27#define SAFFIRE_CLOCK_SOURCE_SPDIF 1
28 28
29/* clock sources as returned from register of Saffire Pro 10 and 26 */ 29/* clock sources as returned from register of Saffire Pro 10 and 26 */
30#define SAFFIREPRO_CLOCK_SOURCE_SELECT_MASK 0x000000ff
31#define SAFFIREPRO_CLOCK_SOURCE_DETECT_MASK 0x0000ff00
30#define SAFFIREPRO_CLOCK_SOURCE_INTERNAL 0 32#define SAFFIREPRO_CLOCK_SOURCE_INTERNAL 0
31#define SAFFIREPRO_CLOCK_SOURCE_SKIP 1 /* never used on hardware */ 33#define SAFFIREPRO_CLOCK_SOURCE_SKIP 1 /* never used on hardware */
32#define SAFFIREPRO_CLOCK_SOURCE_SPDIF 2 34#define SAFFIREPRO_CLOCK_SOURCE_SPDIF 2
@@ -189,6 +191,7 @@ saffirepro_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id)
189 map = saffirepro_clk_maps[1]; 191 map = saffirepro_clk_maps[1];
190 192
191 /* In a case that this driver cannot handle the value of register. */ 193 /* In a case that this driver cannot handle the value of register. */
194 value &= SAFFIREPRO_CLOCK_SOURCE_SELECT_MASK;
192 if (value >= SAFFIREPRO_CLOCK_SOURCE_COUNT || map[value] < 0) { 195 if (value >= SAFFIREPRO_CLOCK_SOURCE_COUNT || map[value] < 0) {
193 err = -EIO; 196 err = -EIO;
194 goto end; 197 goto end;
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 6d1fb7c11f17..b7a1abb3e231 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -7604,7 +7604,7 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
7604 /* Delay enabling the HP amp, to let the mic-detection 7604 /* Delay enabling the HP amp, to let the mic-detection
7605 * state machine run. 7605 * state machine run.
7606 */ 7606 */
7607 cancel_delayed_work_sync(&spec->unsol_hp_work); 7607 cancel_delayed_work(&spec->unsol_hp_work);
7608 schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500)); 7608 schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
7609 tbl = snd_hda_jack_tbl_get(codec, cb->nid); 7609 tbl = snd_hda_jack_tbl_get(codec, cb->nid);
7610 if (tbl) 7610 if (tbl)
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index b72553710ffb..3c720703ebb8 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2851,6 +2851,18 @@ static int patch_i915_icl_hdmi(struct hda_codec *codec)
2851 return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map)); 2851 return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
2852} 2852}
2853 2853
2854static int patch_i915_tgl_hdmi(struct hda_codec *codec)
2855{
2856 /*
2857 * pin to port mapping table where the value indicate the pin number and
2858 * the index indicate the port number with 1 base.
2859 */
2860 static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
2861
2862 return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
2863}
2864
2865
2854/* Intel Baytrail and Braswell; with eld notifier */ 2866/* Intel Baytrail and Braswell; with eld notifier */
2855static int patch_i915_byt_hdmi(struct hda_codec *codec) 2867static int patch_i915_byt_hdmi(struct hda_codec *codec)
2856{ 2868{
@@ -4153,6 +4165,7 @@ HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_i915_hsw_hdmi),
4153HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi), 4165HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi),
4154HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi), 4166HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi),
4155HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi), 4167HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi),
4168HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi),
4156HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), 4169HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
4157HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi), 4170HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),
4158HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi), 4171HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi),
diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c
index 91242b6f8ea7..4570f662fb48 100644
--- a/sound/soc/codecs/hdac_hda.c
+++ b/sound/soc/codecs/hdac_hda.c
@@ -410,8 +410,8 @@ static void hdac_hda_codec_remove(struct snd_soc_component *component)
410 return; 410 return;
411 } 411 }
412 412
413 snd_hdac_ext_bus_link_put(hdev->bus, hlink);
414 pm_runtime_disable(&hdev->dev); 413 pm_runtime_disable(&hdev->dev);
414 snd_hdac_ext_bus_link_put(hdev->bus, hlink);
415} 415}
416 416
417static const struct snd_soc_dapm_route hdac_hda_dapm_routes[] = { 417static const struct snd_soc_dapm_route hdac_hda_dapm_routes[] = {
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index b5fd8f08726e..f8b5b960e597 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -274,7 +274,7 @@ struct hdmi_codec_priv {
274 uint8_t eld[MAX_ELD_BYTES]; 274 uint8_t eld[MAX_ELD_BYTES];
275 struct snd_pcm_chmap *chmap_info; 275 struct snd_pcm_chmap *chmap_info;
276 unsigned int chmap_idx; 276 unsigned int chmap_idx;
277 struct mutex lock; 277 unsigned long busy;
278 struct snd_soc_jack *jack; 278 struct snd_soc_jack *jack;
279 unsigned int jack_status; 279 unsigned int jack_status;
280}; 280};
@@ -390,8 +390,8 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
390 struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai); 390 struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
391 int ret = 0; 391 int ret = 0;
392 392
393 ret = mutex_trylock(&hcp->lock); 393 ret = test_and_set_bit(0, &hcp->busy);
394 if (!ret) { 394 if (ret) {
395 dev_err(dai->dev, "Only one simultaneous stream supported!\n"); 395 dev_err(dai->dev, "Only one simultaneous stream supported!\n");
396 return -EINVAL; 396 return -EINVAL;
397 } 397 }
@@ -419,7 +419,7 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
419 419
420err: 420err:
421 /* Release the exclusive lock on error */ 421 /* Release the exclusive lock on error */
422 mutex_unlock(&hcp->lock); 422 clear_bit(0, &hcp->busy);
423 return ret; 423 return ret;
424} 424}
425 425
@@ -431,7 +431,7 @@ static void hdmi_codec_shutdown(struct snd_pcm_substream *substream,
431 hcp->chmap_idx = HDMI_CODEC_CHMAP_IDX_UNKNOWN; 431 hcp->chmap_idx = HDMI_CODEC_CHMAP_IDX_UNKNOWN;
432 hcp->hcd.ops->audio_shutdown(dai->dev->parent, hcp->hcd.data); 432 hcp->hcd.ops->audio_shutdown(dai->dev->parent, hcp->hcd.data);
433 433
434 mutex_unlock(&hcp->lock); 434 clear_bit(0, &hcp->busy);
435} 435}
436 436
437static int hdmi_codec_hw_params(struct snd_pcm_substream *substream, 437static int hdmi_codec_hw_params(struct snd_pcm_substream *substream,
@@ -811,8 +811,6 @@ static int hdmi_codec_probe(struct platform_device *pdev)
811 return -ENOMEM; 811 return -ENOMEM;
812 812
813 hcp->hcd = *hcd; 813 hcp->hcd = *hcd;
814 mutex_init(&hcp->lock);
815
816 daidrv = devm_kcalloc(dev, dai_count, sizeof(*daidrv), GFP_KERNEL); 814 daidrv = devm_kcalloc(dev, dai_count, sizeof(*daidrv), GFP_KERNEL);
817 if (!daidrv) 815 if (!daidrv)
818 return -ENOMEM; 816 return -ENOMEM;
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
index eb709d528259..cae1def8902d 100644
--- a/sound/soc/codecs/max98373.c
+++ b/sound/soc/codecs/max98373.c
@@ -960,11 +960,11 @@ static int max98373_i2c_probe(struct i2c_client *i2c,
960 960
961 /* Power on device */ 961 /* Power on device */
962 if (gpio_is_valid(max98373->reset_gpio)) { 962 if (gpio_is_valid(max98373->reset_gpio)) {
963 ret = gpio_request(max98373->reset_gpio, "MAX98373_RESET"); 963 ret = devm_gpio_request(&i2c->dev, max98373->reset_gpio,
964 "MAX98373_RESET");
964 if (ret) { 965 if (ret) {
965 dev_err(&i2c->dev, "%s: Failed to request gpio %d\n", 966 dev_err(&i2c->dev, "%s: Failed to request gpio %d\n",
966 __func__, max98373->reset_gpio); 967 __func__, max98373->reset_gpio);
967 gpio_free(max98373->reset_gpio);
968 return -EINVAL; 968 return -EINVAL;
969 } 969 }
970 gpio_direction_output(max98373->reset_gpio, 0); 970 gpio_direction_output(max98373->reset_gpio, 0);
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
index 667e9f73aba3..e3d311fb510e 100644
--- a/sound/soc/codecs/msm8916-wcd-analog.c
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -306,7 +306,7 @@ struct pm8916_wcd_analog_priv {
306}; 306};
307 307
308static const char *const adc2_mux_text[] = { "ZERO", "INP2", "INP3" }; 308static const char *const adc2_mux_text[] = { "ZERO", "INP2", "INP3" };
309static const char *const rdac2_mux_text[] = { "ZERO", "RX2", "RX1" }; 309static const char *const rdac2_mux_text[] = { "RX1", "RX2" };
310static const char *const hph_text[] = { "ZERO", "Switch", }; 310static const char *const hph_text[] = { "ZERO", "Switch", };
311 311
312static const struct soc_enum hph_enum = SOC_ENUM_SINGLE_VIRT( 312static const struct soc_enum hph_enum = SOC_ENUM_SINGLE_VIRT(
@@ -321,7 +321,7 @@ static const struct soc_enum adc2_enum = SOC_ENUM_SINGLE_VIRT(
321 321
322/* RDAC2 MUX */ 322/* RDAC2 MUX */
323static const struct soc_enum rdac2_mux_enum = SOC_ENUM_SINGLE( 323static const struct soc_enum rdac2_mux_enum = SOC_ENUM_SINGLE(
324 CDC_D_CDC_CONN_HPHR_DAC_CTL, 0, 3, rdac2_mux_text); 324 CDC_D_CDC_CONN_HPHR_DAC_CTL, 0, 2, rdac2_mux_text);
325 325
326static const struct snd_kcontrol_new spkr_switch[] = { 326static const struct snd_kcontrol_new spkr_switch[] = {
327 SOC_DAPM_SINGLE("Switch", CDC_A_SPKR_DAC_CTL, 7, 1, 0) 327 SOC_DAPM_SINGLE("Switch", CDC_A_SPKR_DAC_CTL, 7, 1, 0)
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index 61226fefe1c4..2a4ffe945177 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -555,10 +555,6 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
555 return PTR_ERR(priv->clk); 555 return PTR_ERR(priv->clk);
556 } 556 }
557 557
558 err = clk_prepare_enable(priv->clk);
559 if (err < 0)
560 return err;
561
562 priv->extclk = devm_clk_get(&pdev->dev, "extclk"); 558 priv->extclk = devm_clk_get(&pdev->dev, "extclk");
563 if (IS_ERR(priv->extclk)) { 559 if (IS_ERR(priv->extclk)) {
564 if (PTR_ERR(priv->extclk) == -EPROBE_DEFER) 560 if (PTR_ERR(priv->extclk) == -EPROBE_DEFER)
@@ -574,6 +570,10 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
574 } 570 }
575 } 571 }
576 572
573 err = clk_prepare_enable(priv->clk);
574 if (err < 0)
575 return err;
576
577 /* Some sensible defaults - this reflects the powerup values */ 577 /* Some sensible defaults - this reflects the powerup values */
578 priv->ctl_play = KIRKWOOD_PLAYCTL_SIZE_24; 578 priv->ctl_play = KIRKWOOD_PLAYCTL_SIZE_24;
579 priv->ctl_rec = KIRKWOOD_RECCTL_SIZE_24; 579 priv->ctl_rec = KIRKWOOD_RECCTL_SIZE_24;
@@ -587,7 +587,7 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
587 priv->ctl_rec |= KIRKWOOD_RECCTL_BURST_128; 587 priv->ctl_rec |= KIRKWOOD_RECCTL_BURST_128;
588 } 588 }
589 589
590 err = devm_snd_soc_register_component(&pdev->dev, &kirkwood_soc_component, 590 err = snd_soc_register_component(&pdev->dev, &kirkwood_soc_component,
591 soc_dai, 2); 591 soc_dai, 2);
592 if (err) { 592 if (err) {
593 dev_err(&pdev->dev, "snd_soc_register_component failed\n"); 593 dev_err(&pdev->dev, "snd_soc_register_component failed\n");
@@ -610,6 +610,7 @@ static int kirkwood_i2s_dev_remove(struct platform_device *pdev)
610{ 610{
611 struct kirkwood_dma_data *priv = dev_get_drvdata(&pdev->dev); 611 struct kirkwood_dma_data *priv = dev_get_drvdata(&pdev->dev);
612 612
613 snd_soc_unregister_component(&pdev->dev);
613 if (!IS_ERR(priv->extclk)) 614 if (!IS_ERR(priv->extclk))
614 clk_disable_unprepare(priv->extclk); 615 clk_disable_unprepare(priv->extclk);
615 clk_disable_unprepare(priv->clk); 616 clk_disable_unprepare(priv->clk);
diff --git a/sound/soc/rockchip/rockchip_max98090.c b/sound/soc/rockchip/rockchip_max98090.c
index 0097df1fae66..e80b09143b63 100644
--- a/sound/soc/rockchip/rockchip_max98090.c
+++ b/sound/soc/rockchip/rockchip_max98090.c
@@ -66,10 +66,13 @@ static int rk_jack_event(struct notifier_block *nb, unsigned long event,
66 struct snd_soc_jack *jack = (struct snd_soc_jack *)data; 66 struct snd_soc_jack *jack = (struct snd_soc_jack *)data;
67 struct snd_soc_dapm_context *dapm = &jack->card->dapm; 67 struct snd_soc_dapm_context *dapm = &jack->card->dapm;
68 68
69 if (event & SND_JACK_MICROPHONE) 69 if (event & SND_JACK_MICROPHONE) {
70 snd_soc_dapm_force_enable_pin(dapm, "MICBIAS"); 70 snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
71 else 71 snd_soc_dapm_force_enable_pin(dapm, "SHDN");
72 } else {
72 snd_soc_dapm_disable_pin(dapm, "MICBIAS"); 73 snd_soc_dapm_disable_pin(dapm, "MICBIAS");
74 snd_soc_dapm_disable_pin(dapm, "SHDN");
75 }
73 76
74 snd_soc_dapm_sync(dapm); 77 snd_soc_dapm_sync(dapm);
75 78
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 0324a5c39619..28f65eba2bb4 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -508,10 +508,10 @@ static struct rsnd_mod_ops rsnd_dmapp_ops = {
508#define RDMA_SSI_I_N(addr, i) (addr ##_reg - 0x00300000 + (0x40 * i) + 0x8) 508#define RDMA_SSI_I_N(addr, i) (addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
509#define RDMA_SSI_O_N(addr, i) (addr ##_reg - 0x00300000 + (0x40 * i) + 0xc) 509#define RDMA_SSI_O_N(addr, i) (addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)
510 510
511#define RDMA_SSIU_I_N(addr, i, j) (addr ##_reg - 0x00441000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400)) 511#define RDMA_SSIU_I_N(addr, i, j) (addr ##_reg - 0x00441000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
512#define RDMA_SSIU_O_N(addr, i, j) RDMA_SSIU_I_N(addr, i, j) 512#define RDMA_SSIU_O_N(addr, i, j) RDMA_SSIU_I_N(addr, i, j)
513 513
514#define RDMA_SSIU_I_P(addr, i, j) (addr ##_reg - 0x00141000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400)) 514#define RDMA_SSIU_I_P(addr, i, j) (addr ##_reg - 0x00141000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
515#define RDMA_SSIU_O_P(addr, i, j) RDMA_SSIU_I_P(addr, i, j) 515#define RDMA_SSIU_O_P(addr, i, j) RDMA_SSIU_I_P(addr, i, j)
516 516
517#define RDMA_SRC_I_N(addr, i) (addr ##_reg - 0x00500000 + (0x400 * i)) 517#define RDMA_SRC_I_N(addr, i) (addr ##_reg - 0x00500000 + (0x400 * i))
diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
index 54cd431faab7..5529e8eeca46 100644
--- a/sound/soc/sof/debug.c
+++ b/sound/soc/sof/debug.c
@@ -152,8 +152,10 @@ static ssize_t sof_dfsentry_write(struct file *file, const char __user *buffer,
152 */ 152 */
153 dentry = file->f_path.dentry; 153 dentry = file->f_path.dentry;
154 if (strcmp(dentry->d_name.name, "ipc_flood_count") && 154 if (strcmp(dentry->d_name.name, "ipc_flood_count") &&
155 strcmp(dentry->d_name.name, "ipc_flood_duration_ms")) 155 strcmp(dentry->d_name.name, "ipc_flood_duration_ms")) {
156 return -EINVAL; 156 ret = -EINVAL;
157 goto out;
158 }
157 159
158 if (!strcmp(dentry->d_name.name, "ipc_flood_duration_ms")) 160 if (!strcmp(dentry->d_name.name, "ipc_flood_duration_ms"))
159 flood_duration_test = true; 161 flood_duration_test = true;
diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
index 2c7447188402..0c11fceb28a7 100644
--- a/sound/soc/sof/intel/hda-stream.c
+++ b/sound/soc/sof/intel/hda-stream.c
@@ -190,7 +190,7 @@ hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction)
190 * Workaround to address a known issue with host DMA that results 190 * Workaround to address a known issue with host DMA that results
191 * in xruns during pause/release in capture scenarios. 191 * in xruns during pause/release in capture scenarios.
192 */ 192 */
193 if (!IS_ENABLED(SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1)) 193 if (!IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
194 if (stream && direction == SNDRV_PCM_STREAM_CAPTURE) 194 if (stream && direction == SNDRV_PCM_STREAM_CAPTURE)
195 snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, 195 snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
196 HDA_VS_INTEL_EM2, 196 HDA_VS_INTEL_EM2,
@@ -228,7 +228,7 @@ int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag)
228 spin_unlock_irq(&bus->reg_lock); 228 spin_unlock_irq(&bus->reg_lock);
229 229
230 /* Enable DMI L1 entry if there are no capture streams open */ 230 /* Enable DMI L1 entry if there are no capture streams open */
231 if (!IS_ENABLED(SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1)) 231 if (!IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
232 if (!active_capture_stream) 232 if (!active_capture_stream)
233 snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, 233 snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
234 HDA_VS_INTEL_EM2, 234 HDA_VS_INTEL_EM2,
diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
index b2f359d2f7e5..086eeeab8679 100644
--- a/sound/soc/sof/ipc.c
+++ b/sound/soc/sof/ipc.c
@@ -572,8 +572,10 @@ static int sof_set_get_large_ctrl_data(struct snd_sof_dev *sdev,
572 else 572 else
573 err = sof_get_ctrl_copy_params(cdata->type, partdata, cdata, 573 err = sof_get_ctrl_copy_params(cdata->type, partdata, cdata,
574 sparams); 574 sparams);
575 if (err < 0) 575 if (err < 0) {
576 kfree(partdata);
576 return err; 577 return err;
578 }
577 579
578 msg_bytes = sparams->msg_bytes; 580 msg_bytes = sparams->msg_bytes;
579 pl_size = sparams->pl_size; 581 pl_size = sparams->pl_size;
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
index 0aabb3190ddc..4452594c2e17 100644
--- a/sound/soc/sof/topology.c
+++ b/sound/soc/sof/topology.c
@@ -543,15 +543,16 @@ static int sof_control_load_bytes(struct snd_soc_component *scomp,
543 struct soc_bytes_ext *sbe = (struct soc_bytes_ext *)kc->private_value; 543 struct soc_bytes_ext *sbe = (struct soc_bytes_ext *)kc->private_value;
544 int max_size = sbe->max; 544 int max_size = sbe->max;
545 545
546 if (le32_to_cpu(control->priv.size) > max_size) { 546 /* init the get/put bytes data */
547 scontrol->size = sizeof(struct sof_ipc_ctrl_data) +
548 le32_to_cpu(control->priv.size);
549
550 if (scontrol->size > max_size) {
547 dev_err(sdev->dev, "err: bytes data size %d exceeds max %d.\n", 551 dev_err(sdev->dev, "err: bytes data size %d exceeds max %d.\n",
548 control->priv.size, max_size); 552 scontrol->size, max_size);
549 return -EINVAL; 553 return -EINVAL;
550 } 554 }
551 555
552 /* init the get/put bytes data */
553 scontrol->size = sizeof(struct sof_ipc_ctrl_data) +
554 le32_to_cpu(control->priv.size);
555 scontrol->control_data = kzalloc(max_size, GFP_KERNEL); 556 scontrol->control_data = kzalloc(max_size, GFP_KERNEL);
556 cdata = scontrol->control_data; 557 cdata = scontrol->control_data;
557 if (!scontrol->control_data) 558 if (!scontrol->control_data)
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index a4060813bc74..48e629ac2d88 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -1218,6 +1218,16 @@ static int stm32_sai_pcm_process_spdif(struct snd_pcm_substream *substream,
1218 return 0; 1218 return 0;
1219} 1219}
1220 1220
1221/* No support of mmap in S/PDIF mode */
1222static const struct snd_pcm_hardware stm32_sai_pcm_hw_spdif = {
1223 .info = SNDRV_PCM_INFO_INTERLEAVED,
1224 .buffer_bytes_max = 8 * PAGE_SIZE,
1225 .period_bytes_min = 1024,
1226 .period_bytes_max = PAGE_SIZE,
1227 .periods_min = 2,
1228 .periods_max = 8,
1229};
1230
1221static const struct snd_pcm_hardware stm32_sai_pcm_hw = { 1231static const struct snd_pcm_hardware stm32_sai_pcm_hw = {
1222 .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP, 1232 .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP,
1223 .buffer_bytes_max = 8 * PAGE_SIZE, 1233 .buffer_bytes_max = 8 * PAGE_SIZE,
@@ -1270,7 +1280,7 @@ static const struct snd_dmaengine_pcm_config stm32_sai_pcm_config = {
1270}; 1280};
1271 1281
1272static const struct snd_dmaengine_pcm_config stm32_sai_pcm_config_spdif = { 1282static const struct snd_dmaengine_pcm_config stm32_sai_pcm_config_spdif = {
1273 .pcm_hardware = &stm32_sai_pcm_hw, 1283 .pcm_hardware = &stm32_sai_pcm_hw_spdif,
1274 .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config, 1284 .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
1275 .process = stm32_sai_pcm_process_spdif, 1285 .process = stm32_sai_pcm_process_spdif,
1276}; 1286};
diff --git a/sound/soc/ti/sdma-pcm.c b/sound/soc/ti/sdma-pcm.c
index a236350beb10..2b0bc234e1b6 100644
--- a/sound/soc/ti/sdma-pcm.c
+++ b/sound/soc/ti/sdma-pcm.c
@@ -62,7 +62,7 @@ int sdma_pcm_platform_register(struct device *dev,
62 config->chan_names[0] = txdmachan; 62 config->chan_names[0] = txdmachan;
63 config->chan_names[1] = rxdmachan; 63 config->chan_names[1] = rxdmachan;
64 64
65 return devm_snd_dmaengine_pcm_register(dev, config, 0); 65 return devm_snd_dmaengine_pcm_register(dev, config, flags);
66} 66}
67EXPORT_SYMBOL_GPL(sdma_pcm_platform_register); 67EXPORT_SYMBOL_GPL(sdma_pcm_platform_register);
68 68
diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
index 6ecdd1067826..1178d302757e 100644
--- a/tools/gpio/Makefile
+++ b/tools/gpio/Makefile
@@ -3,7 +3,11 @@ include ../scripts/Makefile.include
3 3
4bindir ?= /usr/bin 4bindir ?= /usr/bin
5 5
6ifeq ($(srctree),) 6# This will work when gpio is built in tools env. where srctree
7# isn't set and when invoked from selftests build, where srctree
8# is set to ".". building_out_of_srctree is undefined for in srctree
9# builds
10ifndef building_out_of_srctree
7srctree := $(patsubst %/,%,$(dir $(CURDIR))) 11srctree := $(patsubst %/,%,$(dir $(CURDIR)))
8srctree := $(patsubst %/,%,$(dir $(srctree))) 12srctree := $(patsubst %/,%,$(dir $(srctree)))
9endif 13endif
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index 63e4349a772a..15e458e150bd 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -15,7 +15,9 @@ void test_attr__init(void);
15void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu, 15void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
16 int fd, int group_fd, unsigned long flags); 16 int fd, int group_fd, unsigned long flags);
17 17
18#define HAVE_ATTR_TEST 18#ifndef HAVE_ATTR_TEST
19#define HAVE_ATTR_TEST 1
20#endif
19 21
20static inline int 22static inline int
21sys_perf_event_open(struct perf_event_attr *attr, 23sys_perf_event_open(struct perf_event_attr *attr,
@@ -27,7 +29,7 @@ sys_perf_event_open(struct perf_event_attr *attr,
27 fd = syscall(__NR_perf_event_open, attr, pid, cpu, 29 fd = syscall(__NR_perf_event_open, attr, pid, cpu,
28 group_fd, flags); 30 group_fd, flags);
29 31
30#ifdef HAVE_ATTR_TEST 32#if HAVE_ATTR_TEST
31 if (unlikely(test_attr__enabled)) 33 if (unlikely(test_attr__enabled))
32 test_attr__open(attr, pid, cpu, fd, group_fd, flags); 34 test_attr__open(attr, pid, cpu, fd, group_fd, flags);
33#endif 35#endif
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 679a1d75090c..7b6eaf5e0bda 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1625,7 +1625,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1625 return 0; 1625 return 0;
1626} 1626}
1627 1627
1628static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b) 1628static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1629{ 1629{
1630 struct hists *hists = a->hists; 1630 struct hists *hists = a->hists;
1631 struct perf_hpp_fmt *fmt; 1631 struct perf_hpp_fmt *fmt;
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 15961854ba67..741f040648b5 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -539,10 +539,11 @@ static int perl_stop_script(void)
539 539
540static int perl_generate_script(struct tep_handle *pevent, const char *outfile) 540static int perl_generate_script(struct tep_handle *pevent, const char *outfile)
541{ 541{
542 int i, not_first, count, nr_events;
543 struct tep_event **all_events;
542 struct tep_event *event = NULL; 544 struct tep_event *event = NULL;
543 struct tep_format_field *f; 545 struct tep_format_field *f;
544 char fname[PATH_MAX]; 546 char fname[PATH_MAX];
545 int not_first, count;
546 FILE *ofp; 547 FILE *ofp;
547 548
548 sprintf(fname, "%s.pl", outfile); 549 sprintf(fname, "%s.pl", outfile);
@@ -603,8 +604,11 @@ sub print_backtrace\n\
603}\n\n\ 604}\n\n\
604"); 605");
605 606
607 nr_events = tep_get_events_count(pevent);
608 all_events = tep_list_events(pevent, TEP_EVENT_SORT_ID);
606 609
607 while ((event = trace_find_next_event(pevent, event))) { 610 for (i = 0; all_events && i < nr_events; i++) {
611 event = all_events[i];
608 fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name); 612 fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name);
609 fprintf(ofp, "\tmy ("); 613 fprintf(ofp, "\tmy (");
610 614
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 5d341efc3237..93c03b39cd9c 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -1687,10 +1687,11 @@ static int python_stop_script(void)
1687 1687
1688static int python_generate_script(struct tep_handle *pevent, const char *outfile) 1688static int python_generate_script(struct tep_handle *pevent, const char *outfile)
1689{ 1689{
1690 int i, not_first, count, nr_events;
1691 struct tep_event **all_events;
1690 struct tep_event *event = NULL; 1692 struct tep_event *event = NULL;
1691 struct tep_format_field *f; 1693 struct tep_format_field *f;
1692 char fname[PATH_MAX]; 1694 char fname[PATH_MAX];
1693 int not_first, count;
1694 FILE *ofp; 1695 FILE *ofp;
1695 1696
1696 sprintf(fname, "%s.py", outfile); 1697 sprintf(fname, "%s.py", outfile);
@@ -1735,7 +1736,11 @@ static int python_generate_script(struct tep_handle *pevent, const char *outfile
1735 fprintf(ofp, "def trace_end():\n"); 1736 fprintf(ofp, "def trace_end():\n");
1736 fprintf(ofp, "\tprint(\"in trace_end\")\n\n"); 1737 fprintf(ofp, "\tprint(\"in trace_end\")\n\n");
1737 1738
1738 while ((event = trace_find_next_event(pevent, event))) { 1739 nr_events = tep_get_events_count(pevent);
1740 all_events = tep_list_events(pevent, TEP_EVENT_SORT_ID);
1741
1742 for (i = 0; all_events && i < nr_events; i++) {
1743 event = all_events[i];
1739 fprintf(ofp, "def %s__%s(", event->system, event->name); 1744 fprintf(ofp, "def %s__%s(", event->system, event->name);
1740 fprintf(ofp, "event_name, "); 1745 fprintf(ofp, "event_name, ");
1741 fprintf(ofp, "context, "); 1746 fprintf(ofp, "context, ");
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 5d6bfc70b210..9634f0ae57be 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -173,37 +173,6 @@ int parse_event_file(struct tep_handle *pevent,
173 return tep_parse_event(pevent, buf, size, sys); 173 return tep_parse_event(pevent, buf, size, sys);
174} 174}
175 175
176struct tep_event *trace_find_next_event(struct tep_handle *pevent,
177 struct tep_event *event)
178{
179 static int idx;
180 int events_count;
181 struct tep_event *all_events;
182
183 all_events = tep_get_first_event(pevent);
184 events_count = tep_get_events_count(pevent);
185 if (!pevent || !all_events || events_count < 1)
186 return NULL;
187
188 if (!event) {
189 idx = 0;
190 return all_events;
191 }
192
193 if (idx < events_count && event == (all_events + idx)) {
194 idx++;
195 if (idx == events_count)
196 return NULL;
197 return (all_events + idx);
198 }
199
200 for (idx = 1; idx < events_count; idx++) {
201 if (event == (all_events + (idx - 1)))
202 return (all_events + idx);
203 }
204 return NULL;
205}
206
207struct flag { 176struct flag {
208 const char *name; 177 const char *name;
209 unsigned long long value; 178 unsigned long long value;
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 2e158387b3d7..72fdf2a3577c 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -47,8 +47,6 @@ void parse_saved_cmdline(struct tep_handle *pevent, char *file, unsigned int siz
47 47
48ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe); 48ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe);
49 49
50struct tep_event *trace_find_next_event(struct tep_handle *pevent,
51 struct tep_event *event);
52unsigned long long read_size(struct tep_event *event, void *ptr, int size); 50unsigned long long read_size(struct tep_event *event, void *ptr, int size);
53unsigned long long eval_flag(const char *flag); 51unsigned long long eval_flag(const char *flag);
54 52
diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c
index a320e3844b17..7c6e5b173f33 100644
--- a/tools/testing/selftests/bpf/test_sysctl.c
+++ b/tools/testing/selftests/bpf/test_sysctl.c
@@ -161,9 +161,14 @@ static struct sysctl_test tests[] = {
161 .descr = "ctx:file_pos sysctl:read read ok narrow", 161 .descr = "ctx:file_pos sysctl:read read ok narrow",
162 .insns = { 162 .insns = {
163 /* If (file_pos == X) */ 163 /* If (file_pos == X) */
164#if __BYTE_ORDER == __LITTLE_ENDIAN
164 BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1, 165 BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
165 offsetof(struct bpf_sysctl, file_pos)), 166 offsetof(struct bpf_sysctl, file_pos)),
166 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2), 167#else
168 BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
169 offsetof(struct bpf_sysctl, file_pos) + 3),
170#endif
171 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 4, 2),
167 172
168 /* return ALLOW; */ 173 /* return ALLOW; */
169 BPF_MOV64_IMM(BPF_REG_0, 1), 174 BPF_MOV64_IMM(BPF_REG_0, 1),
@@ -176,6 +181,7 @@ static struct sysctl_test tests[] = {
176 .attach_type = BPF_CGROUP_SYSCTL, 181 .attach_type = BPF_CGROUP_SYSCTL,
177 .sysctl = "kernel/ostype", 182 .sysctl = "kernel/ostype",
178 .open_flags = O_RDONLY, 183 .open_flags = O_RDONLY,
184 .seek = 4,
179 .result = SUCCESS, 185 .result = SUCCESS,
180 }, 186 },
181 { 187 {
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 4c285b6e1db8..1c8f194d6556 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -898,6 +898,114 @@ TEST_F(tls, nonblocking)
898 } 898 }
899} 899}
900 900
901static void
902test_mutliproc(struct __test_metadata *_metadata, struct _test_data_tls *self,
903 bool sendpg, unsigned int n_readers, unsigned int n_writers)
904{
905 const unsigned int n_children = n_readers + n_writers;
906 const size_t data = 6 * 1000 * 1000;
907 const size_t file_sz = data / 100;
908 size_t read_bias, write_bias;
909 int i, fd, child_id;
910 char buf[file_sz];
911 pid_t pid;
912
913 /* Only allow multiples for simplicity */
914 ASSERT_EQ(!(n_readers % n_writers) || !(n_writers % n_readers), true);
915 read_bias = n_writers / n_readers ?: 1;
916 write_bias = n_readers / n_writers ?: 1;
917
918 /* prep a file to send */
919 fd = open("/tmp/", O_TMPFILE | O_RDWR, 0600);
920 ASSERT_GE(fd, 0);
921
922 memset(buf, 0xac, file_sz);
923 ASSERT_EQ(write(fd, buf, file_sz), file_sz);
924
925 /* spawn children */
926 for (child_id = 0; child_id < n_children; child_id++) {
927 pid = fork();
928 ASSERT_NE(pid, -1);
929 if (!pid)
930 break;
931 }
932
933 /* parent waits for all children */
934 if (pid) {
935 for (i = 0; i < n_children; i++) {
936 int status;
937
938 wait(&status);
939 EXPECT_EQ(status, 0);
940 }
941
942 return;
943 }
944
945 /* Split threads for reading and writing */
946 if (child_id < n_readers) {
947 size_t left = data * read_bias;
948 char rb[8001];
949
950 while (left) {
951 int res;
952
953 res = recv(self->cfd, rb,
954 left > sizeof(rb) ? sizeof(rb) : left, 0);
955
956 EXPECT_GE(res, 0);
957 left -= res;
958 }
959 } else {
960 size_t left = data * write_bias;
961
962 while (left) {
963 int res;
964
965 ASSERT_EQ(lseek(fd, 0, SEEK_SET), 0);
966 if (sendpg)
967 res = sendfile(self->fd, fd, NULL,
968 left > file_sz ? file_sz : left);
969 else
970 res = send(self->fd, buf,
971 left > file_sz ? file_sz : left, 0);
972
973 EXPECT_GE(res, 0);
974 left -= res;
975 }
976 }
977}
978
979TEST_F(tls, mutliproc_even)
980{
981 test_mutliproc(_metadata, self, false, 6, 6);
982}
983
984TEST_F(tls, mutliproc_readers)
985{
986 test_mutliproc(_metadata, self, false, 4, 12);
987}
988
989TEST_F(tls, mutliproc_writers)
990{
991 test_mutliproc(_metadata, self, false, 10, 2);
992}
993
994TEST_F(tls, mutliproc_sendpage_even)
995{
996 test_mutliproc(_metadata, self, true, 6, 6);
997}
998
999TEST_F(tls, mutliproc_sendpage_readers)
1000{
1001 test_mutliproc(_metadata, self, true, 4, 12);
1002}
1003
1004TEST_F(tls, mutliproc_sendpage_writers)
1005{
1006 test_mutliproc(_metadata, self, true, 10, 2);
1007}
1008
901TEST_F(tls, control_msg) 1009TEST_F(tls, control_msg)
902{ 1010{
903 if (self->notls) 1011 if (self->notls)
diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c
index cb3fc09645c4..485cf06ef013 100644
--- a/tools/testing/selftests/vm/gup_benchmark.c
+++ b/tools/testing/selftests/vm/gup_benchmark.c
@@ -71,7 +71,7 @@ int main(int argc, char **argv)
71 flags |= MAP_SHARED; 71 flags |= MAP_SHARED;
72 break; 72 break;
73 case 'H': 73 case 'H':
74 flags |= MAP_HUGETLB; 74 flags |= (MAP_HUGETLB | MAP_ANONYMOUS);
75 break; 75 break;
76 default: 76 default:
77 return -1; 77 return -1;