author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-10-08 09:33:21 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-10-08 09:33:21 -0400
commit     ba1cb318dcbfc9754acda9656262aea97ebe77e6 (patch)
tree       770fadb8b2c77da5d4dd9d33535b2d7adcdb02fd
parent     8f523d6db7ed69f69720267af170c0719023f373 (diff)
parent     0238df646e6224016a45505d2c111a24669ebe21 (diff)

Merge 4.19-rc7 into char-misc-next

We want the fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--  Documentation/driver-api/fpga/fpga-mgr.rst | 5
-rw-r--r--  Documentation/fb/uvesafb.txt | 5
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 2
-rw-r--r--  MAINTAINERS | 14
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts | 2
-rw-r--r--  arch/arm/boot/dts/bcm63138.dtsi | 14
-rw-r--r--  arch/arm/boot/dts/stm32mp157c.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/sun8i-r40.dtsi | 3
-rw-r--r--  arch/arm/mm/ioremap.c | 2
-rw-r--r--  arch/arm/tools/syscall.tbl | 1
-rw-r--r--  arch/arm64/kvm/guest.c | 55
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 50
-rw-r--r--  arch/powerpc/kernel/process.c | 10
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_radix.c | 10
-rw-r--r--  arch/powerpc/lib/code-patching.c | 20
-rw-r--r--  arch/powerpc/mm/numa.c | 5
-rw-r--r--  arch/riscv/kernel/setup.c | 2
-rw-r--r--  arch/x86/entry/vdso/Makefile | 16
-rw-r--r--  arch/x86/entry/vdso/vclock_gettime.c | 26
-rw-r--r--  arch/x86/events/amd/uncore.c | 10
-rw-r--r--  arch/x86/events/intel/uncore_snbep.c | 14
-rw-r--r--  arch/x86/include/asm/perf_event.h | 8
-rw-r--r--  arch/x86/include/asm/uv/uv.h | 6
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 2
-rw-r--r--  arch/x86/kernel/tsc.c | 4
-rw-r--r--  arch/x86/kvm/mmu.c | 24
-rw-r--r--  arch/x86/kvm/vmx.c | 137
-rw-r--r--  arch/x86/kvm/x86.c | 2
-rw-r--r--  drivers/base/firmware_loader/main.c | 7
-rw-r--r--  drivers/base/power/main.c | 5
-rw-r--r--  drivers/crypto/caam/caamalg.c | 8
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 32
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h | 2
-rw-r--r--  drivers/crypto/mxs-dcp.c | 53
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 6
-rw-r--r--  drivers/fpga/dfl-fme-region.c | 4
-rw-r--r--  drivers/fpga/fpga-bridge.c | 2
-rw-r--r--  drivers/fpga/of-fpga-region.c | 3
-rw-r--r--  drivers/gpio/gpiolib.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 37
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10
-rw-r--r--  drivers/gpu/drm/drm_client.c | 35
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_lease.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.h | 34
-rw-r--r--  drivers/gpu/drm/i2c/tda9950.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 88
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 1
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 27
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/hw-ish.h | 1
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/pci-ish.c | 1
-rw-r--r--  drivers/hv/connection.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-designware-master.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-isch.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-qcom-geni.c | 22
-rw-r--r--  drivers/i2c/busses/i2c-scmi.c | 1
-rw-r--r--  drivers/iommu/amd_iommu.c | 2
-rw-r--r--  drivers/md/dm-cache-metadata.c | 4
-rw-r--r--  drivers/md/dm-cache-target.c | 9
-rw-r--r--  drivers/md/dm-mpath.c | 14
-rw-r--r--  drivers/md/dm-raid.c | 2
-rw-r--r--  drivers/md/dm-thin-metadata.c | 6
-rw-r--r--  drivers/media/v4l2-core/v4l2-event.c | 38
-rw-r--r--  drivers/media/v4l2-core/v4l2-fh.c | 2
-rw-r--r--  drivers/mmc/core/host.c | 2
-rw-r--r--  drivers/mmc/core/slot-gpio.c | 2
-rw-r--r--  drivers/mmc/host/renesas_sdhi_sys_dmac.c | 3
-rw-r--r--  drivers/net/bonding/bond_main.c | 65
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 4
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 22
-rw-r--r--  drivers/net/ethernet/amd/declance.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 28
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 27
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 6
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 17
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 48
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 20
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 14
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 12
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 65
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 17
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 23
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 45
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 7
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 32
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 26
-rw-r--r--  drivers/net/ethernet/sfc/falcon/efx.c | 26
-rw-r--r--  drivers/net/hamradio/yam.c | 4
-rw-r--r--  drivers/net/ieee802154/adf7242.c | 3
-rw-r--r--  drivers/net/ieee802154/ca8210.c | 6
-rw-r--r--  drivers/net/ieee802154/mcr20a.c | 8
-rw-r--r--  drivers/net/phy/phy_device.c | 12
-rw-r--r--  drivers/net/phy/phylink.c | 48
-rw-r--r--  drivers/net/phy/sfp.c | 7
-rw-r--r--  drivers/net/team/team.c | 6
-rw-r--r--  drivers/net/tun.c | 37
-rw-r--r--  drivers/net/usb/asix_common.c | 3
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 3
-rw-r--r--  drivers/net/usb/lan78xx.c | 17
-rw-r--r--  drivers/net/usb/r8152.c | 3
-rw-r--r--  drivers/net/usb/smsc75xx.c | 4
-rw-r--r--  drivers/net/usb/smsc95xx.c | 3
-rw-r--r--  drivers/net/usb/sr9800.c | 3
-rw-r--r--  drivers/net/virtio_net.c | 14
-rw-r--r--  drivers/net/vxlan.c | 3
-rw-r--r--  drivers/net/wimax/i2400m/control.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/b43/dma.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/1000.c | 1
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 36
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/main.c | 3
-rw-r--r--  drivers/net/xen-netback/common.h | 3
-rw-r--r--  drivers/net/xen-netback/hash.c | 51
-rw-r--r--  drivers/net/xen-netback/interface.c | 3
-rw-r--r--  drivers/pci/controller/pci-mvebu.c | 52
-rw-r--r--  drivers/pci/pci.c | 27
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 2
-rw-r--r--  drivers/s390/net/qeth_core_mpc.c | 33
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h | 4
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 1
-rw-r--r--  drivers/soc/fsl/qbman/qman.c | 3
-rw-r--r--  drivers/soc/fsl/qe/ucc.c | 2
-rw-r--r--  drivers/thunderbolt/icm.c | 49
-rw-r--r--  drivers/thunderbolt/nhi.c | 2
-rw-r--r--  drivers/tty/serial/8250/8250_dw.c | 4
-rw-r--r--  drivers/tty/serial/sh-sci.c | 56
-rw-r--r--  drivers/usb/class/cdc-acm.c | 6
-rw-r--r--  drivers/usb/host/xhci-mtk.c | 4
-rw-r--r--  drivers/usb/host/xhci-pci.c | 2
-rw-r--r--  drivers/usb/serial/option.c | 15
-rw-r--r--  drivers/usb/serial/usb-serial-simple.c | 3
-rw-r--r--  drivers/video/fbdev/efifb.c | 6
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c | 5
-rw-r--r--  drivers/video/fbdev/pxa168fb.c | 6
-rw-r--r--  drivers/video/fbdev/stifb.c | 2
-rw-r--r--  fs/cifs/cifsglob.h | 1
-rw-r--r--  fs/cifs/connect.c | 13
-rw-r--r--  fs/cifs/smb2ops.c | 2
-rw-r--r--  fs/cifs/transport.c | 21
-rw-r--r--  fs/ioctl.c | 2
-rw-r--r--  fs/iomap.c | 2
-rw-r--r--  fs/nfsd/vfs.c | 3
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 4
-rw-r--r--  fs/ocfs2/refcounttree.c | 16
-rw-r--r--  fs/overlayfs/copy_up.c | 2
-rw-r--r--  fs/overlayfs/file.c | 2
-rw-r--r--  fs/overlayfs/inode.c | 2
-rw-r--r--  fs/overlayfs/namei.c | 2
-rw-r--r--  fs/overlayfs/overlayfs.h | 4
-rw-r--r--  fs/overlayfs/util.c | 3
-rw-r--r--  fs/proc/base.c | 14
-rw-r--r--  fs/pstore/ram.c | 29
-rw-r--r--  fs/read_write.c | 17
-rw-r--r--  fs/xattr.c | 24
-rw-r--r--  fs/xfs/libxfs/xfs_attr.c | 28
-rw-r--r--  fs/xfs/libxfs/xfs_attr_remote.c | 10
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 24
-rw-r--r--  fs/xfs/libxfs/xfs_format.h | 2
-rw-r--r--  fs/xfs/libxfs/xfs_inode_buf.c | 30
-rw-r--r--  fs/xfs/scrub/alloc.c | 1
-rw-r--r--  fs/xfs/scrub/inode.c | 4
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 20
-rw-r--r--  fs/xfs/xfs_buf_item.c | 119
-rw-r--r--  fs/xfs/xfs_buf_item.h | 1
-rw-r--r--  fs/xfs/xfs_inode.c | 10
-rw-r--r--  fs/xfs/xfs_iops.c | 12
-rw-r--r--  fs/xfs/xfs_log_recover.c | 10
-rw-r--r--  fs/xfs/xfs_reflink.c | 137
-rw-r--r--  fs/xfs/xfs_trace.h | 1
-rw-r--r--  fs/xfs/xfs_trans.c | 10
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 99
-rw-r--r--  include/drm/drm_client.h | 5
-rw-r--r--  include/linux/fpga/fpga-mgr.h | 20
-rw-r--r--  include/linux/fs.h | 17
-rw-r--r--  include/linux/hugetlb.h | 14
-rw-r--r--  include/linux/mlx5/transobj.h | 2
-rw-r--r--  include/linux/mm.h | 6
-rw-r--r--  include/linux/mmzone.h | 6
-rw-r--r--  include/linux/netdevice.h | 3
-rw-r--r--  include/linux/netfilter.h | 2
-rw-r--r--  include/linux/serial_sci.h | 1
-rw-r--r--  include/linux/virtio_net.h | 18
-rw-r--r--  include/media/v4l2-fh.h | 4
-rw-r--r--  include/net/bonding.h | 7
-rw-r--r--  include/net/cfg80211.h | 2
-rw-r--r--  include/net/inet_sock.h | 6
-rw-r--r--  include/net/netlink.h | 2
-rw-r--r--  include/trace/events/migrate.h | 27
-rw-r--r--  include/trace/events/rxrpc.h | 4
-rw-r--r--  include/uapi/asm-generic/hugetlb_encode.h | 2
-rw-r--r--  include/uapi/linux/memfd.h | 2
-rw-r--r--  include/uapi/linux/mman.h | 2
-rw-r--r--  include/uapi/linux/shm.h | 2
-rw-r--r--  ipc/shm.c | 2
-rw-r--r--  kernel/bpf/local_storage.c | 5
-rw-r--r--  kernel/bpf/verifier.c | 10
-rw-r--r--  kernel/events/core.c | 11
-rw-r--r--  kernel/locking/test-ww_mutex.c | 10
-rw-r--r--  kernel/sched/core.c | 2
-rw-r--r--  kernel/sched/deadline.c | 2
-rw-r--r--  kernel/sched/fair.c | 104
-rw-r--r--  kernel/sched/sched.h | 3
-rw-r--r--  lib/xz/xz_crc32.c | 1
-rw-r--r--  lib/xz/xz_private.h | 4
-rw-r--r--  mm/gup_benchmark.c | 3
-rw-r--r--  mm/huge_memory.c | 2
-rw-r--r--  mm/hugetlb.c | 90
-rw-r--r--  mm/madvise.c | 2
-rw-r--r--  mm/migrate.c | 62
-rw-r--r--  mm/page_alloc.c | 2
-rw-r--r--  mm/rmap.c | 42
-rw-r--r--  mm/vmscan.c | 7
-rw-r--r--  mm/vmstat.c | 4
-rw-r--r--  net/bluetooth/mgmt.c | 7
-rw-r--r--  net/bluetooth/smp.c | 29
-rw-r--r--  net/bluetooth/smp.h | 3
-rw-r--r--  net/bpfilter/bpfilter_kern.c | 4
-rw-r--r--  net/bridge/br_netfilter_hooks.c | 3
-rw-r--r--  net/core/ethtool.c | 9
-rw-r--r--  net/core/netpoll.c | 22
-rw-r--r--  net/core/rtnetlink.c | 41
-rw-r--r--  net/dccp/input.c | 4
-rw-r--r--  net/dccp/ipv4.c | 4
-rw-r--r--  net/ipv4/inet_connection_sock.c | 5
-rw-r--r--  net/ipv4/ip_sockglue.c | 3
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 6
-rw-r--r--  net/ipv4/tcp_input.c | 4
-rw-r--r--  net/ipv4/tcp_ipv4.c | 4
-rw-r--r--  net/ipv4/xfrm4_input.c | 1
-rw-r--r--  net/ipv4/xfrm4_mode_transport.c | 4
-rw-r--r--  net/ipv6/raw.c | 29
-rw-r--r--  net/ipv6/route.c | 5
-rw-r--r--  net/ipv6/xfrm6_input.c | 1
-rw-r--r--  net/ipv6/xfrm6_mode_transport.c | 4
-rw-r--r--  net/ipv6/xfrm6_output.c | 2
-rw-r--r--  net/mac80211/cfg.c | 2
-rw-r--r--  net/mac80211/iface.c | 3
-rw-r--r--  net/mac80211/mesh.h | 3
-rw-r--r--  net/mac80211/mesh_hwmp.c | 9
-rw-r--r--  net/mac80211/status.c | 11
-rw-r--r--  net/mac80211/tdls.c | 8
-rw-r--r--  net/mac80211/tx.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 4
-rw-r--r--  net/netfilter/nft_osf.c | 2
-rw-r--r--  net/netfilter/nft_set_rbtree.c | 28
-rw-r--r--  net/netfilter/xt_socket.c | 4
-rw-r--r--  net/openvswitch/conntrack.c | 10
-rw-r--r--  net/packet/af_packet.c | 11
-rw-r--r--  net/rxrpc/ar-internal.h | 36
-rw-r--r--  net/rxrpc/call_accept.c | 45
-rw-r--r--  net/rxrpc/call_object.c | 2
-rw-r--r--  net/rxrpc/conn_client.c | 4
-rw-r--r--  net/rxrpc/conn_object.c | 14
-rw-r--r--  net/rxrpc/input.c | 90
-rw-r--r--  net/rxrpc/local_object.c | 32
-rw-r--r--  net/rxrpc/output.c | 54
-rw-r--r--  net/rxrpc/peer_event.c | 46
-rw-r--r--  net/rxrpc/peer_object.c | 52
-rw-r--r--  net/rxrpc/protocol.h | 15
-rw-r--r--  net/sched/act_ipt.c | 2
-rw-r--r--  net/sched/sch_api.c | 24
-rw-r--r--  net/sctp/outqueue.c | 2
-rw-r--r--  net/tipc/bearer.c | 12
-rw-r--r--  net/tipc/link.c | 45
-rw-r--r--  net/tipc/link.h | 3
-rw-r--r--  net/tipc/node.c | 14
-rw-r--r--  net/tipc/socket.c | 4
-rw-r--r--  net/wireless/nl80211.c | 20
-rw-r--r--  net/wireless/reg.c | 8
-rw-r--r--  net/wireless/scan.c | 58
-rw-r--r--  net/wireless/wext-compat.c | 14
-rw-r--r--  net/xfrm/xfrm_input.c | 1
-rw-r--r--  net/xfrm/xfrm_output.c | 4
-rw-r--r--  net/xfrm/xfrm_policy.c | 4
-rw-r--r--  net/xfrm/xfrm_user.c | 15
-rw-r--r--  sound/hda/hdac_i915.c | 4
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
-rw-r--r--  tools/hv/hv_fcopy_daemon.c | 1
-rwxr-xr-x  tools/kvm/kvm_stat/kvm_stat | 2
-rw-r--r--  tools/testing/selftests/rseq/param_test.c | 19
-rw-r--r--  tools/testing/selftests/x86/test_vdso.c | 172
314 files changed, 2759 insertions, 1789 deletions
diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst
index 4b3825da48d9..82b6dbbd31cd 100644
--- a/Documentation/driver-api/fpga/fpga-mgr.rst
+++ b/Documentation/driver-api/fpga/fpga-mgr.rst
@@ -184,6 +184,11 @@ API for implementing a new FPGA Manager driver
 API for programming an FPGA
 ---------------------------
 
+FPGA Manager flags
+
+.. kernel-doc:: include/linux/fpga/fpga-mgr.h
+   :doc: FPGA Manager flags
+
 .. kernel-doc:: include/linux/fpga/fpga-mgr.h
    :functions: fpga_image_info
 
diff --git a/Documentation/fb/uvesafb.txt b/Documentation/fb/uvesafb.txt
index f6362d88763b..aa924196c366 100644
--- a/Documentation/fb/uvesafb.txt
+++ b/Documentation/fb/uvesafb.txt
@@ -15,7 +15,8 @@ than x86. Check the v86d documentation for a list of currently supported
 arches.
 
 v86d source code can be downloaded from the following website:
-  http://dev.gentoo.org/~spock/projects/uvesafb
+
+  https://github.com/mjanusz/v86d
 
 Please refer to the v86d documentation for detailed configuration and
 installation instructions.
@@ -177,7 +178,7 @@ from the Video BIOS if you set pixclock to 0 in fb_var_screeninfo.
 
 --
  Michal Januszewski <spock@gentoo.org>
- Last updated: 2009-03-30
+ Last updated: 2017-10-10
 
 Documentation of the uvesafb options is loosely based on vesafb.txt.
 
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 8313a636dd53..960de8fe3f40 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -425,7 +425,7 @@ tcp_mtu_probing - INTEGER
 	1 - Disabled by default, enabled when an ICMP black hole detected
 	2 - Always enabled, use initial MSS of tcp_base_mss.
 
-tcp_probe_interval - INTEGER
+tcp_probe_interval - UNSIGNED INTEGER
 	Controls how often to start TCP Packetization-Layer Path MTU
 	Discovery reprobe. The default is reprobing every 10 minutes as
 	per RFC4821.
diff --git a/MAINTAINERS b/MAINTAINERS
index 523c63bd12f1..0ad541124e7d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -324,7 +324,6 @@ F: Documentation/ABI/testing/sysfs-bus-acpi
 F:	Documentation/ABI/testing/configfs-acpi
 F:	drivers/pci/*acpi*
 F:	drivers/pci/*/*acpi*
-F:	drivers/pci/*/*/*acpi*
 F:	tools/power/acpi/
 
 ACPI APEI
@@ -1251,7 +1250,7 @@ N: meson
 
 ARM/Annapurna Labs ALPINE ARCHITECTURE
 M:	Tsahee Zidenberg <tsahee@annapurnalabs.com>
-M:	Antoine Tenart <antoine.tenart@free-electrons.com>
+M:	Antoine Tenart <antoine.tenart@bootlin.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-alpine/
@@ -2956,7 +2955,6 @@ F: include/linux/bcm963xx_tag.h
 
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
 M:	Rasesh Mody <rasesh.mody@cavium.com>
-M:	Harish Patil <harish.patil@cavium.com>
 M:	Dept-GELinuxNICDev@cavium.com
 L:	netdev@vger.kernel.org
 S:	Supported
@@ -2977,6 +2975,7 @@ F: drivers/scsi/bnx2i/
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
 M:	Ariel Elior <ariel.elior@cavium.com>
+M:	Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
 M:	everest-linux-l2@cavium.com
 L:	netdev@vger.kernel.org
 S:	Supported
@@ -5470,7 +5469,8 @@ S: Odd Fixes
 F:	drivers/net/ethernet/agere/
 
 ETHERNET BRIDGE
-M:	Stephen Hemminger <stephen@networkplumber.org>
+M:	Roopa Prabhu <roopa@cumulusnetworks.com>
+M:	Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
 L:	bridge@lists.linux-foundation.org (moderated for non-subscribers)
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net:Bridge
@@ -8598,7 +8598,6 @@ F: include/linux/spinlock*.h
 F:	arch/*/include/asm/spinlock*.h
 F:	include/linux/rwlock*.h
 F:	include/linux/mutex*.h
-F:	arch/*/include/asm/mutex*.h
 F:	include/linux/rwsem*.h
 F:	arch/*/include/asm/rwsem.h
 F:	include/linux/seqlock.h
@@ -11979,7 +11978,7 @@ F: Documentation/scsi/LICENSE.qla4xxx
 F:	drivers/scsi/qla4xxx/
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:	Harish Patil <harish.patil@cavium.com>
+M:	Shahed Shaikh <Shahed.Shaikh@cavium.com>
 M:	Manish Chopra <manish.chopra@cavium.com>
 M:	Dept-GELinuxNICDev@cavium.com
 L:	netdev@vger.kernel.org
@@ -11987,7 +11986,6 @@ S: Supported
 F:	drivers/net/ethernet/qlogic/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M:	Harish Patil <harish.patil@cavium.com>
 M:	Manish Chopra <manish.chopra@cavium.com>
 M:	Dept-GELinuxNICDev@cavium.com
 L:	netdev@vger.kernel.org
@@ -15395,7 +15393,7 @@ S: Maintained
 UVESAFB DRIVER
 M:	Michal Januszewski <spock@gentoo.org>
 L:	linux-fbdev@vger.kernel.org
-W:	http://dev.gentoo.org/~spock/projects/uvesafb/
+W:	https://github.com/mjanusz/v86d
 S:	Maintained
 F:	Documentation/fb/uvesafb.txt
 F:	drivers/video/fbdev/uvesafb.*
diff --git a/Makefile b/Makefile
index 6c3da3e10f07..9b2df076885a 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
index b10dccd0958f..3b1baa8605a7 100644
--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
@@ -11,6 +11,7 @@
 #include "sama5d2-pinfunc.h"
 #include <dt-bindings/mfd/atmel-flexcom.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/at91.h>
 
 / {
 	model = "Atmel SAMA5D2 PTC EK";
@@ -299,6 +300,7 @@
 				 <PIN_PA30__NWE_NANDWE>,
 				 <PIN_PB2__NRD_NANDOE>;
 				bias-pull-up;
+				atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
 			};
 
 			ale_cle_rdy_cs {
diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
index 43ee992ccdcf..6df61518776f 100644
--- a/arch/arm/boot/dts/bcm63138.dtsi
+++ b/arch/arm/boot/dts/bcm63138.dtsi
@@ -106,21 +106,23 @@
 		global_timer: timer@1e200 {
 			compatible = "arm,cortex-a9-global-timer";
 			reg = <0x1e200 0x20>;
-			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
 			clocks = <&axi_clk>;
 		};
 
 		local_timer: local-timer@1e600 {
 			compatible = "arm,cortex-a9-twd-timer";
 			reg = <0x1e600 0x20>;
-			interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+						  IRQ_TYPE_EDGE_RISING)>;
 			clocks = <&axi_clk>;
 		};
 
 		twd_watchdog: watchdog@1e620 {
 			compatible = "arm,cortex-a9-twd-wdt";
 			reg = <0x1e620 0x20>;
-			interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
+						  IRQ_TYPE_LEVEL_HIGH)>;
 		};
 
 		armpll: armpll {
@@ -158,7 +160,7 @@
 		serial0: serial@600 {
 			compatible = "brcm,bcm6345-uart";
 			reg = <0x600 0x1b>;
-			interrupts = <GIC_SPI 32 0>;
+			interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&periph_clk>;
 			clock-names = "periph";
 			status = "disabled";
@@ -167,7 +169,7 @@
 		serial1: serial@620 {
 			compatible = "brcm,bcm6345-uart";
 			reg = <0x620 0x1b>;
-			interrupts = <GIC_SPI 33 0>;
+			interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&periph_clk>;
 			clock-names = "periph";
 			status = "disabled";
@@ -180,7 +182,7 @@
 			reg = <0x2000 0x600>, <0xf0 0x10>;
 			reg-names = "nand", "nand-int-base";
 			status = "disabled";
-			interrupts = <GIC_SPI 38 0>;
+			interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "nand";
 		};
 
diff --git a/arch/arm/boot/dts/stm32mp157c.dtsi b/arch/arm/boot/dts/stm32mp157c.dtsi
index 661be948ab74..185541a5b69f 100644
--- a/arch/arm/boot/dts/stm32mp157c.dtsi
+++ b/arch/arm/boot/dts/stm32mp157c.dtsi
@@ -1078,8 +1078,8 @@
 			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&rcc SPI6_K>;
 			resets = <&rcc SPI6_R>;
-			dmas = <&mdma1 34 0x0 0x40008 0x0 0x0 0>,
-			       <&mdma1 35 0x0 0x40002 0x0 0x0 0>;
+			dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
+			       <&mdma1 35 0x0 0x40002 0x0 0x0>;
 			dma-names = "rx", "tx";
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi
index ffd9f00f74a4..5f547c161baf 100644
--- a/arch/arm/boot/dts/sun8i-r40.dtsi
+++ b/arch/arm/boot/dts/sun8i-r40.dtsi
@@ -800,8 +800,7 @@
 		};
 
 		hdmi_phy: hdmi-phy@1ef0000 {
-			compatible = "allwinner,sun8i-r40-hdmi-phy",
-				     "allwinner,sun50i-a64-hdmi-phy";
+			compatible = "allwinner,sun8i-r40-hdmi-phy";
 			reg = <0x01ef0000 0x10000>;
 			clocks = <&ccu CLK_BUS_HDMI1>, <&ccu CLK_HDMI_SLOW>,
 				 <&ccu 7>, <&ccu 16>;
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index fc91205ff46c..5bf9443cfbaa 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -473,7 +473,7 @@ void pci_ioremap_set_mem_type(int mem_type)
 
 int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
 {
-	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+	BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
 
 	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
 				  PCI_IO_VIRT_BASE + offset + SZ_64K,
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index fbc74b5fa3ed..8edf93b4490f 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -413,3 +413,4 @@
 396	common	pkey_free		sys_pkey_free
 397	common	statx			sys_statx
 398	common	rseq			sys_rseq
+399	common	io_pgetevents		sys_io_pgetevents
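
The new table row gives 32-bit ARM an io_pgetevents entry point at syscall number 399. Before a libc wrapper exists, user space can reach it through the generic syscall(2) stub; a minimal sketch under stated assumptions (the __NR_io_pgetevents fallback define and the NULL final sigset argument are illustrative, not part of the patch):

```c
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>
#include <time.h>

#ifndef __NR_io_pgetevents
#define __NR_io_pgetevents 399	/* assumption: matches the arm32 table row above */
#endif

static long io_pgetevents_raw(aio_context_t ctx, long min_nr, long nr,
			      struct io_event *events,
			      struct timespec *timeout)
{
	/* The last argument is the optional aio sigset; passing NULL
	 * keeps the caller's signal mask, enough for a smoke test.
	 * Context setup via io_setup() is elided. */
	return syscall(__NR_io_pgetevents, ctx, min_nr, nr, events,
		       timeout, NULL);
}
```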
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 07256b08226c..a6c9fbaeaefc 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id)
 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+	u64 off = core_reg_offset_from_id(reg->id);
+	int size;
+
+	switch (off) {
+	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
+	case KVM_REG_ARM_CORE_REG(regs.sp):
+	case KVM_REG_ARM_CORE_REG(regs.pc):
+	case KVM_REG_ARM_CORE_REG(regs.pstate):
+	case KVM_REG_ARM_CORE_REG(sp_el1):
+	case KVM_REG_ARM_CORE_REG(elr_el1):
+	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+		size = sizeof(__u64);
+		break;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+		size = sizeof(__uint128_t);
+		break;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+		size = sizeof(__u32);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (KVM_REG_SIZE(reg->id) == size &&
+	    IS_ALIGNED(off, size / sizeof(__u32)))
+		return 0;
+
+	return -EINVAL;
+}
+
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	/*
@@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
+	if (validate_core_offset(reg))
+		return -EINVAL;
+
 	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;
 
@@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 		return -ENOENT;
 
+	if (validate_core_offset(reg))
+		return -EINVAL;
+
 	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
 		return -EINVAL;
 
@@ -107,17 +152,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	}
 
 	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
-		u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
+		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
 		switch (mode) {
 		case PSR_AA32_MODE_USR:
+			if (!system_supports_32bit_el0())
+				return -EINVAL;
+			break;
 		case PSR_AA32_MODE_FIQ:
 		case PSR_AA32_MODE_IRQ:
 		case PSR_AA32_MODE_SVC:
 		case PSR_AA32_MODE_ABT:
 		case PSR_AA32_MODE_UND:
+			if (!vcpu_el1_is_32bit(vcpu))
+				return -EINVAL;
+			break;
 		case PSR_MODE_EL0t:
 		case PSR_MODE_EL1t:
 		case PSR_MODE_EL1h:
+			if (vcpu_el1_is_32bit(vcpu))
+				return -EINVAL;
 			break;
 		default:
 			err = -EINVAL;
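
The effect of validate_core_offset() above is visible from user space: a KVM_GET_ONE_REG whose declared size disagrees with the register's architectural width now fails cleanly instead of reading past the intended field. A minimal sketch, assuming an already-created arm64 vcpu fd (the probe function name is ours, for illustration):

```c
#include <linux/kvm.h>	/* pulls in asm/kvm.h for KVM_REG_ARM_CORE_REG */
#include <sys/ioctl.h>
#include <stdint.h>

/* X0 is a 64-bit core register, so requesting it with a 32-bit size
 * id is exactly the mismatch the new check rejects. */
static int probe_bad_core_reg(int vcpu_fd)
{
	uint32_t val;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE |
			KVM_REG_ARM_CORE_REG(regs.regs[0]),
		.addr = (uintptr_t)&val,
	};

	/* Expected with the fix applied: -1 with errno == EINVAL. */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
```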
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 192b3ba07075..f58ea503ad01 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -117,11 +117,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,
 
 		/*
 		 * If HW_AFDBM is enabled, then the HW could turn on
-		 * the dirty bit for any page in the set, so check
-		 * them all.  All hugetlb entries are already young.
+		 * the dirty or accessed bit for any page in the set,
+		 * so check them all.
 		 */
 		if (pte_dirty(pte))
 			orig_pte = pte_mkdirty(orig_pte);
+
+		if (pte_young(pte))
+			orig_pte = pte_mkyoung(orig_pte);
 	}
 
 	if (valid) {
@@ -320,11 +323,40 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 	return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
 }
 
+/*
+ * huge_ptep_set_access_flags will update access flags (dirty, accessed)
+ * and write permission.
+ *
+ * For a contiguous huge pte range we need to check whether or not write
+ * permission has to change only on the first pte in the set. Then for
+ * all the contiguous ptes we need to check whether or not there is a
+ * discrepancy between dirty or young.
+ */
+static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
+{
+	int i;
+
+	if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
+		return 1;
+
+	for (i = 0; i < ncontig; i++) {
+		pte_t orig_pte = huge_ptep_get(ptep + i);
+
+		if (pte_dirty(pte) != pte_dirty(orig_pte))
+			return 1;
+
+		if (pte_young(pte) != pte_young(orig_pte))
+			return 1;
+	}
+
+	return 0;
+}
+
 int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 			       unsigned long addr, pte_t *ptep,
 			       pte_t pte, int dirty)
 {
-	int ncontig, i, changed = 0;
+	int ncontig, i;
 	size_t pgsize = 0;
 	unsigned long pfn = pte_pfn(pte), dpfn;
 	pgprot_t hugeprot;
@@ -336,19 +368,23 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
 	dpfn = pgsize >> PAGE_SHIFT;
 
+	if (!__cont_access_flags_changed(ptep, pte, ncontig))
+		return 0;
+
 	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
-	if (!pte_same(orig_pte, pte))
-		changed = 1;
 
-	/* Make sure we don't lose the dirty state */
+	/* Make sure we don't lose the dirty or young state */
 	if (pte_dirty(orig_pte))
 		pte = pte_mkdirty(pte);
 
+	if (pte_young(orig_pte))
+		pte = pte_mkyoung(pte);
+
 	hugeprot = pte_pgprot(pte);
 	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
 		set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
 
-	return changed;
+	return 1;
 }
 
 void huge_ptep_set_wrprotect(struct mm_struct *mm,
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 913c5725cdb2..bb6ac471a784 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1306,6 +1306,16 @@ void show_user_instructions(struct pt_regs *regs)
 
 	pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int));
 
+	/*
+	 * Make sure the NIP points at userspace, not kernel text/data or
+	 * elsewhere.
+	 */
+	if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) {
+		pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
+			current->comm, current->pid);
+		return;
+	}
+
 	pr_info("%s[%d]: code: ", current->comm, current->pid);
 
 	for (i = 0; i < instructions_to_print; i++) {
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 933c574e1cf7..998f8d089ac7 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -646,6 +646,16 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 */
 	local_irq_disable();
 	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+	/*
+	 * If the PTE disappeared temporarily due to a THP
+	 * collapse, just return and let the guest try again.
+	 */
+	if (!ptep) {
+		local_irq_enable();
+		if (page)
+			put_page(page);
+		return RESUME_GUEST;
+	}
 	pte = *ptep;
 	local_irq_enable();
 
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 6ae2777c220d..5ffee298745f 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -28,12 +28,6 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
 {
 	int err;
 
-	/* Make sure we aren't patching a freed init section */
-	if (init_mem_is_free && init_section_contains(exec_addr, 4)) {
-		pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr);
-		return 0;
-	}
-
 	__put_user_size(instr, patch_addr, 4, err);
 	if (err)
 		return err;
@@ -148,7 +142,7 @@ static inline int unmap_patch_area(unsigned long addr)
 	return 0;
 }
 
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
 {
 	int err;
 	unsigned int *patch_addr = NULL;
@@ -188,12 +182,22 @@ out:
 }
 #else /* !CONFIG_STRICT_KERNEL_RWX */
 
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
 {
 	return raw_patch_instruction(addr, instr);
 }
 
 #endif /* CONFIG_STRICT_KERNEL_RWX */
+
+int patch_instruction(unsigned int *addr, unsigned int instr)
+{
+	/* Make sure we aren't patching a freed init section */
+	if (init_mem_is_free && init_section_contains(addr, 4)) {
+		pr_debug("Skipping init section patching addr: 0x%px\n", addr);
+		return 0;
+	}
+	return do_patch_instruction(addr, instr);
+}
 NOKPROBE_SYMBOL(patch_instruction);
 
 int patch_branch(unsigned int *addr, unsigned long target, int flags)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 59d07bd5374a..055b211b7126 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1217,9 +1217,10 @@ int find_and_online_cpu_nid(int cpu)
 	 * Need to ensure that NODE_DATA is initialized for a node from
 	 * available memory (see memblock_alloc_try_nid). If unable to
 	 * init the node, then default to nearest node that has memory
-	 * installed.
+	 * installed. Skip onlining a node if the subsystems are not
+	 * yet initialized.
 	 */
-	if (try_online_node(new_nid))
+	if (!topology_inited || try_online_node(new_nid))
 		new_nid = first_online_node;
 #else
 	/*
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index aee603123030..b2d26d9d8489 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -186,7 +186,7 @@ static void __init setup_bootmem(void)
 	BUG_ON(mem_size == 0);
 
 	set_max_mapnr(PFN_DOWN(mem_size));
-	max_low_pfn = pfn_base + PFN_DOWN(mem_size);
+	max_low_pfn = memblock_end_of_DRAM();
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	setup_initrd();
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index fa3f439f0a92..141d415a8c80 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -68,7 +68,13 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
        -fno-omit-frame-pointer -foptimize-sibling-calls \
-       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
+       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+  CFL += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
 
 $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
 
@@ -138,7 +144,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+  KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
+
 $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 
 $(obj)/vdso32.so.dbg: FORCE \
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index f19856d95c60..e48ca3afa091 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -43,8 +43,9 @@ extern u8 hvclock_page
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 	long ret;
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+	asm ("syscall" : "=a" (ret), "=m" (*ts) :
+	     "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+	     "memory", "rcx", "r11");
 	return ret;
 }
 
@@ -52,8 +53,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
 	long ret;
 
-	asm("syscall" : "=a" (ret) :
-	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+	asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
+	     "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
+	     "memory", "rcx", "r11");
 	return ret;
 }
 
@@ -64,13 +66,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
 	long ret;
 
-	asm(
+	asm (
 		"mov %%ebx, %%edx \n"
-		"mov %2, %%ebx \n"
+		"mov %[clock], %%ebx \n"
 		"call __kernel_vsyscall \n"
 		"mov %%edx, %%ebx \n"
-		: "=a" (ret)
-		: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+		: "=a" (ret), "=m" (*ts)
+		: "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
 		: "memory", "edx");
 	return ret;
 }
@@ -79,13 +81,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
 	long ret;
 
-	asm(
+	asm (
 		"mov %%ebx, %%edx \n"
-		"mov %2, %%ebx \n"
+		"mov %[tv], %%ebx \n"
 		"call __kernel_vsyscall \n"
 		"mov %%edx, %%ebx \n"
-		: "=a" (ret)
-		: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+		: "=a" (ret), "=m" (*tv), "=m" (*tz)
+		: "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
 		: "memory", "edx");
 	return ret;
 }
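
The constraint changes above are worth spelling out: the SYSCALL instruction itself destroys %rcx (return RIP) and %r11 (saved RFLAGS), and the kernel writes through the ts/tv/tz pointers, so both facts must be visible to the compiler or it may cache stale values across the call. A standalone sketch of the corrected 64-bit pattern (the wrapper name and the literal syscall number are ours, for illustration):

```c
#include <time.h>

/* Hedged sketch: x86-64 Linux only; 228 is __NR_clock_gettime on that
 * ABI. "=m" (*ts) marks the pointee as written, and the rcx/r11
 * clobbers model what SYSCALL destroys. */
static long clock_gettime_raw(long clock, struct timespec *ts)
{
	long ret;

	asm volatile ("syscall"
		      : "=a" (ret), "=m" (*ts)
		      : "0" (228), "D" (clock), "S" (ts)
		      : "memory", "rcx", "r11");
	return ret;
}
```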
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 981ba5e8241b..8671de126eac 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -36,6 +36,7 @@
 
 static int num_counters_llc;
 static int num_counters_nb;
+static bool l3_mask;
 
 static HLIST_HEAD(uncore_unused_list);
 
@@ -209,6 +210,13 @@ static int amd_uncore_event_init(struct perf_event *event)
 	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
 	hwc->idx = -1;
 
+	/*
+	 * SliceMask and ThreadMask need to be set for certain L3 events in
+	 * Family 17h. For other events, the two fields do not affect the count.
+	 */
+	if (l3_mask)
+		hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
+
 	if (event->cpu < 0)
 		return -EINVAL;
 
@@ -525,6 +533,7 @@ static int __init amd_uncore_init(void)
 		amd_llc_pmu.name = "amd_l3";
 		format_attr_event_df.show = &event_show_df;
 		format_attr_event_l3.show = &event_show_l3;
+		l3_mask = true;
 	} else {
 		num_counters_nb = NUM_COUNTERS_NB;
 		num_counters_llc = NUM_COUNTERS_L2;
@@ -532,6 +541,7 @@ static int __init amd_uncore_init(void)
 		amd_llc_pmu.name = "amd_l2";
 		format_attr_event_df = format_attr_event;
 		format_attr_event_l3 = format_attr_event;
+		l3_mask = false;
 	}
 
 	amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 51d7c117e3c7..c07bee31abe8 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -3061,7 +3061,7 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
 
 void bdx_uncore_cpu_init(void)
 {
-	int pkg = topology_phys_to_logical_pkg(0);
+	int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
 
 	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
 		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
@@ -3931,16 +3931,16 @@ static const struct pci_device_id skx_uncore_pci_ids[] = {
 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
 	},
 	{ /* M3UPI0 Link 0 */
-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
-		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
 	},
 	{ /* M3UPI0 Link 1 */
-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
-		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
 	},
 	{ /* M3UPI1 Link 2 */
-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
-		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
 	},
 	{ /* end: all zeroes */ }
 };
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 12f54082f4c8..78241b736f2a 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -46,6 +46,14 @@
 #define INTEL_ARCH_EVENT_MASK	\
 	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
 
+#define AMD64_L3_SLICE_SHIFT	48
+#define AMD64_L3_SLICE_MASK	\
+	((0xFULL) << AMD64_L3_SLICE_SHIFT)
+
+#define AMD64_L3_THREAD_SHIFT	56
+#define AMD64_L3_THREAD_MASK	\
+	((0xFFULL) << AMD64_L3_THREAD_SHIFT)
+
 #define X86_RAW_EVENT_MASK		\
 	(ARCH_PERFMON_EVENTSEL_EVENT |	\
 	 ARCH_PERFMON_EVENTSEL_UMASK |	\
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index a80c0673798f..e60c45fd3679 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -10,8 +10,13 @@ struct cpumask;
 struct mm_struct;
 
 #ifdef CONFIG_X86_UV
+#include <linux/efi.h>
 
 extern enum uv_system_type get_uv_system_type(void);
+static inline bool is_early_uv_system(void)
+{
+	return !((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab);
+}
 extern int is_uv_system(void);
 extern int is_uv_hubless(void);
 extern void uv_cpu_init(void);
@@ -23,6 +28,7 @@ extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 #else	/* X86_UV */
 
 static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
+static inline bool is_early_uv_system(void)	{ return 0; }
 static inline int is_uv_system(void)	{ return 0; }
 static inline int is_uv_hubless(void)	{ return 0; }
 static inline void uv_cpu_init(void)	{ }
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 22ab408177b2..eeea634bee0a 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -922,7 +922,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* AMD errata T13 (order #21922) */
-	if ((c->x86 == 6)) {
+	if (c->x86 == 6) {
 		/* Duron Rev A0 */
 		if (c->x86_model == 3 && c->x86_stepping == 0)
 			size = 64;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 6490f618e096..b52bd2b6cdb4 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -26,6 +26,7 @@
 #include <asm/apic.h>
 #include <asm/intel-family.h>
 #include <asm/i8259.h>
+#include <asm/uv/uv.h>
 
 unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -1433,6 +1434,9 @@ void __init tsc_early_init(void)
 {
 	if (!boot_cpu_has(X86_FEATURE_TSC))
 		return;
+	/* Don't change UV TSC multi-chassis synchronization */
+	if (is_early_uv_system())
+		return;
 	if (!determine_cpu_tsc_frequencies(true))
 		return;
 	loops_per_jiffy = get_loops_per_jiffy();
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d7e9bce6ff61..51b953ad9d4e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -249,6 +249,17 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
  */
 static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
 
+/*
+ * In some cases, we need to preserve the GFN of a non-present or reserved
+ * SPTE when we usurp the upper five bits of the physical address space to
+ * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
+ * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
+ * left into the reserved bits, i.e. the GFN in the SPTE will be split into
+ * high and low parts.  This mask covers the lower bits of the GFN.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
+
+
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -357,9 +368,7 @@ static bool is_mmio_spte(u64 spte)
 
 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
-		   shadow_nonpresent_or_rsvd_mask;
-	u64 gpa = spte & ~mask;
+	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
 	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
 	       & shadow_nonpresent_or_rsvd_mask;
@@ -423,6 +432,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
 static void kvm_mmu_reset_all_pte_masks(void)
 {
+	u8 low_phys_bits;
+
 	shadow_user_mask = 0;
 	shadow_accessed_mask = 0;
 	shadow_dirty_mask = 0;
@@ -437,12 +448,17 @@ static void kvm_mmu_reset_all_pte_masks(void)
 	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
 	 * assumed that the CPU is not vulnerable to L1TF.
 	 */
+	low_phys_bits = boot_cpu_data.x86_phys_bits;
 	if (boot_cpu_data.x86_phys_bits <
-	    52 - shadow_nonpresent_or_rsvd_mask_len)
+	    52 - shadow_nonpresent_or_rsvd_mask_len) {
 		shadow_nonpresent_or_rsvd_mask =
 			rsvd_bits(boot_cpu_data.x86_phys_bits -
 				  shadow_nonpresent_or_rsvd_mask_len,
 				  boot_cpu_data.x86_phys_bits - 1);
+		low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
+	}
+	shadow_nonpresent_or_rsvd_lower_gfn_mask =
+		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
 }
 
 static int is_cpuid_PSE36(void)
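
To make the mask arithmetic above concrete: with, say, 46 physical address bits and the 5-bit reserved region, the low mask ends up covering GFN bits [40:12] while bits [45:41] are relocated into the reserved bits. A small stand-alone arithmetic sketch (the 46-bit figure is an assumed example, and GENMASK_ULL is re-stated locally for a user-space build):

```c
#include <stdint.h>
#include <stdio.h>

/* Same bit-range helper the kernel uses, re-derived here. */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	const unsigned int phys_bits  = 46;	/* assumed example CPU */
	const unsigned int mask_len   = 5;	/* shadow_nonpresent_or_rsvd_mask_len */
	const unsigned int page_shift = 12;

	/* phys_bits < 52 - mask_len, so the L1TF reserved mask is in play. */
	uint64_t rsvd  = GENMASK_ULL(phys_bits - 1, phys_bits - mask_len);
	uint64_t lower = GENMASK_ULL(phys_bits - mask_len - 1, page_shift);

	printf("reserved-bit mask: %#018llx\n", (unsigned long long)rsvd);
	printf("lower GFN mask:    %#018llx\n", (unsigned long long)lower);
	return 0;
}
```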
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 06412ba46aa3..612fd17be635 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -121,7 +121,6 @@ module_param_named(pml, enable_pml, bool, S_IRUGO);
 
 #define MSR_BITMAP_MODE_X2APIC		1
 #define MSR_BITMAP_MODE_X2APIC_APICV	2
-#define MSR_BITMAP_MODE_LM		4
 
 #define KVM_VMX_TSC_MULTIPLIER_MAX	0xffffffffffffffffULL
 
@@ -857,6 +856,7 @@ struct nested_vmx {
 
 	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
 	u64 vmcs01_debugctl;
+	u64 vmcs01_guest_bndcfgs;
 
 	u16 vpid02;
 	u16 last_vpid;
@@ -2899,8 +2899,7 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 		vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
 	}
 
-	if (is_long_mode(&vmx->vcpu))
-		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #else
 	savesegment(fs, fs_sel);
 	savesegment(gs, gs_sel);
@@ -2951,8 +2950,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
2951 vmx->loaded_cpu_state = NULL; 2950 vmx->loaded_cpu_state = NULL;
2952 2951
2953#ifdef CONFIG_X86_64 2952#ifdef CONFIG_X86_64
2954 if (is_long_mode(&vmx->vcpu)) 2953 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2955 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2956#endif 2954#endif
2957 if (host_state->ldt_sel || (host_state->gs_sel & 7)) { 2955 if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
2958 kvm_load_ldt(host_state->ldt_sel); 2956 kvm_load_ldt(host_state->ldt_sel);
@@ -2980,24 +2978,19 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
2980#ifdef CONFIG_X86_64 2978#ifdef CONFIG_X86_64
2981static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx) 2979static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
2982{ 2980{
2983 if (is_long_mode(&vmx->vcpu)) { 2981 preempt_disable();
2984 preempt_disable(); 2982 if (vmx->loaded_cpu_state)
2985 if (vmx->loaded_cpu_state) 2983 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2986 rdmsrl(MSR_KERNEL_GS_BASE, 2984 preempt_enable();
2987 vmx->msr_guest_kernel_gs_base);
2988 preempt_enable();
2989 }
2990 return vmx->msr_guest_kernel_gs_base; 2985 return vmx->msr_guest_kernel_gs_base;
2991} 2986}
2992 2987
2993static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data) 2988static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
2994{ 2989{
2995 if (is_long_mode(&vmx->vcpu)) { 2990 preempt_disable();
2996 preempt_disable(); 2991 if (vmx->loaded_cpu_state)
2997 if (vmx->loaded_cpu_state) 2992 wrmsrl(MSR_KERNEL_GS_BASE, data);
2998 wrmsrl(MSR_KERNEL_GS_BASE, data); 2993 preempt_enable();
2999 preempt_enable();
3000 }
3001 vmx->msr_guest_kernel_gs_base = data; 2994 vmx->msr_guest_kernel_gs_base = data;
3002} 2995}
3003#endif 2996#endif
@@ -3533,9 +3526,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
3533 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | 3526 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
3534 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; 3527 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
3535 3528
3536 if (kvm_mpx_supported())
3537 msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
3538
3539 /* We support free control of debug control saving. */ 3529 /* We support free control of debug control saving. */
3540 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; 3530 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
3541 3531
@@ -3552,8 +3542,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
3552 VM_ENTRY_LOAD_IA32_PAT; 3542 VM_ENTRY_LOAD_IA32_PAT;
3553 msrs->entry_ctls_high |= 3543 msrs->entry_ctls_high |=
3554 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); 3544 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
3555 if (kvm_mpx_supported())
3556 msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
3557 3545
3558 /* We support free control of debug control loading. */ 3546 /* We support free control of debug control loading. */
3559 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; 3547 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
@@ -3601,12 +3589,12 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
3601 msrs->secondary_ctls_high); 3589 msrs->secondary_ctls_high);
3602 msrs->secondary_ctls_low = 0; 3590 msrs->secondary_ctls_low = 0;
3603 msrs->secondary_ctls_high &= 3591 msrs->secondary_ctls_high &=
3604 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3605 SECONDARY_EXEC_DESC | 3592 SECONDARY_EXEC_DESC |
3606 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3593 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3607 SECONDARY_EXEC_APIC_REGISTER_VIRT | 3594 SECONDARY_EXEC_APIC_REGISTER_VIRT |
3608 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3595 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3609 SECONDARY_EXEC_WBINVD_EXITING; 3596 SECONDARY_EXEC_WBINVD_EXITING;
3597
3610 /* 3598 /*
3611 * We can emulate "VMCS shadowing," even if the hardware 3599 * We can emulate "VMCS shadowing," even if the hardware
3612 * doesn't support it. 3600 * doesn't support it.
@@ -3663,6 +3651,10 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
3663 msrs->secondary_ctls_high |= 3651 msrs->secondary_ctls_high |=
3664 SECONDARY_EXEC_UNRESTRICTED_GUEST; 3652 SECONDARY_EXEC_UNRESTRICTED_GUEST;
3665 3653
3654 if (flexpriority_enabled)
3655 msrs->secondary_ctls_high |=
3656 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
3657
3666 /* miscellaneous data */ 3658 /* miscellaneous data */
3667 rdmsr(MSR_IA32_VMX_MISC, 3659 rdmsr(MSR_IA32_VMX_MISC,
3668 msrs->misc_low, 3660 msrs->misc_low,
@@ -5073,19 +5065,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
5073 if (!msr) 5065 if (!msr)
5074 return; 5066 return;
5075 5067
5076 /*
5077 * MSR_KERNEL_GS_BASE is not intercepted when the guest is in
5078 * 64-bit mode as a 64-bit kernel may frequently access the
5079 * MSR. This means we need to manually save/restore the MSR
5080 * when switching between guest and host state, but only if
5081 * the guest is in 64-bit mode. Sync our cached value if the
5082 * guest is transitioning to 32-bit mode and the CPU contains
5083 * guest state, i.e. the cache is stale.
5084 */
5085#ifdef CONFIG_X86_64
5086 if (!(efer & EFER_LMA))
5087 (void)vmx_read_guest_kernel_gs_base(vmx);
5088#endif
5089 vcpu->arch.efer = efer; 5068 vcpu->arch.efer = efer;
5090 if (efer & EFER_LMA) { 5069 if (efer & EFER_LMA) {
5091 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); 5070 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
@@ -6078,9 +6057,6 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
6078 mode |= MSR_BITMAP_MODE_X2APIC_APICV; 6057 mode |= MSR_BITMAP_MODE_X2APIC_APICV;
6079 } 6058 }
6080 6059
6081 if (is_long_mode(vcpu))
6082 mode |= MSR_BITMAP_MODE_LM;
6083
6084 return mode; 6060 return mode;
6085} 6061}
6086 6062
@@ -6121,9 +6097,6 @@ static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
6121 if (!changed) 6097 if (!changed)
6122 return; 6098 return;
6123 6099
6124 vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW,
6125 !(mode & MSR_BITMAP_MODE_LM));
6126
6127 if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV)) 6100 if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
6128 vmx_update_msr_bitmap_x2apic(msr_bitmap, mode); 6101 vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
6129 6102
@@ -6189,6 +6162,11 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
6189 nested_mark_vmcs12_pages_dirty(vcpu); 6162 nested_mark_vmcs12_pages_dirty(vcpu);
6190} 6163}
6191 6164
6165static u8 vmx_get_rvi(void)
6166{
6167 return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
6168}
6169
6192static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) 6170static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
6193{ 6171{
6194 struct vcpu_vmx *vmx = to_vmx(vcpu); 6172 struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6201,7 +6179,7 @@ static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
6201 WARN_ON_ONCE(!vmx->nested.virtual_apic_page)) 6179 WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
6202 return false; 6180 return false;
6203 6181
6204 rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff; 6182 rvi = vmx_get_rvi();
6205 6183
6206 vapic_page = kmap(vmx->nested.virtual_apic_page); 6184 vapic_page = kmap(vmx->nested.virtual_apic_page);
6207 vppr = *((u32 *)(vapic_page + APIC_PROCPRI)); 6185 vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
@@ -10245,15 +10223,16 @@ static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
10245 if (!lapic_in_kernel(vcpu)) 10223 if (!lapic_in_kernel(vcpu))
10246 return; 10224 return;
10247 10225
10226 if (!flexpriority_enabled &&
10227 !cpu_has_vmx_virtualize_x2apic_mode())
10228 return;
10229
10248 /* Postpone execution until vmcs01 is the current VMCS. */ 10230 /* Postpone execution until vmcs01 is the current VMCS. */
10249 if (is_guest_mode(vcpu)) { 10231 if (is_guest_mode(vcpu)) {
10250 to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true; 10232 to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
10251 return; 10233 return;
10252 } 10234 }
10253 10235
10254 if (!cpu_need_tpr_shadow(vcpu))
10255 return;
10256
10257 sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); 10236 sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
10258 sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 10237 sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
10259 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE); 10238 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
@@ -10375,6 +10354,14 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
10375 return max_irr; 10354 return max_irr;
10376} 10355}
10377 10356
10357static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
10358{
10359 u8 rvi = vmx_get_rvi();
10360 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
10361
10362 return ((rvi & 0xf0) > (vppr & 0xf0));
10363}
10364
10378static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) 10365static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
10379{ 10366{
10380 if (!kvm_vcpu_apicv_active(vcpu)) 10367 if (!kvm_vcpu_apicv_active(vcpu))
@@ -11264,6 +11251,23 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
11264#undef cr4_fixed1_update 11251#undef cr4_fixed1_update
11265} 11252}
11266 11253
11254static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
11255{
11256 struct vcpu_vmx *vmx = to_vmx(vcpu);
11257
11258 if (kvm_mpx_supported()) {
11259 bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
11260
11261 if (mpx_enabled) {
11262 vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
11263 vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
11264 } else {
11265 vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
11266 vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
11267 }
11268 }
11269}
11270
11267static void vmx_cpuid_update(struct kvm_vcpu *vcpu) 11271static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
11268{ 11272{
11269 struct vcpu_vmx *vmx = to_vmx(vcpu); 11273 struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -11280,8 +11284,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
11280 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= 11284 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
11281 ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; 11285 ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
11282 11286
11283 if (nested_vmx_allowed(vcpu)) 11287 if (nested_vmx_allowed(vcpu)) {
11284 nested_vmx_cr_fixed1_bits_update(vcpu); 11288 nested_vmx_cr_fixed1_bits_update(vcpu);
11289 nested_vmx_entry_exit_ctls_update(vcpu);
11290 }
11285} 11291}
11286 11292
11287static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) 11293static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -12049,8 +12055,13 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
12049 12055
12050 set_cr4_guest_host_mask(vmx); 12056 set_cr4_guest_host_mask(vmx);
12051 12057
12052 if (vmx_mpx_supported()) 12058 if (kvm_mpx_supported()) {
12053 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); 12059 if (vmx->nested.nested_run_pending &&
12060 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
12061 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
12062 else
12063 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
12064 }
12054 12065
12055 if (enable_vpid) { 12066 if (enable_vpid) {
12056 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) 12067 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
@@ -12595,15 +12606,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
12595 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 12606 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
12596 bool from_vmentry = !!exit_qual; 12607 bool from_vmentry = !!exit_qual;
12597 u32 dummy_exit_qual; 12608 u32 dummy_exit_qual;
12598 u32 vmcs01_cpu_exec_ctrl; 12609 bool evaluate_pending_interrupts;
12599 int r = 0; 12610 int r = 0;
12600 12611
12601 vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); 12612 evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
12613 (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
12614 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
12615 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
12602 12616
12603 enter_guest_mode(vcpu); 12617 enter_guest_mode(vcpu);
12604 12618
12605 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) 12619 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
12606 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); 12620 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
12621 if (kvm_mpx_supported() &&
12622 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
12623 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
12607 12624
12608 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); 12625 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
12609 vmx_segment_cache_clear(vmx); 12626 vmx_segment_cache_clear(vmx);
@@ -12643,16 +12660,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
12643 * to L1 or delivered directly to L2 (e.g. In case L1 don't 12660 * to L1 or delivered directly to L2 (e.g. In case L1 don't
12644 * intercept EXTERNAL_INTERRUPT). 12661 * intercept EXTERNAL_INTERRUPT).
12645 * 12662 *
12646 * Usually this would be handled by L0 requesting a 12663 * Usually this would be handled by the processor noticing an
12647 * IRQ/NMI window by setting VMCS accordingly. However, 12664 * IRQ/NMI window request, or checking RVI during evaluation of
12648 * this setting was done on VMCS01 and now VMCS02 is active 12665 * pending virtual interrupts. However, this setting was done
12649 * instead. Thus, we force L0 to perform pending event 12666 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
12650 * evaluation by requesting a KVM_REQ_EVENT. 12667 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
12651 */ 12668 */
12652 if (vmcs01_cpu_exec_ctrl & 12669 if (unlikely(evaluate_pending_interrupts))
12653 (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) {
12654 kvm_make_request(KVM_REQ_EVENT, vcpu); 12670 kvm_make_request(KVM_REQ_EVENT, vcpu);
12655 }
12656 12671
12657 /* 12672 /*
12658 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point 12673 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
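Several of the vmx.c changes above funnel into vmx_has_apicv_interrupt(), which compares only the upper nibbles of RVI and VPPR: an interrupt is deliverable when its priority class (vector bits 7:4) exceeds the processor priority class. A self-contained illustration with made-up register values:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Mirrors the comparison in vmx_has_apicv_interrupt() above. */
static bool has_apicv_interrupt(uint8_t rvi, uint8_t vppr)
{
	return (rvi & 0xf0) > (vppr & 0xf0);
}

int main(void)
{
	assert(has_apicv_interrupt(0x51, 0x40));	/* class 5 > class 4 */
	assert(!has_apicv_interrupt(0x4f, 0x40));	/* equal class: masked */
	return 0;
}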
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index edbf00ec56b3..ca717737347e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4698,7 +4698,7 @@ static void kvm_init_msr_list(void)
 		 */
 		switch (msrs_to_save[i]) {
 		case MSR_IA32_BNDCFGS:
-			if (!kvm_x86_ops->mpx_supported())
+			if (!kvm_mpx_supported())
 				continue;
 			break;
 		case MSR_TSC_AUX:
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index b3c0498ee433..8e9213b36e31 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -226,8 +226,11 @@ static int alloc_lookup_fw_priv(const char *fw_name,
 	}
 
 	tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
-	if (tmp && !(opt_flags & FW_OPT_NOCACHE))
-		list_add(&tmp->list, &fwc->head);
+	if (tmp) {
+		INIT_LIST_HEAD(&tmp->list);
+		if (!(opt_flags & FW_OPT_NOCACHE))
+			list_add(&tmp->list, &fwc->head);
+	}
 	spin_unlock(&fwc->lock);
 
 	*fw_priv = tmp;
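The firmware_loader fix above initialises tmp->list even when the entry is not cached, so that later list operations on an uncached entry act on a self-pointing node rather than uninitialised memory. A minimal userspace re-implementation of the circular-list idiom (not the kernel's list.h) showing why that matters:

#include <assert.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_del(struct list_head *e)
{
	e->next->prev = e->prev;	/* dereferences garbage if e was */
	e->prev->next = e->next;	/* never initialised or added    */
}

int main(void)
{
	struct list_head node;

	INIT_LIST_HEAD(&node);	/* self-pointing: list_del is now harmless */
	list_del(&node);
	assert(node.next == &node);
	return 0;
}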
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 3f68e2919dc5..a690fd400260 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1713,8 +1713,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
 	dpm_wait_for_subordinate(dev, async);
 
-	if (async_error)
+	if (async_error) {
+		dev->power.direct_complete = false;
 		goto Complete;
+	}
 
 	/*
 	 * If a device configured to wake up the system from sleep states
@@ -1726,6 +1728,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	pm_wakeup_event(dev, 0);
 
 	if (pm_wakeup_pending()) {
+		dev->power.direct_complete = false;
 		async_error = -EBUSY;
 		goto Complete;
 	}
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index d67667970f7e..ec40f991e6c6 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1553,8 +1553,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-			 desc_bytes;
+	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+						  desc_bytes);
 	edesc->iv_dir = DMA_TO_DEVICE;
 
 	/* Make sure IV is located in a DMAable area */
@@ -1757,8 +1757,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-			 desc_bytes;
+	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+						  desc_bytes);
 	edesc->iv_dir = DMA_FROM_DEVICE;
 
 	/* Make sure IV is located in a DMAable area */
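The caamalg change above derives sec4_sg from the hw_desc member instead of from sizeof(struct ablkcipher_edesc). The underlying pitfall, sketched with an illustrative layout (not the real ablkcipher_edesc): with a flexible array member, sizeof() may include tail padding that offsetof() does not, so a sizeof()-based tail pointer can land past where the descriptor data actually starts.

#include <stdio.h>
#include <stddef.h>

struct edesc {
	long long a;
	int b;
	char hw_desc[];	/* typically offsetof = 12 but sizeof = 16 on LP64 */
};

int main(void)
{
	printf("sizeof=%zu offsetof(hw_desc)=%zu\n",
	       sizeof(struct edesc), offsetof(struct edesc, hw_desc));
	/* The fixed code uses (u8 *)edesc->hw_desc + desc_bytes, which is
	 * offsetof-based by construction and therefore always correct. */
	return 0;
}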
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 5c539af8ed60..010bbf607797 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct dsgl_walk *walk,
 	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 }
 
-static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
+				 int pci_chan_id)
 {
 	struct cpl_rx_phys_dsgl *phys_cpl;
 
@@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
 	phys_cpl->rss_hdr_int.qid = htons(qid);
 	phys_cpl->rss_hdr_int.hash_val = 0;
+	phys_cpl->rss_hdr_int.channel = pci_chan_id;
 }
 
 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
@@ -718,7 +720,7 @@ static inline void create_wreq(struct chcr_context *ctx,
 		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
 				!!lcb, ctx->tx_qidx);
 
-	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
+	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
 						       qid);
 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
 				     ((sizeof(chcr_req->wreq)) >> 4)));
@@ -1339,16 +1341,23 @@ static int chcr_device_init(struct chcr_context *ctx)
 				    adap->vres.ncrypto_fc);
 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
 		txq_perchan = ntxq / u_ctx->lldi.nchan;
-		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
-		rxq_idx += id % rxq_perchan;
-		txq_idx = ctx->dev->tx_channel_id * txq_perchan;
-		txq_idx += id % txq_perchan;
 		spin_lock(&ctx->dev->lock_chcr_dev);
-		ctx->rx_qidx = rxq_idx;
-		ctx->tx_qidx = txq_idx;
+		ctx->tx_chan_id = ctx->dev->tx_channel_id;
 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
 		ctx->dev->rx_channel_id = 0;
 		spin_unlock(&ctx->dev->lock_chcr_dev);
+		rxq_idx = ctx->tx_chan_id * rxq_perchan;
+		rxq_idx += id % rxq_perchan;
+		txq_idx = ctx->tx_chan_id * txq_perchan;
+		txq_idx += id % txq_perchan;
+		ctx->rx_qidx = rxq_idx;
+		ctx->tx_qidx = txq_idx;
+		/* Channel Id used by SGE to forward packet to Host.
+		 * Same value should be used in cpl_fw6_pld RSS_CH field
+		 * by FW. Driver programs PCI channel ID to be used in fw
+		 * at the time of queue allocation with value "pi->tx_chan"
+		 */
+		ctx->pci_chan_id = txq_idx / txq_perchan;
 	}
 out:
 	return err;
@@ -2503,6 +2512,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct dsgl_walk dsgl_walk;
 	unsigned int authsize = crypto_aead_authsize(tfm);
+	struct chcr_context *ctx = a_ctx(tfm);
 	u32 temp;
 
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2512,7 +2522,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
 	temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
-	dsgl_walk_end(&dsgl_walk, qid);
+	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
@@ -2544,6 +2554,8 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
 			     unsigned short qid)
 {
 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+	struct chcr_context *ctx = c_ctx(tfm);
 	struct dsgl_walk dsgl_walk;
 
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2552,7 +2564,7 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
 	reqctx->dstsg = dsgl_walk.last_sg;
 	reqctx->dst_ofst = dsgl_walk.last_sg_len;
 
-	dsgl_walk_end(&dsgl_walk, qid);
+	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_hash_src_ent(struct ahash_request *req,
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 54835cb109e5..0d2c70c344f3 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -255,6 +255,8 @@ struct chcr_context {
 	struct chcr_dev *dev;
 	unsigned char tx_qidx;
 	unsigned char rx_qidx;
+	unsigned char tx_chan_id;
+	unsigned char pci_chan_id;
 	struct __crypto_ctx crypto_ctx[0];
 };
 
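The chcr changes cache the round-robin channel in tx_chan_id and derive pci_chan_id from the chosen TX queue, so the SGE returns completions on the PCI channel that matches the queue. A toy recomputation of that arithmetic (all numbers invented):

#include <stdio.h>

int main(void)
{
	unsigned int nchan = 2, ntxq = 8, id = 5;	/* made-up adapter */
	unsigned int txq_perchan = ntxq / nchan;	/* 4 */
	unsigned int tx_chan_id = 1;			/* round-robin pick */
	unsigned int txq_idx = tx_chan_id * txq_perchan + id % txq_perchan;

	/* pci_chan_id recovers the channel from the queue index: 1 here. */
	printf("txq_idx=%u pci_chan_id=%u\n", txq_idx, txq_idx / txq_perchan);
	return 0;
}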
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index a10c418d4e5c..56bd28174f52 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -63,7 +63,7 @@ struct dcp {
 	struct dcp_coherent_block	*coh;
 
 	struct completion		completion[DCP_MAX_CHANS];
-	struct mutex			mutex[DCP_MAX_CHANS];
+	spinlock_t			lock[DCP_MAX_CHANS];
 	struct task_struct		*thread[DCP_MAX_CHANS];
 	struct crypto_queue		queue[DCP_MAX_CHANS];
 };
@@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)
 
 	int ret;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
 		if (arq) {
 			ret = mxs_dcp_aes_block_crypt(arq);
 			arq->complete(arq, ret);
-			continue;
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -409,9 +413,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
 	rctx->ecb = ecb;
 	actx->chan = DCP_CHAN_CRYPTO;
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 
@@ -640,13 +644,20 @@ static int dcp_chan_thread_sha(void *data)
 	struct ahash_request *req;
 	int ret, fini;
 
-	do {
-		__set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-		mutex_lock(&sdcp->mutex[chan]);
+		spin_lock(&sdcp->lock[chan]);
 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 		arq = crypto_dequeue_request(&sdcp->queue[chan]);
-		mutex_unlock(&sdcp->mutex[chan]);
+		spin_unlock(&sdcp->lock[chan]);
+
+		if (!backlog && !arq) {
+			schedule();
+			continue;
+		}
+
+		set_current_state(TASK_RUNNING);
 
 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -658,12 +669,8 @@ static int dcp_chan_thread_sha(void *data)
 			ret = dcp_sha_req_to_buf(arq);
 			fini = rctx->fini;
 			arq->complete(arq, ret);
-			if (!fini)
-				continue;
 		}
-
-		schedule();
-	} while (!kthread_should_stop());
+	}
 
 	return 0;
 }
@@ -721,9 +728,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
 		rctx->init = 1;
 	}
 
-	mutex_lock(&sdcp->mutex[actx->chan]);
+	spin_lock(&sdcp->lock[actx->chan]);
 	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-	mutex_unlock(&sdcp->mutex[actx->chan]);
+	spin_unlock(&sdcp->lock[actx->chan]);
 
 	wake_up_process(sdcp->thread[actx->chan]);
 	mutex_unlock(&actx->mutex);
@@ -997,7 +1004,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, sdcp);
 
 	for (i = 0; i < DCP_MAX_CHANS; i++) {
-		mutex_init(&sdcp->mutex[i]);
+		spin_lock_init(&sdcp->lock[i]);
 		init_completion(&sdcp->completion[i]);
 		crypto_init_queue(&sdcp->queue[i], 50);
 	}
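The reworked mxs-dcp workers follow the canonical kthread sleep pattern: declare TASK_INTERRUPTIBLE before checking the queue, sleep only if it was empty, and return to TASK_RUNNING before processing. A condensed, non-compilable sketch of the ordering; dequeue() and process() stand in for the crypto-queue calls:

/* The state must be set *before* the queue is checked: a wake-up that
 * arrives between "check" and "schedule()" then simply leaves the task
 * runnable instead of being lost. */
while (!kthread_should_stop()) {
	set_current_state(TASK_INTERRUPTIBLE);

	spin_lock(&lock);
	work = dequeue();			/* may be NULL */
	spin_unlock(&lock);

	if (!work) {
		schedule();			/* sleep until wake_up_process() */
		continue;
	}

	set_current_state(TASK_RUNNING);	/* cancel the pending sleep */
	process(work);				/* free to sleep itself now */
}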
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index ba197f34c252..763c2166ee0e 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C3XXX_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 24ec908eb26c..613c7d5644ce 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C3XXXIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 59a5a0df50b6..9cb832963357 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C62X_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index b9f3e0e4fde9..278452b8ef81 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_C62XIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index be5c5a988ca5..3a9708ef4ce2 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 26ab17bfc6da..3da0f951cb59 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret, bar_mask;
+	unsigned long bar_mask;
+	int ret;
 
 	switch (ent->device) {
 	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Find and map all the device's BARS */
 	i = 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-			 ADF_PCI_MAX_BARS * 2) {
+	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
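All six QAT probe fixes are the same type correction: pci_select_bars() returns an int bitmask, while for_each_set_bit() walks an array of unsigned long, so casting &int to unsigned long * makes the helper read past the variable (and return garbage bits on 64-bit big-endian machines). A userspace re-implementation of the iteration over a correctly typed mask:

#include <stdio.h>

static void walk_bits(const unsigned long *mask, unsigned int nbits)
{
	for (unsigned int b = 0; b < nbits; b++)
		if (*mask & (1UL << b))
			printf("BAR %u present\n", b);
}

int main(void)
{
	unsigned long bar_mask = 0x15;	/* BARs 0, 2, 4; the fixed, wide type */

	walk_bits(&bar_mask, 6 * 2);	/* cf. ADF_PCI_MAX_BARS * 2 */
	return 0;
}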
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
index 0b7e19c27c6d..51a5ac2293a7 100644
--- a/drivers/fpga/dfl-fme-region.c
+++ b/drivers/fpga/dfl-fme-region.c
@@ -14,6 +14,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/fpga/fpga-mgr.h>
 #include <linux/fpga/fpga-region.h>
 
 #include "dfl-fme-pr.h"
@@ -66,9 +67,10 @@ eprobe_mgr_put:
 static int fme_region_remove(struct platform_device *pdev)
 {
 	struct fpga_region *region = dev_get_drvdata(&pdev->dev);
+	struct fpga_manager *mgr = region->mgr;
 
 	fpga_region_unregister(region);
-	fpga_mgr_put(region->mgr);
+	fpga_mgr_put(mgr);
 
 	return 0;
 }
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 24b8f98b73ec..c983dac97501 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -125,7 +125,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
  *
  * Given a device, get an exclusive reference to a fpga bridge.
  *
- * Return: fpga manager struct or IS_ERR() condition containing error code.
+ * Return: fpga bridge struct or IS_ERR() condition containing error code.
  */
 struct fpga_bridge *fpga_bridge_get(struct device *dev,
 				    struct fpga_image_info *info)
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index 35fabb8083fb..052a1342ab7e 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -437,9 +437,10 @@ eprobe_mgr_put:
 static int of_fpga_region_remove(struct platform_device *pdev)
 {
 	struct fpga_region *region = platform_get_drvdata(pdev);
+	struct fpga_manager *mgr = region->mgr;
 
 	fpga_region_unregister(region);
-	fpga_mgr_put(region->mgr);
+	fpga_mgr_put(mgr);
 
 	return 0;
 }
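Both FPGA remove() fixes read region->mgr into a local before calling fpga_region_unregister(), which may free the region. A generic userspace sketch of the save-before-free idiom (all types illustrative):

#include <stdlib.h>

struct manager { int id; };
struct region { struct manager *mgr; };

static void region_unregister(struct region *r) { free(r); }	/* may free r */
static void mgr_put(struct manager *m) { (void)m; }

static void region_remove(struct region *region)
{
	struct manager *mgr = region->mgr;	/* capture first */

	region_unregister(region);		/* region may be gone now */
	mgr_put(mgr);				/* safe: *region not touched */
}

int main(void)
{
	struct region *r = malloc(sizeof(*r));
	static struct manager m = { 1 };

	if (!r)
		return 1;
	r->mgr = &m;
	region_remove(r);
	return 0;
}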
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index e8f8a1999393..a57300c1d649 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -571,7 +571,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
 		if (ret)
 			goto out_free_descs;
 		lh->descs[i] = desc;
-		count = i;
+		count = i + 1;
 
 		if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
 			set_bit(FLAG_ACTIVE_LOW, &desc->flags);
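The gpiolib fix is a plain off-by-one: once descriptor i has been stored, i + 1 descriptors are held, and the error path releases count of them. In isolation:

#include <assert.h>

int main(void)
{
	int held[4], count = 0;

	for (int i = 0; i < 4; i++) {
		held[i] = i;		/* acquire descriptor i */
		count = i + 1;		/* "count = i" would leak held[i] */
	}
	assert(count == 4);
	return 0;
}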
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index ec0d62a16e53..4f22e745df51 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -358,8 +358,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 					struct queue *q,
 					struct qcm_process_device *qpd)
 {
-	int retval;
 	struct mqd_manager *mqd_mgr;
+	int retval;
 
 	mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
 	if (!mqd_mgr)
@@ -387,8 +387,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 	if (!q->properties.is_active)
 		return 0;
 
-	retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-				   &q->properties, q->process->mm);
+	if (WARN(q->process->mm != current->mm,
+		 "should only run in user thread"))
+		retval = -EFAULT;
+	else
+		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+					   &q->properties, current->mm);
 	if (retval)
 		goto out_uninit_mqd;
 
@@ -545,9 +549,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 		retval = map_queues_cpsch(dqm);
 	else if (q->properties.is_active &&
 		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
-		  q->properties.type == KFD_QUEUE_TYPE_SDMA))
-		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-					   &q->properties, q->process->mm);
+		  q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+		if (WARN(q->process->mm != current->mm,
+			 "should only run in user thread"))
+			retval = -EFAULT;
+		else
+			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
+						   q->pipe, q->queue,
+						   &q->properties, current->mm);
+	}
 
 out_unlock:
 	dqm_unlock(dqm);
@@ -653,6 +663,7 @@ out:
 static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 					  struct qcm_process_device *qpd)
 {
+	struct mm_struct *mm = NULL;
 	struct queue *q;
 	struct mqd_manager *mqd_mgr;
 	struct kfd_process_device *pdd;
@@ -686,6 +697,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		kfd_flush_tlb(pdd);
 	}
 
+	/* Take a safe reference to the mm_struct, which may otherwise
+	 * disappear even while the kfd_process is still referenced.
+	 */
+	mm = get_task_mm(pdd->process->lead_thread);
+	if (!mm) {
+		retval = -EFAULT;
+		goto out;
+	}
+
 	/* activate all active queues on the qpd */
 	list_for_each_entry(q, &qpd->queues_list, list) {
 		if (!q->properties.is_evicted)
@@ -700,14 +720,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		q->properties.is_evicted = false;
 		q->properties.is_active = true;
 		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
-					   q->queue, &q->properties,
-					   q->process->mm);
+					   q->queue, &q->properties, mm);
 		if (retval)
 			goto out;
 		dqm->queue_count++;
 	}
 	qpd->evicted = 0;
 out:
+	if (mm)
+		mmput(mm);
 	dqm_unlock(dqm);
 	return retval;
 }
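The amdkfd restore path can run from a context whose current->mm is not the process being restored, so it pins the correct mm_struct with get_task_mm() and drops the pin with mmput(). The shape of that lifetime rule as a kernel-style fragment; load_queues_with() is a hypothetical stand-in for the load_mqd loop:

struct mm_struct *mm = get_task_mm(pdd->process->lead_thread);
if (!mm)
	return -EFAULT;			/* process exiting: nothing to restore */

retval = load_queues_with(mm);		/* mm cannot disappear while pinned */
mmput(mm);				/* drop the reference */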
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 96875950845a..6903fe6c894b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4633,12 +4633,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	}
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
-	/* Signal HW programming completion */
-	drm_atomic_helper_commit_hw_done(state);
 
 	if (wait_for_vblank)
 		drm_atomic_helper_wait_for_flip_done(dev, state);
 
+	/*
+	 * FIXME:
+	 * Delay hw_done() until flip_done() is signaled. This is to block
+	 * another commit from freeing the CRTC state while we're still
+	 * waiting on flip_done.
+	 */
+	drm_atomic_helper_commit_hw_done(state);
+
 	drm_atomic_helper_cleanup_planes(dev, state);
 
 	/* Finally, drop a runtime PM reference for each newly disabled CRTC,
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index baff50a4c234..df31c3815092 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -63,20 +63,21 @@ static void drm_client_close(struct drm_client_dev *client)
 EXPORT_SYMBOL(drm_client_close);
 
 /**
- * drm_client_new - Create a DRM client
+ * drm_client_init - Initialise a DRM client
  * @dev: DRM device
  * @client: DRM client
 * @name: Client name
 * @funcs: DRM client functions (optional)
 *
+ * This initialises the client and opens a &drm_file. Use drm_client_add() to complete the process.
 * The caller needs to hold a reference on @dev before calling this function.
 * The client is freed when the &drm_device is unregistered. See drm_client_release().
 *
 * Returns:
 * Zero on success or negative error code on failure.
 */
-int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
-		   const char *name, const struct drm_client_funcs *funcs)
+int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
+		    const char *name, const struct drm_client_funcs *funcs)
 {
 	int ret;
 
@@ -95,10 +96,6 @@ int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
 	if (ret)
 		goto err_put_module;
 
-	mutex_lock(&dev->clientlist_mutex);
-	list_add(&client->list, &dev->clientlist);
-	mutex_unlock(&dev->clientlist_mutex);
-
 	drm_dev_get(dev);
 
 	return 0;
@@ -109,13 +106,33 @@ err_put_module:
 
 	return ret;
 }
-EXPORT_SYMBOL(drm_client_new);
+EXPORT_SYMBOL(drm_client_init);
+
+/**
+ * drm_client_add - Add client to the device list
+ * @client: DRM client
+ *
+ * Add the client to the &drm_device client list to activate its callbacks.
+ * @client must be initialized by a call to drm_client_init(). After
+ * drm_client_add() it is no longer permissible to call drm_client_release()
+ * directly (outside the unregister callback), instead cleanup will happen
+ * automatically on driver unload.
+ */
+void drm_client_add(struct drm_client_dev *client)
+{
+	struct drm_device *dev = client->dev;
+
+	mutex_lock(&dev->clientlist_mutex);
+	list_add(&client->list, &dev->clientlist);
+	mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_add);
 
 /**
  * drm_client_release - Release DRM client resources
  * @client: DRM client
 *
- * Releases resources by closing the &drm_file that was opened by drm_client_new().
+ * Releases resources by closing the &drm_file that was opened by drm_client_init().
 * It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
 *
 * This function should only be called from the unregister callback. An exception
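The drm_client split turns one-call registration into two phases, so a client can finish its own setup before its callbacks can fire. Callers now follow this order (a sketch; finish_private_setup() and the label are illustrative, and the two fbdev hunks below show the real pattern):

ret = drm_client_init(dev, &helper->client, "fbdev", &funcs);
if (ret)
	return ret;

ret = finish_private_setup(helper);	/* callbacks cannot race this */
if (ret)
	goto err_client_put;

drm_client_add(&helper->client);	/* now visible to the device */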
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 9da36a6271d3..9ac1f2e0f064 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -160,7 +160,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
 
 	fb_helper = &fbdev_cma->fb_helper;
 
-	ret = drm_client_new(dev, &fb_helper->client, "fbdev", NULL);
+	ret = drm_client_init(dev, &fb_helper->client, "fbdev", NULL);
 	if (ret)
 		goto err_free;
 
@@ -169,6 +169,8 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
 	if (ret)
 		goto err_client_put;
 
+	drm_client_add(&fb_helper->client);
+
 	return fbdev_cma;
 
 err_client_put:
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 16ec93b75dbf..515a7aec57ac 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -3218,12 +3218,14 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
 	if (!fb_helper)
 		return -ENOMEM;
 
-	ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+	ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
 	if (ret) {
 		kfree(fb_helper);
 		return ret;
 	}
 
+	drm_client_add(&fb_helper->client);
+
 	fb_helper->preferred_bpp = preferred_bpp;
 
 	drm_fbdev_client_hotplug(&fb_helper->client);
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index b54fb78a283c..b82da96ded5c 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -566,14 +566,14 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 	lessee_priv->is_master = 1;
 	lessee_priv->authenticated = 1;
 
-	/* Hook up the fd */
-	fd_install(fd, lessee_file);
-
 	/* Pass fd back to userspace */
 	DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
 	cl->fd = fd;
 	cl->lessee_id = lessee->lessee_id;
 
+	/* Hook up the fd */
+	fd_install(fd, lessee_file);
+
 	DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
 	return 0;
 
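The drm_lease reordering applies the general fd_install() rule: installing publishes the file to userspace immediately, so another thread may use or even close the fd at once, and no result write or failure path may come after it. As a kernel-style fragment:

cl->fd = fd;				/* 1. finish filling the reply */
cl->lessee_id = lessee->lessee_id;

fd_install(fd, lessee_file);		/* 2. publish; nothing may fail after */
return 0;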
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 87f6b5672e11..797d9ee5f15a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -55,37 +55,12 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
 					unsigned long start, unsigned long size)
 {
-	struct iommu_domain *domain;
-	int ret;
-
-	domain = iommu_domain_alloc(priv->dma_dev->bus);
-	if (!domain)
-		return -ENOMEM;
-
-	ret = iommu_get_dma_cookie(domain);
-	if (ret)
-		goto free_domain;
-
-	ret = iommu_dma_init_domain(domain, start, size, NULL);
-	if (ret)
-		goto put_cookie;
-
-	priv->mapping = domain;
+	priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
 	return 0;
-
-put_cookie:
-	iommu_put_dma_cookie(domain);
-free_domain:
-	iommu_domain_free(domain);
-	return ret;
 }
 
 static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
 {
-	struct iommu_domain *domain = priv->mapping;
-
-	iommu_put_dma_cookie(domain);
-	iommu_domain_free(domain);
 	priv->mapping = NULL;
 }
 
@@ -94,7 +69,9 @@ static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
 {
 	struct iommu_domain *domain = priv->mapping;
 
-	return iommu_attach_device(domain, dev);
+	if (dev != priv->dma_dev)
+		return iommu_attach_device(domain, dev);
+	return 0;
 }
 
 static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
@@ -102,7 +79,8 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 {
 	struct iommu_domain *domain = priv->mapping;
 
-	iommu_detach_device(domain, dev);
+	if (dev != priv->dma_dev)
+		iommu_detach_device(domain, dev);
 }
 #else
 #error Unsupported architecture and IOMMU/DMA-mapping glue code
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 5d2f0d548469..250b5e02a314 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -191,7 +191,8 @@ static irqreturn_t tda9950_irq(int irq, void *data)
191 break; 191 break;
192 } 192 }
193 /* TDA9950 executes all retries for us */ 193 /* TDA9950 executes all retries for us */
194 tx_status |= CEC_TX_STATUS_MAX_RETRIES; 194 if (tx_status != CEC_TX_STATUS_OK)
195 tx_status |= CEC_TX_STATUS_MAX_RETRIES;
195 cec_transmit_done(priv->adap, tx_status, arb_lost_cnt, 196 cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
196 nack_cnt, 0, err_cnt); 197 nack_cnt, 0, err_cnt);
197 break; 198 break;
@@ -310,7 +311,7 @@ static void tda9950_release(struct tda9950_priv *priv)
310 /* Wait up to .5s for it to signal non-busy */ 311 /* Wait up to .5s for it to signal non-busy */
311 do { 312 do {
312 csr = tda9950_read(client, REG_CSR); 313 csr = tda9950_read(client, REG_CSR);
313 if (!(csr & CSR_BUSY) || --timeout) 314 if (!(csr & CSR_BUSY) || !--timeout)
314 break; 315 break;
315 msleep(10); 316 msleep(10);
316 } while (1); 317 } while (1);
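The second tda9950 hunk is a one-character logic fix worth spelling out: with "|| --timeout" the loop broke out on the very first pass whenever the budget was still nonzero, so the driver never actually waited; "|| !--timeout" breaks only when the controller goes idle or the budget hits zero. A small userspace sketch of the corrected bounded wait, with hw_busy() standing in for the CSR_BUSY register read:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool hw_busy(void)
{
	static int pending = 3;	/* pretend the device is busy for 3 polls */
	return pending-- > 0;
}

int main(void)
{
	int timeout = 50;	/* 50 polls * 10 ms = the driver's .5 s */

	do {
		/* stop when idle OR when the budget is exhausted;
		 * note !--timeout, so a nonzero remainder keeps looping */
		if (!hw_busy() || !--timeout)
			break;
		usleep(10 * 1000);	/* msleep(10) in the driver */
	} while (1);

	printf("stopped with %d polls left\n", timeout);
	return 0;
}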
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index f7f2aa71d8d9..a262a64f5625 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -232,6 +232,20 @@ static bool compress_init(struct compress *c)
232 return true; 232 return true;
233} 233}
234 234
235static void *compress_next_page(struct drm_i915_error_object *dst)
236{
237 unsigned long page;
238
239 if (dst->page_count >= dst->num_pages)
240 return ERR_PTR(-ENOSPC);
241
242 page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
243 if (!page)
244 return ERR_PTR(-ENOMEM);
245
246 return dst->pages[dst->page_count++] = (void *)page;
247}
248
235static int compress_page(struct compress *c, 249static int compress_page(struct compress *c,
236 void *src, 250 void *src,
237 struct drm_i915_error_object *dst) 251 struct drm_i915_error_object *dst)
@@ -245,19 +259,14 @@ static int compress_page(struct compress *c,
245 259
246 do { 260 do {
247 if (zstream->avail_out == 0) { 261 if (zstream->avail_out == 0) {
248 unsigned long page; 262 zstream->next_out = compress_next_page(dst);
249 263 if (IS_ERR(zstream->next_out))
250 page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN); 264 return PTR_ERR(zstream->next_out);
251 if (!page)
252 return -ENOMEM;
253 265
254 dst->pages[dst->page_count++] = (void *)page;
255
256 zstream->next_out = (void *)page;
257 zstream->avail_out = PAGE_SIZE; 266 zstream->avail_out = PAGE_SIZE;
258 } 267 }
259 268
260 if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK) 269 if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
261 return -EIO; 270 return -EIO;
262 } while (zstream->avail_in); 271 } while (zstream->avail_in);
263 272
@@ -268,19 +277,42 @@ static int compress_page(struct compress *c,
268 return 0; 277 return 0;
269} 278}
270 279
271static void compress_fini(struct compress *c, 280static int compress_flush(struct compress *c,
272 struct drm_i915_error_object *dst) 281 struct drm_i915_error_object *dst)
273{ 282{
274 struct z_stream_s *zstream = &c->zstream; 283 struct z_stream_s *zstream = &c->zstream;
275 284
276 if (dst) { 285 do {
277 zlib_deflate(zstream, Z_FINISH); 286 switch (zlib_deflate(zstream, Z_FINISH)) {
278 dst->unused = zstream->avail_out; 287 case Z_OK: /* more space requested */
279 } 288 zstream->next_out = compress_next_page(dst);
289 if (IS_ERR(zstream->next_out))
290 return PTR_ERR(zstream->next_out);
291
292 zstream->avail_out = PAGE_SIZE;
293 break;
294
295 case Z_STREAM_END:
296 goto end;
297
298 default: /* any error */
299 return -EIO;
300 }
301 } while (1);
302
303end:
304 memset(zstream->next_out, 0, zstream->avail_out);
305 dst->unused = zstream->avail_out;
306 return 0;
307}
308
309static void compress_fini(struct compress *c,
310 struct drm_i915_error_object *dst)
311{
312 struct z_stream_s *zstream = &c->zstream;
280 313
281 zlib_deflateEnd(zstream); 314 zlib_deflateEnd(zstream);
282 kfree(zstream->workspace); 315 kfree(zstream->workspace);
283
284 if (c->tmp) 316 if (c->tmp)
285 free_page((unsigned long)c->tmp); 317 free_page((unsigned long)c->tmp);
286} 318}
@@ -319,6 +351,12 @@ static int compress_page(struct compress *c,
319 return 0; 351 return 0;
320} 352}
321 353
354static int compress_flush(struct compress *c,
355 struct drm_i915_error_object *dst)
356{
357 return 0;
358}
359
322static void compress_fini(struct compress *c, 360static void compress_fini(struct compress *c,
323 struct drm_i915_error_object *dst) 361 struct drm_i915_error_object *dst)
324{ 362{
@@ -917,6 +955,7 @@ i915_error_object_create(struct drm_i915_private *i915,
917 unsigned long num_pages; 955 unsigned long num_pages;
918 struct sgt_iter iter; 956 struct sgt_iter iter;
919 dma_addr_t dma; 957 dma_addr_t dma;
958 int ret;
920 959
921 if (!vma) 960 if (!vma)
922 return NULL; 961 return NULL;
@@ -930,6 +969,7 @@ i915_error_object_create(struct drm_i915_private *i915,
930 969
931 dst->gtt_offset = vma->node.start; 970 dst->gtt_offset = vma->node.start;
932 dst->gtt_size = vma->node.size; 971 dst->gtt_size = vma->node.size;
972 dst->num_pages = num_pages;
933 dst->page_count = 0; 973 dst->page_count = 0;
934 dst->unused = 0; 974 dst->unused = 0;
935 975
@@ -938,28 +978,26 @@ i915_error_object_create(struct drm_i915_private *i915,
938 return NULL; 978 return NULL;
939 } 979 }
940 980
981 ret = -EINVAL;
941 for_each_sgt_dma(dma, iter, vma->pages) { 982 for_each_sgt_dma(dma, iter, vma->pages) {
942 void __iomem *s; 983 void __iomem *s;
943 int ret;
944 984
945 ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); 985 ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
946 986
947 s = io_mapping_map_atomic_wc(&ggtt->iomap, slot); 987 s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
948 ret = compress_page(&compress, (void __force *)s, dst); 988 ret = compress_page(&compress, (void __force *)s, dst);
949 io_mapping_unmap_atomic(s); 989 io_mapping_unmap_atomic(s);
950
951 if (ret) 990 if (ret)
952 goto unwind; 991 break;
953 } 992 }
954 goto out;
955 993
956unwind: 994 if (ret || compress_flush(&compress, dst)) {
957 while (dst->page_count--) 995 while (dst->page_count--)
958 free_page((unsigned long)dst->pages[dst->page_count]); 996 free_page((unsigned long)dst->pages[dst->page_count]);
959 kfree(dst); 997 kfree(dst);
960 dst = NULL; 998 dst = NULL;
999 }
961 1000
962out:
963 compress_fini(&compress, dst); 1001 compress_fini(&compress, dst);
964 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); 1002 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
965 return dst; 1003 return dst;
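The coordinated i915 hunks above land on the canonical zlib pattern: feed pages with Z_NO_FLUSH (Z_SYNC_FLUSH pads the stream and costs compression), then finish with repeated Z_FINISH calls, treating Z_OK as "attach another output page" (what compress_next_page() does) and Z_STREAM_END as done. A self-contained userspace version of that drain loop against the ordinary zlib API:

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
	unsigned char src[] = "page after page of GPU error state ...";
	unsigned char out[256];
	z_stream zs;
	int ret;

	memset(&zs, 0, sizeof(zs));
	if (deflateInit(&zs, Z_DEFAULT_COMPRESSION) != Z_OK)
		return 1;

	zs.next_in = src;
	zs.avail_in = sizeof(src);
	zs.next_out = out;
	zs.avail_out = sizeof(out);

	/* streaming phase: no flushing between input chunks */
	if (deflate(&zs, Z_NO_FLUSH) != Z_OK)
		return 1;

	/* drain phase: Z_OK from Z_FINISH means "more output space,
	 * please"; the patch answers it with a fresh page, while this
	 * demo's single buffer is large enough that it never triggers */
	do {
		ret = deflate(&zs, Z_FINISH);
	} while (ret == Z_OK && zs.avail_out == 0);

	printf("%lu -> %lu bytes, %s\n", zs.total_in, zs.total_out,
	       ret == Z_STREAM_END ? "complete" : "truncated");
	deflateEnd(&zs);
	return ret == Z_STREAM_END ? 0 : 1;
}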
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index f893a4e8b783..8710fb18ed74 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -135,6 +135,7 @@ struct i915_gpu_state {
135 struct drm_i915_error_object { 135 struct drm_i915_error_object {
136 u64 gtt_offset; 136 u64 gtt_offset;
137 u64 gtt_size; 137 u64 gtt_size;
138 int num_pages;
138 int page_count; 139 int page_count;
139 int unused; 140 int unused;
140 u32 *pages[0]; 141 u32 *pages[0];
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 90628a47ae17..29877969310d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3091,36 +3091,27 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
3091 spin_unlock(&i915->irq_lock); 3091 spin_unlock(&i915->irq_lock);
3092} 3092}
3093 3093
3094static void 3094static u32
3095gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl, 3095gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
3096 u32 *iir)
3097{ 3096{
3098 void __iomem * const regs = dev_priv->regs; 3097 void __iomem * const regs = dev_priv->regs;
3098 u32 iir;
3099 3099
3100 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 3100 if (!(master_ctl & GEN11_GU_MISC_IRQ))
3101 return; 3101 return 0;
3102
3103 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
3104 if (likely(iir))
3105 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
3102 3106
3103 *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 3107 return iir;
3104 if (likely(*iir))
3105 raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
3106} 3108}
3107 3109
3108static void 3110static void
3109gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, 3111gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
3110 const u32 master_ctl, const u32 iir)
3111{ 3112{
3112 if (!(master_ctl & GEN11_GU_MISC_IRQ))
3113 return;
3114
3115 if (unlikely(!iir)) {
3116 DRM_ERROR("GU_MISC iir blank!\n");
3117 return;
3118 }
3119
3120 if (iir & GEN11_GU_MISC_GSE) 3113 if (iir & GEN11_GU_MISC_GSE)
3121 intel_opregion_asle_intr(dev_priv); 3114 intel_opregion_asle_intr(dev_priv);
3122 else
3123 DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
3124} 3115}
3125 3116
3126static irqreturn_t gen11_irq_handler(int irq, void *arg) 3117static irqreturn_t gen11_irq_handler(int irq, void *arg)
@@ -3157,12 +3148,12 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
3157 enable_rpm_wakeref_asserts(i915); 3148 enable_rpm_wakeref_asserts(i915);
3158 } 3149 }
3159 3150
3160 gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir); 3151 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
3161 3152
3162 /* Acknowledge and enable interrupts. */ 3153 /* Acknowledge and enable interrupts. */
3163 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl); 3154 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
3164 3155
3165 gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir); 3156 gen11_gu_misc_irq_handler(i915, gu_misc_iir);
3166 3157
3167 return IRQ_HANDLED; 3158 return IRQ_HANDLED;
3168} 3159}
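The i915_irq rework above is the usual ack-then-handle split: read-and-clear the IIR while interrupts are still masked and return the value, then run the potentially slower handler after the master interrupt is re-enabled. A toy userspace model of that ordering (the register bits and reg_read()/reg_write() stubs are hypothetical stand-ins for raw_reg_read()/raw_reg_write()):

#include <stdint.h>
#include <stdio.h>

#define MISC_IRQ	0x1	/* hypothetical master-control bit */
#define GSE_EVENT	0x2	/* hypothetical IIR bit */

static uint32_t hw_iir = GSE_EVENT;			/* one event pending */
static uint32_t reg_read(void)  { return hw_iir; }
static void reg_write(uint32_t v) { hw_iir &= ~v; }	/* write-1-to-clear */

static uint32_t misc_irq_ack(uint32_t master_ctl)
{
	uint32_t iir;

	if (!(master_ctl & MISC_IRQ))
		return 0;
	iir = reg_read();
	if (iir)
		reg_write(iir);		/* ack while still masked */
	return iir;
}

static void misc_irq_handler(uint32_t iir)
{
	if (iir & GSE_EVENT)
		puts("handle GSE");	/* the slow part runs unmasked */
}

int main(void)
{
	uint32_t iir = misc_irq_ack(MISC_IRQ);
	/* ... master interrupt re-enabled here ... */
	misc_irq_handler(iir);
	return 0;
}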
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6a4d1388ad2d..1df3ce134cd0 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -592,7 +592,6 @@ static const struct intel_device_info intel_cannonlake_info = {
592 GEN10_FEATURES, \ 592 GEN10_FEATURES, \
593 GEN(11), \ 593 GEN(11), \
594 .ddb_size = 2048, \ 594 .ddb_size = 2048, \
595 .has_csr = 0, \
596 .has_logical_ring_elsq = 1 595 .has_logical_ring_elsq = 1
597 596
598static const struct intel_device_info intel_icelake_11_info = { 597static const struct intel_device_info intel_icelake_11_info = {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5146ee029db4..bc49909aba8e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -976,7 +976,6 @@
976#define USB_DEVICE_ID_SIS817_TOUCH 0x0817 976#define USB_DEVICE_ID_SIS817_TOUCH 0x0817
977#define USB_DEVICE_ID_SIS_TS 0x1013 977#define USB_DEVICE_ID_SIS_TS 0x1013
978#define USB_DEVICE_ID_SIS1030_TOUCH 0x1030 978#define USB_DEVICE_ID_SIS1030_TOUCH 0x1030
979#define USB_DEVICE_ID_SIS10FB_TOUCH 0x10fb
980 979
981#define USB_VENDOR_ID_SKYCABLE 0x1223 980#define USB_VENDOR_ID_SKYCABLE 0x1223
982#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 981#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index f3076659361a..4e3592e7a3f7 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -47,7 +47,7 @@
47/* quirks to control the device */ 47/* quirks to control the device */
48#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) 48#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
49#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) 49#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
50#define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2) 50#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
51 51
52/* flags */ 52/* flags */
53#define I2C_HID_STARTED 0 53#define I2C_HID_STARTED 0
@@ -169,9 +169,8 @@ static const struct i2c_hid_quirks {
169 { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755, 169 { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
170 I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, 170 I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
171 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, 171 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
172 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, 172 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
173 { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH, 173 I2C_HID_QUIRK_NO_RUNTIME_PM },
174 I2C_HID_QUIRK_RESEND_REPORT_DESCR },
175 { 0, 0 } 174 { 0, 0 }
176}; 175};
177 176
@@ -1105,7 +1104,9 @@ static int i2c_hid_probe(struct i2c_client *client,
1105 goto err_mem_free; 1104 goto err_mem_free;
1106 } 1105 }
1107 1106
1108 pm_runtime_put(&client->dev); 1107 if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
1108 pm_runtime_put(&client->dev);
1109
1109 return 0; 1110 return 0;
1110 1111
1111err_mem_free: 1112err_mem_free:
@@ -1130,7 +1131,8 @@ static int i2c_hid_remove(struct i2c_client *client)
1130 struct i2c_hid *ihid = i2c_get_clientdata(client); 1131 struct i2c_hid *ihid = i2c_get_clientdata(client);
1131 struct hid_device *hid; 1132 struct hid_device *hid;
1132 1133
1133 pm_runtime_get_sync(&client->dev); 1134 if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
1135 pm_runtime_get_sync(&client->dev);
1134 pm_runtime_disable(&client->dev); 1136 pm_runtime_disable(&client->dev);
1135 pm_runtime_set_suspended(&client->dev); 1137 pm_runtime_set_suspended(&client->dev);
1136 pm_runtime_put_noidle(&client->dev); 1138 pm_runtime_put_noidle(&client->dev);
@@ -1236,22 +1238,13 @@ static int i2c_hid_resume(struct device *dev)
1236 1238
1237 /* Instead of resetting device, simply powers the device on. This 1239 /* Instead of resetting device, simply powers the device on. This
1238 * solves "incomplete reports" on Raydium devices 2386:3118 and 1240 * solves "incomplete reports" on Raydium devices 2386:3118 and
1239 * 2386:4B33 1241 * 2386:4B33 and fixes various SIS touchscreens no longer sending
1242 * data after a suspend/resume.
1240 */ 1243 */
1241 ret = i2c_hid_set_power(client, I2C_HID_PWR_ON); 1244 ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
1242 if (ret) 1245 if (ret)
1243 return ret; 1246 return ret;
1244 1247
1245 /* Some devices need to re-send report descr cmd
1246 * after resume, after this it will be back normal.
1247 * otherwise it issues too many incomplete reports.
1248 */
1249 if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
1250 ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
1251 if (ret)
1252 return ret;
1253 }
1254
1255 if (hid->driver && hid->driver->reset_resume) { 1248 if (hid->driver && hid->driver->reset_resume) {
1256 ret = hid->driver->reset_resume(hid); 1249 ret = hid->driver->reset_resume(hid);
1257 return ret; 1250 return ret;
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index da133716bed0..08a8327dfd22 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -29,6 +29,7 @@
29#define CNL_Ax_DEVICE_ID 0x9DFC 29#define CNL_Ax_DEVICE_ID 0x9DFC
30#define GLK_Ax_DEVICE_ID 0x31A2 30#define GLK_Ax_DEVICE_ID 0x31A2
31#define CNL_H_DEVICE_ID 0xA37C 31#define CNL_H_DEVICE_ID 0xA37C
32#define ICL_MOBILE_DEVICE_ID 0x34FC
32#define SPT_H_DEVICE_ID 0xA135 33#define SPT_H_DEVICE_ID 0xA135
33 34
34#define REVISION_ID_CHT_A0 0x6 35#define REVISION_ID_CHT_A0 0x6
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index a1125a5c7965..256b3016116c 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -38,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
38 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)}, 38 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
39 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)}, 39 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
40 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)}, 40 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
41 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
41 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)}, 42 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
42 {0, } 43 {0, }
43}; 44};
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index ced041899456..f4d08c8ac7f8 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -76,6 +76,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
76 __u32 version) 76 __u32 version)
77{ 77{
78 int ret = 0; 78 int ret = 0;
79 unsigned int cur_cpu;
79 struct vmbus_channel_initiate_contact *msg; 80 struct vmbus_channel_initiate_contact *msg;
80 unsigned long flags; 81 unsigned long flags;
81 82
@@ -118,9 +119,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
118 * the CPU attempting to connect may not be CPU 0. 119 * the CPU attempting to connect may not be CPU 0.
119 */ 120 */
120 if (version >= VERSION_WIN8_1) { 121 if (version >= VERSION_WIN8_1) {
121 msg->target_vcpu = 122 cur_cpu = get_cpu();
122 hv_cpu_number_to_vp_number(smp_processor_id()); 123 msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu);
123 vmbus_connection.connect_cpu = smp_processor_id(); 124 vmbus_connection.connect_cpu = cur_cpu;
125 put_cpu();
124 } else { 126 } else {
125 msg->target_vcpu = 0; 127 msg->target_vcpu = 0;
126 vmbus_connection.connect_cpu = 0; 128 vmbus_connection.connect_cpu = 0;
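The vmbus fix above closes a small preemption race: two back-to-back smp_processor_id() calls in preemptible context can name different CPUs if the task migrates in between (and a bare smp_processor_id() there is diagnosable under CONFIG_DEBUG_PREEMPT anyway). get_cpu()/put_cpu() pin the task across both uses. A kernel-style sketch of the shape, not a standalone program:

#include <linux/smp.h>

static void record_current_cpu(unsigned int *vcpu, unsigned int *connect_cpu)
{
	unsigned int cpu = get_cpu();	/* disables preemption */

	*vcpu = cpu;			/* both consumers now see */
	*connect_cpu = cpu;		/* the same, stable CPU */
	put_cpu();			/* re-enables preemption */
}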
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 94d94b4a9a0d..18cc324f3ca9 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -34,11 +34,11 @@ static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
34 34
35static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) 35static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
36{ 36{
37 u32 ic_clk = i2c_dw_clk_rate(dev);
38 const char *mode_str, *fp_str = ""; 37 const char *mode_str, *fp_str = "";
39 u32 comp_param1; 38 u32 comp_param1;
40 u32 sda_falling_time, scl_falling_time; 39 u32 sda_falling_time, scl_falling_time;
41 struct i2c_timings *t = &dev->timings; 40 struct i2c_timings *t = &dev->timings;
41 u32 ic_clk;
42 int ret; 42 int ret;
43 43
44 ret = i2c_dw_acquire_lock(dev); 44 ret = i2c_dw_acquire_lock(dev);
@@ -53,6 +53,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
53 53
54 /* Calculate SCL timing parameters for standard mode if not set */ 54 /* Calculate SCL timing parameters for standard mode if not set */
55 if (!dev->ss_hcnt || !dev->ss_lcnt) { 55 if (!dev->ss_hcnt || !dev->ss_lcnt) {
56 ic_clk = i2c_dw_clk_rate(dev);
56 dev->ss_hcnt = 57 dev->ss_hcnt =
57 i2c_dw_scl_hcnt(ic_clk, 58 i2c_dw_scl_hcnt(ic_clk,
58 4000, /* tHD;STA = tHIGH = 4.0 us */ 59 4000, /* tHD;STA = tHIGH = 4.0 us */
@@ -89,6 +90,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
89 * needed also in high speed mode. 90 * needed also in high speed mode.
90 */ 91 */
91 if (!dev->fs_hcnt || !dev->fs_lcnt) { 92 if (!dev->fs_hcnt || !dev->fs_lcnt) {
93 ic_clk = i2c_dw_clk_rate(dev);
92 dev->fs_hcnt = 94 dev->fs_hcnt =
93 i2c_dw_scl_hcnt(ic_clk, 95 i2c_dw_scl_hcnt(ic_clk,
94 600, /* tHD;STA = tHIGH = 0.6 us */ 96 600, /* tHD;STA = tHIGH = 0.6 us */
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 0cf1379f4e80..5c754bf659e2 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -164,7 +164,7 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr,
164 * run ~75 kHz instead which should do no harm. 164 * run ~75 kHz instead which should do no harm.
165 */ 165 */
166 dev_notice(&sch_adapter.dev, 166 dev_notice(&sch_adapter.dev,
167 "Clock divider unitialized. Setting defaults\n"); 167 "Clock divider uninitialized. Setting defaults\n");
168 outw(backbone_speed / (4 * 100), SMBHSTCLK); 168 outw(backbone_speed / (4 * 100), SMBHSTCLK);
169 } 169 }
170 170
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 36732eb688a4..9f2eb02481d3 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -367,20 +367,26 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
367 dma_addr_t rx_dma; 367 dma_addr_t rx_dma;
368 enum geni_se_xfer_mode mode; 368 enum geni_se_xfer_mode mode;
369 unsigned long time_left = XFER_TIMEOUT; 369 unsigned long time_left = XFER_TIMEOUT;
370 void *dma_buf;
370 371
371 gi2c->cur = msg; 372 gi2c->cur = msg;
372 mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO; 373 mode = GENI_SE_FIFO;
374 dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
375 if (dma_buf)
376 mode = GENI_SE_DMA;
377
373 geni_se_select_mode(&gi2c->se, mode); 378 geni_se_select_mode(&gi2c->se, mode);
374 writel_relaxed(msg->len, gi2c->se.base + SE_I2C_RX_TRANS_LEN); 379 writel_relaxed(msg->len, gi2c->se.base + SE_I2C_RX_TRANS_LEN);
375 geni_se_setup_m_cmd(&gi2c->se, I2C_READ, m_param); 380 geni_se_setup_m_cmd(&gi2c->se, I2C_READ, m_param);
376 if (mode == GENI_SE_DMA) { 381 if (mode == GENI_SE_DMA) {
377 int ret; 382 int ret;
378 383
379 ret = geni_se_rx_dma_prep(&gi2c->se, msg->buf, msg->len, 384 ret = geni_se_rx_dma_prep(&gi2c->se, dma_buf, msg->len,
380 &rx_dma); 385 &rx_dma);
381 if (ret) { 386 if (ret) {
382 mode = GENI_SE_FIFO; 387 mode = GENI_SE_FIFO;
383 geni_se_select_mode(&gi2c->se, mode); 388 geni_se_select_mode(&gi2c->se, mode);
389 i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
384 } 390 }
385 } 391 }
386 392
@@ -393,6 +399,7 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
393 if (gi2c->err) 399 if (gi2c->err)
394 geni_i2c_rx_fsm_rst(gi2c); 400 geni_i2c_rx_fsm_rst(gi2c);
395 geni_se_rx_dma_unprep(&gi2c->se, rx_dma, msg->len); 401 geni_se_rx_dma_unprep(&gi2c->se, rx_dma, msg->len);
402 i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
396 } 403 }
397 return gi2c->err; 404 return gi2c->err;
398} 405}
@@ -403,20 +410,26 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
403 dma_addr_t tx_dma; 410 dma_addr_t tx_dma;
404 enum geni_se_xfer_mode mode; 411 enum geni_se_xfer_mode mode;
405 unsigned long time_left; 412 unsigned long time_left;
413 void *dma_buf;
406 414
407 gi2c->cur = msg; 415 gi2c->cur = msg;
408 mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO; 416 mode = GENI_SE_FIFO;
417 dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
418 if (dma_buf)
419 mode = GENI_SE_DMA;
420
409 geni_se_select_mode(&gi2c->se, mode); 421 geni_se_select_mode(&gi2c->se, mode);
410 writel_relaxed(msg->len, gi2c->se.base + SE_I2C_TX_TRANS_LEN); 422 writel_relaxed(msg->len, gi2c->se.base + SE_I2C_TX_TRANS_LEN);
411 geni_se_setup_m_cmd(&gi2c->se, I2C_WRITE, m_param); 423 geni_se_setup_m_cmd(&gi2c->se, I2C_WRITE, m_param);
412 if (mode == GENI_SE_DMA) { 424 if (mode == GENI_SE_DMA) {
413 int ret; 425 int ret;
414 426
415 ret = geni_se_tx_dma_prep(&gi2c->se, msg->buf, msg->len, 427 ret = geni_se_tx_dma_prep(&gi2c->se, dma_buf, msg->len,
416 &tx_dma); 428 &tx_dma);
417 if (ret) { 429 if (ret) {
418 mode = GENI_SE_FIFO; 430 mode = GENI_SE_FIFO;
419 geni_se_select_mode(&gi2c->se, mode); 431 geni_se_select_mode(&gi2c->se, mode);
432 i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
420 } 433 }
421 } 434 }
422 435
@@ -432,6 +445,7 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
432 if (gi2c->err) 445 if (gi2c->err)
433 geni_i2c_tx_fsm_rst(gi2c); 446 geni_i2c_tx_fsm_rst(gi2c);
434 geni_se_tx_dma_unprep(&gi2c->se, tx_dma, msg->len); 447 geni_se_tx_dma_unprep(&gi2c->se, tx_dma, msg->len);
448 i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
435 } 449 }
436 return gi2c->err; 450 return gi2c->err;
437} 451}
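Both geni hunks switch from DMA-mapping msg->buf directly (which may live on a caller's stack) to the i2c core's bounce-buffer helpers. The pattern, as a kernel-style sketch around the two real helpers (msg and err stand for the surrounding transfer state, which is elided):

#include <linux/i2c.h>

/* threshold 32: shorter transfers return NULL and stay on PIO */
u8 *buf = i2c_get_dma_safe_msg_buf(msg, 32);

if (buf) {
	/* ... set up and run the DMA transfer using "buf" ... */
	/* xferred=true copies a bounce buffer back into msg->buf
	 * for reads and then frees it; pass false on failure */
	i2c_put_dma_safe_msg_buf(buf, msg, !err);
} else {
	/* ... FIFO/PIO path, touching msg->buf directly ... */
}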
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index a01389b85f13..7e9a2bbf5ddc 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -152,6 +152,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
152 mt_params[3].type = ACPI_TYPE_INTEGER; 152 mt_params[3].type = ACPI_TYPE_INTEGER;
153 mt_params[3].integer.value = len; 153 mt_params[3].integer.value = len;
154 mt_params[4].type = ACPI_TYPE_BUFFER; 154 mt_params[4].type = ACPI_TYPE_BUFFER;
155 mt_params[4].buffer.length = len;
155 mt_params[4].buffer.pointer = data->block + 1; 156 mt_params[4].buffer.pointer = data->block + 1;
156 } 157 }
157 break; 158 break;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 73e47d93e7a0..bee0dfb7b93b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3069,7 +3069,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
3069 return 0; 3069 return 0;
3070 3070
3071 offset_mask = pte_pgsize - 1; 3071 offset_mask = pte_pgsize - 1;
3072 __pte = *pte & PM_ADDR_MASK; 3072 __pte = __sme_clr(*pte & PM_ADDR_MASK);
3073 3073
3074 return (__pte & ~offset_mask) | (iova & offset_mask); 3074 return (__pte & ~offset_mask) | (iova & offset_mask);
3075} 3075}
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 69dddeab124c..5936de71883f 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1455,8 +1455,8 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
1455 if (hints_valid) { 1455 if (hints_valid) {
1456 r = dm_array_cursor_next(&cmd->hint_cursor); 1456 r = dm_array_cursor_next(&cmd->hint_cursor);
1457 if (r) { 1457 if (r) {
1458 DMERR("dm_array_cursor_next for hint failed"); 1458 dm_array_cursor_end(&cmd->hint_cursor);
1459 goto out; 1459 hints_valid = false;
1460 } 1460 }
1461 } 1461 }
1462 1462
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index a53413371725..e13d991e9fb5 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3009,8 +3009,13 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
3009 3009
3010static bool can_resize(struct cache *cache, dm_cblock_t new_size) 3010static bool can_resize(struct cache *cache, dm_cblock_t new_size)
3011{ 3011{
3012 if (from_cblock(new_size) > from_cblock(cache->cache_size)) 3012 if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
3013 return true; 3013 if (cache->sized) {
3014 DMERR("%s: unable to extend cache due to missing cache table reload",
3015 cache_device_name(cache));
3016 return false;
3017 }
3018 }
3014 3019
3015 /* 3020 /*
3016 * We can't drop a dirty block when shrinking the cache. 3021 * We can't drop a dirty block when shrinking the cache.
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d94ba6f72ff5..419362c2d8ac 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -806,19 +806,19 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
806} 806}
807 807
808static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, 808static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
809 const char *attached_handler_name, char **error) 809 const char **attached_handler_name, char **error)
810{ 810{
811 struct request_queue *q = bdev_get_queue(bdev); 811 struct request_queue *q = bdev_get_queue(bdev);
812 int r; 812 int r;
813 813
814 if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) { 814 if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
815retain: 815retain:
816 if (attached_handler_name) { 816 if (*attached_handler_name) {
817 /* 817 /*
818 * Clear any hw_handler_params associated with a 818 * Clear any hw_handler_params associated with a
819 * handler that isn't already attached. 819 * handler that isn't already attached.
820 */ 820 */
821 if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) { 821 if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
822 kfree(m->hw_handler_params); 822 kfree(m->hw_handler_params);
823 m->hw_handler_params = NULL; 823 m->hw_handler_params = NULL;
824 } 824 }
@@ -830,7 +830,8 @@ retain:
830 * handler instead of the original table passed in. 830 * handler instead of the original table passed in.
831 */ 831 */
832 kfree(m->hw_handler_name); 832 kfree(m->hw_handler_name);
833 m->hw_handler_name = attached_handler_name; 833 m->hw_handler_name = *attached_handler_name;
834 *attached_handler_name = NULL;
834 } 835 }
835 } 836 }
836 837
@@ -867,7 +868,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
867 struct pgpath *p; 868 struct pgpath *p;
868 struct multipath *m = ti->private; 869 struct multipath *m = ti->private;
869 struct request_queue *q; 870 struct request_queue *q;
870 const char *attached_handler_name; 871 const char *attached_handler_name = NULL;
871 872
872 /* we need at least a path arg */ 873 /* we need at least a path arg */
873 if (as->argc < 1) { 874 if (as->argc < 1) {
@@ -890,7 +891,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
890 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL); 891 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
891 if (attached_handler_name || m->hw_handler_name) { 892 if (attached_handler_name || m->hw_handler_name) {
892 INIT_DELAYED_WORK(&p->activate_path, activate_path_work); 893 INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
893 r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error); 894 r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
894 if (r) { 895 if (r) {
895 dm_put_device(ti, p->path.dev); 896 dm_put_device(ti, p->path.dev);
896 goto bad; 897 goto bad;
@@ -905,6 +906,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
905 906
906 return p; 907 return p;
907 bad: 908 bad:
909 kfree(attached_handler_name);
908 free_pgpath(p); 910 free_pgpath(p);
909 return ERR_PTR(r); 911 return ERR_PTR(r);
910} 912}
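The dm-mpath change turns an implicit ownership hand-off into an explicit one: setup_scsi_dh() now takes the attached handler name through a char ** and NULLs the caller's pointer exactly when it keeps the string, so the single kfree() added at the "bad:" label can neither double-free nor leak. The same idiom in a runnable userspace sketch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct multipath { char *hw_handler_name; };

static int setup_handler(struct multipath *m, char **name)
{
	if (*name) {
		free(m->hw_handler_name);
		m->hw_handler_name = *name;	/* take ownership ... */
		*name = NULL;			/* ... and tell the caller */
	}
	return 0;
}

int main(void)
{
	struct multipath m = { .hw_handler_name = NULL };
	char *attached = strdup("emc");

	setup_handler(&m, &attached);
	free(attached);		/* always safe: NULL once ownership moved */
	free(m.hw_handler_name);
	return 0;
}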
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 5ba067fa0c72..c44925e4e481 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3353,7 +3353,7 @@ static const char *sync_str(enum sync_state state)
3353}; 3353};
3354 3354
3355/* Return enum sync_state for @mddev derived from @recovery flags */ 3355/* Return enum sync_state for @mddev derived from @recovery flags */
3356static const enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery) 3356static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
3357{ 3357{
3358 if (test_bit(MD_RECOVERY_FROZEN, &recovery)) 3358 if (test_bit(MD_RECOVERY_FROZEN, &recovery))
3359 return st_frozen; 3359 return st_frozen;
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 74f6770c70b1..20b0776e39ef 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -832,10 +832,8 @@ static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
832 if (r) { 832 if (r) {
833 DMERR("could not get size of metadata device"); 833 DMERR("could not get size of metadata device");
834 pmd->metadata_reserve = max_blocks; 834 pmd->metadata_reserve = max_blocks;
835 } else { 835 } else
836 sector_div(total, 10); 836 pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
837 pmd->metadata_reserve = min(max_blocks, total);
838 }
839} 837}
840 838
841struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, 839struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
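The dm-thin-metadata change is partly cosmetic and partly portability: an open-coded 64-bit division would pull in libgcc's __udivdi3 on 32-bit builds, and sector_div() both divides its first argument in place and returns the remainder, which is easy to misread. div_u64() divides without mutating its operand and builds everywhere. In kernel-style shorthand (illustrative values):

#include <linux/math64.h>

u64 total = 1440000;			/* device size in blocks, say */
u64 reserve = div_u64(total, 10);	/* quotient; "total" untouched */
/* vs. sector_div(total, 10): total becomes the quotient and the
 * *return value* is the remainder */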
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 127fe6eb91d9..a3ef1f50a4b3 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -115,14 +115,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
115 if (sev == NULL) 115 if (sev == NULL)
116 return; 116 return;
117 117
118 /*
119 * If the event has been added to the fh->subscribed list, but its
120 * add op has not completed yet elems will be 0, treat this as
121 * not being subscribed.
122 */
123 if (!sev->elems)
124 return;
125
126 /* Increase event sequence number on fh. */ 118 /* Increase event sequence number on fh. */
127 fh->sequence++; 119 fh->sequence++;
128 120
@@ -208,6 +200,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
208 struct v4l2_subscribed_event *sev, *found_ev; 200 struct v4l2_subscribed_event *sev, *found_ev;
209 unsigned long flags; 201 unsigned long flags;
210 unsigned i; 202 unsigned i;
203 int ret = 0;
211 204
212 if (sub->type == V4L2_EVENT_ALL) 205 if (sub->type == V4L2_EVENT_ALL)
213 return -EINVAL; 206 return -EINVAL;
@@ -225,31 +218,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
225 sev->flags = sub->flags; 218 sev->flags = sub->flags;
226 sev->fh = fh; 219 sev->fh = fh;
227 sev->ops = ops; 220 sev->ops = ops;
221 sev->elems = elems;
222
223 mutex_lock(&fh->subscribe_lock);
228 224
229 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 225 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
230 found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); 226 found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
231 if (!found_ev)
232 list_add(&sev->list, &fh->subscribed);
233 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 227 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
234 228
235 if (found_ev) { 229 if (found_ev) {
230 /* Already listening */
236 kvfree(sev); 231 kvfree(sev);
237 return 0; /* Already listening */ 232 goto out_unlock;
238 } 233 }
239 234
240 if (sev->ops && sev->ops->add) { 235 if (sev->ops && sev->ops->add) {
241 int ret = sev->ops->add(sev, elems); 236 ret = sev->ops->add(sev, elems);
242 if (ret) { 237 if (ret) {
243 sev->ops = NULL; 238 kvfree(sev);
244 v4l2_event_unsubscribe(fh, sub); 239 goto out_unlock;
245 return ret;
246 } 240 }
247 } 241 }
248 242
249 /* Mark as ready for use */ 243 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
250 sev->elems = elems; 244 list_add(&sev->list, &fh->subscribed);
245 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
251 246
252 return 0; 247out_unlock:
248 mutex_unlock(&fh->subscribe_lock);
249
250 return ret;
253} 251}
254EXPORT_SYMBOL_GPL(v4l2_event_subscribe); 252EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
255 253
@@ -288,6 +286,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
288 return 0; 286 return 0;
289 } 287 }
290 288
289 mutex_lock(&fh->subscribe_lock);
290
291 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 291 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
292 292
293 sev = v4l2_event_subscribed(fh, sub->type, sub->id); 293 sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -305,6 +305,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
305 if (sev && sev->ops && sev->ops->del) 305 if (sev && sev->ops && sev->ops->del)
306 sev->ops->del(sev); 306 sev->ops->del(sev);
307 307
308 mutex_unlock(&fh->subscribe_lock);
309
308 kvfree(sev); 310 kvfree(sev);
309 311
310 return 0; 312 return 0;
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index 3895999bf880..c91a7bd3ecfc 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -45,6 +45,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
45 INIT_LIST_HEAD(&fh->available); 45 INIT_LIST_HEAD(&fh->available);
46 INIT_LIST_HEAD(&fh->subscribed); 46 INIT_LIST_HEAD(&fh->subscribed);
47 fh->sequence = -1; 47 fh->sequence = -1;
48 mutex_init(&fh->subscribe_lock);
48} 49}
49EXPORT_SYMBOL_GPL(v4l2_fh_init); 50EXPORT_SYMBOL_GPL(v4l2_fh_init);
50 51
@@ -90,6 +91,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
90 return; 91 return;
91 v4l_disable_media_source(fh->vdev); 92 v4l_disable_media_source(fh->vdev);
92 v4l2_event_unsubscribe_all(fh); 93 v4l2_event_unsubscribe_all(fh);
94 mutex_destroy(&fh->subscribe_lock);
93 fh->vdev = NULL; 95 fh->vdev = NULL;
94} 96}
95EXPORT_SYMBOL_GPL(v4l2_fh_exit); 97EXPORT_SYMBOL_GPL(v4l2_fh_exit);
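Taken together, the two v4l2 diffs replace a "published but not ready" flag (elems == 0) with a real ordering rule: finish initializing the subscription, run the possibly-sleeping ->add() callback, and only then link the entry where the event-delivery path can see it, with a per-filehandle mutex serializing subscribe against unsubscribe. A userspace sketch of that publish-last discipline using pthreads (the spinlock stands in for fh_lock; unsubscribe is elided):

#include <pthread.h>
#include <stdlib.h>

struct sub {
	struct sub *next;
	int elems;			/* fully set before publication */
};

static struct sub *subscribed;		/* the "list" readers walk */
static pthread_mutex_t subscribe_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_spinlock_t list_lock;

static int subscribe(int elems)
{
	struct sub *sev = calloc(1, sizeof(*sev));

	if (!sev)
		return -1;
	sev->elems = elems;

	pthread_mutex_lock(&subscribe_lock);
	/* ... the (possibly sleeping) ->add() hook runs here ... */
	pthread_spin_lock(&list_lock);
	sev->next = subscribed;		/* publish only when complete */
	subscribed = sev;
	pthread_spin_unlock(&list_lock);
	pthread_mutex_unlock(&subscribe_lock);
	return 0;
}

int main(void)
{
	pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
	return subscribe(8);
}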
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index abf9e884386c..f57f5de54206 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -235,7 +235,7 @@ int mmc_of_parse(struct mmc_host *host)
235 host->caps |= MMC_CAP_NEEDS_POLL; 235 host->caps |= MMC_CAP_NEEDS_POLL;
236 236
237 ret = mmc_gpiod_request_cd(host, "cd", 0, true, 237 ret = mmc_gpiod_request_cd(host, "cd", 0, true,
238 cd_debounce_delay_ms, 238 cd_debounce_delay_ms * 1000,
239 &cd_gpio_invert); 239 &cd_gpio_invert);
240 if (!ret) 240 if (!ret)
241 dev_info(host->parent, "Got CD GPIO\n"); 241 dev_info(host->parent, "Got CD GPIO\n");
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 2a833686784b..86803a3a04dc 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -271,7 +271,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
271 if (debounce) { 271 if (debounce) {
272 ret = gpiod_set_debounce(desc, debounce); 272 ret = gpiod_set_debounce(desc, debounce);
273 if (ret < 0) 273 if (ret < 0)
274 ctx->cd_debounce_delay_ms = debounce; 274 ctx->cd_debounce_delay_ms = debounce / 1000;
275 } 275 }
276 276
277 if (gpio_invert) 277 if (gpio_invert)
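The two mmc hunks above are one unit-conversion fix: the mmc core tracks the card-detect debounce in milliseconds, while gpiod_set_debounce() takes microseconds, so the value must be scaled by 1000 on the way in and back again on the fallback path. Trivial, but worth a worked line:

#include <stdio.h>

int main(void)
{
	unsigned int cd_debounce_delay_ms = 200;		/* mmc core unit */
	unsigned int debounce = cd_debounce_delay_ms * 1000;	/* gpiod unit: us */

	printf("gpiod_set_debounce(desc, %u /* us */)\n", debounce);
	printf("software fallback keeps %u ms\n", debounce / 1000);
	return 0;
}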
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 890f192dedbd..5389c4821882 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -498,7 +498,8 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
498 498
499static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev) 499static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
500{ 500{
501 if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible && 501 if ((of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible ||
502 of_device_get_match_data(&pdev->dev) == &of_rcar_r8a7795_compatible) &&
502 !soc_device_match(gen3_soc_whitelist)) 503 !soc_device_match(gen3_soc_whitelist))
503 return -ENODEV; 504 return -ENODEV;
504 505
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0d87e11e7f1d..ee28ec9e0aba 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev,
210static void bond_slave_arr_handler(struct work_struct *work); 210static void bond_slave_arr_handler(struct work_struct *work);
211static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, 211static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
212 int mod); 212 int mod);
213static void bond_netdev_notify_work(struct work_struct *work);
213 214
214/*---------------------------- General routines -----------------------------*/ 215/*---------------------------- General routines -----------------------------*/
215 216
@@ -1170,9 +1171,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1170 } 1171 }
1171 } 1172 }
1172 1173
1173 /* don't change skb->dev for link-local packets */ 1174 /* Link-local multicast packets should be passed to the
1174 if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) 1175 * stack on the link they arrive as well as pass them to the
1176 * bond-master device. These packets are mostly usable when
1177 * stack receives it with the link on which they arrive
1178 * (e.g. LLDP) they also must be available on master. Some of
1179 * the use cases include (but are not limited to): LLDP agents
1180 * that must be able to operate both on enslaved interfaces as
1181 * well as on bonds themselves; linux bridges that must be able
1182 * to process/pass BPDUs from attached bonds when any kind of
1183 * STP version is enabled on the network.
1184 */
1185 if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
1186 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1187
1188 if (nskb) {
1189 nskb->dev = bond->dev;
1190 nskb->queue_mapping = 0;
1191 netif_rx(nskb);
1192 }
1175 return RX_HANDLER_PASS; 1193 return RX_HANDLER_PASS;
1194 }
1176 if (bond_should_deliver_exact_match(skb, slave, bond)) 1195 if (bond_should_deliver_exact_match(skb, slave, bond))
1177 return RX_HANDLER_EXACT; 1196 return RX_HANDLER_EXACT;
1178 1197
@@ -1269,6 +1288,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
1269 return NULL; 1288 return NULL;
1270 } 1289 }
1271 } 1290 }
1291 INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
1292
1272 return slave; 1293 return slave;
1273} 1294}
1274 1295
@@ -1276,6 +1297,7 @@ static void bond_free_slave(struct slave *slave)
1276{ 1297{
1277 struct bonding *bond = bond_get_bond_by_slave(slave); 1298 struct bonding *bond = bond_get_bond_by_slave(slave);
1278 1299
1300 cancel_delayed_work_sync(&slave->notify_work);
1279 if (BOND_MODE(bond) == BOND_MODE_8023AD) 1301 if (BOND_MODE(bond) == BOND_MODE_8023AD)
1280 kfree(SLAVE_AD_INFO(slave)); 1302 kfree(SLAVE_AD_INFO(slave));
1281 1303
@@ -1297,39 +1319,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
1297 info->link_failure_count = slave->link_failure_count; 1319 info->link_failure_count = slave->link_failure_count;
1298} 1320}
1299 1321
1300static void bond_netdev_notify(struct net_device *dev,
1301 struct netdev_bonding_info *info)
1302{
1303 rtnl_lock();
1304 netdev_bonding_info_change(dev, info);
1305 rtnl_unlock();
1306}
1307
1308static void bond_netdev_notify_work(struct work_struct *_work) 1322static void bond_netdev_notify_work(struct work_struct *_work)
1309{ 1323{
1310 struct netdev_notify_work *w = 1324 struct slave *slave = container_of(_work, struct slave,
1311 container_of(_work, struct netdev_notify_work, work.work); 1325 notify_work.work);
1326
1327 if (rtnl_trylock()) {
1328 struct netdev_bonding_info binfo;
1312 1329
1313 bond_netdev_notify(w->dev, &w->bonding_info); 1330 bond_fill_ifslave(slave, &binfo.slave);
1314 dev_put(w->dev); 1331 bond_fill_ifbond(slave->bond, &binfo.master);
1315 kfree(w); 1332 netdev_bonding_info_change(slave->dev, &binfo);
1333 rtnl_unlock();
1334 } else {
1335 queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
1336 }
1316} 1337}
1317 1338
1318void bond_queue_slave_event(struct slave *slave) 1339void bond_queue_slave_event(struct slave *slave)
1319{ 1340{
1320 struct bonding *bond = slave->bond; 1341 queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
1321 struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
1322
1323 if (!nnw)
1324 return;
1325
1326 dev_hold(slave->dev);
1327 nnw->dev = slave->dev;
1328 bond_fill_ifslave(slave, &nnw->bonding_info.slave);
1329 bond_fill_ifbond(bond, &nnw->bonding_info.master);
1330 INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
1331
1332 queue_delayed_work(slave->bond->wq, &nnw->work, 0);
1333} 1342}
1334 1343
1335void bond_lower_state_changed(struct slave *slave) 1344void bond_lower_state_changed(struct slave *slave)
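Besides cloning link-local frames so LLDP/BPDU traffic is seen on both the slave and the bond itself, the bonding rework above swaps a per-event, refcounted allocation for a work item embedded in the slave and torn down synchronously; rtnl_trylock() plus a one-jiffy requeue keeps the workqueue from blocking on the RTNL. The lifetime half of that, as a kernel-style sketch (not standalone; the kfree of the containing object is shown for shape only):

#include <linux/workqueue.h>

struct slave {
	struct delayed_work notify_work;	/* embedded, not allocated */
	/* ... */
};

static void slave_init(struct slave *slave)
{
	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
}

static void slave_free(struct slave *slave)
{
	/* guarantees the handler is not running and never will be,
	 * so no dev_hold()/dev_put() dance is needed */
	cancel_delayed_work_sync(&slave->notify_work);
	kfree(slave);
}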
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index d93c790bfbe8..ad534b90ef21 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1107,7 +1107,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
1107 b53_get_vlan_entry(dev, vid, vl); 1107 b53_get_vlan_entry(dev, vid, vl);
1108 1108
1109 vl->members |= BIT(port); 1109 vl->members |= BIT(port);
1110 if (untagged) 1110 if (untagged && !dsa_is_cpu_port(ds, port))
1111 vl->untag |= BIT(port); 1111 vl->untag |= BIT(port);
1112 else 1112 else
1113 vl->untag &= ~BIT(port); 1113 vl->untag &= ~BIT(port);
@@ -1149,7 +1149,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
1149 pvid = 0; 1149 pvid = 0;
1150 } 1150 }
1151 1151
1152 if (untagged) 1152 if (untagged && !dsa_is_cpu_port(ds, port))
1153 vl->untag &= ~(BIT(port)); 1153 vl->untag &= ~(BIT(port));
1154 1154
1155 b53_set_vlan_entry(dev, vid, vl); 1155 b53_set_vlan_entry(dev, vid, vl);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 29b5774dd32d..25621a218f20 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2185,25 +2185,6 @@ error_drop_packet:
2185 return NETDEV_TX_OK; 2185 return NETDEV_TX_OK;
2186} 2186}
2187 2187
2188#ifdef CONFIG_NET_POLL_CONTROLLER
2189static void ena_netpoll(struct net_device *netdev)
2190{
2191 struct ena_adapter *adapter = netdev_priv(netdev);
2192 int i;
2193
2194 /* Dont schedule NAPI if the driver is in the middle of reset
2195 * or netdev is down.
2196 */
2197
2198 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
2199 test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2200 return;
2201
2202 for (i = 0; i < adapter->num_queues; i++)
2203 napi_schedule(&adapter->ena_napi[i].napi);
2204}
2205#endif /* CONFIG_NET_POLL_CONTROLLER */
2206
2207static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, 2188static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
2208 struct net_device *sb_dev, 2189 struct net_device *sb_dev,
2209 select_queue_fallback_t fallback) 2190 select_queue_fallback_t fallback)
@@ -2369,9 +2350,6 @@ static const struct net_device_ops ena_netdev_ops = {
2369 .ndo_change_mtu = ena_change_mtu, 2350 .ndo_change_mtu = ena_change_mtu,
2370 .ndo_set_mac_address = NULL, 2351 .ndo_set_mac_address = NULL,
2371 .ndo_validate_addr = eth_validate_addr, 2352 .ndo_validate_addr = eth_validate_addr,
2372#ifdef CONFIG_NET_POLL_CONTROLLER
2373 .ndo_poll_controller = ena_netpoll,
2374#endif /* CONFIG_NET_POLL_CONTROLLER */
2375}; 2353};
2376 2354
2377static int ena_device_validate_params(struct ena_adapter *adapter, 2355static int ena_device_validate_params(struct ena_adapter *adapter,
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 116997a8b593..00332a1ea84b 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
1031 int i, ret; 1031 int i, ret;
1032 unsigned long esar_base; 1032 unsigned long esar_base;
1033 unsigned char *esar; 1033 unsigned char *esar;
1034 const char *desc;
1034 1035
1035 if (dec_lance_debug && version_printed++ == 0) 1036 if (dec_lance_debug && version_printed++ == 0)
1036 printk(version); 1037 printk(version);
@@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
1216 */ 1217 */
1217 switch (type) { 1218 switch (type) {
1218 case ASIC_LANCE: 1219 case ASIC_LANCE:
1219 printk("%s: IOASIC onboard LANCE", name); 1220 desc = "IOASIC onboard LANCE";
1220 break; 1221 break;
1221 case PMAD_LANCE: 1222 case PMAD_LANCE:
1222 printk("%s: PMAD-AA", name); 1223 desc = "PMAD-AA";
1223 break; 1224 break;
1224 case PMAX_LANCE: 1225 case PMAX_LANCE:
1225 printk("%s: PMAX onboard LANCE", name); 1226 desc = "PMAX onboard LANCE";
1226 break; 1227 break;
1227 } 1228 }
1228 for (i = 0; i < 6; i++) 1229 for (i = 0; i < 6; i++)
1229 dev->dev_addr[i] = esar[i * 4]; 1230 dev->dev_addr[i] = esar[i * 4];
1230 1231
1231 printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq); 1232 printk("%s: %s, addr = %pM, irq = %d\n",
1233 name, desc, dev->dev_addr, dev->irq);
1232 1234
1233 dev->netdev_ops = &lance_netdev_ops; 1235 dev->netdev_ops = &lance_netdev_ops;
1234 dev->watchdog_timeo = 5*HZ; 1236 dev->watchdog_timeo = 5*HZ;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 147045757b10..c57238fce863 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1069,9 +1069,6 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
1069{ 1069{
1070 u32 reg; 1070 u32 reg;
1071 1071
1072 /* Stop monitoring MPD interrupt */
1073 intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
1074
1075 /* Disable RXCHK, active filters and Broadcom tag matching */ 1072 /* Disable RXCHK, active filters and Broadcom tag matching */
1076 reg = rxchk_readl(priv, RXCHK_CONTROL); 1073 reg = rxchk_readl(priv, RXCHK_CONTROL);
1077 reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK << 1074 reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
@@ -1081,6 +1078,17 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
1081 /* Clear the MagicPacket detection logic */ 1078 /* Clear the MagicPacket detection logic */
1082 mpd_enable_set(priv, false); 1079 mpd_enable_set(priv, false);
1083 1080
1081 reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
1082 if (reg & INTRL2_0_MPD)
1083 netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
1084
1085 if (reg & INTRL2_0_BRCM_MATCH_TAG) {
1086 reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
1087 RXCHK_BRCM_TAG_MATCH_MASK;
1088 netdev_info(priv->netdev,
1089 "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
1090 }
1091
1084 netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n"); 1092 netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
1085} 1093}
1086 1094
@@ -1105,7 +1113,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
1105 struct bcm_sysport_priv *priv = netdev_priv(dev); 1113 struct bcm_sysport_priv *priv = netdev_priv(dev);
1106 struct bcm_sysport_tx_ring *txr; 1114 struct bcm_sysport_tx_ring *txr;
1107 unsigned int ring, ring_bit; 1115 unsigned int ring, ring_bit;
1108 u32 reg;
1109 1116
1110 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & 1117 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
1111 ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); 1118 ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1131,16 +1138,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
1131 if (priv->irq0_stat & INTRL2_0_TX_RING_FULL) 1138 if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
1132 bcm_sysport_tx_reclaim_all(priv); 1139 bcm_sysport_tx_reclaim_all(priv);
1133 1140
1134 if (priv->irq0_stat & INTRL2_0_MPD)
1135 netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
1136
1137 if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
1138 reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
1139 RXCHK_BRCM_TAG_MATCH_MASK;
1140 netdev_info(priv->netdev,
1141 "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
1142 }
1143
1144 if (!priv->is_lite) 1141 if (!priv->is_lite)
1145 goto out; 1142 goto out;
1146 1143
@@ -2641,9 +2638,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
2641 /* UniMAC receive needs to be turned on */ 2638 /* UniMAC receive needs to be turned on */
2642 umac_enable_set(priv, CMD_RX_EN, 1); 2639 umac_enable_set(priv, CMD_RX_EN, 1);
2643 2640
2644 /* Enable the interrupt wake-up source */
2645 intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
2646
2647 netif_dbg(priv, wol, ndev, "entered WOL mode\n"); 2641 netif_dbg(priv, wol, ndev, "entered WOL mode\n");
2648 2642
2649 return 0; 2643 return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 61957b0bbd8c..e2d92548226a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1884,8 +1884,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1884 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { 1884 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1885 tx_pkts++; 1885 tx_pkts++;
1886 /* return full budget so NAPI will complete. */ 1886 /* return full budget so NAPI will complete. */
1887 if (unlikely(tx_pkts > bp->tx_wake_thresh)) 1887 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
1888 rx_pkts = budget; 1888 rx_pkts = budget;
1889 raw_cons = NEXT_RAW_CMP(raw_cons);
1890 break;
1891 }
1889 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 1892 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1890 if (likely(budget)) 1893 if (likely(budget))
1891 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); 1894 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
@@ -1913,7 +1916,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1913 } 1916 }
1914 raw_cons = NEXT_RAW_CMP(raw_cons); 1917 raw_cons = NEXT_RAW_CMP(raw_cons);
1915 1918
1916 if (rx_pkts == budget) 1919 if (rx_pkts && rx_pkts == budget)
1917 break; 1920 break;
1918 } 1921 }
1919 1922
@@ -2027,8 +2030,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
2027 while (1) { 2030 while (1) {
2028 work_done += bnxt_poll_work(bp, bnapi, budget - work_done); 2031 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
2029 2032
2030 if (work_done >= budget) 2033 if (work_done >= budget) {
2034 if (!budget)
2035 BNXT_CP_DB_REARM(cpr->cp_doorbell,
2036 cpr->cp_raw_cons);
2031 break; 2037 break;
2038 }
2032 2039
2033 if (!bnxt_has_work(bp, cpr)) { 2040 if (!bnxt_has_work(bp, cpr)) {
2034 if (napi_complete_done(napi, work_done)) 2041 if (napi_complete_done(napi, work_done))
@@ -3010,10 +3017,11 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
3010{ 3017{
3011 struct pci_dev *pdev = bp->pdev; 3018 struct pci_dev *pdev = bp->pdev;
3012 3019
3013 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, 3020 if (bp->hwrm_cmd_resp_addr) {
3014 bp->hwrm_cmd_resp_dma_addr); 3021 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3015 3022 bp->hwrm_cmd_resp_dma_addr);
3016 bp->hwrm_cmd_resp_addr = NULL; 3023 bp->hwrm_cmd_resp_addr = NULL;
3024 }
3017} 3025}
3018 3026
3019static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 3027static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -4643,7 +4651,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
4643 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 4651 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
4644 enables |= ring_grps ? 4652 enables |= ring_grps ?
4645 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 4653 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
4646 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 4654 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
4647 4655
4648 req->num_rx_rings = cpu_to_le16(rx_rings); 4656 req->num_rx_rings = cpu_to_le16(rx_rings);
4649 req->num_hw_ring_grps = cpu_to_le16(ring_grps); 4657 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -8614,7 +8622,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
8614 *max_tx = hw_resc->max_tx_rings; 8622 *max_tx = hw_resc->max_tx_rings;
8615 *max_rx = hw_resc->max_rx_rings; 8623 *max_rx = hw_resc->max_rx_rings;
8616 *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp), 8624 *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
8617 hw_resc->max_irqs); 8625 hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
8618 *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); 8626 *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
8619 max_ring_grps = hw_resc->max_hw_ring_grps; 8627 max_ring_grps = hw_resc->max_hw_ring_grps;
8620 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 8628 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -9050,6 +9058,7 @@ init_err_cleanup_tc:
9050 bnxt_clear_int_mode(bp); 9058 bnxt_clear_int_mode(bp);
9051 9059
9052init_err_pci_clean: 9060init_err_pci_clean:
9061 bnxt_free_hwrm_resources(bp);
9053 bnxt_cleanup_pci(bp); 9062 bnxt_cleanup_pci(bp);
9054 9063
9055init_err_free: 9064init_err_free:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index ddc98c359488..a85d2be986af 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -98,13 +98,13 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
98 98
99 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1); 99 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
100 for (i = 0; i < max_tc; i++) { 100 for (i = 0; i < max_tc; i++) {
101 u8 qidx; 101 u8 qidx = bp->tc_to_qidx[i];
102 102
103 req.enables |= cpu_to_le32( 103 req.enables |= cpu_to_le32(
104 QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i); 104 QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
105 qidx);
105 106
106 memset(&cos2bw, 0, sizeof(cos2bw)); 107 memset(&cos2bw, 0, sizeof(cos2bw));
107 qidx = bp->tc_to_qidx[i];
108 cos2bw.queue_id = bp->q_info[qidx].queue_id; 108 cos2bw.queue_id = bp->q_info[qidx].queue_id;
109 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) { 109 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
110 cos2bw.tsa = 110 cos2bw.tsa =
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index f1a86b422617..58b9744c4058 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2160,6 +2160,7 @@ static void macb_configure_dma(struct macb *bp)
2160 else 2160 else
2161 dmacfg &= ~GEM_BIT(TXCOEN); 2161 dmacfg &= ~GEM_BIT(TXCOEN);
2162 2162
2163 dmacfg &= ~GEM_BIT(ADDR64);
2163#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2164#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2164 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2165 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2165 dmacfg |= GEM_BIT(ADDR64); 2166 dmacfg |= GEM_BIT(ADDR64);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index a19172dbe6be..c34ea385fe4a 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2159,6 +2159,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		return -EPERM;
 	if (copy_from_user(&t, useraddr, sizeof(t)))
 		return -EFAULT;
+	if (t.cmd != CHELSIO_SET_QSET_PARAMS)
+		return -EINVAL;
 	if (t.qset_idx >= SGE_QSETS)
 		return -EINVAL;
 	if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
@@ -2258,6 +2260,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
 
+		if (t.cmd != CHELSIO_GET_QSET_PARAMS)
+			return -EINVAL;
+
 		/* Display qsets for all ports when offload enabled */
 		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
 			q1 = 0;
@@ -2303,6 +2308,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EBUSY;
 		if (copy_from_user(&edata, useraddr, sizeof(edata)))
 			return -EFAULT;
+		if (edata.cmd != CHELSIO_SET_QSET_NUM)
+			return -EINVAL;
 		if (edata.val < 1 ||
 		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
 			return -EINVAL;
@@ -2343,6 +2350,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EPERM;
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
+		if (t.cmd != CHELSIO_LOAD_FW)
+			return -EINVAL;
 		/* Check t.len sanity ? */
 		fw_data = memdup_user(useraddr + sizeof(t), t.len);
 		if (IS_ERR(fw_data))
@@ -2366,6 +2375,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EBUSY;
 		if (copy_from_user(&m, useraddr, sizeof(m)))
 			return -EFAULT;
+		if (m.cmd != CHELSIO_SETMTUTAB)
+			return -EINVAL;
 		if (m.nmtus != NMTUS)
 			return -EINVAL;
 		if (m.mtus[0] < 81)	/* accommodate SACK */
@@ -2407,6 +2418,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EBUSY;
 		if (copy_from_user(&m, useraddr, sizeof(m)))
 			return -EFAULT;
+		if (m.cmd != CHELSIO_SET_PM)
+			return -EINVAL;
 		if (!is_power_of_2(m.rx_pg_sz) ||
 		    !is_power_of_2(m.tx_pg_sz))
 			return -EINVAL;	/* not power of 2 */
@@ -2440,6 +2453,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EIO;	/* need the memory controllers */
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
+		if (t.cmd != CHELSIO_GET_MEM)
+			return -EINVAL;
 		if ((t.addr & 7) || (t.len & 7))
 			return -EINVAL;
 		if (t.mem_id == MEM_CM)
@@ -2492,6 +2507,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EAGAIN;
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
+		if (t.cmd != CHELSIO_SET_TRACE_FILTER)
+			return -EINVAL;
 
 		tp = (const struct trace_params *)&t.sip;
 		if (t.config_tx)
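
Every cxgb3 hunk above adds the same guard: cxgb_extension_ioctl() dispatches on a user-supplied sub-command that is also the first field of the struct copied in from user space, so each handler now re-checks that field after copy_from_user() before interpreting the rest of the buffer with a layout the caller may not actually have provided. A sketch of the pattern, with hypothetical names:

struct ext_cmd {
	u32 cmd;	/* user-controlled discriminator */
	u32 val;
};

static int handle_set_val(void __user *useraddr, u32 expected)
{
	struct ext_cmd req;

	if (copy_from_user(&req, useraddr, sizeof(req)))
		return -EFAULT;
	if (req.cmd != expected)	/* wrong layout for this case */
		return -EINVAL;
	/* only now is req.val safe to interpret */
	return 0;
}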
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 74d122616e76..534787291b44 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4002,8 +4002,6 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter)
 	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 				   NETIF_F_TSO | NETIF_F_TSO6 |
 				   NETIF_F_GSO_UDP_TUNNEL;
-	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
-	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
 
 	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
 		 be16_to_cpu(port));
@@ -4025,8 +4023,6 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
 	adapter->vxlan_port = 0;
 
 	netdev->hw_enc_features = 0;
-	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
-	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
 }
 
 static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
@@ -5320,6 +5316,7 @@ static void be_netdev_init(struct net_device *netdev)
 	struct be_adapter *adapter = netdev_priv(netdev);
 
 	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+		NETIF_F_GSO_UDP_TUNNEL |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
 		NETIF_F_HW_VLAN_CTAG_TX;
 	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2708297e7795..bf9b9fd6d2a0 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1158,7 +1158,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
 		napi_disable(&fep->napi);
 		netif_tx_lock_bh(ndev);
 		fec_restart(ndev);
-		netif_wake_queue(ndev);
+		netif_tx_wake_all_queues(ndev);
 		netif_tx_unlock_bh(ndev);
 		napi_enable(&fep->napi);
 	}
@@ -1273,7 +1273,7 @@ skb_done:
 
 	/* Since we have freed up a buffer, the ring is no longer full
 	 */
-	if (netif_queue_stopped(ndev)) {
+	if (netif_tx_queue_stopped(nq)) {
 		entries_free = fec_enet_get_free_txdesc_num(txq);
 		if (entries_free >= txq->tx_wake_threshold)
 			netif_tx_wake_queue(nq);
@@ -1746,7 +1746,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
 			napi_disable(&fep->napi);
 			netif_tx_lock_bh(ndev);
 			fec_restart(ndev);
-			netif_wake_queue(ndev);
+			netif_tx_wake_all_queues(ndev);
 			netif_tx_unlock_bh(ndev);
 			napi_enable(&fep->napi);
 		}
@@ -2247,7 +2247,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
 		napi_disable(&fep->napi);
 		netif_tx_lock_bh(ndev);
 		fec_restart(ndev);
-		netif_wake_queue(ndev);
+		netif_tx_wake_all_queues(ndev);
 		netif_tx_unlock_bh(ndev);
 		napi_enable(&fep->napi);
 	}
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index a051e582d541..79d03f8ee7b1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
 	if (cb->type == DESC_TYPE_SKB)
 		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
 				 ring_to_dma_dir(ring));
-	else
+	else if (cb->length)
 		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
 			       ring_to_dma_dir(ring));
 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index f56855e63c96..28e907831b0e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -40,9 +40,9 @@
 #define SKB_TMP_LEN(SKB) \
 	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
 
-static void fill_v2_desc(struct hnae_ring *ring, void *priv,
-			 int size, dma_addr_t dma, int frag_end,
-			 int buf_num, enum hns_desc_type type, int mtu)
+static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
+			    int send_sz, dma_addr_t dma, int frag_end,
+			    int buf_num, enum hns_desc_type type, int mtu)
 {
 	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
 	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
 	desc_cb->type = type;
 
 	desc->addr = cpu_to_le64(dma);
-	desc->tx.send_size = cpu_to_le16((u16)size);
+	desc->tx.send_size = cpu_to_le16((u16)send_sz);
 
 	/* config bd buffer end */
 	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
@@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
 	ring_ptr_move_fw(ring, next_to_use);
 }
 
+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+			 int size, dma_addr_t dma, int frag_end,
+			 int buf_num, enum hns_desc_type type, int mtu)
+{
+	fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
+			buf_num, type, mtu);
+}
+
 static const struct acpi_device_id hns_enet_acpi_match[] = {
 	{ "HISI00C1", 0 },
 	{ "HISI00C2", 0 },
@@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
 
 	/* when the frag size is bigger than hardware, split this frag */
 	for (k = 0; k < frag_buf_num; k++)
-		fill_v2_desc(ring, priv,
-			     (k == frag_buf_num - 1) ?
-					sizeoflast : BD_MAX_SEND_SIZE,
-			     dma + BD_MAX_SEND_SIZE * k,
-			     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
-			     buf_num,
-			     (type == DESC_TYPE_SKB && !k) ?
-					DESC_TYPE_SKB : DESC_TYPE_PAGE,
-			     mtu);
+		fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
+				(k == frag_buf_num - 1) ?
+					sizeoflast : BD_MAX_SEND_SIZE,
+				dma + BD_MAX_SEND_SIZE * k,
+				frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+				buf_num,
+				(type == DESC_TYPE_SKB && !k) ?
+					DESC_TYPE_SKB : DESC_TYPE_PAGE,
+				mtu);
 }
 
 netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
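
The split into fill_v2_desc_hw() decouples unmap bookkeeping from the hardware send size: a TSO fragment is DMA-mapped once but spread over several buffer descriptors, so only the first descriptor records the mapped length (the rest record 0, which the hnae.c guard above skips at unmap time), while send_sz still tells the hardware how much each descriptor transmits. Condensed, with a hypothetical fill_bd() helper:

/* one frag, one dma_map_page(), several hardware BDs */
for (k = 0; k < frag_buf_num; k++)
	fill_bd(ring,
		k == 0 ? size : 0,		/* cb->length: unmap once */
		k == frag_buf_num - 1 ?
			sizeoflast : BD_MAX_SEND_SIZE,	/* HW send size */
		dma + BD_MAX_SEND_SIZE * k);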
@@ -1495,21 +1503,6 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
 	return phy_mii_ioctl(phy_dev, ifr, cmd);
 }
 
-/* use only for netconsole to poll with the device without interrupt */
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hns_nic_poll_controller(struct net_device *ndev)
-{
-	struct hns_nic_priv *priv = netdev_priv(ndev);
-	unsigned long flags;
-	int i;
-
-	local_irq_save(flags);
-	for (i = 0; i < priv->ae_handle->q_num * 2; i++)
-		napi_schedule(&priv->ring_data[i].napi);
-	local_irq_restore(flags);
-}
-#endif
-
 static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
 				    struct net_device *ndev)
 {
@@ -1962,9 +1955,6 @@ static const struct net_device_ops hns_nic_netdev_ops = {
 	.ndo_set_features = hns_nic_set_features,
 	.ndo_fix_features = hns_nic_fix_features,
 	.ndo_get_stats64 = hns_nic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = hns_nic_poll_controller,
-#endif
 	.ndo_set_rx_mode = hns_nic_set_rx_mode,
 	.ndo_select_queue = hns_nic_select_queue,
 };
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 09e9da10b786..4a8f82938ed5 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -789,23 +789,6 @@ static void hinic_get_stats64(struct net_device *netdev,
 	stats->tx_errors  = nic_tx_stats->tx_dropped;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hinic_netpoll(struct net_device *netdev)
-{
-	struct hinic_dev *nic_dev = netdev_priv(netdev);
-	int i, num_qps;
-
-	num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
-	for (i = 0; i < num_qps; i++) {
-		struct hinic_txq *txq = &nic_dev->txqs[i];
-		struct hinic_rxq *rxq = &nic_dev->rxqs[i];
-
-		napi_schedule(&txq->napi);
-		napi_schedule(&rxq->napi);
-	}
-}
-#endif
-
 static const struct net_device_ops hinic_netdev_ops = {
 	.ndo_open = hinic_open,
 	.ndo_stop = hinic_close,
@@ -818,9 +801,6 @@ static const struct net_device_ops hinic_netdev_ops = {
 	.ndo_start_xmit = hinic_xmit_frame,
 	.ndo_tx_timeout = hinic_tx_timeout,
 	.ndo_get_stats64 = hinic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = hinic_netpoll,
-#endif
 };
 
 static void netdev_features_init(struct net_device *netdev)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index ba580bfae512..03f64f40b2a3 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -921,17 +921,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
 	return rx;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ehea_netpoll(struct net_device *dev)
-{
-	struct ehea_port *port = netdev_priv(dev);
-	int i;
-
-	for (i = 0; i < port->num_def_qps; i++)
-		napi_schedule(&port->port_res[i].napi);
-}
-#endif
-
 static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
 {
 	struct ehea_port_res *pr = param;
@@ -2953,9 +2942,6 @@ static const struct net_device_ops ehea_netdev_ops = {
 	.ndo_open		= ehea_open,
 	.ndo_stop		= ehea_stop,
 	.ndo_start_xmit		= ehea_start_xmit,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= ehea_netpoll,
-#endif
 	.ndo_get_stats64	= ehea_get_stats64,
 	.ndo_set_mac_address	= ehea_set_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4f0daf67b18d..699ef942b615 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2207,19 +2207,6 @@ restart_poll:
 	return frames_processed;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ibmvnic_netpoll_controller(struct net_device *dev)
-{
-	struct ibmvnic_adapter *adapter = netdev_priv(dev);
-	int i;
-
-	replenish_pools(netdev_priv(dev));
-	for (i = 0; i < adapter->req_rx_queues; i++)
-		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
-				     adapter->rx_scrq[i]);
-}
-#endif
-
 static int wait_for_reset(struct ibmvnic_adapter *adapter)
 {
 	int rc, ret;
@@ -2292,9 +2279,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
 	.ndo_set_mac_address	= ibmvnic_set_mac,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_tx_timeout		= ibmvnic_tx_timeout,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= ibmvnic_netpoll_controller,
-#endif
 	.ndo_change_mtu		= ibmvnic_change_mtu,
 	.ndo_features_check	= ibmvnic_features_check,
 };
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index f27d73a7bf16..6cdd58d9d461 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3196,11 +3196,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 		return budget;
 
 	/* all work done, exit the polling mode */
-	napi_complete_done(napi, work_done);
-	if (adapter->rx_itr_setting & 1)
-		ixgbe_set_itr(q_vector);
-	if (!test_bit(__IXGBE_DOWN, &adapter->state))
-		ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
+	if (likely(napi_complete_done(napi, work_done))) {
+		if (adapter->rx_itr_setting & 1)
+			ixgbe_set_itr(q_vector);
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
+			ixgbe_irq_enable_queues(adapter,
+						BIT_ULL(q_vector->v_idx));
+	}
 
 	return min(work_done, budget - 1);
 }
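
napi_complete_done() returns false when the NAPI context is being kept in polling mode (busy polling, for example); in that case the device interrupt must stay masked and the ITR update is pointless, so both are now gated on its return value. The idiomatic tail of a poll routine, sketched with hypothetical RX and unmask helpers:

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = clean_rx(napi, budget);		/* hypothetical */

	if (work_done == budget)
		return budget;		/* stay in polling mode */

	/* a false return means another poller still owns this context:
	 * do not re-enable the device interrupt */
	if (likely(napi_complete_done(napi, work_done)))
		unmask_device_irq(napi);		/* hypothetical */

	return min(work_done, budget - 1);
}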
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 38cc01beea79..a74002b43b51 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1725,7 +1725,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
 }
 
 /* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
 			       int ip_hdr_len, int l4_proto)
 {
 	u32 command;
@@ -2600,14 +2600,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		int ip_hdr_len = 0;
 		u8 l4_proto;
+		__be16 l3_proto = vlan_get_protocol(skb);
 
-		if (skb->protocol == htons(ETH_P_IP)) {
+		if (l3_proto == htons(ETH_P_IP)) {
 			struct iphdr *ip4h = ip_hdr(skb);
 
 			/* Calculate IPv4 checksum and L4 checksum */
 			ip_hdr_len = ip4h->ihl;
 			l4_proto = ip4h->protocol;
-		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		} else if (l3_proto == htons(ETH_P_IPV6)) {
 			struct ipv6hdr *ip6h = ipv6_hdr(skb);
 
 			/* Read l4_protocol from one of IPv6 extra headers */
@@ -2619,7 +2620,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
 		}
 
 		return mvpp2_txq_desc_csum(skb_network_offset(skb),
-					   skb->protocol, ip_hdr_len, l4_proto);
+					   l3_proto, ip_hdr_len, l4_proto);
 	}
 
 	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
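
On a VLAN-tagged frame skb->protocol carries the 802.1Q/802.1ad ethertype, not the encapsulated one, so keying TX checksum offload on it quietly disabled offload for tagged traffic; vlan_get_protocol() digs out the real L3 protocol, and the helper's parameter becomes __be16 to match. A minimal sketch:

#include <linux/if_vlan.h>

/* decide L3 checksum offload by the encapsulated ethertype */
static bool wants_l3_csum(const struct sk_buff *skb)
{
	__be16 l3_proto = vlan_get_protocol(skb);

	return l3_proto == htons(ETH_P_IP) ||
	       l3_proto == htons(ETH_P_IPV6);
}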
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index db2cfcd21d43..0f189f873859 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -54,6 +54,7 @@
 #include "en_stats.h"
 #include "en/fs.h"
 
+extern const struct net_device_ops mlx5e_netdev_ops;
 struct page_pool;
 
 #define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index bbf69e859b78..1431232c9a09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -16,6 +16,8 @@ struct mlx5e_tc_table {
 
 	DECLARE_HASHTABLE(mod_hdr_tbl, 8);
 	DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+	struct notifier_block netdevice_nb;
 };
 
 struct mlx5e_flow_table {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 54118b77dc1f..f291d1bf1558 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4315,7 +4315,7 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	}
 }
 
-static const struct net_device_ops mlx5e_netdev_ops = {
+const struct net_device_ops mlx5e_netdev_ops = {
 	.ndo_open                = mlx5e_open,
 	.ndo_stop                = mlx5e_close,
 	.ndo_start_xmit          = mlx5e_xmit,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9fed54017659..85796727093e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1368,6 +1368,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
 			*match_level = MLX5_MATCH_L2;
 		}
+	} else {
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
 	}
 
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -2946,14 +2949,71 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
 	return 0;
 }
 
+static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
+					      struct mlx5e_priv *peer_priv)
+{
+	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
+	struct mlx5e_hairpin_entry *hpe;
+	u16 peer_vhca_id;
+	int bkt;
+
+	if (!same_hw_devs(priv, peer_priv))
+		return;
+
+	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+
+	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
+		if (hpe->peer_vhca_id == peer_vhca_id)
+			hpe->hp->pair->peer_gone = true;
+	}
+}
+
+static int mlx5e_tc_netdev_event(struct notifier_block *this,
+				 unsigned long event, void *ptr)
+{
+	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+	struct mlx5e_flow_steering *fs;
+	struct mlx5e_priv *peer_priv;
+	struct mlx5e_tc_table *tc;
+	struct mlx5e_priv *priv;
+
+	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
+	    event != NETDEV_UNREGISTER ||
+	    ndev->reg_state == NETREG_REGISTERED)
+		return NOTIFY_DONE;
+
+	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
+	fs = container_of(tc, struct mlx5e_flow_steering, tc);
+	priv = container_of(fs, struct mlx5e_priv, fs);
+	peer_priv = netdev_priv(ndev);
+	if (priv == peer_priv ||
+	    !(priv->netdev->features & NETIF_F_HW_TC))
+		return NOTIFY_DONE;
+
+	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
+
+	return NOTIFY_DONE;
+}
+
 int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 {
 	struct mlx5e_tc_table *tc = &priv->fs.tc;
+	int err;
 
 	hash_init(tc->mod_hdr_tbl);
 	hash_init(tc->hairpin_tbl);
 
-	return rhashtable_init(&tc->ht, &tc_ht_params);
+	err = rhashtable_init(&tc->ht, &tc_ht_params);
+	if (err)
+		return err;
+
+	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
+	if (register_netdevice_notifier(&tc->netdevice_nb)) {
+		tc->netdevice_nb.notifier_call = NULL;
+		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
+	}
+
+	return err;
 }
 
 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
@@ -2969,6 +3029,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
 {
 	struct mlx5e_tc_table *tc = &priv->fs.tc;
 
+	if (tc->netdevice_nb.notifier_call)
+		unregister_netdevice_notifier(&tc->netdevice_nb);
+
 	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
 
 	if (!IS_ERR_OR_NULL(tc->t)) {
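
The hairpin cleanup rests on a common notifier idiom: embed the notifier_block in the owning table, recover the owner in the callback with container_of(), tolerate registration failure by clearing notifier_call, and unregister at cleanup only when that field is set (exporting mlx5e_netdev_ops above exists solely so the callback can recognize mlx5e netdevs). The same shape, stripped down:

struct my_table {			/* hypothetical owner */
	struct notifier_block nb;
};

static int my_event(struct notifier_block *nb, unsigned long event,
		    void *ptr)
{
	struct my_table *t = container_of(nb, struct my_table, nb);

	/* react to NETDEV_UNREGISTER etc. via t */
	return NOTIFY_DONE;
}

static void my_init(struct my_table *t)
{
	t->nb.notifier_call = my_event;
	if (register_netdevice_notifier(&t->nb))
		t->nb.notifier_call = NULL;	/* degrade, don't fail init */
}

static void my_cleanup(struct my_table *t)
{
	if (t->nb.notifier_call)
		unregister_netdevice_notifier(&t->nb);
}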
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 2b252cde5cc2..ea7dedc2d5ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -2000,7 +2000,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
 	u32 max_guarantee = 0;
 	int i;
 
-	for (i = 0; i <= esw->total_vports; i++) {
+	for (i = 0; i < esw->total_vports; i++) {
 		evport = &esw->vports[i];
 		if (!evport->enabled || evport->info.min_rate < max_guarantee)
 			continue;
@@ -2020,7 +2020,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
 	int err;
 	int i;
 
-	for (i = 0; i <= esw->total_vports; i++) {
+	for (i = 0; i < esw->total_vports; i++) {
 		evport = &esw->vports[i];
 		if (!evport->enabled)
 			continue;
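
Both loops walked one element past the end of the array: vports[] holds total_vports entries, so the inclusive bound read vports[total_vports]. The general rule, as a tiny self-contained example:

/* an array of n elements supports indices 0 .. n-1 only */
static int sum(const int *arr, unsigned int n)
{
	unsigned int i;
	int s = 0;

	for (i = 0; i < n; i++)		/* "i <= n" would read arr[n] */
		s += arr[i];
	return s;
}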
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index d2f76070ea7c..a1ee9a8a769e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -475,7 +475,8 @@ static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
 
 	for (i = 0; i < hp->num_channels; i++) {
 		mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
-		mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+		if (!hp->peer_gone)
+			mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
 	}
 }
 
@@ -567,6 +568,8 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
 				       MLX5_RQC_STATE_RST, 0, 0);
 
 	/* unset peer SQs */
+	if (hp->peer_gone)
+		return;
 	for (i = 0; i < hp->num_channels; i++)
 		mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
 				       MLX5_SQC_STATE_RST, 0, 0);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 4d271fb3de3d..5890fdfd62c3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -718,14 +718,17 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
 	memset(&active_cqns, 0, sizeof(active_cqns));
 
 	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
-		u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
 
-		switch (event_type) {
-		case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+		/* Command interface completion events are always received on
+		 * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
+		 * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
+		 */
+		switch (q->num) {
+		case MLXSW_PCI_EQ_ASYNC_NUM:
 			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
 			q->u.eq.ev_cmd_count++;
 			break;
-		case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+		case MLXSW_PCI_EQ_COMP_NUM:
 			cqn = mlxsw_pci_eqe_cqn_get(eqe);
 			set_bit(cqn, active_cqns);
 			cq_handle = true;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index b492152c8881..30bb2c533cec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4845,6 +4845,8 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
 		upper_dev = info->upper_dev;
 		if (info->linking)
 			break;
+		if (is_vlan_dev(upper_dev))
+			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
 		if (netif_is_macvlan(upper_dev))
 			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
 		break;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 8ed38fd5a852..c6d29fdbb880 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2077,14 +2077,17 @@ nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
 	return true;
 }
 
-static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
 {
 	struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
 	struct nfp_net *nn = r_vec->nfp_net;
 	struct nfp_net_dp *dp = &nn->dp;
+	unsigned int budget = 512;
 
-	while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
+	while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
 		continue;
+
+	return budget;
 }
 
 static void nfp_ctrl_poll(unsigned long arg)
@@ -2096,9 +2099,13 @@ static void nfp_ctrl_poll(unsigned long arg)
 	__nfp_ctrl_tx_queued(r_vec);
 	spin_unlock_bh(&r_vec->lock);
 
-	nfp_ctrl_rx(r_vec);
-
-	nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+	if (nfp_ctrl_rx(r_vec)) {
+		nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+	} else {
+		tasklet_schedule(&r_vec->tasklet);
+		nn_dp_warn(&r_vec->nfp_net->dp,
+			   "control message budget exceeded!\n");
+	}
 }
 
 /* Setup and Configuration
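
nfp_ctrl_poll() runs from a tasklet, so an unbounded while (rx_one()) loop lets a chatty peer monopolize the CPU; the rework caps the pass at 512 messages and, when the budget runs out, keeps the interrupt masked and reschedules the tasklet instead of unmasking. The shape, with hypothetical types and helpers:

static void ctrl_poll(unsigned long arg)
{
	struct ctrl_vec *v = (struct ctrl_vec *)arg;	/* hypothetical */
	unsigned int budget = 512;

	while (rx_one_msg(v) && budget--)
		continue;

	if (budget)
		unmask_irq(v);			/* ring drained */
	else
		tasklet_schedule(&v->tasklet);	/* finish next pass */
}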
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 69aa7fc392c5..59c70be22a84 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -72,9 +72,6 @@ static void netxen_schedule_work(struct netxen_adapter *adapter,
 				  work_func_t func, int delay);
 static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
 static int netxen_nic_poll(struct napi_struct *napi, int budget);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev);
-#endif
 
 static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
 static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
@@ -581,9 +578,6 @@ static const struct net_device_ops netxen_netdev_ops = {
 	.ndo_tx_timeout	   = netxen_tx_timeout,
 	.ndo_fix_features = netxen_fix_features,
 	.ndo_set_features = netxen_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = netxen_nic_poll_controller,
-#endif
 };
 
 static inline bool netxen_function_zero(struct pci_dev *pdev)
@@ -2402,23 +2396,6 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
 	return work_done;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev)
-{
-	int ring;
-	struct nx_host_sds_ring *sds_ring;
-	struct netxen_adapter *adapter = netdev_priv(netdev);
-	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
-
-	disable_irq(adapter->irq);
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-		sds_ring = &recv_ctx->sds_rings[ring];
-		netxen_intr(adapter->irq, sds_ring);
-	}
-	enable_irq(adapter->irq);
-}
-#endif
-
 static int
 nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
 {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 9b3ef00e5782..a71382687ef2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -11987,6 +11987,7 @@ struct public_global {
 	u32 running_bundle_id;
 	s32 external_temperature;
 	u32 mdump_reason;
+	u64 reserved;
 	u32 data_ptr;
 	u32 data_size;
 };
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 17f3dfa2cc94..e860bdf0f752 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1710,7 +1710,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
 
 	cm_info->local_ip[0] = ntohl(iph->daddr);
 	cm_info->remote_ip[0] = ntohl(iph->saddr);
-	cm_info->ip_version = TCP_IPV4;
+	cm_info->ip_version = QED_TCP_IPV4;
 
 	ip_hlen = (iph->ihl) * sizeof(u32);
 	*payload_len = ntohs(iph->tot_len) - ip_hlen;
@@ -1730,7 +1730,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
 			cm_info->remote_ip[i] =
 			    ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
 		}
-		cm_info->ip_version = TCP_IPV6;
+		cm_info->ip_version = QED_TCP_IPV6;
 
 		ip_hlen = sizeof(*ip6h);
 		*payload_len = ntohs(ip6h->payload_len);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index be941cfaa2d4..c71391b9c757 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -228,7 +228,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 				 num_cons, "Toggle");
 	if (rc) {
 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
-			   "Failed to allocate toogle bits, rc = %d\n", rc);
+			   "Failed to allocate toggle bits, rc = %d\n", rc);
 		goto free_cq_map;
 	}
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 7d7a64c55ff1..f9167d1354bb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -140,23 +140,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
 
 static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
 {
-	enum roce_flavor flavor;
-
 	switch (roce_mode) {
 	case ROCE_V1:
-		flavor = PLAIN_ROCE;
-		break;
+		return PLAIN_ROCE;
 	case ROCE_V2_IPV4:
-		flavor = RROCE_IPV4;
-		break;
+		return RROCE_IPV4;
 	case ROCE_V2_IPV6:
-		flavor = ROCE_V2_IPV6;
-		break;
+		return RROCE_IPV6;
 	default:
-		flavor = MAX_ROCE_MODE;
-		break;
+		return MAX_ROCE_FLAVOR;
 	}
-	return flavor;
 }
 
 static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 8de644b4721e..77b6248ad3b9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -154,7 +154,7 @@ qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
 static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
 				  struct qed_tunnel_info *p_src)
 {
-	enum tunnel_clss type;
+	int type;
 
 	p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
 	p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 3d4269659820..be118d057b92 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -413,7 +413,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
 	}
 
 	if (!p_iov->b_pre_fp_hsi &&
-	    ETH_HSI_VER_MINOR &&
 	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
 		DP_INFO(p_hwfn,
 			"PF is using older fastpath HSI; %02x.%02x is configured\n",
@@ -572,7 +571,7 @@ free_p_iov:
 static void
 __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
 			   struct qed_tunn_update_type *p_src,
-			   enum qed_tunn_clss mask, u8 *p_cls)
+			   enum qed_tunn_mode mask, u8 *p_cls)
 {
 	if (p_src->b_update_mode) {
 		p_req->tun_mode_update_mask |= BIT(mask);
@@ -587,7 +586,7 @@ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
 static void
 qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
 			 struct qed_tunn_update_type *p_src,
-			 enum qed_tunn_clss mask,
+			 enum qed_tunn_mode mask,
 			 u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
 			 u8 *p_update_port, u16 *p_udp_port)
 {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 81312924df14..0c443ea98479 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1800,7 +1800,8 @@ struct qlcnic_hardware_ops {
 	int (*config_loopback) (struct qlcnic_adapter *, u8);
 	int (*clear_loopback) (struct qlcnic_adapter *, u8);
 	int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
-	void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+	void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
+				 u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
 	int (*get_board_info) (struct qlcnic_adapter *);
 	void (*set_mac_filter_count) (struct qlcnic_adapter *);
 	void (*free_mac_list) (struct qlcnic_adapter *);
@@ -2064,9 +2065,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
 }
 
 static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
-					u64 *addr, u16 id)
+					u64 *addr, u16 vlan,
+					struct qlcnic_host_tx_ring *tx_ring)
 {
-	adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+	adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
 }
 
 static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 569d54ededec..a79d84f99102 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2135,7 +2135,8 @@ out:
 }
 
 void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
-				  u16 vlan_id)
+				  u16 vlan_id,
+				  struct qlcnic_host_tx_ring *tx_ring)
 {
 	u8 mac[ETH_ALEN];
 	memcpy(&mac, addr, ETH_ALEN);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index b75a81246856..73fe2f64491d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
550int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32); 550int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
551int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int); 551int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
552int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int); 552int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
553void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16); 553void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
554 u16 vlan, struct qlcnic_host_tx_ring *ring);
554int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *); 555int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
555int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); 556int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
556void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int); 557void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 4bb33af8e2b3..56a3bd9e37dc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
 			 struct net_device *netdev);
 void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
-			       u64 *uaddr, u16 vlan_id);
+			       u64 *uaddr, u16 vlan_id,
+			       struct qlcnic_host_tx_ring *tx_ring);
 int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
 				     struct ethtool_coalesce *);
 int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 84dd83031a1b..9647578cbe6a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -268,13 +268,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
 }
 
 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
-			       u16 vlan_id)
+			       u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
 {
 	struct cmd_desc_type0 *hwdesc;
 	struct qlcnic_nic_req *req;
 	struct qlcnic_mac_req *mac_req;
 	struct qlcnic_vlan_req *vlan_req;
-	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
 	u32 producer;
 	u64 word;
 
@@ -301,7 +300,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
 
 static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
 			       struct cmd_desc_type0 *first_desc,
-			       struct sk_buff *skb)
+			       struct sk_buff *skb,
+			       struct qlcnic_host_tx_ring *tx_ring)
 {
 	struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
 	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -335,7 +335,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
 		    tmp_fil->vlan_id == vlan_id) {
 			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
 				qlcnic_change_filter(adapter, &src_addr,
-						     vlan_id);
+						     vlan_id, tx_ring);
 			tmp_fil->ftime = jiffies;
 			return;
 		}
@@ -350,7 +350,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
 	if (!fil)
 		return;
 
-	qlcnic_change_filter(adapter, &src_addr, vlan_id);
+	qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
 	fil->ftime = jiffies;
 	fil->vlan_id = vlan_id;
 	memcpy(fil->faddr, &src_addr, ETH_ALEN);
@@ -766,7 +766,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	if (adapter->drv_mac_learn)
-		qlcnic_send_filter(adapter, first_desc, skb);
+		qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
 
 	tx_ring->tx_stats.tx_bytes += skb->len;
 	tx_ring->tx_stats.xmit_called++;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2d38d1ac2aae..dbd48012224f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -59,9 +59,6 @@ static int qlcnic_close(struct net_device *netdev);
 static void qlcnic_tx_timeout(struct net_device *netdev);
 static void qlcnic_attach_work(struct work_struct *work);
 static void qlcnic_fwinit_work(struct work_struct *work);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev);
-#endif
 
 static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
 static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -545,9 +542,6 @@ static const struct net_device_ops qlcnic_netdev_ops = {
 	.ndo_udp_tunnel_add = qlcnic_add_vxlan_port,
 	.ndo_udp_tunnel_del = qlcnic_del_vxlan_port,
 	.ndo_features_check = qlcnic_features_check,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = qlcnic_poll_controller,
-#endif
 #ifdef CONFIG_QLCNIC_SRIOV
 	.ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
 	.ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate,
@@ -3200,45 +3194,6 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev)
-{
-	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-	struct qlcnic_host_sds_ring *sds_ring;
-	struct qlcnic_recv_context *recv_ctx;
-	struct qlcnic_host_tx_ring *tx_ring;
-	int ring;
-
-	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
-		return;
-
-	recv_ctx = adapter->recv_ctx;
-
-	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
-		sds_ring = &recv_ctx->sds_rings[ring];
-		qlcnic_disable_sds_intr(adapter, sds_ring);
-		napi_schedule(&sds_ring->napi);
-	}
-
-	if (adapter->flags & QLCNIC_MSIX_ENABLED) {
-		/* Only Multi-Tx queue capable devices need to
-		 * schedule NAPI for TX rings
-		 */
-		if ((qlcnic_83xx_check(adapter) &&
-		     (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
-		    (qlcnic_82xx_check(adapter) &&
-		     !qlcnic_check_multi_tx(adapter)))
-			return;
-
-		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
-			tx_ring = &adapter->tx_ring[ring];
-			qlcnic_disable_tx_intr(adapter, tx_ring);
-			napi_schedule(&tx_ring->napi);
-		}
-	}
-}
-#endif
-
 static void
 qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
 {
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 7fd86d40a337..11167abe5934 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -113,7 +113,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
 	struct sk_buff *skbn;
 
 	if (skb->dev->type == ARPHRD_ETHER) {
-		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
+		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
 			kfree_skb(skb);
 			return;
 		}
@@ -147,7 +147,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 	}
 
 	if (skb_headroom(skb) < required_headroom) {
-		if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
+		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
 			return -ENOMEM;
 	}
 
@@ -189,6 +189,9 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 	if (!skb)
 		goto done;
 
+	if (skb->pkt_type == PACKET_LOOPBACK)
+		return RX_HANDLER_PASS;
+
 	dev = skb->dev;
 	port = rmnet_get_port(dev);
 
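
rmnet's map handlers execute in the packet-processing (softirq) path, where GFP_KERNEL may sleep and is therefore invalid, hence the switch to GFP_ATOMIC; the new PACKET_LOOPBACK early-out hands looped-back frames straight back to the stack instead of trying to demux them. An rx_handler respecting both rules, sketched (MY_HDRLEN hypothetical):

static rx_handler_result_t my_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (skb->pkt_type == PACKET_LOOPBACK)
		return RX_HANDLER_PASS;		/* not ours to demux */

	/* softirq context: no sleeping allocations */
	if (skb_headroom(skb) < MY_HDRLEN &&
	    pskb_expand_head(skb, MY_HDRLEN, 0, GFP_ATOMIC)) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}
	/* ... decapsulate and deliver ... */
	return RX_HANDLER_CONSUMED;
}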
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ab30aaeac6d3..9a5e2969df61 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4072,13 +4072,12 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
 
 	genphy_soft_reset(dev->phydev);
 
-	/* It was reported that chip version 33 ends up with 10MBit/Half on a
+	/* It was reported that several chips end up with 10MBit/Half on a
 	 * 1GBit link after resuming from S3. For whatever reason the PHY on
-	 * this chip doesn't properly start a renegotiation when soft-reset.
+	 * these chips doesn't properly start a renegotiation when soft-reset.
 	 * Explicitly requesting a renegotiation fixes this.
 	 */
-	if (tp->mac_version == RTL_GIGA_MAC_VER_33 &&
-	    dev->phydev->autoneg == AUTONEG_ENABLE)
+	if (dev->phydev->autoneg == AUTONEG_ENABLE)
 		phy_restart_aneg(dev->phydev);
 }
 
@@ -4536,9 +4535,14 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
 
 static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
 {
-	/* Set DMA burst size and Interframe Gap Time */
-	RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
-		(InterFrameGap << TxInterFrameGapShift));
+	u32 val = TX_DMA_BURST << TxDMAShift |
+		  InterFrameGap << TxInterFrameGapShift;
+
+	if (tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
+	    tp->mac_version != RTL_GIGA_MAC_VER_39)
+		val |= TXCFG_AUTO_FIFO;
+
+	RTL_W32(tp, TxConfig, val);
 }
 
 static void rtl_set_rx_max_size(struct rtl8169_private *tp)
@@ -5033,7 +5037,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 
     rtl_disable_clock_request(tp);
 
-    RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
     RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
 
     /* Adjust EEE LED frequency */
@@ -5067,7 +5070,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
 
     rtl_disable_clock_request(tp);
 
-    RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
     RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
     RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
     RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
@@ -5112,8 +5114,6 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
 
 static void rtl_hw_start_8168g(struct rtl8169_private *tp)
 {
-    RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
     rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
     rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
     rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5211,8 +5211,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
     rtl_hw_aspm_clkreq_enable(tp, false);
     rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
 
-    RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
     rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
     rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
     rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5295,8 +5293,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
 {
     rtl8168ep_stop_cmac(tp);
 
-    RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
     rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
     rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
     rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
@@ -5618,7 +5614,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
     /* Force LAN exit from ASPM if Rx/Tx are not idle */
     RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
 
-    RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
     RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
 
     rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -6869,8 +6864,10 @@ static int rtl8169_suspend(struct device *device)
 {
     struct pci_dev *pdev = to_pci_dev(device);
     struct net_device *dev = pci_get_drvdata(pdev);
+    struct rtl8169_private *tp = netdev_priv(dev);
 
     rtl8169_net_suspend(dev);
+    clk_disable_unprepare(tp->clk);
 
     return 0;
 }
@@ -6898,6 +6895,9 @@ static int rtl8169_resume(struct device *device)
 {
     struct pci_dev *pdev = to_pci_dev(device);
     struct net_device *dev = pci_get_drvdata(pdev);
+    struct rtl8169_private *tp = netdev_priv(dev);
+
+    clk_prepare_enable(tp->clk);
 
     if (netif_running(dev))
         __rtl8169_resume(dev);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 330233286e78..3d0dd39c289e 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2208,29 +2208,6 @@ static void efx_fini_napi(struct efx_nic *efx)
 
 /**************************************************************************
  *
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void efx_netpoll(struct net_device *net_dev)
-{
-    struct efx_nic *efx = netdev_priv(net_dev);
-    struct efx_channel *channel;
-
-    efx_for_each_channel(channel, efx)
-        efx_schedule_channel(channel);
-}
-
-#endif
-
-/**************************************************************************
- *
  * Kernel net device interface
  *
  *************************************************************************/
@@ -2509,9 +2486,6 @@ static const struct net_device_ops efx_netdev_ops = {
 #endif
     .ndo_get_phys_port_id = efx_get_phys_port_id,
     .ndo_get_phys_port_name = efx_get_phys_port_name,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-    .ndo_poll_controller = efx_netpoll,
-#endif
     .ndo_setup_tc = efx_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
     .ndo_rx_flow_steer = efx_filter_rfs,
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index dd5530a4f8c8..03e2455c502e 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2054,29 +2054,6 @@ static void ef4_fini_napi(struct ef4_nic *efx)
 
 /**************************************************************************
  *
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void ef4_netpoll(struct net_device *net_dev)
-{
-    struct ef4_nic *efx = netdev_priv(net_dev);
-    struct ef4_channel *channel;
-
-    ef4_for_each_channel(channel, efx)
-        ef4_schedule_channel(channel);
-}
-
-#endif
-
-/**************************************************************************
- *
  * Kernel net device interface
  *
  *************************************************************************/
@@ -2250,9 +2227,6 @@ static const struct net_device_ops ef4_netdev_ops = {
     .ndo_set_mac_address = ef4_set_mac_address,
     .ndo_set_rx_mode = ef4_set_rx_mode,
     .ndo_set_features = ef4_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-    .ndo_poll_controller = ef4_netpoll,
-#endif
     .ndo_setup_tc = ef4_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
     .ndo_rx_flow_steer = ef4_filter_rfs,
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 16ec7af6ab7b..ba9df430fca6 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -966,6 +966,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                            sizeof(struct yamdrv_ioctl_mcs));
         if (IS_ERR(ym))
             return PTR_ERR(ym);
+        if (ym->cmd != SIOCYAMSMCS)
+            return -EINVAL;
         if (ym->bitrate > YAM_MAXBITRATE) {
             kfree(ym);
             return -EINVAL;
@@ -981,6 +983,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
         if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
             return -EFAULT;
 
+        if (yi.cmd != SIOCYAMSCFG)
+            return -EINVAL;
         if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
             return -EINVAL;        /* Cannot change this parameter when up */
         if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 23a52b9293f3..cd1d8faccca5 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1308,8 +1308,7 @@ static int adf7242_remove(struct spi_device *spi)
 {
     struct adf7242_local *lp = spi_get_drvdata(spi);
 
-    if (!IS_ERR_OR_NULL(lp->debugfs_root))
-        debugfs_remove_recursive(lp->debugfs_root);
+    debugfs_remove_recursive(lp->debugfs_root);
 
     cancel_delayed_work_sync(&lp->work);
     destroy_workqueue(lp->wqueue);
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 58299fb666ed..0ff5a403a8dc 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -634,10 +634,9 @@ static int ca8210_test_int_driver_write(
     for (i = 0; i < len; i++)
         dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]);
 
-    fifo_buffer = kmalloc(len, GFP_KERNEL);
+    fifo_buffer = kmemdup(buf, len, GFP_KERNEL);
     if (!fifo_buffer)
         return -ENOMEM;
-    memcpy(fifo_buffer, buf, len);
     kfifo_in(&test->up_fifo, &fifo_buffer, 4);
     wake_up_interruptible(&priv->test.readq);
 
@@ -3044,8 +3043,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv)
 {
     struct ca8210_test *test = &priv->test;
 
-    if (!IS_ERR(test->ca8210_dfs_spi_int))
-        debugfs_remove(test->ca8210_dfs_spi_int);
+    debugfs_remove(test->ca8210_dfs_spi_int);
     kfifo_free(&test->up_fifo);
     dev_info(&priv->spi->dev, "Test interface removed\n");
 }
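
The ca8210 hunks above fold an open-coded kmalloc()+memcpy() pair into kmemdup() and drop a redundant error-pointer guard around debugfs_remove(), which already tolerates NULL and error values. A minimal userspace sketch of the same allocate-and-copy pattern; memdup() here is a hypothetical stand-in for the kernel helper, not its real implementation:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Allocate and copy in one step, like kmemdup(), instead of the
     * separate malloc() + memcpy() the old code used. */
    static void *memdup(const void *src, size_t len)
    {
        void *p = malloc(len);

        if (p)
            memcpy(p, src, len);
        return p;
    }

    int main(void)
    {
        const unsigned char frame[] = { 0x01, 0x02, 0x03, 0x04 };
        unsigned char *copy = memdup(frame, sizeof(frame));

        if (!copy)
            return 1; /* would be -ENOMEM in the kernel */
        printf("copied %zu bytes, first = %#x\n", sizeof(frame), copy[0]);
        free(copy);
        return 0;
    }
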
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index e428277781ac..04891429a554 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -903,19 +903,19 @@ mcr20a_irq_clean_complete(void *context)
 
     switch (seq_state) {
     /* TX IRQ, RX IRQ and SEQ IRQ */
-    case (0x03):
+    case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
         if (lp->is_tx) {
             lp->is_tx = 0;
             dev_dbg(printdev(lp), "TX is done. No ACK\n");
             mcr20a_handle_tx_complete(lp);
         }
         break;
-    case (0x05):
+    case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
         /* rx is starting */
         dev_dbg(printdev(lp), "RX is starting\n");
         mcr20a_handle_rx(lp);
         break;
-    case (0x07):
+    case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
         if (lp->is_tx) {
             /* tx is done */
             lp->is_tx = 0;
@@ -927,7 +927,7 @@ mcr20a_irq_clean_complete(void *context)
             mcr20a_handle_rx(lp);
         }
         break;
-    case (0x01):
+    case (DAR_IRQSTS1_SEQIRQ):
         if (lp->is_tx) {
             dev_dbg(printdev(lp), "TX is starting\n");
             mcr20a_handle_tx(lp);
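
The mcr20a hunks replace the magic numbers 0x03, 0x05, 0x07 and 0x01 with the DAR_IRQSTS1_* flag combinations they encode, so each case documents which interrupt bits it handles. A small self-contained sketch of the same idea; the IRQ_* values are made up for illustration and stand in for the real register masks:

    #include <stdio.h>

    #define IRQ_SEQ 0x01
    #define IRQ_TX  0x02
    #define IRQ_RX  0x04

    /* Dispatch on composed flag names rather than raw hex values,
     * mirroring the case labels in the patched driver. */
    static void dispatch(unsigned int seq_state)
    {
        switch (seq_state) {
        case (IRQ_TX | IRQ_SEQ):
            puts("TX done");
            break;
        case (IRQ_RX | IRQ_SEQ):
            puts("RX starting");
            break;
        case (IRQ_RX | IRQ_TX | IRQ_SEQ):
            puts("TX done, RX starting");
            break;
        case (IRQ_SEQ):
            puts("sequence complete");
            break;
        default:
            puts("unexpected state");
        }
    }

    int main(void)
    {
        dispatch(IRQ_TX | IRQ_SEQ); /* same bits as the old 0x03 case */
        return 0;
    }
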
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index db1172db1e7c..19ab8a7d1e48 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -93,7 +93,12 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
     if (!netdev)
         return !phydev->suspended;
 
-    /* Don't suspend PHY if the attached netdev parent may wakeup.
+    if (netdev->wol_enabled)
+        return false;
+
+    /* As long as not all affected network drivers support the
+     * wol_enabled flag, let's check for hints that WoL is enabled.
+     * Don't suspend PHY if the attached netdev parent may wake up.
      * The parent may point to a PCI device, as in tg3 driver.
      */
     if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
@@ -1132,9 +1137,9 @@ void phy_detach(struct phy_device *phydev)
         sysfs_remove_link(&dev->dev.kobj, "phydev");
         sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev");
     }
+    phy_suspend(phydev);
     phydev->attached_dev->phydev = NULL;
     phydev->attached_dev = NULL;
-    phy_suspend(phydev);
     phydev->phylink = NULL;
 
     phy_led_triggers_unregister(phydev);
@@ -1168,12 +1173,13 @@ EXPORT_SYMBOL(phy_detach);
 int phy_suspend(struct phy_device *phydev)
 {
     struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
+    struct net_device *netdev = phydev->attached_dev;
     struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
     int ret = 0;
 
     /* If the device has WOL enabled, we cannot suspend the PHY */
     phy_ethtool_get_wol(phydev, &wol);
-    if (wol.wolopts)
+    if (wol.wolopts || (netdev && netdev->wol_enabled))
         return -EBUSY;
 
     if (phydev->drv && phydrv->suspend)
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 3ba5cf2a8a5f..7abca86c3aa9 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -717,6 +717,30 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
     return 0;
 }
 
+static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy,
+                 phy_interface_t interface)
+{
+    int ret;
+
+    if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
+            (pl->link_an_mode == MLO_AN_INBAND &&
+             phy_interface_mode_is_8023z(interface))))
+        return -EINVAL;
+
+    if (pl->phydev)
+        return -EBUSY;
+
+    ret = phy_attach_direct(pl->netdev, phy, 0, interface);
+    if (ret)
+        return ret;
+
+    ret = phylink_bringup_phy(pl, phy);
+    if (ret)
+        phy_detach(phy);
+
+    return ret;
+}
+
 /**
  * phylink_connect_phy() - connect a PHY to the phylink instance
  * @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -734,31 +758,13 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
  */
 int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
 {
-    int ret;
-
-    if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
-            (pl->link_an_mode == MLO_AN_INBAND &&
-             phy_interface_mode_is_8023z(pl->link_interface))))
-        return -EINVAL;
-
-    if (pl->phydev)
-        return -EBUSY;
-
     /* Use PHY device/driver interface */
     if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
         pl->link_interface = phy->interface;
         pl->link_config.interface = pl->link_interface;
     }
 
-    ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface);
-    if (ret)
-        return ret;
-
-    ret = phylink_bringup_phy(pl, phy);
-    if (ret)
-        phy_detach(phy);
-
-    return ret;
+    return __phylink_connect_phy(pl, phy, pl->link_interface);
 }
 EXPORT_SYMBOL_GPL(phylink_connect_phy);
 
@@ -1672,7 +1678,9 @@ static void phylink_sfp_link_up(void *upstream)
 
 static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
 {
-    return phylink_connect_phy(upstream, phy);
+    struct phylink *pl = upstream;
+
+    return __phylink_connect_phy(upstream, phy, pl->link_config.interface);
 }
 
 static void phylink_sfp_disconnect_phy(void *upstream)
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 52fffb98fde9..6e13b8832bc7 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1098,8 +1098,11 @@ static int sfp_hwmon_insert(struct sfp *sfp)
 
 static void sfp_hwmon_remove(struct sfp *sfp)
 {
-    hwmon_device_unregister(sfp->hwmon_dev);
-    kfree(sfp->hwmon_name);
+    if (!IS_ERR_OR_NULL(sfp->hwmon_dev)) {
+        hwmon_device_unregister(sfp->hwmon_dev);
+        sfp->hwmon_dev = NULL;
+        kfree(sfp->hwmon_name);
+    }
 }
 #else
 static int sfp_hwmon_insert(struct sfp *sfp)
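
The sfp change makes hwmon teardown conditional and clears the handle afterwards, so removal is safe even when registration never happened or runs twice. A userspace sketch of that guard-and-clear idiom; struct sensor and sensor_remove() are illustrative, not kernel API:

    #include <stdio.h>
    #include <stdlib.h>

    struct sensor {
        char *name; /* stands in for sfp->hwmon_name */
    };

    /* Tear down only if the resource was actually created, and clear the
     * pointer so a repeated call is a harmless no-op. */
    static void sensor_remove(struct sensor *s)
    {
        if (s->name) {
            free(s->name);
            s->name = NULL;
        }
    }

    int main(void)
    {
        struct sensor s = { .name = malloc(8) };

        sensor_remove(&s); /* frees once */
        sensor_remove(&s); /* safe: pointer already cleared */
        printf("name = %p\n", (void *)s.name);
        return 0;
    }
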
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6a047d30e8c6..d887016e54b6 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1167,6 +1167,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
         return -EBUSY;
     }
 
+    if (dev == port_dev) {
+        NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
+        netdev_err(dev, "Cannot enslave team device to itself\n");
+        return -EINVAL;
+    }
+
     if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
         vlan_uses_dev(dev)) {
         NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e2648b5a3861..50e9cc19023a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -181,6 +181,7 @@ struct tun_file {
     };
     struct napi_struct napi;
     bool napi_enabled;
+    bool napi_frags_enabled;
     struct mutex napi_mutex;    /* Protects access to the above napi */
     struct list_head next;
     struct tun_struct *detached;
@@ -313,32 +314,32 @@ static int tun_napi_poll(struct napi_struct *napi, int budget)
 }
 
 static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
-              bool napi_en)
+              bool napi_en, bool napi_frags)
 {
     tfile->napi_enabled = napi_en;
+    tfile->napi_frags_enabled = napi_en && napi_frags;
     if (napi_en) {
         netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
                    NAPI_POLL_WEIGHT);
         napi_enable(&tfile->napi);
-        mutex_init(&tfile->napi_mutex);
     }
 }
 
-static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_disable(struct tun_file *tfile)
 {
     if (tfile->napi_enabled)
         napi_disable(&tfile->napi);
 }
 
-static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_del(struct tun_file *tfile)
 {
     if (tfile->napi_enabled)
         netif_napi_del(&tfile->napi);
 }
 
-static bool tun_napi_frags_enabled(const struct tun_struct *tun)
+static bool tun_napi_frags_enabled(const struct tun_file *tfile)
 {
-    return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
+    return tfile->napi_frags_enabled;
 }
 
 #ifdef CONFIG_TUN_VNET_CROSS_LE
@@ -690,8 +691,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
     tun = rtnl_dereference(tfile->tun);
 
     if (tun && clean) {
-        tun_napi_disable(tun, tfile);
-        tun_napi_del(tun, tfile);
+        tun_napi_disable(tfile);
+        tun_napi_del(tfile);
     }
 
     if (tun && !tfile->detached) {
@@ -758,7 +759,7 @@ static void tun_detach_all(struct net_device *dev)
     for (i = 0; i < n; i++) {
         tfile = rtnl_dereference(tun->tfiles[i]);
         BUG_ON(!tfile);
-        tun_napi_disable(tun, tfile);
+        tun_napi_disable(tfile);
         tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
         tfile->socket.sk->sk_data_ready(tfile->socket.sk);
         RCU_INIT_POINTER(tfile->tun, NULL);
@@ -774,7 +775,7 @@ static void tun_detach_all(struct net_device *dev)
     synchronize_net();
     for (i = 0; i < n; i++) {
         tfile = rtnl_dereference(tun->tfiles[i]);
-        tun_napi_del(tun, tfile);
+        tun_napi_del(tfile);
         /* Drop read queue */
         tun_queue_purge(tfile);
         xdp_rxq_info_unreg(&tfile->xdp_rxq);
@@ -793,7 +794,7 @@ static void tun_detach_all(struct net_device *dev)
 }
 
 static int tun_attach(struct tun_struct *tun, struct file *file,
-              bool skip_filter, bool napi)
+              bool skip_filter, bool napi, bool napi_frags)
 {
     struct tun_file *tfile = file->private_data;
     struct net_device *dev = tun->dev;
@@ -866,7 +867,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
         tun_enable_queue(tfile);
     } else {
         sock_hold(&tfile->sk);
-        tun_napi_init(tun, tfile, napi);
+        tun_napi_init(tun, tfile, napi, napi_frags);
     }
 
     tun_set_real_num_queues(tun);
@@ -1709,7 +1710,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
     int err;
     u32 rxhash = 0;
     int skb_xdp = 1;
-    bool frags = tun_napi_frags_enabled(tun);
+    bool frags = tun_napi_frags_enabled(tfile);
 
     if (!(tun->dev->flags & IFF_UP))
         return -EIO;
@@ -2534,7 +2535,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
         return err;
 
     err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
-             ifr->ifr_flags & IFF_NAPI);
+             ifr->ifr_flags & IFF_NAPI,
+             ifr->ifr_flags & IFF_NAPI_FRAGS);
     if (err < 0)
         return err;
 
@@ -2632,7 +2634,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
               (ifr->ifr_flags & TUN_FEATURES);
 
         INIT_LIST_HEAD(&tun->disabled);
-        err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
+        err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
+                 ifr->ifr_flags & IFF_NAPI_FRAGS);
         if (err < 0)
             goto err_free_flow;
 
@@ -2781,7 +2784,8 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
         ret = security_tun_dev_attach_queue(tun->security);
         if (ret < 0)
             goto unlock;
-        ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
+        ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
+                 tun->flags & IFF_NAPI_FRAGS);
     } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
         tun = rtnl_dereference(tfile->tun);
         if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
@@ -3199,6 +3203,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
         return -ENOMEM;
     }
 
+    mutex_init(&tfile->napi_mutex);
     RCU_INIT_POINTER(tfile->tun, NULL);
     tfile->flags = 0;
     tfile->ifindex = 0;
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index e95dd12edec4..023b8d0bf175 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -607,6 +607,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
     struct usbnet *dev = netdev_priv(net);
     u8 opt = 0;
 
+    if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+        return -EINVAL;
+
     if (wolinfo->wolopts & WAKE_PHY)
         opt |= AX_MONITOR_LINK;
     if (wolinfo->wolopts & WAKE_MAGIC)
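
This and the following ethtool hunks (ax88179, lan78xx, r8152, smsc75xx, smsc95xx, sr9800) all add the same guard: reject a Wake-on-LAN request containing bits the device cannot honour instead of silently masking them. A standalone sketch of the check; the WAKE_* values here are illustrative stand-ins for the linux/ethtool.h constants:

    #include <stdio.h>

    #define WAKE_PHY   0x01
    #define WAKE_MAGIC 0x02

    /* Fail fast on unsupported wake bits so userspace learns the request
     * was not honoured, rather than getting a silently truncated config. */
    static int set_wol(unsigned int wolopts)
    {
        if (wolopts & ~(WAKE_PHY | WAKE_MAGIC))
            return -22; /* -EINVAL */
        printf("accepted wolopts %#x\n", wolopts);
        return 0;
    }

    int main(void)
    {
        printf("%d\n", set_wol(WAKE_MAGIC));        /* 0 */
        printf("%d\n", set_wol(WAKE_MAGIC | 0x20)); /* -22 */
        return 0;
    }
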
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 9e8ad372f419..2207f7a7d1ff 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
     struct usbnet *dev = netdev_priv(net);
     u8 opt = 0;
 
+    if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+        return -EINVAL;
+
     if (wolinfo->wolopts & WAKE_PHY)
         opt |= AX_MONITOR_MODE_RWLC;
     if (wolinfo->wolopts & WAKE_MAGIC)
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index a9991c5f4736..c3c9ba44e2a1 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1401,19 +1401,10 @@ static int lan78xx_set_wol(struct net_device *netdev,
     if (ret < 0)
         return ret;
 
-    pdata->wol = 0;
-    if (wol->wolopts & WAKE_UCAST)
-        pdata->wol |= WAKE_UCAST;
-    if (wol->wolopts & WAKE_MCAST)
-        pdata->wol |= WAKE_MCAST;
-    if (wol->wolopts & WAKE_BCAST)
-        pdata->wol |= WAKE_BCAST;
-    if (wol->wolopts & WAKE_MAGIC)
-        pdata->wol |= WAKE_MAGIC;
-    if (wol->wolopts & WAKE_PHY)
-        pdata->wol |= WAKE_PHY;
-    if (wol->wolopts & WAKE_ARP)
-        pdata->wol |= WAKE_ARP;
+    if (wol->wolopts & ~WAKE_ALL)
+        return -EINVAL;
+
+    pdata->wol = wol->wolopts;
 
     device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
 
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2cd71bdb6484..f1b5201cc320 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -4506,6 +4506,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
     if (!rtl_can_wakeup(tp))
         return -EOPNOTSUPP;
 
+    if (wol->wolopts & ~WAKE_ANY)
+        return -EINVAL;
+
     ret = usb_autopm_get_interface(tp->intf);
     if (ret < 0)
         goto out_set_wol;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 05553d252446..ec287c9741e8 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -731,6 +731,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
     struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
     int ret;
 
+    if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+        return -EINVAL;
+
     pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
 
     ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
@@ -1517,6 +1520,7 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
     struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
     if (pdata) {
+        cancel_work_sync(&pdata->set_multicast);
         netif_dbg(dev, ifdown, dev->net, "free pdata\n");
         kfree(pdata);
         pdata = NULL;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 06b4d290784d..262e7a3c23cb 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -774,6 +774,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
     struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
     int ret;
 
+    if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+        return -EINVAL;
+
     pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
 
     ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 9277a0f228df..35f39f23d881 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
     struct usbnet *dev = netdev_priv(net);
     u8 opt = 0;
 
+    if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+        return -EINVAL;
+
     if (wolinfo->wolopts & WAKE_PHY)
         opt |= SR_MONITOR_LINK;
     if (wolinfo->wolopts & WAKE_MAGIC)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 765920905226..dab504ec5e50 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1699,17 +1699,6 @@ static void virtnet_stats(struct net_device *dev,
     tot->rx_frame_errors = dev->stats.rx_frame_errors;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void virtnet_netpoll(struct net_device *dev)
-{
-    struct virtnet_info *vi = netdev_priv(dev);
-    int i;
-
-    for (i = 0; i < vi->curr_queue_pairs; i++)
-        napi_schedule(&vi->rq[i].napi);
-}
-#endif
-
 static void virtnet_ack_link_announce(struct virtnet_info *vi)
 {
     rtnl_lock();
@@ -2447,9 +2436,6 @@ static const struct net_device_ops virtnet_netdev = {
     .ndo_get_stats64 = virtnet_stats,
     .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
     .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-    .ndo_poll_controller = virtnet_netpoll,
-#endif
     .ndo_bpf = virtnet_xdp,
     .ndo_xdp_xmit = virtnet_xdp_xmit,
     .ndo_features_check = passthru_features_check,
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ababba37d735..2b8da2b7e721 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3539,6 +3539,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
         nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
         nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
         nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
+        nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */
         nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
         nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
         nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
@@ -3603,6 +3604,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
     }
 
     if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
+        nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
+               !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
         nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
         nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
         nla_put_u8(skb, IFLA_VXLAN_LEARNING,
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 094cea775d0c..ef298d8525c5 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -257,7 +257,7 @@ static const struct
     [I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO },
     [I2400M_MS_BUSY] = { "busy", -EBUSY },
     [I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ },
-    [I2400M_MS_UNINITIALIZED] = { "not unitialized", -EILSEQ },
+    [I2400M_MS_UNINITIALIZED] = { "uninitialized", -EILSEQ },
     [I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO },
     [I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO },
     [I2400M_MS_NO_RF] = { "no RF", -EIO },
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index 6b0e1ec346cb..d46d57b989ae 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -1518,13 +1518,15 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
         }
     } else {
         /* More than a single header/data pair were missed.
-         * Report this error, and reset the controller to
+         * Report this error. If running with open-source
+         * firmware, then reset the controller to
          * revive operation.
          */
         b43dbg(dev->wl,
                "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
                ring->index, firstused, slot);
-        b43_controller_restart(dev, "Out of order TX");
+        if (dev->fw.opensource)
+            b43_controller_restart(dev, "Out of order TX");
         return;
     }
 }
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
index 591687984962..497fd766d87c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
@@ -51,6 +51,7 @@
 
 static const struct iwl_base_params iwl1000_base_params = {
     .num_of_queues = IWLAGN_NUM_QUEUES,
+    .max_tfd_queue_size = 256,
     .eeprom_size = OTP_LOW_IMAGE_SIZE,
     .pll_cfg = true,
     .max_ll_items = OTP_MAX_LL_ITEMS_1000,
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 1068757ec42e..07442ada6dd0 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -520,7 +520,6 @@ struct mac80211_hwsim_data {
     int channels, idx;
     bool use_chanctx;
     bool destroy_on_close;
-    struct work_struct destroy_work;
     u32 portid;
     char alpha2[2];
     const struct ieee80211_regdomain *regd;
@@ -2935,8 +2934,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
     hwsim_radios_generation++;
     spin_unlock_bh(&hwsim_radio_lock);
 
-    if (idx > 0)
-        hwsim_mcast_new_radio(idx, info, param);
+    hwsim_mcast_new_radio(idx, info, param);
 
     return idx;
 
@@ -3565,30 +3563,27 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
     .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
 };
 
-static void destroy_radio(struct work_struct *work)
-{
-    struct mac80211_hwsim_data *data =
-        container_of(work, struct mac80211_hwsim_data, destroy_work);
-
-    hwsim_radios_generation++;
-    mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL);
-}
-
 static void remove_user_radios(u32 portid)
 {
     struct mac80211_hwsim_data *entry, *tmp;
+    LIST_HEAD(list);
 
     spin_lock_bh(&hwsim_radio_lock);
     list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
         if (entry->destroy_on_close && entry->portid == portid) {
-            list_del(&entry->list);
+            list_move(&entry->list, &list);
             rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
                            hwsim_rht_params);
-            INIT_WORK(&entry->destroy_work, destroy_radio);
-            queue_work(hwsim_wq, &entry->destroy_work);
+            hwsim_radios_generation++;
         }
     }
     spin_unlock_bh(&hwsim_radio_lock);
+
+    list_for_each_entry_safe(entry, tmp, &list, list) {
+        list_del(&entry->list);
+        mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy),
+                     NULL);
+    }
 }
 
 static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
@@ -3646,6 +3641,7 @@ static __net_init int hwsim_init_net(struct net *net)
 static void __net_exit hwsim_exit_net(struct net *net)
 {
     struct mac80211_hwsim_data *data, *tmp;
+    LIST_HEAD(list);
 
     spin_lock_bh(&hwsim_radio_lock);
     list_for_each_entry_safe(data, tmp, &hwsim_radios, list) {
@@ -3656,17 +3652,19 @@ static void __net_exit hwsim_exit_net(struct net *net)
         if (data->netgroup == hwsim_net_get_netgroup(&init_net))
             continue;
 
-        list_del(&data->list);
+        list_move(&data->list, &list);
         rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
                        hwsim_rht_params);
         hwsim_radios_generation++;
-        spin_unlock_bh(&hwsim_radio_lock);
+    }
+    spin_unlock_bh(&hwsim_radio_lock);
+
+    list_for_each_entry_safe(data, tmp, &list, list) {
+        list_del(&data->list);
         mac80211_hwsim_del_radio(data,
                      wiphy_name(data->hw->wiphy),
                      NULL);
-        spin_lock_bh(&hwsim_radio_lock);
     }
-    spin_unlock_bh(&hwsim_radio_lock);
 
     ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
 }
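
The hwsim hunks replace per-radio destroy work items with a simpler scheme: matching entries are moved to a private list while the spinlock is held, and the actual (potentially sleeping) deletion happens only after the lock is dropped. A compact userspace sketch of that deferred-teardown pattern; a plain singly linked list and a pthread mutex stand in for the kernel list and spinlock:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct radio {
        int id;
        struct radio *next;
    };

    static struct radio *radios;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Detach entries onto a private list under the lock, then tear them
     * down after dropping it, so the destructor never runs locked. */
    static void remove_all(void)
    {
        struct radio *doomed = NULL, *r, *next;

        pthread_mutex_lock(&lock);
        for (r = radios; r; r = next) {
            next = r->next;
            r->next = doomed;
            doomed = r;
        }
        radios = NULL;
        pthread_mutex_unlock(&lock);

        for (r = doomed; r; r = next) {
            next = r->next;
            printf("deleting radio %d\n", r->id);
            free(r);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct radio *r = malloc(sizeof(*r));
            r->id = i;
            r->next = radios;
            radios = r;
        }
        remove_all();
        return 0;
    }
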
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index cf6ffb1ba4a2..22bc9d368728 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -77,9 +77,8 @@ static void mt76x0_remove_interface(struct ieee80211_hw *hw,
 {
     struct mt76x0_dev *dev = hw->priv;
     struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
-    unsigned int wcid = mvif->group_wcid.idx;
 
-    dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
+    dev->vif_mask &= ~BIT(mvif->idx);
 }
 
 static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index a46a1e94505d..936c0b3e0ba2 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -241,8 +241,9 @@ struct xenvif_hash_cache {
 struct xenvif_hash {
     unsigned int alg;
     u32 flags;
+    bool mapping_sel;
     u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
-    u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
+    u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE];
     unsigned int size;
     struct xenvif_hash_cache cache;
 };
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 3c4c58b9fe76..0ccb021f1e78 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -324,7 +324,8 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
         return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
 
     vif->hash.size = size;
-    memset(vif->hash.mapping, 0, sizeof(u32) * size);
+    memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
+           sizeof(u32) * size);
 
     return XEN_NETIF_CTRL_STATUS_SUCCESS;
 }
@@ -332,31 +333,49 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
 u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
                 u32 off)
 {
-    u32 *mapping = &vif->hash.mapping[off];
-    struct gnttab_copy copy_op = {
+    u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
+    unsigned int nr = 1;
+    struct gnttab_copy copy_op[2] = {{
         .source.u.ref = gref,
         .source.domid = vif->domid,
-        .dest.u.gmfn = virt_to_gfn(mapping),
         .dest.domid = DOMID_SELF,
-        .dest.offset = xen_offset_in_page(mapping),
-        .len = len * sizeof(u32),
+        .len = len * sizeof(*mapping),
         .flags = GNTCOPY_source_gref
-    };
+    }};
 
-    if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
+    if ((off + len < off) || (off + len > vif->hash.size) ||
+        len > XEN_PAGE_SIZE / sizeof(*mapping))
         return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
 
-    while (len-- != 0)
-        if (mapping[off++] >= vif->num_queues)
-            return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+    copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
+    copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
+    if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
+        copy_op[1] = copy_op[0];
+        copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
+        copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
+        copy_op[1].dest.offset = 0;
+        copy_op[1].len = copy_op[0].len - copy_op[1].source.offset;
+        copy_op[0].len = copy_op[1].source.offset;
+        nr = 2;
+    }
 
-    if (copy_op.len != 0) {
-        gnttab_batch_copy(&copy_op, 1);
+    memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
+           vif->hash.size * sizeof(*mapping));
 
-        if (copy_op.status != GNTST_okay)
+    if (copy_op[0].len != 0) {
+        gnttab_batch_copy(copy_op, nr);
+
+        if (copy_op[0].status != GNTST_okay ||
+            copy_op[nr - 1].status != GNTST_okay)
             return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
     }
 
+    while (len-- != 0)
+        if (mapping[off++] >= vif->num_queues)
+            return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+    vif->hash.mapping_sel = !vif->hash.mapping_sel;
+
     return XEN_NETIF_CTRL_STATUS_SUCCESS;
 }
 
@@ -408,6 +427,8 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
     }
 
     if (vif->hash.size != 0) {
+        const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];
+
         seq_puts(m, "\nHash Mapping:\n");
 
         for (i = 0; i < vif->hash.size; ) {
@@ -420,7 +441,7 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
             seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
 
             for (j = 0; j < n; j++, i++)
-                seq_printf(m, "%4u ", vif->hash.mapping[i]);
+                seq_printf(m, "%4u ", mapping[i]);
 
             seq_puts(m, "\n");
         }
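
The xen-netback hunks double-buffer the hash mapping: the table becomes mapping[2][...] plus a mapping_sel selector, updates are staged into the inactive copy, and the selector flips only once the new table is complete and validated, so readers never observe a half-written mapping. A minimal sketch of the idea with plain arrays; the sizes and names are illustrative:

    #include <stdio.h>
    #include <string.h>

    #define TABLE_SIZE 8

    /* Readers use table[sel]; writers build the update in table[!sel]
     * and publish it by flipping sel at the very end. */
    static unsigned int table[2][TABLE_SIZE];
    static int sel;

    static void update(const unsigned int *src, size_t off, size_t len)
    {
        unsigned int *next = table[!sel];

        memcpy(next, table[sel], sizeof(table[0])); /* start from current state */
        memcpy(next + off, src, len * sizeof(*src));
        sel = !sel;                                 /* publish the new table */
    }

    int main(void)
    {
        unsigned int patch[] = { 7, 7, 7 };

        update(patch, 2, 3);
        printf("entry 3 = %u (read from table[%d])\n", table[sel][3], sel);
        return 0;
    }
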
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 92274c237200..f6ae23fc3f6b 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -162,7 +162,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
     if (size == 0)
         return skb_get_hash_raw(skb) % dev->real_num_tx_queues;
 
-    return vif->hash.mapping[skb_get_hash_raw(skb) % size];
+    return vif->hash.mapping[vif->hash.mapping_sel]
+                [skb_get_hash_raw(skb) % size];
 }
 
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 50eb0729385b..a41d79b8d46a 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1145,7 +1145,6 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
 {
     struct device *dev = &pcie->pdev->dev;
     struct device_node *np = dev->of_node;
-    unsigned int i;
     int ret;
 
     INIT_LIST_HEAD(&pcie->resources);
@@ -1179,13 +1178,58 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
                      resource_size(&pcie->io) - 1);
         pcie->realio.name = "PCI I/O";
 
+        pci_add_resource(&pcie->resources, &pcie->realio);
+    }
+
+    return devm_request_pci_bus_resources(dev, &pcie->resources);
+}
+
+/*
+ * This is a copy of pci_host_probe(), except that it does the I/O
+ * remap as the last step, once we are sure we won't fail.
+ *
+ * It should be removed once the I/O remap error handling issue has
+ * been sorted out.
+ */
+static int mvebu_pci_host_probe(struct pci_host_bridge *bridge)
+{
+    struct mvebu_pcie *pcie;
+    struct pci_bus *bus, *child;
+    int ret;
+
+    ret = pci_scan_root_bus_bridge(bridge);
+    if (ret < 0) {
+        dev_err(bridge->dev.parent, "Scanning root bridge failed");
+        return ret;
+    }
+
+    pcie = pci_host_bridge_priv(bridge);
+    if (resource_size(&pcie->io) != 0) {
+        unsigned int i;
+
         for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
             pci_ioremap_io(i, pcie->io.start + i);
+    }
 
-        pci_add_resource(&pcie->resources, &pcie->realio);
+    bus = bridge->bus;
+
+    /*
+     * We insert PCI resources into the iomem_resource and
+     * ioport_resource trees in either pci_bus_claim_resources()
+     * or pci_bus_assign_resources().
+     */
+    if (pci_has_flag(PCI_PROBE_ONLY)) {
+        pci_bus_claim_resources(bus);
+    } else {
+        pci_bus_size_bridges(bus);
+        pci_bus_assign_resources(bus);
+
+        list_for_each_entry(child, &bus->children, node)
+            pcie_bus_configure_settings(child);
     }
 
-    return devm_request_pci_bus_resources(dev, &pcie->resources);
+    pci_bus_add_devices(bus);
+    return 0;
 }
 
 static int mvebu_pcie_probe(struct platform_device *pdev)
@@ -1268,7 +1312,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
     bridge->align_resource = mvebu_pcie_align_resource;
     bridge->msi = pcie->msi;
 
-    return pci_host_probe(bridge);
+    return mvebu_pci_host_probe(bridge);
 }
 
 static const struct of_device_id mvebu_pcie_of_match_table[] = {
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 1835f3a7aa8d..51b6c81671c1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1289,12 +1289,12 @@ int pci_save_state(struct pci_dev *dev)
 EXPORT_SYMBOL(pci_save_state);
 
 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
-                     u32 saved_val, int retry)
+                     u32 saved_val, int retry, bool force)
 {
     u32 val;
 
     pci_read_config_dword(pdev, offset, &val);
-    if (val == saved_val)
+    if (!force && val == saved_val)
         return;
 
     for (;;) {
@@ -1313,25 +1313,36 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1313} 1313}
1314 1314
1315static void pci_restore_config_space_range(struct pci_dev *pdev, 1315static void pci_restore_config_space_range(struct pci_dev *pdev,
1316 int start, int end, int retry) 1316 int start, int end, int retry,
1317 bool force)
1317{ 1318{
1318 int index; 1319 int index;
1319 1320
1320 for (index = end; index >= start; index--) 1321 for (index = end; index >= start; index--)
1321 pci_restore_config_dword(pdev, 4 * index, 1322 pci_restore_config_dword(pdev, 4 * index,
1322 pdev->saved_config_space[index], 1323 pdev->saved_config_space[index],
1323 retry); 1324 retry, force);
1324} 1325}
1325 1326
1326static void pci_restore_config_space(struct pci_dev *pdev) 1327static void pci_restore_config_space(struct pci_dev *pdev)
1327{ 1328{
1328 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) { 1329 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1329 pci_restore_config_space_range(pdev, 10, 15, 0); 1330 pci_restore_config_space_range(pdev, 10, 15, 0, false);
1330 /* Restore BARs before the command register. */ 1331 /* Restore BARs before the command register. */
1331 pci_restore_config_space_range(pdev, 4, 9, 10); 1332 pci_restore_config_space_range(pdev, 4, 9, 10, false);
1332 pci_restore_config_space_range(pdev, 0, 3, 0); 1333 pci_restore_config_space_range(pdev, 0, 3, 0, false);
1334 } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1335 pci_restore_config_space_range(pdev, 12, 15, 0, false);
1336
1337 /*
1338 * Force rewriting of prefetch registers to avoid S3 resume
1339 * issues on Intel PCI bridges that occur when these
1340 * registers are not explicitly written.
1341 */
1342 pci_restore_config_space_range(pdev, 9, 11, 0, true);
1343 pci_restore_config_space_range(pdev, 0, 8, 0, false);
1333 } else { 1344 } else {
1334 pci_restore_config_space_range(pdev, 0, 15, 0); 1345 pci_restore_config_space_range(pdev, 0, 15, 0, false);
1335 } 1346 }
1336} 1347}
1337 1348
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index de8282420f96..ffce6f39828a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -610,7 +610,7 @@ static void qeth_put_reply(struct qeth_reply *reply)
610static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, 610static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
611 struct qeth_card *card) 611 struct qeth_card *card)
612{ 612{
613 char *ipa_name; 613 const char *ipa_name;
614 int com = cmd->hdr.command; 614 int com = cmd->hdr.command;
615 ipa_name = qeth_get_ipa_cmd_name(com); 615 ipa_name = qeth_get_ipa_cmd_name(com);
616 if (rc) 616 if (rc)
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 5bcb8dafc3ee..e891c0b52f4c 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -148,10 +148,10 @@ EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
148 148
149struct ipa_rc_msg { 149struct ipa_rc_msg {
150 enum qeth_ipa_return_codes rc; 150 enum qeth_ipa_return_codes rc;
151 char *msg; 151 const char *msg;
152}; 152};
153 153
154static struct ipa_rc_msg qeth_ipa_rc_msg[] = { 154static const struct ipa_rc_msg qeth_ipa_rc_msg[] = {
155 {IPA_RC_SUCCESS, "success"}, 155 {IPA_RC_SUCCESS, "success"},
156 {IPA_RC_NOTSUPP, "Command not supported"}, 156 {IPA_RC_NOTSUPP, "Command not supported"},
157 {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"}, 157 {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
@@ -219,23 +219,23 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
219 219
220 220
221 221
222char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc) 222const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
223{ 223{
224 int x = 0; 224 int x;
225 qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) / 225
226 sizeof(struct ipa_rc_msg) - 1].rc = rc; 226 for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_msg) - 1; x++)
227 while (qeth_ipa_rc_msg[x].rc != rc) 227 if (qeth_ipa_rc_msg[x].rc == rc)
228 x++; 228 return qeth_ipa_rc_msg[x].msg;
229 return qeth_ipa_rc_msg[x].msg; 229 return qeth_ipa_rc_msg[x].msg;
230} 230}
231 231
232 232
233struct ipa_cmd_names { 233struct ipa_cmd_names {
234 enum qeth_ipa_cmds cmd; 234 enum qeth_ipa_cmds cmd;
235 char *name; 235 const char *name;
236}; 236};
237 237
238static struct ipa_cmd_names qeth_ipa_cmd_names[] = { 238static const struct ipa_cmd_names qeth_ipa_cmd_names[] = {
239 {IPA_CMD_STARTLAN, "startlan"}, 239 {IPA_CMD_STARTLAN, "startlan"},
240 {IPA_CMD_STOPLAN, "stoplan"}, 240 {IPA_CMD_STOPLAN, "stoplan"},
241 {IPA_CMD_SETVMAC, "setvmac"}, 241 {IPA_CMD_SETVMAC, "setvmac"},
@@ -267,13 +267,12 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
267 {IPA_CMD_UNKNOWN, "unknown"}, 267 {IPA_CMD_UNKNOWN, "unknown"},
268}; 268};
269 269
270char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd) 270const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
271{ 271{
272 int x = 0; 272 int x;
273 qeth_ipa_cmd_names[ 273
274 sizeof(qeth_ipa_cmd_names) / 274 for (x = 0; x < ARRAY_SIZE(qeth_ipa_cmd_names) - 1; x++)
275 sizeof(struct ipa_cmd_names)-1].cmd = cmd; 275 if (qeth_ipa_cmd_names[x].cmd == cmd)
276 while (qeth_ipa_cmd_names[x].cmd != cmd) 276 return qeth_ipa_cmd_names[x].name;
277 x++;
278 return qeth_ipa_cmd_names[x].name; 277 return qeth_ipa_cmd_names[x].name;
279} 278}
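
The old qeth lookups terminated an unbounded while loop by writing the search key into the table's last entry, which also kept the tables from being const. The replacement is a bounded scan that falls back to the final catch-all entry on a miss. A small standalone sketch of that pattern, with an invented rc_msg table:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct rc_msg { int rc; const char *msg; };

    static const struct rc_msg table[] = {
        { 0,  "success" },
        { 4,  "command not supported" },
        { -1, "unknown return code" },   /* catch-all must stay last */
    };

    static const char *lookup(int rc)
    {
        size_t i;

        /* Scan every real entry; fall back to the catch-all on a miss. */
        for (i = 0; i < ARRAY_SIZE(table) - 1; i++)
            if (table[i].rc == rc)
                return table[i].msg;
        return table[ARRAY_SIZE(table) - 1].msg;
    }

    int main(void)
    {
        printf("%s\n", lookup(4));    /* command not supported */
        printf("%s\n", lookup(99));   /* unknown return code */
        return 0;
    }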
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index aa8b9196b089..aa5de1fe01e1 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -797,8 +797,8 @@ enum qeth_ipa_arp_return_codes {
797 QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008, 797 QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
798}; 798};
799 799
800extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc); 800extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
801extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd); 801extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
802 802
803#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \ 803#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
804 sizeof(struct qeth_ipacmd_setassparms_hdr)) 804 sizeof(struct qeth_ipacmd_setassparms_hdr))
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cc8e64dc65ad..e5bd035ebad0 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2472,6 +2472,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
2472 /* start qedi context */ 2472 /* start qedi context */
2473 spin_lock_init(&qedi->hba_lock); 2473 spin_lock_init(&qedi->hba_lock);
2474 spin_lock_init(&qedi->task_idx_lock); 2474 spin_lock_init(&qedi->task_idx_lock);
2475 mutex_init(&qedi->stats_lock);
2475 } 2476 }
2476 qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi); 2477 qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
2477 qedi_ops->ll2->start(qedi->cdev, &params); 2478 qedi_ops->ll2->start(qedi->cdev, &params);
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index ecb22749df0b..8cc015183043 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2729,6 +2729,9 @@ static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
2729{ 2729{
2730 unsigned long addr; 2730 unsigned long addr;
2731 2731
2732 if (!p)
2733 return -ENODEV;
2734
2732 addr = gen_pool_alloc(p, cnt); 2735 addr = gen_pool_alloc(p, cnt);
2733 if (!addr) 2736 if (!addr)
2734 return -ENOMEM; 2737 return -ENOMEM;
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index c646d8713861..681f7d4b7724 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -626,7 +626,7 @@ static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
626{ 626{
627 u32 shift; 627 u32 shift;
628 628
629 shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : RX_SYNC_SHIFT_BASE; 629 shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
630 shift -= tdm_num * 2; 630 shift -= tdm_num * 2;
631 631
632 return shift; 632 return shift;
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 533068651f90..e3fc920af682 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -735,14 +735,6 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
735 u8 link, depth; 735 u8 link, depth;
736 u64 route; 736 u64 route;
737 737
738 /*
739 * After NVM upgrade adding root switch device fails because we
740 * initiated reset. During that time ICM might still send
741 * XDomain connected message which we ignore here.
742 */
743 if (!tb->root_switch)
744 return;
745
746 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; 738 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
747 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> 739 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
748 ICM_LINK_INFO_DEPTH_SHIFT; 740 ICM_LINK_INFO_DEPTH_SHIFT;
@@ -1034,14 +1026,6 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
1034 if (pkg->hdr.packet_id) 1026 if (pkg->hdr.packet_id)
1035 return; 1027 return;
1036 1028
1037 /*
1038 * After NVM upgrade adding root switch device fails because we
1039 * initiated reset. During that time ICM might still send device
1040 * connected message which we ignore here.
1041 */
1042 if (!tb->root_switch)
1043 return;
1044
1045 route = get_route(pkg->route_hi, pkg->route_lo); 1029 route = get_route(pkg->route_hi, pkg->route_lo);
1046 authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; 1030 authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
1047 security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> 1031 security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
@@ -1405,19 +1389,26 @@ static void icm_handle_notification(struct work_struct *work)
1405 1389
1406 mutex_lock(&tb->lock); 1390 mutex_lock(&tb->lock);
1407 1391
1408 switch (n->pkg->code) { 1392 /*
1409 case ICM_EVENT_DEVICE_CONNECTED: 1393 * When the domain is stopped we flush its workqueue but before
1410 icm->device_connected(tb, n->pkg); 1394 * that the root switch is removed. In that case we should treat
1411 break; 1395 * the queued events as being canceled.
1412 case ICM_EVENT_DEVICE_DISCONNECTED: 1396 */
1413 icm->device_disconnected(tb, n->pkg); 1397 if (tb->root_switch) {
1414 break; 1398 switch (n->pkg->code) {
1415 case ICM_EVENT_XDOMAIN_CONNECTED: 1399 case ICM_EVENT_DEVICE_CONNECTED:
1416 icm->xdomain_connected(tb, n->pkg); 1400 icm->device_connected(tb, n->pkg);
1417 break; 1401 break;
1418 case ICM_EVENT_XDOMAIN_DISCONNECTED: 1402 case ICM_EVENT_DEVICE_DISCONNECTED:
1419 icm->xdomain_disconnected(tb, n->pkg); 1403 icm->device_disconnected(tb, n->pkg);
1420 break; 1404 break;
1405 case ICM_EVENT_XDOMAIN_CONNECTED:
1406 icm->xdomain_connected(tb, n->pkg);
1407 break;
1408 case ICM_EVENT_XDOMAIN_DISCONNECTED:
1409 icm->xdomain_disconnected(tb, n->pkg);
1410 break;
1411 }
1421 } 1412 }
1422 1413
1423 mutex_unlock(&tb->lock); 1414 mutex_unlock(&tb->lock);
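
Instead of repeating the tb->root_switch test inside each handler, the dispatch switch is now guarded once, so events that were queued before the domain stopped are treated as canceled. A compact model of that single-guard dispatch, with invented event names:

    #include <stdbool.h>
    #include <stdio.h>

    enum ev { EV_CONNECT, EV_DISCONNECT };

    struct domain { bool root_present; };

    /* One guard at the dispatch point covers every handler. */
    static void handle_event(struct domain *d, enum ev code)
    {
        if (!d->root_present)
            return;                      /* domain stopped: drop the event */

        switch (code) {
        case EV_CONNECT:    puts("device connected");    break;
        case EV_DISCONNECT: puts("device disconnected"); break;
        }
    }

    int main(void)
    {
        struct domain d = { .root_present = false };

        handle_event(&d, EV_CONNECT);    /* silently dropped */
        d.root_present = true;
        handle_event(&d, EV_CONNECT);    /* handled */
        return 0;
    }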
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 2874f9400123..9aa44f9762a3 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1192,5 +1192,5 @@ static void __exit nhi_unload(void)
1192 tb_domain_exit(); 1192 tb_domain_exit();
1193} 1193}
1194 1194
1195fs_initcall(nhi_init); 1195rootfs_initcall(nhi_init);
1196module_exit(nhi_unload); 1196module_exit(nhi_unload);
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index fa8dcb470640..d31b975dd3fd 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -630,10 +630,6 @@ static int dw8250_probe(struct platform_device *pdev)
630 if (!data->skip_autocfg) 630 if (!data->skip_autocfg)
631 dw8250_setup_port(p); 631 dw8250_setup_port(p);
632 632
633#ifdef CONFIG_PM
634 uart.capabilities |= UART_CAP_RPM;
635#endif
636
637 /* If we have a valid fifosize, try hooking up DMA */ 633 /* If we have a valid fifosize, try hooking up DMA */
638 if (p->fifosize) { 634 if (p->fifosize) {
639 data->dma.rxconf.src_maxburst = p->fifosize / 4; 635 data->dma.rxconf.src_maxburst = p->fifosize / 4;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ac4424bf6b13..ab3f6e91853d 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -292,6 +292,33 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
292 }, 292 },
293 293
294 /* 294 /*
295 * The "SCIFA" that is in RZ/T and RZ/A2.
296 * It looks like a normal SCIF with FIFO data, but with a
 297 * compressed address space. Also, the break-out of interrupts
 298 * is different: ERI/BRI, RXI, TXI, TEI, DRI.
299 */
300 [SCIx_RZ_SCIFA_REGTYPE] = {
301 .regs = {
302 [SCSMR] = { 0x00, 16 },
303 [SCBRR] = { 0x02, 8 },
304 [SCSCR] = { 0x04, 16 },
305 [SCxTDR] = { 0x06, 8 },
306 [SCxSR] = { 0x08, 16 },
307 [SCxRDR] = { 0x0A, 8 },
308 [SCFCR] = { 0x0C, 16 },
309 [SCFDR] = { 0x0E, 16 },
310 [SCSPTR] = { 0x10, 16 },
311 [SCLSR] = { 0x12, 16 },
312 },
313 .fifosize = 16,
314 .overrun_reg = SCLSR,
315 .overrun_mask = SCLSR_ORER,
316 .sampling_rate_mask = SCI_SR(32),
317 .error_mask = SCIF_DEFAULT_ERROR_MASK,
318 .error_clear = SCIF_ERROR_CLEAR,
319 },
320
321 /*
295 * Common SH-3 SCIF definitions. 322 * Common SH-3 SCIF definitions.
296 */ 323 */
297 [SCIx_SH3_SCIF_REGTYPE] = { 324 [SCIx_SH3_SCIF_REGTYPE] = {
@@ -319,15 +346,15 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
319 [SCIx_SH4_SCIF_REGTYPE] = { 346 [SCIx_SH4_SCIF_REGTYPE] = {
320 .regs = { 347 .regs = {
321 [SCSMR] = { 0x00, 16 }, 348 [SCSMR] = { 0x00, 16 },
322 [SCBRR] = { 0x02, 8 }, 349 [SCBRR] = { 0x04, 8 },
323 [SCSCR] = { 0x04, 16 }, 350 [SCSCR] = { 0x08, 16 },
324 [SCxTDR] = { 0x06, 8 }, 351 [SCxTDR] = { 0x0c, 8 },
325 [SCxSR] = { 0x08, 16 }, 352 [SCxSR] = { 0x10, 16 },
326 [SCxRDR] = { 0x0a, 8 }, 353 [SCxRDR] = { 0x14, 8 },
327 [SCFCR] = { 0x0c, 16 }, 354 [SCFCR] = { 0x18, 16 },
328 [SCFDR] = { 0x0e, 16 }, 355 [SCFDR] = { 0x1c, 16 },
329 [SCSPTR] = { 0x10, 16 }, 356 [SCSPTR] = { 0x20, 16 },
330 [SCLSR] = { 0x12, 16 }, 357 [SCLSR] = { 0x24, 16 },
331 }, 358 },
332 .fifosize = 16, 359 .fifosize = 16,
333 .overrun_reg = SCLSR, 360 .overrun_reg = SCLSR,
@@ -2810,7 +2837,7 @@ static int sci_init_single(struct platform_device *dev,
2810{ 2837{
2811 struct uart_port *port = &sci_port->port; 2838 struct uart_port *port = &sci_port->port;
2812 const struct resource *res; 2839 const struct resource *res;
2813 unsigned int i, regtype; 2840 unsigned int i;
2814 int ret; 2841 int ret;
2815 2842
2816 sci_port->cfg = p; 2843 sci_port->cfg = p;
@@ -2847,7 +2874,6 @@ static int sci_init_single(struct platform_device *dev,
2847 if (unlikely(sci_port->params == NULL)) 2874 if (unlikely(sci_port->params == NULL))
2848 return -EINVAL; 2875 return -EINVAL;
2849 2876
2850 regtype = sci_port->params - sci_port_params;
2851 switch (p->type) { 2877 switch (p->type) {
2852 case PORT_SCIFB: 2878 case PORT_SCIFB:
2853 sci_port->rx_trigger = 48; 2879 sci_port->rx_trigger = 48;
@@ -2902,10 +2928,6 @@ static int sci_init_single(struct platform_device *dev,
2902 port->regshift = 1; 2928 port->regshift = 1;
2903 } 2929 }
2904 2930
2905 if (regtype == SCIx_SH4_SCIF_REGTYPE)
2906 if (sci_port->reg_size >= 0x20)
2907 port->regshift = 1;
2908
2909 /* 2931 /*
2910 * The UART port needs an IRQ value, so we peg this to the RX IRQ 2932 * The UART port needs an IRQ value, so we peg this to the RX IRQ
2911 * for the multi-IRQ ports, which is where we are primarily 2933 * for the multi-IRQ ports, which is where we are primarily
@@ -3110,6 +3132,10 @@ static const struct of_device_id of_sci_match[] = {
3110 .compatible = "renesas,scif-r7s72100", 3132 .compatible = "renesas,scif-r7s72100",
3111 .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE), 3133 .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE),
3112 }, 3134 },
3135 {
3136 .compatible = "renesas,scif-r7s9210",
3137 .data = SCI_OF_DATA(PORT_SCIF, SCIx_RZ_SCIFA_REGTYPE),
3138 },
3113 /* Family-specific types */ 3139 /* Family-specific types */
3114 { 3140 {
3115 .compatible = "renesas,rcar-gen1-scif", 3141 .compatible = "renesas,rcar-gen1-scif",
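
The new SCIx_RZ_SCIFA_REGTYPE entry shows why this driver keeps register layouts as data rather than code: the RZ part packs the same registers at 2-byte strides where the classic SH-4 SCIF uses 4-byte strides (the offset corrections earlier in this file's diff). A cut-down sketch of such a per-variant offset table, reduced to three entries for brevity:

    #include <stdio.h>

    enum reg { SCSMR, SCBRR, SCSCR, NR_REGS };

    struct reg_desc { unsigned char offset; unsigned char size; };

    /* One table per variant; the driver body indexes it instead of
     * hard-coding offsets. */
    static const struct reg_desc layouts[2][NR_REGS] = {
        /* RZ "SCIFA": 2-byte strides */
        { [SCSMR] = { 0x00, 16 }, [SCBRR] = { 0x02, 8 }, [SCSCR] = { 0x04, 16 } },
        /* SH-4 SCIF: 4-byte strides */
        { [SCSMR] = { 0x00, 16 }, [SCBRR] = { 0x04, 8 }, [SCSCR] = { 0x08, 16 } },
    };

    int main(void)
    {
        for (int variant = 0; variant < 2; variant++)
            printf("variant %d: SCSCR at %#x, %u bits\n", variant,
                   (unsigned)layouts[variant][SCSCR].offset,
                   (unsigned)layouts[variant][SCSCR].size);
        return 0;
    }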
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f9b40a9dc4d3..bc03b0a690b4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1514,6 +1514,7 @@ static void acm_disconnect(struct usb_interface *intf)
1514{ 1514{
1515 struct acm *acm = usb_get_intfdata(intf); 1515 struct acm *acm = usb_get_intfdata(intf);
1516 struct tty_struct *tty; 1516 struct tty_struct *tty;
1517 int i;
1517 1518
1518 /* sibling interface is already cleaning up */ 1519 /* sibling interface is already cleaning up */
1519 if (!acm) 1520 if (!acm)
@@ -1544,6 +1545,11 @@ static void acm_disconnect(struct usb_interface *intf)
1544 1545
1545 tty_unregister_device(acm_tty_driver, acm->minor); 1546 tty_unregister_device(acm_tty_driver, acm->minor);
1546 1547
1548 usb_free_urb(acm->ctrlurb);
1549 for (i = 0; i < ACM_NW; i++)
1550 usb_free_urb(acm->wb[i].urb);
1551 for (i = 0; i < acm->rx_buflimit; i++)
1552 usb_free_urb(acm->read_urbs[i]);
1547 acm_write_buffers_free(acm); 1553 acm_write_buffers_free(acm);
1548 usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma); 1554 usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
1549 acm_read_buffers_free(acm); 1555 acm_read_buffers_free(acm);
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 7334da9e9779..71d0d33c3286 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -642,10 +642,10 @@ static int __maybe_unused xhci_mtk_resume(struct device *dev)
642 xhci_mtk_host_enable(mtk); 642 xhci_mtk_host_enable(mtk);
643 643
644 xhci_dbg(xhci, "%s: restart port polling\n", __func__); 644 xhci_dbg(xhci, "%s: restart port polling\n", __func__);
645 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
646 usb_hcd_poll_rh_status(hcd);
647 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); 645 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
648 usb_hcd_poll_rh_status(xhci->shared_hcd); 646 usb_hcd_poll_rh_status(xhci->shared_hcd);
647 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
648 usb_hcd_poll_rh_status(hcd);
649 return 0; 649 return 0;
650} 650}
651 651
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 6372edf339d9..722860eb5a91 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -185,6 +185,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
185 } 185 }
186 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 186 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
187 (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || 187 (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
188 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
189 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
188 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || 190 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
189 pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) 191 pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
190 xhci->quirks |= XHCI_MISSING_CAS; 192 xhci->quirks |= XHCI_MISSING_CAS;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 0215b70c4efc..e72ad9f81c73 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -561,6 +561,9 @@ static void option_instat_callback(struct urb *urb);
561/* Interface is reserved */ 561/* Interface is reserved */
562#define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0) 562#define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0)
563 563
564/* Interface must have two endpoints */
565#define NUMEP2 BIT(16)
566
564 567
565static const struct usb_device_id option_ids[] = { 568static const struct usb_device_id option_ids[] = {
566 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 569 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -1081,8 +1084,9 @@ static const struct usb_device_id option_ids[] = {
1081 .driver_info = RSVD(4) }, 1084 .driver_info = RSVD(4) },
1082 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), 1085 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
1083 .driver_info = RSVD(4) }, 1086 .driver_info = RSVD(4) },
1084 { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06), 1087 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
1085 .driver_info = RSVD(4) | RSVD(5) }, 1088 .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
1089 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
1086 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1090 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1087 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1091 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1088 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), 1092 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1999,6 +2003,13 @@ static int option_probe(struct usb_serial *serial,
1999 if (device_flags & RSVD(iface_desc->bInterfaceNumber)) 2003 if (device_flags & RSVD(iface_desc->bInterfaceNumber))
2000 return -ENODEV; 2004 return -ENODEV;
2001 2005
2006 /*
2007 * Allow matching on bNumEndpoints for devices whose interface numbers
2008 * can change (e.g. Quectel EP06).
2009 */
2010 if (device_flags & NUMEP2 && iface_desc->bNumEndpoints != 2)
2011 return -ENODEV;
2012
2002 /* Store the device flags so we can use them during attach. */ 2013 /* Store the device flags so we can use them during attach. */
2003 usb_set_serial_data(serial, (void *)device_flags); 2014 usb_set_serial_data(serial, (void *)device_flags);
2004 2015
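
device_flags packs per-interface RSVD() bits in the low byte and NUMEP2 in bit 16, letting option_probe() both decline reserved interfaces and require exactly two endpoints on devices such as the EP06 whose interface numbers move around. A standalone sketch of the packing and the probe test; probe() here is a stand-in, not the driver function:

    #include <stdio.h>

    #define BIT(n)      (1UL << (n))
    #define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0)  /* interface is reserved */
    #define NUMEP2      BIT(16)                     /* need exactly 2 endpoints */

    /* Returns 0 to accept the interface, -1 to decline it. */
    static int probe(unsigned long flags, int ifnum, int num_endpoints)
    {
        if (flags & RSVD(ifnum))
            return -1;             /* interface reserved for another driver */
        if ((flags & NUMEP2) && num_endpoints != 2)
            return -1;             /* wrong interface on a shuffled device */
        return 0;
    }

    int main(void)
    {
        unsigned long flags = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2;

        printf("if0/2ep: %d\n", probe(flags, 0, 2));  /* accepted */
        printf("if0/3ep: %d\n", probe(flags, 0, 3));  /* declined */
        printf("if4/2ep: %d\n", probe(flags, 4, 2));  /* declined */
        return 0;
    }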
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 40864c2bd9dc..4d0273508043 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -84,7 +84,8 @@ DEVICE(moto_modem, MOTO_IDS);
84 84
85/* Motorola Tetra driver */ 85/* Motorola Tetra driver */
86#define MOTOROLA_TETRA_IDS() \ 86#define MOTOROLA_TETRA_IDS() \
87 { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */ 87 { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
88 { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
88DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); 89DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
89 90
90/* Novatel Wireless GPS driver */ 91/* Novatel Wireless GPS driver */
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 3946649b85c8..ba906876cc45 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -42,6 +42,7 @@ struct bmp_dib_header {
42 u32 colors_important; 42 u32 colors_important;
43} __packed; 43} __packed;
44 44
45static bool use_bgrt = true;
45static bool request_mem_succeeded = false; 46static bool request_mem_succeeded = false;
46static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC; 47static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
47 48
@@ -160,6 +161,9 @@ static void efifb_show_boot_graphics(struct fb_info *info)
160 void *bgrt_image = NULL; 161 void *bgrt_image = NULL;
161 u8 *dst = info->screen_base; 162 u8 *dst = info->screen_base;
162 163
164 if (!use_bgrt)
165 return;
166
163 if (!bgrt_tab.image_address) { 167 if (!bgrt_tab.image_address) {
164 pr_info("efifb: No BGRT, not showing boot graphics\n"); 168 pr_info("efifb: No BGRT, not showing boot graphics\n");
165 return; 169 return;
@@ -290,6 +294,8 @@ static int efifb_setup(char *options)
290 screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0); 294 screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
291 else if (!strcmp(this_opt, "nowc")) 295 else if (!strcmp(this_opt, "nowc"))
292 mem_flags &= ~EFI_MEMORY_WC; 296 mem_flags &= ~EFI_MEMORY_WC;
297 else if (!strcmp(this_opt, "nobgrt"))
298 use_bgrt = false;
293 } 299 }
294 } 300 }
295 301
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index ef69273074ba..a3edb20ea4c3 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_info *fbi,
496 if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size)) 496 if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
497 return -EFAULT; 497 return -EFAULT;
498 498
499 if (mr->w > 4096 || mr->h > 4096)
500 return -EINVAL;
501
499 if (mr->w * mr->h * 3 > mr->buffer_size) 502 if (mr->w * mr->h * 3 > mr->buffer_size)
500 return -EINVAL; 503 return -EINVAL;
501 504
@@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
509 mr->x, mr->y, mr->w, mr->h); 512 mr->x, mr->y, mr->w, mr->h);
510 513
511 if (r > 0) { 514 if (r > 0) {
512 if (copy_to_user(mr->buffer, buf, mr->buffer_size)) 515 if (copy_to_user(mr->buffer, buf, r))
513 r = -EFAULT; 516 r = -EFAULT;
514 } 517 }
515 518
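
Clamping mr->w and mr->h before the size comparison keeps w * h * 3 from overflowing 32 bits, and copying back only r bytes stops kernel memory beyond the actual read result from reaching user space. A small sketch of the overflow-safe validation; the 4096 limit matches the hunk, everything else is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Validate a user-supplied read rectangle before multiplying. The 4096
     * clamps keep w * h * 3 well inside 32 bits, so the size test below
     * cannot be defeated by integer overflow. */
    static int check_read(uint32_t w, uint32_t h, uint32_t buffer_size)
    {
        if (w > 4096 || h > 4096)
            return -1;
        if (w * h * 3 > buffer_size)
            return -1;
        return 0;
    }

    int main(void)
    {
        /* 0x40000 * 0x40000 * 3 would wrap to 0 without the clamp. */
        printf("%d\n", check_read(0x40000, 0x40000, 16));  /* -1: rejected */
        printf("%d\n", check_read(64, 64, 64 * 64 * 3));   /*  0: accepted */
        return 0;
    }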
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index def3a501acd6..d059d04c63ac 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -712,7 +712,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
712 /* 712 /*
713 * enable controller clock 713 * enable controller clock
714 */ 714 */
715 clk_enable(fbi->clk); 715 clk_prepare_enable(fbi->clk);
716 716
717 pxa168fb_set_par(info); 717 pxa168fb_set_par(info);
718 718
@@ -767,7 +767,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
767failed_free_cmap: 767failed_free_cmap:
768 fb_dealloc_cmap(&info->cmap); 768 fb_dealloc_cmap(&info->cmap);
769failed_free_clk: 769failed_free_clk:
770 clk_disable(fbi->clk); 770 clk_disable_unprepare(fbi->clk);
771failed_free_fbmem: 771failed_free_fbmem:
772 dma_free_coherent(fbi->dev, info->fix.smem_len, 772 dma_free_coherent(fbi->dev, info->fix.smem_len,
773 info->screen_base, fbi->fb_start_dma); 773 info->screen_base, fbi->fb_start_dma);
@@ -807,7 +807,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
807 dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len), 807 dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
808 info->screen_base, info->fix.smem_start); 808 info->screen_base, info->fix.smem_start);
809 809
810 clk_disable(fbi->clk); 810 clk_disable_unprepare(fbi->clk);
811 811
812 framebuffer_release(info); 812 framebuffer_release(info);
813 813
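
Under the common clock framework a clock must be prepared (which may sleep) before it is enabled (which must not); clk_prepare_enable() and clk_disable_unprepare() bundle the two steps, and the error and remove paths have to mirror the probe path, as in the pxa168fb hunks above. A toy model of the pairing; the functions below are stubs standing in for the clk API:

    #include <stdio.h>

    static int  clk_prepare(void)   { puts("prepare"); return 0; }
    static void clk_enable(void)    { puts("enable"); }
    static void clk_disable(void)   { puts("disable"); }
    static void clk_unprepare(void) { puts("unprepare"); }

    static int clk_prepare_enable(void)
    {
        int ret = clk_prepare();   /* may sleep */
        if (ret)
            return ret;
        clk_enable();              /* atomic-context safe */
        return 0;
    }

    static void clk_disable_unprepare(void)
    {
        clk_disable();
        clk_unprepare();
    }

    int main(void)
    {
        if (clk_prepare_enable())
            return 1;
        /* ... use the device ... */
        clk_disable_unprepare();   /* every enable has a matching disable */
        return 0;
    }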
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 045e8afe398b..9e88e3f594c2 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1157,7 +1157,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
1157 dev_name); 1157 dev_name);
1158 goto out_err0; 1158 goto out_err0;
1159 } 1159 }
1160 /* fall though */ 1160 /* fall through */
1161 case S9000_ID_ARTIST: 1161 case S9000_ID_ARTIST:
1162 case S9000_ID_HCRX: 1162 case S9000_ID_HCRX:
1163 case S9000_ID_TIMBER: 1163 case S9000_ID_TIMBER:
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 0c9ab62c3df4..9dcaed031843 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1553,6 +1553,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
1553 1553
1554/* Flags */ 1554/* Flags */
1555#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */ 1555#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */
1556#define MID_DELETED 2 /* Mid has been dequeued/deleted */
1556 1557
1557/* Types of response buffer returned from SendReceive2 */ 1558/* Types of response buffer returned from SendReceive2 */
1558#define CIFS_NO_BUFFER 0 /* Response buffer not returned */ 1559#define CIFS_NO_BUFFER 0 /* Response buffer not returned */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 7aa08dba4719..52d71b64c0c6 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -659,7 +659,15 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
659 mid->mid_state = MID_RESPONSE_RECEIVED; 659 mid->mid_state = MID_RESPONSE_RECEIVED;
660 else 660 else
661 mid->mid_state = MID_RESPONSE_MALFORMED; 661 mid->mid_state = MID_RESPONSE_MALFORMED;
662 list_del_init(&mid->qhead); 662 /*
663 * Trying to handle/dequeue a mid after the send_recv()
664 * function has finished processing it is a bug.
665 */
666 if (mid->mid_flags & MID_DELETED)
667 printk_once(KERN_WARNING
668 "trying to dequeue a deleted mid\n");
669 else
670 list_del_init(&mid->qhead);
663 spin_unlock(&GlobalMid_Lock); 671 spin_unlock(&GlobalMid_Lock);
664} 672}
665 673
@@ -938,8 +946,7 @@ next_pdu:
938 } else { 946 } else {
939 mids[0] = server->ops->find_mid(server, buf); 947 mids[0] = server->ops->find_mid(server, buf);
940 bufs[0] = buf; 948 bufs[0] = buf;
941 if (mids[0]) 949 num_mids = 1;
942 num_mids = 1;
943 950
944 if (!mids[0] || !mids[0]->receive) 951 if (!mids[0] || !mids[0]->receive)
945 length = standard_receive3(server, mids[0]); 952 length = standard_receive3(server, mids[0]);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index d954ce36b473..89985a0a6819 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1477,7 +1477,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
1477 } 1477 }
1478 1478
1479 srch_inf->entries_in_buffer = 0; 1479 srch_inf->entries_in_buffer = 0;
1480 srch_inf->index_of_last_entry = 0; 1480 srch_inf->index_of_last_entry = 2;
1481 1481
1482 rc = SMB2_query_directory(xid, tcon, fid->persistent_fid, 1482 rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
1483 fid->volatile_fid, 0, srch_inf); 1483 fid->volatile_fid, 0, srch_inf);
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 78f96fa3d7d9..b48f43963da6 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -142,7 +142,8 @@ void
142cifs_delete_mid(struct mid_q_entry *mid) 142cifs_delete_mid(struct mid_q_entry *mid)
143{ 143{
144 spin_lock(&GlobalMid_Lock); 144 spin_lock(&GlobalMid_Lock);
145 list_del(&mid->qhead); 145 list_del_init(&mid->qhead);
146 mid->mid_flags |= MID_DELETED;
146 spin_unlock(&GlobalMid_Lock); 147 spin_unlock(&GlobalMid_Lock);
147 148
148 DeleteMidQEntry(mid); 149 DeleteMidQEntry(mid);
@@ -772,6 +773,11 @@ cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
772 return mid; 773 return mid;
773} 774}
774 775
776static void
777cifs_noop_callback(struct mid_q_entry *mid)
778{
779}
780
775int 781int
776compound_send_recv(const unsigned int xid, struct cifs_ses *ses, 782compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
777 const int flags, const int num_rqst, struct smb_rqst *rqst, 783 const int flags, const int num_rqst, struct smb_rqst *rqst,
@@ -826,8 +832,13 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
826 } 832 }
827 833
828 midQ[i]->mid_state = MID_REQUEST_SUBMITTED; 834 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
835 /*
836 * We don't invoke the callback compounds unless it is the last
837 * request.
838 */
839 if (i < num_rqst - 1)
840 midQ[i]->callback = cifs_noop_callback;
829 } 841 }
830
831 cifs_in_send_inc(ses->server); 842 cifs_in_send_inc(ses->server);
832 rc = smb_send_rqst(ses->server, num_rqst, rqst, flags); 843 rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
833 cifs_in_send_dec(ses->server); 844 cifs_in_send_dec(ses->server);
@@ -908,6 +919,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
908 midQ[i]->resp_buf = NULL; 919 midQ[i]->resp_buf = NULL;
909 } 920 }
910out: 921out:
922 /*
923 * This will dequeue all mids. After this it is important that the
 924 * demultiplex_thread will not process any of these mids any further.
925 * This is prevented above by using a noop callback that will not
926 * wake this thread except for the very last PDU.
927 */
911 for (i = 0; i < num_rqst; i++) 928 for (i = 0; i < num_rqst; i++)
912 cifs_delete_mid(midQ[i]); 929 cifs_delete_mid(midQ[i]);
913 add_credits(ses->server, credits, optype); 930 add_credits(ses->server, credits, optype);
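
Giving every request except the last a no-op callback means the demultiplex thread wakes the sender only once, after the final PDU of the compound arrives. A minimal model of that callback assignment; the names are invented for the sketch:

    #include <stdio.h>

    struct mid { void (*callback)(int idx); };

    static void noop_callback(int idx) { (void)idx; }
    static void wake_sender(int idx)   { printf("PDU %d wakes the sender\n", idx); }

    int main(void)
    {
        struct mid mids[3];
        int i, n = 3;

        /* Only the final request in the compound gets the waking callback. */
        for (i = 0; i < n; i++)
            mids[i].callback = (i < n - 1) ? noop_callback : wake_sender;

        for (i = 0; i < n; i++)        /* responses arrive one by one */
            mids[i].callback(i);
        return 0;
    }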
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 3212c29235ce..2005529af560 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -230,7 +230,7 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
230 ret = -EXDEV; 230 ret = -EXDEV;
231 if (src_file.file->f_path.mnt != dst_file->f_path.mnt) 231 if (src_file.file->f_path.mnt != dst_file->f_path.mnt)
232 goto fdput; 232 goto fdput;
233 ret = do_clone_file_range(src_file.file, off, dst_file, destoff, olen); 233 ret = vfs_clone_file_range(src_file.file, off, dst_file, destoff, olen);
234fdput: 234fdput:
235 fdput(src_file); 235 fdput(src_file);
236 return ret; 236 return ret;
diff --git a/fs/iomap.c b/fs/iomap.c
index 74762b1ec233..ec15cf2ec696 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -1051,6 +1051,7 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
1051 } else { 1051 } else {
1052 WARN_ON_ONCE(!PageUptodate(page)); 1052 WARN_ON_ONCE(!PageUptodate(page));
1053 iomap_page_create(inode, page); 1053 iomap_page_create(inode, page);
1054 set_page_dirty(page);
1054 } 1055 }
1055 1056
1056 return length; 1057 return length;
@@ -1090,7 +1091,6 @@ int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
1090 length -= ret; 1091 length -= ret;
1091 } 1092 }
1092 1093
1093 set_page_dirty(page);
1094 wait_for_stable_page(page); 1094 wait_for_stable_page(page);
1095 return VM_FAULT_LOCKED; 1095 return VM_FAULT_LOCKED;
1096out_unlock: 1096out_unlock:
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 55a099e47ba2..b53e76391e52 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -541,7 +541,8 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
541__be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst, 541__be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
542 u64 dst_pos, u64 count) 542 u64 dst_pos, u64 count)
543{ 543{
544 return nfserrno(do_clone_file_range(src, src_pos, dst, dst_pos, count)); 544 return nfserrno(vfs_clone_file_range(src, src_pos, dst, dst_pos,
545 count));
545} 546}
546 547
547ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst, 548ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index aaca0949fe53..826f0567ec43 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -584,9 +584,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
584 584
585 res->last_used = 0; 585 res->last_used = 0;
586 586
587 spin_lock(&dlm->spinlock); 587 spin_lock(&dlm->track_lock);
588 list_add_tail(&res->tracking, &dlm->tracking_list); 588 list_add_tail(&res->tracking, &dlm->tracking_list);
589 spin_unlock(&dlm->spinlock); 589 spin_unlock(&dlm->track_lock);
590 590
591 memset(res->lvb, 0, DLM_LVB_LEN); 591 memset(res->lvb, 0, DLM_LVB_LEN);
592 memset(res->refmap, 0, sizeof(res->refmap)); 592 memset(res->refmap, 0, sizeof(res->refmap));
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 7869622af22a..7a5ee145c733 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2946,6 +2946,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2946 if (map_end & (PAGE_SIZE - 1)) 2946 if (map_end & (PAGE_SIZE - 1))
2947 to = map_end & (PAGE_SIZE - 1); 2947 to = map_end & (PAGE_SIZE - 1);
2948 2948
2949retry:
2949 page = find_or_create_page(mapping, page_index, GFP_NOFS); 2950 page = find_or_create_page(mapping, page_index, GFP_NOFS);
2950 if (!page) { 2951 if (!page) {
2951 ret = -ENOMEM; 2952 ret = -ENOMEM;
@@ -2954,11 +2955,18 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2954 } 2955 }
2955 2956
2956 /* 2957 /*
2957 * In case PAGE_SIZE <= CLUSTER_SIZE, This page 2958 * In case PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty
2958 * can't be dirtied before we CoW it out. 2959 * page, so write it back.
2959 */ 2960 */
2960 if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) 2961 if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
2961 BUG_ON(PageDirty(page)); 2962 if (PageDirty(page)) {
2963 /*
 2964 * write_one_page will unlock the page on return
2965 */
2966 ret = write_one_page(page);
2967 goto retry;
2968 }
2969 }
2962 2970
2963 if (!PageUptodate(page)) { 2971 if (!PageUptodate(page)) {
2964 ret = block_read_full_page(page, ocfs2_get_block); 2972 ret = block_read_full_page(page, ocfs2_get_block);
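
Instead of BUG()ing on a dirty page when PAGE_SIZE <= CLUSTER_SIZE, the code now writes the page back with write_one_page() and retries the lookup until it gets a clean page. A toy model of that write-back-and-retry loop, with a simplified page structure:

    #include <stdbool.h>
    #include <stdio.h>

    struct page { bool dirty; };

    static void write_one_page(struct page *pg)   /* writes back and cleans */
    {
        puts("writing page back");
        pg->dirty = false;
    }

    /* Rather than asserting the page can never be dirty, write it back
     * and retry until a clean page is found. */
    static void cow_page(struct page *pg)
    {
    retry:
        if (pg->dirty) {
            write_one_page(pg);
            goto retry;
        }
        puts("CoW on a clean page");
    }

    int main(void)
    {
        struct page pg = { .dirty = true };
        cow_page(&pg);
        return 0;
    }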
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 296037afecdb..1cc797a08a5b 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -141,7 +141,7 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
141 } 141 }
142 142
143 /* Try to use clone_file_range to clone up within the same fs */ 143 /* Try to use clone_file_range to clone up within the same fs */
144 error = vfs_clone_file_range(old_file, 0, new_file, 0, len); 144 error = do_clone_file_range(old_file, 0, new_file, 0, len);
145 if (!error) 145 if (!error)
146 goto out; 146 goto out;
147 /* Couldn't clone, so now we try to copy the data */ 147 /* Couldn't clone, so now we try to copy the data */
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index aeaefd2a551b..986313da0c88 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -240,8 +240,10 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
240 goto out_unlock; 240 goto out_unlock;
241 241
242 old_cred = ovl_override_creds(file_inode(file)->i_sb); 242 old_cred = ovl_override_creds(file_inode(file)->i_sb);
243 file_start_write(real.file);
243 ret = vfs_iter_write(real.file, iter, &iocb->ki_pos, 244 ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
244 ovl_iocb_to_rwf(iocb)); 245 ovl_iocb_to_rwf(iocb));
246 file_end_write(real.file);
245 revert_creds(old_cred); 247 revert_creds(old_cred);
246 248
247 /* Update size */ 249 /* Update size */
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index b6ac545b5a32..3b7ed5d2279c 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -504,7 +504,7 @@ static const struct inode_operations ovl_special_inode_operations = {
504 .update_time = ovl_update_time, 504 .update_time = ovl_update_time,
505}; 505};
506 506
507const struct address_space_operations ovl_aops = { 507static const struct address_space_operations ovl_aops = {
508 /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */ 508 /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
509 .direct_IO = noop_direct_IO, 509 .direct_IO = noop_direct_IO,
510}; 510};
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index f28711846dd6..9c0ca6a7becf 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -686,7 +686,7 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
686 index = NULL; 686 index = NULL;
687 goto out; 687 goto out;
688 } 688 }
689 pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n" 689 pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
690 "overlayfs: mount with '-o index=off' to disable inodes index.\n", 690 "overlayfs: mount with '-o index=off' to disable inodes index.\n",
691 d_inode(origin)->i_ino, name.len, name.name, 691 d_inode(origin)->i_ino, name.len, name.name,
692 err); 692 err);
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index f61839e1054c..a3c0d9584312 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -152,8 +152,8 @@ static inline int ovl_do_setxattr(struct dentry *dentry, const char *name,
152 const void *value, size_t size, int flags) 152 const void *value, size_t size, int flags)
153{ 153{
154 int err = vfs_setxattr(dentry, name, value, size, flags); 154 int err = vfs_setxattr(dentry, name, value, size, flags);
155 pr_debug("setxattr(%pd2, \"%s\", \"%*s\", 0x%x) = %i\n", 155 pr_debug("setxattr(%pd2, \"%s\", \"%*pE\", %zu, 0x%x) = %i\n",
156 dentry, name, (int) size, (char *) value, flags, err); 156 dentry, name, min((int)size, 48), value, size, flags, err);
157 return err; 157 return err;
158} 158}
159 159
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 8cfb62cc8672..ace4fe4c39a9 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -683,7 +683,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
683 struct dentry *upperdentry = ovl_dentry_upper(dentry); 683 struct dentry *upperdentry = ovl_dentry_upper(dentry);
684 struct dentry *index = NULL; 684 struct dentry *index = NULL;
685 struct inode *inode; 685 struct inode *inode;
686 struct qstr name; 686 struct qstr name = { };
687 int err; 687 int err;
688 688
689 err = ovl_get_index_name(lowerdentry, &name); 689 err = ovl_get_index_name(lowerdentry, &name);
@@ -726,6 +726,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
726 goto fail; 726 goto fail;
727 727
728out: 728out:
729 kfree(name.name);
729 dput(index); 730 dput(index);
730 return; 731 return;
731 732
diff --git a/fs/proc/base.c b/fs/proc/base.c
index ccf86f16d9f0..7e9f07bf260d 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -407,6 +407,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
407 unsigned long *entries; 407 unsigned long *entries;
408 int err; 408 int err;
409 409
410 /*
411 * The ability to racily run the kernel stack unwinder on a running task
412 * and then observe the unwinder output is scary; while it is useful for
413 * debugging kernel issues, it can also allow an attacker to leak kernel
414 * stack contents.
415 * Doing this in a manner that is at least safe from races would require
416 * some work to ensure that the remote task can not be scheduled; and
417 * even then, this would still expose the unwinder as local attack
418 * surface.
419 * Therefore, this interface is restricted to root.
420 */
421 if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
422 return -EACCES;
423
410 entries = kmalloc_array(MAX_STACK_TRACE_DEPTH, sizeof(*entries), 424 entries = kmalloc_array(MAX_STACK_TRACE_DEPTH, sizeof(*entries),
411 GFP_KERNEL); 425 GFP_KERNEL);
412 if (!entries) 426 if (!entries)
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index bbd1e357c23d..f4fd2e72add4 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -898,8 +898,22 @@ static struct platform_driver ramoops_driver = {
898 }, 898 },
899}; 899};
900 900
901static void ramoops_register_dummy(void) 901static inline void ramoops_unregister_dummy(void)
902{ 902{
903 platform_device_unregister(dummy);
904 dummy = NULL;
905
906 kfree(dummy_data);
907 dummy_data = NULL;
908}
909
910static void __init ramoops_register_dummy(void)
911{
912 /*
913 * Prepare a dummy platform data structure to carry the module
914 * parameters. If mem_size isn't set, then there are no module
915 * parameters, and we can skip this.
916 */
903 if (!mem_size) 917 if (!mem_size)
904 return; 918 return;
905 919
@@ -932,21 +946,28 @@ static void ramoops_register_dummy(void)
932 if (IS_ERR(dummy)) { 946 if (IS_ERR(dummy)) {
933 pr_info("could not create platform device: %ld\n", 947 pr_info("could not create platform device: %ld\n",
934 PTR_ERR(dummy)); 948 PTR_ERR(dummy));
949 dummy = NULL;
950 ramoops_unregister_dummy();
935 } 951 }
936} 952}
937 953
938static int __init ramoops_init(void) 954static int __init ramoops_init(void)
939{ 955{
956 int ret;
957
940 ramoops_register_dummy(); 958 ramoops_register_dummy();
941 return platform_driver_register(&ramoops_driver); 959 ret = platform_driver_register(&ramoops_driver);
960 if (ret != 0)
961 ramoops_unregister_dummy();
962
963 return ret;
942} 964}
943late_initcall(ramoops_init); 965late_initcall(ramoops_init);
944 966
945static void __exit ramoops_exit(void) 967static void __exit ramoops_exit(void)
946{ 968{
947 platform_driver_unregister(&ramoops_driver); 969 platform_driver_unregister(&ramoops_driver);
948 platform_device_unregister(dummy); 970 ramoops_unregister_dummy();
949 kfree(dummy_data);
950} 971}
951module_exit(ramoops_exit); 972module_exit(ramoops_exit);
952 973
diff --git a/fs/read_write.c b/fs/read_write.c
index 39b4a21dd933..8a2737f0d61d 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1818,8 +1818,8 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
1818} 1818}
1819EXPORT_SYMBOL(vfs_clone_file_prep_inodes); 1819EXPORT_SYMBOL(vfs_clone_file_prep_inodes);
1820 1820
1821int vfs_clone_file_range(struct file *file_in, loff_t pos_in, 1821int do_clone_file_range(struct file *file_in, loff_t pos_in,
1822 struct file *file_out, loff_t pos_out, u64 len) 1822 struct file *file_out, loff_t pos_out, u64 len)
1823{ 1823{
1824 struct inode *inode_in = file_inode(file_in); 1824 struct inode *inode_in = file_inode(file_in);
1825 struct inode *inode_out = file_inode(file_out); 1825 struct inode *inode_out = file_inode(file_out);
@@ -1866,6 +1866,19 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
1866 1866
1867 return ret; 1867 return ret;
1868} 1868}
1869EXPORT_SYMBOL(do_clone_file_range);
1870
1871int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
1872 struct file *file_out, loff_t pos_out, u64 len)
1873{
1874 int ret;
1875
1876 file_start_write(file_out);
1877 ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len);
1878 file_end_write(file_out);
1879
1880 return ret;
1881}
1869EXPORT_SYMBOL(vfs_clone_file_range); 1882EXPORT_SYMBOL(vfs_clone_file_range);
1870 1883
1871/* 1884/*
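
The split leaves do_clone_file_range() for callers that already hold write/freeze protection on the target (overlayfs copy-up), while vfs_clone_file_range() takes the protection itself for everyone else (ioctl, nfsd). A stub model of that wrapper pattern; the bodies here only print, they are not the VFS implementation:

    #include <stdio.h>

    static void file_start_write(void) { puts("freeze protection taken"); }
    static void file_end_write(void)   { puts("freeze protection dropped"); }

    /* Core operation: callers must already hold write/freeze protection. */
    static int do_clone_file_range(void)
    {
        puts("cloning extents");
        return 0;
    }

    /* Public wrapper: takes the protection itself, for callers that don't. */
    static int vfs_clone_file_range(void)
    {
        int ret;

        file_start_write();
        ret = do_clone_file_range();
        file_end_write();
        return ret;
    }

    int main(void)
    {
        return vfs_clone_file_range();
    }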
diff --git a/fs/xattr.c b/fs/xattr.c
index daa732550088..0d6a6a4af861 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -948,17 +948,19 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
948 int err = 0; 948 int err = 0;
949 949
950#ifdef CONFIG_FS_POSIX_ACL 950#ifdef CONFIG_FS_POSIX_ACL
951 if (inode->i_acl) { 951 if (IS_POSIXACL(inode)) {
952 err = xattr_list_one(&buffer, &remaining_size, 952 if (inode->i_acl) {
953 XATTR_NAME_POSIX_ACL_ACCESS); 953 err = xattr_list_one(&buffer, &remaining_size,
954 if (err) 954 XATTR_NAME_POSIX_ACL_ACCESS);
955 return err; 955 if (err)
956 } 956 return err;
957 if (inode->i_default_acl) { 957 }
958 err = xattr_list_one(&buffer, &remaining_size, 958 if (inode->i_default_acl) {
959 XATTR_NAME_POSIX_ACL_DEFAULT); 959 err = xattr_list_one(&buffer, &remaining_size,
960 if (err) 960 XATTR_NAME_POSIX_ACL_DEFAULT);
961 return err; 961 if (err)
962 return err;
963 }
962 } 964 }
963#endif 965#endif
964 966
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 1e671d4eb6fa..c6299f82a6e4 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -587,7 +587,7 @@ xfs_attr_leaf_addname(
587 */ 587 */
588 error = xfs_attr3_leaf_to_node(args); 588 error = xfs_attr3_leaf_to_node(args);
589 if (error) 589 if (error)
590 goto out_defer_cancel; 590 return error;
591 error = xfs_defer_finish(&args->trans); 591 error = xfs_defer_finish(&args->trans);
592 if (error) 592 if (error)
593 return error; 593 return error;
@@ -675,7 +675,7 @@ xfs_attr_leaf_addname(
675 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); 675 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
676 /* bp is gone due to xfs_da_shrink_inode */ 676 /* bp is gone due to xfs_da_shrink_inode */
677 if (error) 677 if (error)
678 goto out_defer_cancel; 678 return error;
679 error = xfs_defer_finish(&args->trans); 679 error = xfs_defer_finish(&args->trans);
680 if (error) 680 if (error)
681 return error; 681 return error;
@@ -693,9 +693,6 @@ xfs_attr_leaf_addname(
693 error = xfs_attr3_leaf_clearflag(args); 693 error = xfs_attr3_leaf_clearflag(args);
694 } 694 }
695 return error; 695 return error;
696out_defer_cancel:
697 xfs_defer_cancel(args->trans);
698 return error;
699} 696}
700 697
701/* 698/*
@@ -738,15 +735,12 @@ xfs_attr_leaf_removename(
738 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); 735 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
739 /* bp is gone due to xfs_da_shrink_inode */ 736 /* bp is gone due to xfs_da_shrink_inode */
740 if (error) 737 if (error)
741 goto out_defer_cancel; 738 return error;
742 error = xfs_defer_finish(&args->trans); 739 error = xfs_defer_finish(&args->trans);
743 if (error) 740 if (error)
744 return error; 741 return error;
745 } 742 }
746 return 0; 743 return 0;
747out_defer_cancel:
748 xfs_defer_cancel(args->trans);
749 return error;
750} 744}
751 745
752/* 746/*
@@ -864,7 +858,7 @@ restart:
864 state = NULL; 858 state = NULL;
865 error = xfs_attr3_leaf_to_node(args); 859 error = xfs_attr3_leaf_to_node(args);
866 if (error) 860 if (error)
867 goto out_defer_cancel; 861 goto out;
868 error = xfs_defer_finish(&args->trans); 862 error = xfs_defer_finish(&args->trans);
869 if (error) 863 if (error)
870 goto out; 864 goto out;
@@ -888,7 +882,7 @@ restart:
888 */ 882 */
889 error = xfs_da3_split(state); 883 error = xfs_da3_split(state);
890 if (error) 884 if (error)
891 goto out_defer_cancel; 885 goto out;
892 error = xfs_defer_finish(&args->trans); 886 error = xfs_defer_finish(&args->trans);
893 if (error) 887 if (error)
894 goto out; 888 goto out;
@@ -984,7 +978,7 @@ restart:
984 if (retval && (state->path.active > 1)) { 978 if (retval && (state->path.active > 1)) {
985 error = xfs_da3_join(state); 979 error = xfs_da3_join(state);
986 if (error) 980 if (error)
987 goto out_defer_cancel; 981 goto out;
988 error = xfs_defer_finish(&args->trans); 982 error = xfs_defer_finish(&args->trans);
989 if (error) 983 if (error)
990 goto out; 984 goto out;
@@ -1013,9 +1007,6 @@ out:
1013 if (error) 1007 if (error)
1014 return error; 1008 return error;
1015 return retval; 1009 return retval;
1016out_defer_cancel:
1017 xfs_defer_cancel(args->trans);
1018 goto out;
1019} 1010}
1020 1011
1021/* 1012/*
@@ -1107,7 +1098,7 @@ xfs_attr_node_removename(
1107 if (retval && (state->path.active > 1)) { 1098 if (retval && (state->path.active > 1)) {
1108 error = xfs_da3_join(state); 1099 error = xfs_da3_join(state);
1109 if (error) 1100 if (error)
1110 goto out_defer_cancel; 1101 goto out;
1111 error = xfs_defer_finish(&args->trans); 1102 error = xfs_defer_finish(&args->trans);
1112 if (error) 1103 if (error)
1113 goto out; 1104 goto out;
@@ -1138,7 +1129,7 @@ xfs_attr_node_removename(
1138 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); 1129 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
1139 /* bp is gone due to xfs_da_shrink_inode */ 1130 /* bp is gone due to xfs_da_shrink_inode */
1140 if (error) 1131 if (error)
1141 goto out_defer_cancel; 1132 goto out;
1142 error = xfs_defer_finish(&args->trans); 1133 error = xfs_defer_finish(&args->trans);
1143 if (error) 1134 if (error)
1144 goto out; 1135 goto out;
@@ -1150,9 +1141,6 @@ xfs_attr_node_removename(
1150out: 1141out:
1151 xfs_da_state_free(state); 1142 xfs_da_state_free(state);
1152 return error; 1143 return error;
1153out_defer_cancel:
1154 xfs_defer_cancel(args->trans);
1155 goto out;
1156} 1144}
1157 1145
1158/* 1146/*
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index af094063e402..d89363c6b523 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -485,7 +485,7 @@ xfs_attr_rmtval_set(
485 blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map, 485 blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map,
486 &nmap); 486 &nmap);
487 if (error) 487 if (error)
488 goto out_defer_cancel; 488 return error;
489 error = xfs_defer_finish(&args->trans); 489 error = xfs_defer_finish(&args->trans);
490 if (error) 490 if (error)
491 return error; 491 return error;
@@ -553,9 +553,6 @@ xfs_attr_rmtval_set(
553 } 553 }
554 ASSERT(valuelen == 0); 554 ASSERT(valuelen == 0);
555 return 0; 555 return 0;
556out_defer_cancel:
557 xfs_defer_cancel(args->trans);
558 return error;
559} 556}
560 557
561/* 558/*
@@ -625,7 +622,7 @@ xfs_attr_rmtval_remove(
625 error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt, 622 error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
626 XFS_BMAPI_ATTRFORK, 1, &done); 623 XFS_BMAPI_ATTRFORK, 1, &done);
627 if (error) 624 if (error)
628 goto out_defer_cancel; 625 return error;
629 error = xfs_defer_finish(&args->trans); 626 error = xfs_defer_finish(&args->trans);
630 if (error) 627 if (error)
631 return error; 628 return error;
@@ -638,7 +635,4 @@ xfs_attr_rmtval_remove(
638 return error; 635 return error;
639 } 636 }
640 return 0; 637 return 0;
641out_defer_cancel:
642 xfs_defer_cancel(args->trans);
643 return error;
644} 638}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 2760314fdf7f..a47670332326 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -673,7 +673,8 @@ xfs_bmap_extents_to_btree(
673 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS); 673 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
674 674
675 /* 675 /*
676 * Make space in the inode incore. 676 * Make space in the inode incore. This needs to be undone if we fail
677 * to expand the root.
677 */ 678 */
678 xfs_iroot_realloc(ip, 1, whichfork); 679 xfs_iroot_realloc(ip, 1, whichfork);
679 ifp->if_flags |= XFS_IFBROOT; 680 ifp->if_flags |= XFS_IFBROOT;
@@ -711,16 +712,15 @@ xfs_bmap_extents_to_btree(
711 args.minlen = args.maxlen = args.prod = 1; 712 args.minlen = args.maxlen = args.prod = 1;
712 args.wasdel = wasdel; 713 args.wasdel = wasdel;
713 *logflagsp = 0; 714 *logflagsp = 0;
714 if ((error = xfs_alloc_vextent(&args))) { 715 error = xfs_alloc_vextent(&args);
715 ASSERT(ifp->if_broot == NULL); 716 if (error)
716 goto err1; 717 goto out_root_realloc;
717 }
718 718
719 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) { 719 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
720 ASSERT(ifp->if_broot == NULL);
721 error = -ENOSPC; 720 error = -ENOSPC;
722 goto err1; 721 goto out_root_realloc;
723 } 722 }
723
724 /* 724 /*
725 * Allocation can't fail, the space was reserved. 725 * Allocation can't fail, the space was reserved.
726 */ 726 */
@@ -732,9 +732,10 @@ xfs_bmap_extents_to_btree(
732 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); 732 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
733 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0); 733 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
734 if (!abp) { 734 if (!abp) {
735 error = -ENOSPC; 735 error = -EFSCORRUPTED;
736 goto err2; 736 goto out_unreserve_dquot;
737 } 737 }
738
738 /* 739 /*
739 * Fill in the child block. 740 * Fill in the child block.
740 */ 741 */
@@ -775,11 +776,12 @@ xfs_bmap_extents_to_btree(
775 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork); 776 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
776 return 0; 777 return 0;
777 778
778err2: 779out_unreserve_dquot:
779 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); 780 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
780err1: 781out_root_realloc:
781 xfs_iroot_realloc(ip, -1, whichfork); 782 xfs_iroot_realloc(ip, -1, whichfork);
782 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); 783 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
784 ASSERT(ifp->if_broot == NULL);
783 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 785 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
784 786
785 return error; 787 return error;
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 059bc44c27e8..afbe336600e1 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -1016,6 +1016,8 @@ static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
1016#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */ 1016#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
1017#define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */ 1017#define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */
1018#define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */ 1018#define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */
1019/* Do not use bit 15, di_flags is legacy and unchanging now */
1020
1019#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT) 1021#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
1020#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT) 1022#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
1021#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT) 1023#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 30d1d60f1d46..09d9c8cfa4a0 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -415,6 +415,31 @@ xfs_dinode_verify_fork(
415 return NULL; 415 return NULL;
416} 416}
417 417
418static xfs_failaddr_t
419xfs_dinode_verify_forkoff(
420 struct xfs_dinode *dip,
421 struct xfs_mount *mp)
422{
423 if (!XFS_DFORK_Q(dip))
424 return NULL;
425
426 switch (dip->di_format) {
427 case XFS_DINODE_FMT_DEV:
428 if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
429 return __this_address;
430 break;
431 case XFS_DINODE_FMT_LOCAL: /* fall through ... */
432 case XFS_DINODE_FMT_EXTENTS: /* fall through ... */
433 case XFS_DINODE_FMT_BTREE:
434 if (dip->di_forkoff >= (XFS_LITINO(mp, dip->di_version) >> 3))
435 return __this_address;
436 break;
437 default:
438 return __this_address;
439 }
440 return NULL;
441}
442
418xfs_failaddr_t 443xfs_failaddr_t
419xfs_dinode_verify( 444xfs_dinode_verify(
420 struct xfs_mount *mp, 445 struct xfs_mount *mp,
@@ -470,6 +495,11 @@ xfs_dinode_verify(
470 if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp) 495 if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
471 return __this_address; 496 return __this_address;
472 497
498 /* check for illegal values of forkoff */
499 fa = xfs_dinode_verify_forkoff(dip, mp);
500 if (fa)
501 return fa;
502
473 /* Do we have appropriate data fork formats for the mode? */ 503 /* Do we have appropriate data fork formats for the mode? */
474 switch (mode & S_IFMT) { 504 switch (mode & S_IFMT) {
475 case S_IFIFO: 505 case S_IFIFO:
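
The new verifier derives the only legal di_forkoff for each data fork format: a fixed offset for device inodes, anything below the literal area for local/extents/btree inodes, and corruption for everything else. A standalone sketch of that bounds check; LITINO_WORDS and DEV_FORKOFF are illustrative constants, not the real XFS_LITINO computation:

    #include <stdint.h>
    #include <stdio.h>

    enum fmt { FMT_DEV, FMT_LOCAL, FMT_EXTENTS, FMT_BTREE };

    #define LITINO_WORDS 54    /* literal-area size in 8-byte units, illustrative */
    #define DEV_FORKOFF   1    /* roundup(sizeof(dev), 8) >> 3, illustrative */

    /* Returns 0 if the attr fork offset is sane for this inode format. */
    static int verify_forkoff(enum fmt f, uint8_t forkoff)
    {
        switch (f) {
        case FMT_DEV:
            return forkoff == DEV_FORKOFF ? 0 : -1;
        case FMT_LOCAL:
        case FMT_EXTENTS:
        case FMT_BTREE:
            return forkoff < LITINO_WORDS ? 0 : -1;
        }
        return -1;             /* unknown format is corruption */
    }

    int main(void)
    {
        printf("%d\n", verify_forkoff(FMT_EXTENTS, 200));     /* -1: past area */
        printf("%d\n", verify_forkoff(FMT_DEV, DEV_FORKOFF)); /*  0: legal */
        return 0;
    }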
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
index 036b5c7021eb..376bcb585ae6 100644
--- a/fs/xfs/scrub/alloc.c
+++ b/fs/xfs/scrub/alloc.c
@@ -17,7 +17,6 @@
17#include "xfs_sb.h" 17#include "xfs_sb.h"
18#include "xfs_alloc.h" 18#include "xfs_alloc.h"
19#include "xfs_rmap.h" 19#include "xfs_rmap.h"
20#include "xfs_alloc.h"
21#include "scrub/xfs_scrub.h" 20#include "scrub/xfs_scrub.h"
22#include "scrub/scrub.h" 21#include "scrub/scrub.h"
23#include "scrub/common.h" 22#include "scrub/common.h"
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
index 5b3b177c0fc9..e386c9b0b4ab 100644
--- a/fs/xfs/scrub/inode.c
+++ b/fs/xfs/scrub/inode.c
@@ -126,6 +126,7 @@ xchk_inode_flags(
126{ 126{
127 struct xfs_mount *mp = sc->mp; 127 struct xfs_mount *mp = sc->mp;
128 128
129 /* di_flags are all taken, last bit cannot be used */
129 if (flags & ~XFS_DIFLAG_ANY) 130 if (flags & ~XFS_DIFLAG_ANY)
130 goto bad; 131 goto bad;
131 132
@@ -172,8 +173,9 @@ xchk_inode_flags2(
172{ 173{
173 struct xfs_mount *mp = sc->mp; 174 struct xfs_mount *mp = sc->mp;
174 175
176 /* Unknown di_flags2 could be from a future kernel */
175 if (flags2 & ~XFS_DIFLAG2_ANY) 177 if (flags2 & ~XFS_DIFLAG2_ANY)
176 goto bad; 178 xchk_ino_set_warning(sc, ino);
177 179
178 /* reflink flag requires reflink feature */ 180 /* reflink flag requires reflink feature */
179 if ((flags2 & XFS_DIFLAG2_REFLINK) && 181 if ((flags2 & XFS_DIFLAG2_REFLINK) &&
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index addbd74ecd8e..6de8d90041ff 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -702,13 +702,9 @@ xfs_bmap_punch_delalloc_range(
702 struct xfs_iext_cursor icur; 702 struct xfs_iext_cursor icur;
703 int error = 0; 703 int error = 0;
704 704
705 xfs_ilock(ip, XFS_ILOCK_EXCL); 705 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
706 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
707 error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
708 if (error)
709 goto out_unlock;
710 }
711 706
707 xfs_ilock(ip, XFS_ILOCK_EXCL);
712 if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got)) 708 if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
713 goto out_unlock; 709 goto out_unlock;
714 710
@@ -1584,7 +1580,7 @@ xfs_swap_extent_rmap(
1584 tirec.br_blockcount, &irec, 1580 tirec.br_blockcount, &irec,
1585 &nimaps, 0); 1581 &nimaps, 0);
1586 if (error) 1582 if (error)
1587 goto out_defer; 1583 goto out;
1588 ASSERT(nimaps == 1); 1584 ASSERT(nimaps == 1);
1589 ASSERT(tirec.br_startoff == irec.br_startoff); 1585 ASSERT(tirec.br_startoff == irec.br_startoff);
1590 trace_xfs_swap_extent_rmap_remap_piece(ip, &irec); 1586 trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
@@ -1599,22 +1595,22 @@ xfs_swap_extent_rmap(
1599 /* Remove the mapping from the donor file. */ 1595 /* Remove the mapping from the donor file. */
1600 error = xfs_bmap_unmap_extent(tp, tip, &uirec); 1596 error = xfs_bmap_unmap_extent(tp, tip, &uirec);
1601 if (error) 1597 if (error)
1602 goto out_defer; 1598 goto out;
1603 1599
1604 /* Remove the mapping from the source file. */ 1600 /* Remove the mapping from the source file. */
1605 error = xfs_bmap_unmap_extent(tp, ip, &irec); 1601 error = xfs_bmap_unmap_extent(tp, ip, &irec);
1606 if (error) 1602 if (error)
1607 goto out_defer; 1603 goto out;
1608 1604
1609 /* Map the donor file's blocks into the source file. */ 1605 /* Map the donor file's blocks into the source file. */
1610 error = xfs_bmap_map_extent(tp, ip, &uirec); 1606 error = xfs_bmap_map_extent(tp, ip, &uirec);
1611 if (error) 1607 if (error)
1612 goto out_defer; 1608 goto out;
1613 1609
1614 /* Map the source file's blocks into the donor file. */ 1610 /* Map the source file's blocks into the donor file. */
1615 error = xfs_bmap_map_extent(tp, tip, &irec); 1611 error = xfs_bmap_map_extent(tp, tip, &irec);
1616 if (error) 1612 if (error)
1617 goto out_defer; 1613 goto out;
1618 1614
1619 error = xfs_defer_finish(tpp); 1615 error = xfs_defer_finish(tpp);
1620 tp = *tpp; 1616 tp = *tpp;
@@ -1636,8 +1632,6 @@ xfs_swap_extent_rmap(
1636 tip->i_d.di_flags2 = tip_flags2; 1632 tip->i_d.di_flags2 = tip_flags2;
1637 return 0; 1633 return 0;
1638 1634
1639out_defer:
1640 xfs_defer_cancel(tp);
1641out: 1635out:
1642 trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_); 1636 trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1643 tip->i_d.di_flags2 = tip_flags2; 1637 tip->i_d.di_flags2 = tip_flags2;
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 1c9d1398980b..12d8455bfbb2 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -532,6 +532,49 @@ xfs_buf_item_push(
532} 532}
533 533
534/* 534/*
535 * Drop the buffer log item refcount and take appropriate action. This helper
536 * determines whether the bli must be freed or not, since a decrement to zero
537 * does not necessarily mean the bli is unused.
538 *
539 * Return true if the bli is freed, false otherwise.
540 */
541bool
542xfs_buf_item_put(
543 struct xfs_buf_log_item *bip)
544{
545 struct xfs_log_item *lip = &bip->bli_item;
546 bool aborted;
547 bool dirty;
548
549 /* drop the bli ref and return if it wasn't the last one */
550 if (!atomic_dec_and_test(&bip->bli_refcount))
551 return false;
552
553 /*
554 * We dropped the last ref and must free the item if clean or aborted.
555 * If the bli is dirty and non-aborted, the buffer was clean in the
556 * transaction but still awaiting writeback from previous changes. In
557 * that case, the bli is freed on buffer writeback completion.
558 */
559 aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
560 XFS_FORCED_SHUTDOWN(lip->li_mountp);
561 dirty = bip->bli_flags & XFS_BLI_DIRTY;
562 if (dirty && !aborted)
563 return false;
564
565 /*
566 * The bli is aborted or clean. An aborted item may be in the AIL
567 * regardless of dirty state. For example, consider an aborted
568 * transaction that invalidated a dirty bli and cleared the dirty
569 * state.
570 */
571 if (aborted)
572 xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
573 xfs_buf_item_relse(bip->bli_buf);
574 return true;
575}
576
577/*
535 * Release the buffer associated with the buf log item. If there is no dirty 578 * Release the buffer associated with the buf log item. If there is no dirty
536 * logged data associated with the buffer recorded in the buf log item, then 579 * logged data associated with the buffer recorded in the buf log item, then
537 * free the buf log item and remove the reference to it in the buffer. 580 * free the buf log item and remove the reference to it in the buffer.
@@ -556,76 +599,42 @@ xfs_buf_item_unlock(
556{ 599{
557 struct xfs_buf_log_item *bip = BUF_ITEM(lip); 600 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
558 struct xfs_buf *bp = bip->bli_buf; 601 struct xfs_buf *bp = bip->bli_buf;
559 bool aborted; 602 bool released;
560 bool hold = !!(bip->bli_flags & XFS_BLI_HOLD); 603 bool hold = bip->bli_flags & XFS_BLI_HOLD;
561 bool dirty = !!(bip->bli_flags & XFS_BLI_DIRTY); 604 bool stale = bip->bli_flags & XFS_BLI_STALE;
562#if defined(DEBUG) || defined(XFS_WARN) 605#if defined(DEBUG) || defined(XFS_WARN)
563 bool ordered = !!(bip->bli_flags & XFS_BLI_ORDERED); 606 bool ordered = bip->bli_flags & XFS_BLI_ORDERED;
607 bool dirty = bip->bli_flags & XFS_BLI_DIRTY;
564#endif 608#endif
565 609
566 aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags);
567
568 /* Clear the buffer's association with this transaction. */
569 bp->b_transp = NULL;
570
571 /*
572 * The per-transaction state has been copied above so clear it from the
573 * bli.
574 */
575 bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
576
577 /*
578 * If the buf item is marked stale, then don't do anything. We'll
579 * unlock the buffer and free the buf item when the buffer is unpinned
580 * for the last time.
581 */
582 if (bip->bli_flags & XFS_BLI_STALE) {
583 trace_xfs_buf_item_unlock_stale(bip);
584 ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
585 if (!aborted) {
586 atomic_dec(&bip->bli_refcount);
587 return;
588 }
589 }
590
591 trace_xfs_buf_item_unlock(bip); 610 trace_xfs_buf_item_unlock(bip);
592 611
593 /* 612 /*
594 * If the buf item isn't tracking any data, free it, otherwise drop the
595 * reference we hold to it. If we are aborting the transaction, this may
596 * be the only reference to the buf item, so we free it anyway
597 * regardless of whether it is dirty or not. A dirty abort implies a
598 * shutdown, anyway.
599 *
600 * The bli dirty state should match whether the blf has logged segments 613 * The bli dirty state should match whether the blf has logged segments
601 * except for ordered buffers, where only the bli should be dirty. 614 * except for ordered buffers, where only the bli should be dirty.
602 */ 615 */
603 ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) || 616 ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
604 (ordered && dirty && !xfs_buf_item_dirty_format(bip))); 617 (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
618 ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
605 619
606 /* 620 /*
607 * Clean buffers, by definition, cannot be in the AIL. However, aborted 621 * Clear the buffer's association with this transaction and
608 * buffers may be in the AIL regardless of dirty state. An aborted 622 * per-transaction state from the bli, which has been copied above.
609 * transaction that invalidates a buffer already in the AIL may have
610 * marked it stale and cleared the dirty state, for example.
611 *
612 * Therefore if we are aborting a buffer and we've just taken the last
613 * reference away, we have to check if it is in the AIL before freeing
614 * it. We need to free it in this case, because an aborted transaction
615 * has already shut the filesystem down and this is the last chance we
616 * will have to do so.
617 */ 623 */
618 if (atomic_dec_and_test(&bip->bli_refcount)) { 624 bp->b_transp = NULL;
619 if (aborted) { 625 bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
620 ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
621 xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
622 xfs_buf_item_relse(bp);
623 } else if (!dirty)
624 xfs_buf_item_relse(bp);
625 }
626 626
627 if (!hold) 627 /*
628 xfs_buf_relse(bp); 628 * Unref the item and unlock the buffer unless held or stale. Stale
629 * buffers remain locked until final unpin unless the bli is freed by
630 * the unref call. The latter implies shutdown because buffer
631 * invalidation dirties the bli and transaction.
632 */
633 released = xfs_buf_item_put(bip);
634 if (hold || (stale && !released))
635 return;
636 ASSERT(!stale || test_bit(XFS_LI_ABORTED, &lip->li_flags));
637 xfs_buf_relse(bp);
629} 638}
630 639
631/* 640/*
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 3f7d7b72e7e6..90f65f891fab 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -51,6 +51,7 @@ struct xfs_buf_log_item {
51 51
52int xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *); 52int xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
53void xfs_buf_item_relse(struct xfs_buf *); 53void xfs_buf_item_relse(struct xfs_buf *);
54bool xfs_buf_item_put(struct xfs_buf_log_item *);
54void xfs_buf_item_log(struct xfs_buf_log_item *, uint, uint); 55void xfs_buf_item_log(struct xfs_buf_log_item *, uint, uint);
55bool xfs_buf_item_dirty_format(struct xfs_buf_log_item *); 56bool xfs_buf_item_dirty_format(struct xfs_buf_log_item *);
56void xfs_buf_attach_iodone(struct xfs_buf *, 57void xfs_buf_attach_iodone(struct xfs_buf *,
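
xfs_buf_item_put() captures a refcounting idiom worth noting: the put reports whether this call freed the object, so callers such as xfs_buf_item_unlock() and xfs_trans_brelse() know whether they may still dereference it afterwards. A generic sketch of that shape, using a plain non-atomic counter purely for illustration (the kernel version uses atomic_dec_and_test() on an atomic_t):

    #include <stdbool.h>
    #include <stdlib.h>

    struct obj {
            int refcount;           /* sketch only: the kernel uses atomic_t */
            bool dirty;
            bool aborted;
    };

    /* Mirrors the shape of xfs_buf_item_put(): drop a reference and
     * report whether this call freed the object. */
    static bool obj_put(struct obj *o)
    {
            if (--o->refcount)
                    return false;   /* not the last reference */
            if (o->dirty && !o->aborted)
                    return false;   /* freed later, on writeback completion */
            free(o);
            return true;
    }
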
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index d957a46dc1cb..05db9540e459 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1563,7 +1563,7 @@ xfs_itruncate_extents_flags(
1563 error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags, 1563 error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
1564 XFS_ITRUNC_MAX_EXTENTS, &done); 1564 XFS_ITRUNC_MAX_EXTENTS, &done);
1565 if (error) 1565 if (error)
1566 goto out_bmap_cancel; 1566 goto out;
1567 1567
1568 /* 1568 /*
1569 * Duplicate the transaction that has the permanent 1569 * Duplicate the transaction that has the permanent
@@ -1599,14 +1599,6 @@ xfs_itruncate_extents_flags(
1599out: 1599out:
1600 *tpp = tp; 1600 *tpp = tp;
1601 return error; 1601 return error;
1602out_bmap_cancel:
1603 /*
1604 * If the bunmapi call encounters an error, return to the caller where
1605 * the transaction can be properly aborted. We just need to make sure
1606 * we're not holding any resources that we were not when we came in.
1607 */
1608 xfs_defer_cancel(tp);
1609 goto out;
1610} 1602}
1611 1603
1612int 1604int
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index c3e74f9128e8..f48ffd7a8d3e 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -471,8 +471,18 @@ xfs_vn_get_link_inline(
471 struct inode *inode, 471 struct inode *inode,
472 struct delayed_call *done) 472 struct delayed_call *done)
473{ 473{
474 char *link;
475
474 ASSERT(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE); 476 ASSERT(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE);
475 return XFS_I(inode)->i_df.if_u1.if_data; 477
478 /*
479 * The VFS crashes on a NULL pointer, so return -EFSCORRUPTED if
480 * if_data is junk.
481 */
482 link = XFS_I(inode)->i_df.if_u1.if_data;
483 if (!link)
484 return ERR_PTR(-EFSCORRUPTED);
485 return link;
476} 486}
477 487
478STATIC int 488STATIC int
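
The xfs_vn_get_link_inline() fix relies on the kernel's encoded-error-pointer convention: an errno value is placed in the top of the pointer range so a single return value can carry either a valid pointer or an error. A userspace sketch of the convention, with simplified copies of the kernel helpers (EFSCORRUPTED aliases EUCLEAN, as in the kernel):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO       4095
    #define EFSCORRUPTED    EUCLEAN         /* same aliasing the kernel uses */

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* get_link-style helper: never hand the caller a NULL it would crash on */
    static const char *demo_get_link(const char *if_data)
    {
            if (!if_data)
                    return ERR_PTR(-EFSCORRUPTED);
            return if_data;
    }

    int main(void)
    {
            const char *link = demo_get_link(NULL);

            if (IS_ERR(link))
                    printf("error %ld\n", PTR_ERR(link));   /* prints -117 */
            return 0;
    }
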
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index a21dc61ec09e..1fc9e9042e0e 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1570,16 +1570,6 @@ xlog_find_zeroed(
1570 if (last_cycle != 0) { /* log completely written to */ 1570 if (last_cycle != 0) { /* log completely written to */
1571 xlog_put_bp(bp); 1571 xlog_put_bp(bp);
1572 return 0; 1572 return 0;
1573 } else if (first_cycle != 1) {
1574 /*
1575 * If the cycle of the last block is zero, the cycle of
1576 * the first block must be 1. If it's not, maybe we're
1577 * not looking at a log... Bail out.
1578 */
1579 xfs_warn(log->l_mp,
1580 "Log inconsistent or not a log (last==0, first!=1)");
1581 error = -EINVAL;
1582 goto bp_err;
1583 } 1573 }
1584 1574
1585 /* we have a partially zeroed log */ 1575 /* we have a partially zeroed log */
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 38f405415b88..5289e22cb081 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -352,6 +352,47 @@ xfs_reflink_convert_cow(
352 return error; 352 return error;
353} 353}
354 354
355/*
356 * Find the extent that maps the given range in the COW fork. Even if the extent
357 * is not shared, we might have a preallocation for it in the COW fork. If so,
358 * use it rather than trigger a new allocation.
359 */
360static int
361xfs_find_trim_cow_extent(
362 struct xfs_inode *ip,
363 struct xfs_bmbt_irec *imap,
364 bool *shared,
365 bool *found)
366{
367 xfs_fileoff_t offset_fsb = imap->br_startoff;
368 xfs_filblks_t count_fsb = imap->br_blockcount;
369 struct xfs_iext_cursor icur;
370 struct xfs_bmbt_irec got;
371 bool trimmed;
372
373 *found = false;
374
375 /*
376 * If we don't find an overlapping extent, trim the range we need to
377 * allocate to fit the hole we found.
378 */
379 if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) ||
380 got.br_startoff > offset_fsb)
381 return xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
382
383 *shared = true;
384 if (isnullstartblock(got.br_startblock)) {
385 xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
386 return 0;
387 }
388
389 /* real extent found - no need to allocate */
390 xfs_trim_extent(&got, offset_fsb, count_fsb);
391 *imap = got;
392 *found = true;
393 return 0;
394}
395
355/* Allocate all CoW reservations covering a range of blocks in a file. */ 396/* Allocate all CoW reservations covering a range of blocks in a file. */
356int 397int
357xfs_reflink_allocate_cow( 398xfs_reflink_allocate_cow(
@@ -363,78 +404,64 @@ xfs_reflink_allocate_cow(
363 struct xfs_mount *mp = ip->i_mount; 404 struct xfs_mount *mp = ip->i_mount;
364 xfs_fileoff_t offset_fsb = imap->br_startoff; 405 xfs_fileoff_t offset_fsb = imap->br_startoff;
365 xfs_filblks_t count_fsb = imap->br_blockcount; 406 xfs_filblks_t count_fsb = imap->br_blockcount;
366 struct xfs_bmbt_irec got; 407 struct xfs_trans *tp;
367 struct xfs_trans *tp = NULL;
368 int nimaps, error = 0; 408 int nimaps, error = 0;
369 bool trimmed; 409 bool found;
370 xfs_filblks_t resaligned; 410 xfs_filblks_t resaligned;
371 xfs_extlen_t resblks = 0; 411 xfs_extlen_t resblks = 0;
372 struct xfs_iext_cursor icur;
373 412
374retry:
375 ASSERT(xfs_is_reflink_inode(ip));
376 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 413 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
414 ASSERT(xfs_is_reflink_inode(ip));
377 415
378 /* 416 error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
379 * Even if the extent is not shared we might have a preallocation for 417 if (error || !*shared)
380 * it in the COW fork. If so use it. 418 return error;
381 */ 419 if (found)
382 if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) && 420 goto convert;
383 got.br_startoff <= offset_fsb) {
384 *shared = true;
385
386 /* If we have a real allocation in the COW fork we're done. */
387 if (!isnullstartblock(got.br_startblock)) {
388 xfs_trim_extent(&got, offset_fsb, count_fsb);
389 *imap = got;
390 goto convert;
391 }
392 421
393 xfs_trim_extent(imap, got.br_startoff, got.br_blockcount); 422 resaligned = xfs_aligned_fsb_count(imap->br_startoff,
394 } else { 423 imap->br_blockcount, xfs_get_cowextsz_hint(ip));
395 error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed); 424 resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
396 if (error || !*shared)
397 goto out;
398 }
399 425
400 if (!tp) { 426 xfs_iunlock(ip, *lockmode);
401 resaligned = xfs_aligned_fsb_count(imap->br_startoff, 427 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
402 imap->br_blockcount, xfs_get_cowextsz_hint(ip)); 428 *lockmode = XFS_ILOCK_EXCL;
403 resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); 429 xfs_ilock(ip, *lockmode);
404 430
405 xfs_iunlock(ip, *lockmode); 431 if (error)
406 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp); 432 return error;
407 *lockmode = XFS_ILOCK_EXCL;
408 xfs_ilock(ip, *lockmode);
409 433
410 if (error) 434 error = xfs_qm_dqattach_locked(ip, false);
411 return error; 435 if (error)
436 goto out_trans_cancel;
412 437
413 error = xfs_qm_dqattach_locked(ip, false); 438 /*
414 if (error) 439 * Check for an overlapping extent again now that we dropped the ilock.
415 goto out; 440 */
416 goto retry; 441 error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
442 if (error || !*shared)
443 goto out_trans_cancel;
444 if (found) {
445 xfs_trans_cancel(tp);
446 goto convert;
417 } 447 }
418 448
419 error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0, 449 error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
420 XFS_QMOPT_RES_REGBLKS); 450 XFS_QMOPT_RES_REGBLKS);
421 if (error) 451 if (error)
422 goto out; 452 goto out_trans_cancel;
423 453
424 xfs_trans_ijoin(tp, ip, 0); 454 xfs_trans_ijoin(tp, ip, 0);
425 455
426 nimaps = 1;
427
428 /* Allocate the entire reservation as unwritten blocks. */ 456 /* Allocate the entire reservation as unwritten blocks. */
457 nimaps = 1;
429 error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount, 458 error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
430 XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, 459 XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC,
431 resblks, imap, &nimaps); 460 resblks, imap, &nimaps);
432 if (error) 461 if (error)
433 goto out_trans_cancel; 462 goto out_unreserve;
434 463
435 xfs_inode_set_cowblocks_tag(ip); 464 xfs_inode_set_cowblocks_tag(ip);
436
437 /* Finish up. */
438 error = xfs_trans_commit(tp); 465 error = xfs_trans_commit(tp);
439 if (error) 466 if (error)
440 return error; 467 return error;
@@ -447,12 +474,12 @@ retry:
447 return -ENOSPC; 474 return -ENOSPC;
448convert: 475convert:
449 return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb); 476 return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb);
450out_trans_cancel: 477
478out_unreserve:
451 xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0, 479 xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
452 XFS_QMOPT_RES_REGBLKS); 480 XFS_QMOPT_RES_REGBLKS);
453out: 481out_trans_cancel:
454 if (tp) 482 xfs_trans_cancel(tp);
455 xfs_trans_cancel(tp);
456 return error; 483 return error;
457} 484}
458 485
@@ -666,14 +693,12 @@ xfs_reflink_end_cow(
666 if (!del.br_blockcount) 693 if (!del.br_blockcount)
667 goto prev_extent; 694 goto prev_extent;
668 695
669 ASSERT(!isnullstartblock(got.br_startblock));
670
671 /* 696 /*
672 * Don't remap unwritten extents; these are	697 * Only remap real extents that contain data. With AIO,
673 * speculatively preallocated CoW extents that have been	698 * speculative preallocations can leak into the range we
674 * allocated but have not yet been involved in a write.	699 * are called upon, and we need to skip them.
675 */ 700 */
676 if (got.br_state == XFS_EXT_UNWRITTEN) 701 if (!xfs_bmap_is_real_extent(&got))
677 goto prev_extent; 702 goto prev_extent;
678 703
679 /* Unmap the old blocks in the data fork. */ 704 /* Unmap the old blocks in the data fork. */
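
The xfs_reflink_allocate_cow() rework above is an instance of a classic pattern: look up under the lock, drop the lock for a blocking allocation, retake the lock, then look up again because another thread may have raced in while the lock was dropped. Factoring the lookup into xfs_find_trim_cow_extent() is what lets it run on both sides. A compressed pthread sketch of the pattern, with all names illustrative:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t ilock = PTHREAD_MUTEX_INITIALIZER;
    static bool have_extent;

    static bool lookup(void)          { return have_extent; }
    static int  alloc_blocking(void)  { return 0; }   /* may sleep */

    static int get_or_alloc(void)
    {
            int error = 0;

            pthread_mutex_lock(&ilock);
            if (lookup())
                    goto out_unlock;          /* fast path: already there */

            pthread_mutex_unlock(&ilock);
            error = alloc_blocking();         /* blocking work, lock dropped */
            pthread_mutex_lock(&ilock);
            if (error)
                    goto out_unlock;

            if (!lookup())                    /* recheck: we may have raced */
                    have_extent = true;       /* commit our allocation */
            /* else a racer beat us: back out, as the hunk does with
             * xfs_trans_cancel() before jumping to convert */
    out_unlock:
            pthread_mutex_unlock(&ilock);
            return error;
    }
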
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index ad315e83bc02..3043e5ed6495 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -473,7 +473,6 @@ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
473DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin); 473DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
474DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale); 474DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
475DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock); 475DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
476DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
477DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed); 476DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
478DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push); 477DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
479DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf); 478DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index bedc5a5133a5..912b42f5fe4a 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -259,6 +259,14 @@ xfs_trans_alloc(
259 struct xfs_trans *tp; 259 struct xfs_trans *tp;
260 int error; 260 int error;
261 261
262 /*
263 * Allocate the handle before we do our freeze accounting and set up the
264 * GFP_NOFS allocation context, so that we avoid the lockdep false positives
265 * that GFP_KERNEL allocations inside sb_start_intwrite() would trigger.
266 */
267 tp = kmem_zone_zalloc(xfs_trans_zone,
268 (flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
269
262 if (!(flags & XFS_TRANS_NO_WRITECOUNT)) 270 if (!(flags & XFS_TRANS_NO_WRITECOUNT))
263 sb_start_intwrite(mp->m_super); 271 sb_start_intwrite(mp->m_super);
264 272
@@ -270,8 +278,6 @@ xfs_trans_alloc(
270 mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE); 278 mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
271 atomic_inc(&mp->m_active_trans); 279 atomic_inc(&mp->m_active_trans);
272 280
273 tp = kmem_zone_zalloc(xfs_trans_zone,
274 (flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
275 tp->t_magic = XFS_TRANS_HEADER_MAGIC; 281 tp->t_magic = XFS_TRANS_HEADER_MAGIC;
276 tp->t_flags = flags; 282 tp->t_flags = flags;
277 tp->t_mountp = mp; 283 tp->t_mountp = mp;
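
The xfs_trans_alloc() hunk is purely about ordering: the zone allocation may use GFP_KERNEL, and performing it inside sb_start_intwrite() lets lockdep connect the freeze annotation with reclaim. Allocating first breaks that chain. A stub sketch of the ordering; the types and the sb_start_intwrite() stand-in are illustrative, not the kernel API:

    #include <stdlib.h>

    struct super { int frozen; };
    struct trans { int flags; };

    static void sb_start_intwrite(struct super *sb) { (void)sb; }

    static struct trans *trans_alloc(struct super *sb)
    {
            /* allocate while GFP_KERNEL-style allocation is still safe ... */
            struct trans *tp = calloc(1, sizeof(*tp));

            if (!tp)
                    return NULL;
            /* ... and only then take the freeze reference */
            sb_start_intwrite(sb);
            return tp;
    }
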
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 15919f67a88f..286a287ac57a 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -322,49 +322,38 @@ xfs_trans_read_buf_map(
322} 322}
323 323
324/* 324/*
325 * Release the buffer bp which was previously acquired with one of the 325 * Release a buffer previously joined to the transaction. If the buffer is
326 * xfs_trans_... buffer allocation routines if the buffer has not 326 * modified within this transaction, decrement the recursion count but do not
327 * been modified within this transaction. If the buffer is modified 327 * release the buffer even if the count goes to 0. If the buffer is not modified
328 * within this transaction, do decrement the recursion count but do 328 * within the transaction, decrement the recursion count and release the buffer
329 * not release the buffer even if the count goes to 0. If the buffer is not 329 * if the recursion count goes to 0.
330 * modified within the transaction, decrement the recursion count and
331 * release the buffer if the recursion count goes to 0.
332 * 330 *
333 * If the buffer is to be released and it was not modified before 331 * If the buffer is to be released and it was not already dirty before this
334 * this transaction began, then free the buf_log_item associated with it. 332 * transaction began, then also free the buf_log_item associated with it.
335 * 333 *
336 * If the transaction pointer is NULL, make this just a normal 334 * If the transaction pointer is NULL, this is a normal xfs_buf_relse() call.
337 * brelse() call.
338 */ 335 */
339void 336void
340xfs_trans_brelse( 337xfs_trans_brelse(
341 xfs_trans_t *tp, 338 struct xfs_trans *tp,
342 xfs_buf_t *bp) 339 struct xfs_buf *bp)
343{ 340{
344 struct xfs_buf_log_item *bip; 341 struct xfs_buf_log_item *bip = bp->b_log_item;
345 int freed;
346 342
347 /* 343 ASSERT(bp->b_transp == tp);
348 * Default to a normal brelse() call if the tp is NULL. 344
349 */ 345 if (!tp) {
350 if (tp == NULL) {
351 ASSERT(bp->b_transp == NULL);
352 xfs_buf_relse(bp); 346 xfs_buf_relse(bp);
353 return; 347 return;
354 } 348 }
355 349
356 ASSERT(bp->b_transp == tp); 350 trace_xfs_trans_brelse(bip);
357 bip = bp->b_log_item;
358 ASSERT(bip->bli_item.li_type == XFS_LI_BUF); 351 ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
359 ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
360 ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
361 ASSERT(atomic_read(&bip->bli_refcount) > 0); 352 ASSERT(atomic_read(&bip->bli_refcount) > 0);
362 353
363 trace_xfs_trans_brelse(bip);
364
365 /* 354 /*
366 * If the release is just for a recursive lock, 355 * If the release is for a recursive lookup, then decrement the count
367 * then decrement the count and return. 356 * and return.
368 */ 357 */
369 if (bip->bli_recur > 0) { 358 if (bip->bli_recur > 0) {
370 bip->bli_recur--; 359 bip->bli_recur--;
@@ -372,64 +361,24 @@ xfs_trans_brelse(
372 } 361 }
373 362
374 /* 363 /*
375 * If the buffer is dirty within this transaction, we can't 364 * If the buffer is invalidated or dirty in this transaction, we can't
376 * release it until we commit. 365 * release it until we commit.
377 */ 366 */
378 if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags)) 367 if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
379 return; 368 return;
380
381 /*
382 * If the buffer has been invalidated, then we can't release
383 * it until the transaction commits to disk unless it is re-dirtied
384 * as part of this transaction. This prevents us from pulling
385 * the item from the AIL before we should.
386 */
387 if (bip->bli_flags & XFS_BLI_STALE) 369 if (bip->bli_flags & XFS_BLI_STALE)
388 return; 370 return;
389 371
390 ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
391
392 /* 372 /*
393 * Free up the log item descriptor tracking the released item. 373 * Unlink the log item from the transaction and clear the hold flag, if
374 * set. We wouldn't want the next user of the buffer to get confused.
394 */ 375 */
376 ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
395 xfs_trans_del_item(&bip->bli_item); 377 xfs_trans_del_item(&bip->bli_item);
378 bip->bli_flags &= ~XFS_BLI_HOLD;
396 379
397 /* 380 /* drop the reference to the bli */
398 * Clear the hold flag in the buf log item if it is set. 381 xfs_buf_item_put(bip);
399 * We wouldn't want the next user of the buffer to
400 * get confused.
401 */
402 if (bip->bli_flags & XFS_BLI_HOLD) {
403 bip->bli_flags &= ~XFS_BLI_HOLD;
404 }
405
406 /*
407 * Drop our reference to the buf log item.
408 */
409 freed = atomic_dec_and_test(&bip->bli_refcount);
410
411 /*
412 * If the buf item is not tracking data in the log, then we must free it
413 * before releasing the buffer back to the free pool.
414 *
415 * If the fs has shutdown and we dropped the last reference, it may fall
416 * on us to release a (possibly dirty) bli if it never made it to the
417 * AIL (e.g., the aborted unpin already happened and didn't release it
418 * due to our reference). Since we're already shutdown and need
419 * ail_lock, just force remove from the AIL and release the bli here.
420 */
421 if (XFS_FORCED_SHUTDOWN(tp->t_mountp) && freed) {
422 xfs_trans_ail_remove(&bip->bli_item, SHUTDOWN_LOG_IO_ERROR);
423 xfs_buf_item_relse(bp);
424 } else if (!(bip->bli_flags & XFS_BLI_DIRTY)) {
425/***
426 ASSERT(bp->b_pincount == 0);
427***/
428 ASSERT(atomic_read(&bip->bli_refcount) == 0);
429 ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));
430 ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
431 xfs_buf_item_relse(bp);
432 }
433 382
434 bp->b_transp = NULL; 383 bp->b_transp = NULL;
435 xfs_buf_relse(bp); 384 xfs_buf_relse(bp);
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 989f8e52864d..971bb7853776 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -87,9 +87,10 @@ struct drm_client_dev {
87 struct drm_file *file; 87 struct drm_file *file;
88}; 88};
89 89
90int drm_client_new(struct drm_device *dev, struct drm_client_dev *client, 90int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
91 const char *name, const struct drm_client_funcs *funcs); 91 const char *name, const struct drm_client_funcs *funcs);
92void drm_client_release(struct drm_client_dev *client); 92void drm_client_release(struct drm_client_dev *client);
93void drm_client_add(struct drm_client_dev *client);
93 94
94void drm_client_dev_unregister(struct drm_device *dev); 95void drm_client_dev_unregister(struct drm_device *dev);
95void drm_client_dev_hotplug(struct drm_device *dev); 96void drm_client_dev_hotplug(struct drm_device *dev);
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 8942e61f0028..8ab5df769923 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -53,12 +53,20 @@ enum fpga_mgr_states {
53 FPGA_MGR_STATE_OPERATING, 53 FPGA_MGR_STATE_OPERATING,
54}; 54};
55 55
56/* 56/**
57 * FPGA Manager flags 57 * DOC: FPGA Manager flags
58 * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported 58 *
59 * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting 59 * Flags used in the &fpga_image_info->flags field
60 * FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first 60 *
61 * FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed 61 * %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
62 *
63 * %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
64 *
65 * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted
66 *
67 * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
68 *
69 * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
62 */ 70 */
63#define FPGA_MGR_PARTIAL_RECONFIG BIT(0) 71#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
64#define FPGA_MGR_EXTERNAL_CONFIG BIT(1) 72#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6c0b4a1c22ff..897eae8faee1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1828,8 +1828,10 @@ extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
1828extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in, 1828extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
1829 struct inode *inode_out, loff_t pos_out, 1829 struct inode *inode_out, loff_t pos_out,
1830 u64 *len, bool is_dedupe); 1830 u64 *len, bool is_dedupe);
1831extern int do_clone_file_range(struct file *file_in, loff_t pos_in,
1832 struct file *file_out, loff_t pos_out, u64 len);
1831extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in, 1833extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
1832 struct file *file_out, loff_t pos_out, u64 len); 1834 struct file *file_out, loff_t pos_out, u64 len);
1833extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff, 1835extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1834 struct inode *dest, loff_t destoff, 1836 struct inode *dest, loff_t destoff,
1835 loff_t len, bool *is_same); 1837 loff_t len, bool *is_same);
@@ -2773,19 +2775,6 @@ static inline void file_end_write(struct file *file)
2773 __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE); 2775 __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
2774} 2776}
2775 2777
2776static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
2777 struct file *file_out, loff_t pos_out,
2778 u64 len)
2779{
2780 int ret;
2781
2782 file_start_write(file_out);
2783 ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
2784 file_end_write(file_out);
2785
2786 return ret;
2787}
2788
2789/* 2778/*
2790 * get_write_access() gets write permission for a file. 2779 * get_write_access() gets write permission for a file.
2791 * put_write_access() releases this write permission. 2780 * put_write_access() releases this write permission.
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6b68e345f0ca..087fd5f48c91 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -140,6 +140,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
140pte_t *huge_pte_offset(struct mm_struct *mm, 140pte_t *huge_pte_offset(struct mm_struct *mm,
141 unsigned long addr, unsigned long sz); 141 unsigned long addr, unsigned long sz);
142int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); 142int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
143void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
144 unsigned long *start, unsigned long *end);
143struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, 145struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
144 int write); 146 int write);
145struct page *follow_huge_pd(struct vm_area_struct *vma, 147struct page *follow_huge_pd(struct vm_area_struct *vma,
@@ -170,6 +172,18 @@ static inline unsigned long hugetlb_total_pages(void)
170 return 0; 172 return 0;
171} 173}
172 174
175static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
176 pte_t *ptep)
177{
178 return 0;
179}
180
181static inline void adjust_range_if_pmd_sharing_possible(
182 struct vm_area_struct *vma,
183 unsigned long *start, unsigned long *end)
184{
185}
186
173#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; }) 187#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
174#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) 188#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
175#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) 189#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 83a33a1873a6..7f5ca2cd3a32 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -90,6 +90,8 @@ struct mlx5_hairpin {
90 90
91 u32 *rqn; 91 u32 *rqn;
92 u32 *sqn; 92 u32 *sqn;
93
94 bool peer_gone;
93}; 95};
94 96
95struct mlx5_hairpin * 97struct mlx5_hairpin *
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a61ebe8ad4ca..0416a7204be3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2455,6 +2455,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
2455 return vma; 2455 return vma;
2456} 2456}
2457 2457
2458static inline bool range_in_vma(struct vm_area_struct *vma,
2459 unsigned long start, unsigned long end)
2460{
2461 return (vma && vma->vm_start <= start && end <= vma->vm_end);
2462}
2463
2458#ifdef CONFIG_MMU 2464#ifdef CONFIG_MMU
2459pgprot_t vm_get_page_prot(unsigned long vm_flags); 2465pgprot_t vm_get_page_prot(unsigned long vm_flags);
2460void vma_set_page_prot(struct vm_area_struct *vma); 2466void vma_set_page_prot(struct vm_area_struct *vma);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1e22d96734e0..3f4c0b167333 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -671,12 +671,6 @@ typedef struct pglist_data {
671#ifdef CONFIG_NUMA_BALANCING 671#ifdef CONFIG_NUMA_BALANCING
672 /* Lock serializing the migrate rate limiting window */ 672 /* Lock serializing the migrate rate limiting window */
673 spinlock_t numabalancing_migrate_lock; 673 spinlock_t numabalancing_migrate_lock;
674
675 /* Rate limiting time interval */
676 unsigned long numabalancing_migrate_next_window;
677
678 /* Number of pages migrated during the rate limiting time interval */
679 unsigned long numabalancing_migrate_nr_pages;
680#endif 674#endif
681 /* 675 /*
682 * This is a per-node reserve of pages that are not available 676 * This is a per-node reserve of pages that are not available
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ca5ab98053c8..c7861e4b402c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1730,6 +1730,8 @@ enum netdev_priv_flags {
1730 * switch driver and used to set the phys state of the 1730 * switch driver and used to set the phys state of the
1731 * switch port. 1731 * switch port.
1732 * 1732 *
1733 * @wol_enabled: Wake-on-LAN is enabled
1734 *
1733 * FIXME: cleanup struct net_device such that network protocol info 1735 * FIXME: cleanup struct net_device such that network protocol info
1734 * moves out. 1736 * moves out.
1735 */ 1737 */
@@ -2014,6 +2016,7 @@ struct net_device {
2014 struct lock_class_key *qdisc_tx_busylock; 2016 struct lock_class_key *qdisc_tx_busylock;
2015 struct lock_class_key *qdisc_running_key; 2017 struct lock_class_key *qdisc_running_key;
2016 bool proto_down; 2018 bool proto_down;
2019 unsigned wol_enabled:1;
2017}; 2020};
2018#define to_net_dev(d) container_of(d, struct net_device, dev) 2021#define to_net_dev(d) container_of(d, struct net_device, dev)
2019 2022
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 07efffd0c759..bbe99d2b28b4 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -215,6 +215,8 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
215 break; 215 break;
216 case NFPROTO_ARP: 216 case NFPROTO_ARP:
217#ifdef CONFIG_NETFILTER_FAMILY_ARP 217#ifdef CONFIG_NETFILTER_FAMILY_ARP
218 if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
219 break;
218 hook_head = rcu_dereference(net->nf.hooks_arp[hook]); 220 hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
219#endif 221#endif
220 break; 222 break;
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index c0e795d95477..1c89611e0e06 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -36,6 +36,7 @@ enum {
36 SCIx_SH4_SCIF_FIFODATA_REGTYPE, 36 SCIx_SH4_SCIF_FIFODATA_REGTYPE,
37 SCIx_SH7705_SCIF_REGTYPE, 37 SCIx_SH7705_SCIF_REGTYPE,
38 SCIx_HSCIF_REGTYPE, 38 SCIx_HSCIF_REGTYPE,
39 SCIx_RZ_SCIFA_REGTYPE,
39 40
40 SCIx_NR_REGTYPES, 41 SCIx_NR_REGTYPES,
41}; 42};
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 9397628a1967..cb462f9ab7dd 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -5,6 +5,24 @@
5#include <linux/if_vlan.h> 5#include <linux/if_vlan.h>
6#include <uapi/linux/virtio_net.h> 6#include <uapi/linux/virtio_net.h>
7 7
8static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
9 const struct virtio_net_hdr *hdr)
10{
11 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
12 case VIRTIO_NET_HDR_GSO_TCPV4:
13 case VIRTIO_NET_HDR_GSO_UDP:
14 skb->protocol = cpu_to_be16(ETH_P_IP);
15 break;
16 case VIRTIO_NET_HDR_GSO_TCPV6:
17 skb->protocol = cpu_to_be16(ETH_P_IPV6);
18 break;
19 default:
20 return -EINVAL;
21 }
22
23 return 0;
24}
25
8static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, 26static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
9 const struct virtio_net_hdr *hdr, 27 const struct virtio_net_hdr *hdr,
10 bool little_endian) 28 bool little_endian)
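
The new virtio_net_hdr_set_proto() derives the L3 protocol from the GSO type when the guest did not tag the packet. A standalone rendition of that logic; the constants are illustrative copies of the values in uapi/linux/virtio_net.h and if_ether.h rather than includes of those headers:

    #include <stdint.h>
    #include <arpa/inet.h>

    #define GSO_ECN    0x80
    #define GSO_TCPV4  1
    #define GSO_UDP    3
    #define GSO_TCPV6  4
    #define ETH_P_IP   0x0800
    #define ETH_P_IPV6 0x86DD

    /* Pick the network-byte-order protocol from the GSO type, as the
     * new helper does; returns -1 (-EINVAL in the kernel) if unknown. */
    static int proto_from_gso(uint8_t gso_type, uint16_t *proto_be)
    {
            switch (gso_type & ~GSO_ECN) {
            case GSO_TCPV4:
            case GSO_UDP:
                    *proto_be = htons(ETH_P_IP);
                    return 0;
            case GSO_TCPV6:
                    *proto_be = htons(ETH_P_IPV6);
                    return 0;
            default:
                    return -1;
            }
    }
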
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
index ea73fef8bdc0..8586cfb49828 100644
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -38,10 +38,13 @@ struct v4l2_ctrl_handler;
38 * @prio: priority of the file handler, as defined by &enum v4l2_priority 38 * @prio: priority of the file handler, as defined by &enum v4l2_priority
39 * 39 *
40 * @wait: event's wait queue	40 * @wait: event's wait queue
41 * @subscribe_lock: serialise changes to the subscribed list; guarantee that
42 * the add and del event callbacks are called in order
41 * @subscribed: list of subscribed events 43 * @subscribed: list of subscribed events
42 * @available: list of events waiting to be dequeued 44 * @available: list of events waiting to be dequeued
43 * @navailable: number of available events at @available list 45 * @navailable: number of available events at @available list
44 * @sequence: event sequence number 46 * @sequence: event sequence number
47 *
45 * @m2m_ctx: pointer to &struct v4l2_m2m_ctx 48 * @m2m_ctx: pointer to &struct v4l2_m2m_ctx
46 */ 49 */
47struct v4l2_fh { 50struct v4l2_fh {
@@ -52,6 +55,7 @@ struct v4l2_fh {
52 55
53 /* Events */ 56 /* Events */
54 wait_queue_head_t wait; 57 wait_queue_head_t wait;
58 struct mutex subscribe_lock;
55 struct list_head subscribed; 59 struct list_head subscribed;
56 struct list_head available; 60 struct list_head available;
57 unsigned int navailable; 61 unsigned int navailable;
diff --git a/include/net/bonding.h b/include/net/bonding.h
index a2d058170ea3..b46d68acf701 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -139,12 +139,6 @@ struct bond_parm_tbl {
139 int mode; 139 int mode;
140}; 140};
141 141
142struct netdev_notify_work {
143 struct delayed_work work;
144 struct net_device *dev;
145 struct netdev_bonding_info bonding_info;
146};
147
148struct slave { 142struct slave {
149 struct net_device *dev; /* first - useful for panic debug */ 143 struct net_device *dev; /* first - useful for panic debug */
150 struct bonding *bond; /* our master */ 144 struct bonding *bond; /* our master */
@@ -172,6 +166,7 @@ struct slave {
172#ifdef CONFIG_NET_POLL_CONTROLLER 166#ifdef CONFIG_NET_POLL_CONTROLLER
173 struct netpoll *np; 167 struct netpoll *np;
174#endif 168#endif
169 struct delayed_work notify_work;
175 struct kobject kobj; 170 struct kobject kobj;
176 struct rtnl_link_stats64 slave_stats; 171 struct rtnl_link_stats64 slave_stats;
177}; 172};
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 8ebabc9873d1..4de121e24ce5 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4852,8 +4852,6 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
4852 * 4852 *
4853 * @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried. 4853 * @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried.
4854 * @freq: the freqency(in MHz) to be queried. 4854 * @freq: the freqency(in MHz) to be queried.
4855 * @ptr: pointer where the regdb wmm data is to be stored (or %NULL if
4856 * irrelevant). This can be used later for deduplication.
4857 * @rule: pointer to store the wmm rule from the regulatory db. 4855 * @rule: pointer to store the wmm rule from the regulatory db.
4858 * 4856 *
4859 * Self-managed wireless drivers can use this function to query 4857 * Self-managed wireless drivers can use this function to query
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index e03b93360f33..a80fd0ac4563 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -130,12 +130,6 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
130 return sk->sk_bound_dev_if; 130 return sk->sk_bound_dev_if;
131} 131}
132 132
133static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
134{
135 return rcu_dereference_check(ireq->ireq_opt,
136 refcount_read(&ireq->req.rsk_refcnt) > 0);
137}
138
139struct inet_cork { 133struct inet_cork {
140 unsigned int flags; 134 unsigned int flags;
141 __be32 addr; 135 __be32 addr;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 0c154f98e987..39e1d875d507 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -153,7 +153,7 @@
153 * nla_find() find attribute in stream of attributes 153 * nla_find() find attribute in stream of attributes
154 * nla_find_nested() find attribute in nested attributes 154 * nla_find_nested() find attribute in nested attributes
155 * nla_parse() parse and validate stream of attrs 155 * nla_parse() parse and validate stream of attrs
156 * nla_parse_nested() parse nested attribuets 156 * nla_parse_nested() parse nested attributes
157 * nla_for_each_attr() loop over all attributes 157 * nla_for_each_attr() loop over all attributes
158 * nla_for_each_nested() loop over the nested attributes 158 * nla_for_each_nested() loop over the nested attributes
159 *========================================================================= 159 *=========================================================================
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 711372845945..705b33d1e395 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -70,33 +70,6 @@ TRACE_EVENT(mm_migrate_pages,
70 __print_symbolic(__entry->mode, MIGRATE_MODE), 70 __print_symbolic(__entry->mode, MIGRATE_MODE),
71 __print_symbolic(__entry->reason, MIGRATE_REASON)) 71 __print_symbolic(__entry->reason, MIGRATE_REASON))
72); 72);
73
74TRACE_EVENT(mm_numa_migrate_ratelimit,
75
76 TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages),
77
78 TP_ARGS(p, dst_nid, nr_pages),
79
80 TP_STRUCT__entry(
81 __array( char, comm, TASK_COMM_LEN)
82 __field( pid_t, pid)
83 __field( int, dst_nid)
84 __field( unsigned long, nr_pages)
85 ),
86
87 TP_fast_assign(
88 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
89 __entry->pid = p->pid;
90 __entry->dst_nid = dst_nid;
91 __entry->nr_pages = nr_pages;
92 ),
93
94 TP_printk("comm=%s pid=%d dst_nid=%d nr_pages=%lu",
95 __entry->comm,
96 __entry->pid,
97 __entry->dst_nid,
98 __entry->nr_pages)
99);
100#endif /* _TRACE_MIGRATE_H */ 73#endif /* _TRACE_MIGRATE_H */
101 74
102/* This part must be outside protection */ 75/* This part must be outside protection */
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 196587b8f204..837393fa897b 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -56,7 +56,6 @@ enum rxrpc_peer_trace {
56 rxrpc_peer_new, 56 rxrpc_peer_new,
57 rxrpc_peer_processing, 57 rxrpc_peer_processing,
58 rxrpc_peer_put, 58 rxrpc_peer_put,
59 rxrpc_peer_queued_error,
60}; 59};
61 60
62enum rxrpc_conn_trace { 61enum rxrpc_conn_trace {
@@ -257,8 +256,7 @@ enum rxrpc_tx_point {
257 EM(rxrpc_peer_got, "GOT") \ 256 EM(rxrpc_peer_got, "GOT") \
258 EM(rxrpc_peer_new, "NEW") \ 257 EM(rxrpc_peer_new, "NEW") \
259 EM(rxrpc_peer_processing, "PRO") \ 258 EM(rxrpc_peer_processing, "PRO") \
260 EM(rxrpc_peer_put, "PUT") \ 259 E_(rxrpc_peer_put, "PUT")
261 E_(rxrpc_peer_queued_error, "QER")
262 260
263#define rxrpc_conn_traces \ 261#define rxrpc_conn_traces \
264 EM(rxrpc_conn_got, "GOT") \ 262 EM(rxrpc_conn_got, "GOT") \
diff --git a/include/uapi/asm-generic/hugetlb_encode.h b/include/uapi/asm-generic/hugetlb_encode.h
index e4732d3c2998..b0f8e87235bd 100644
--- a/include/uapi/asm-generic/hugetlb_encode.h
+++ b/include/uapi/asm-generic/hugetlb_encode.h
@@ -26,7 +26,9 @@
26#define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT) 26#define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT)
27#define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT) 27#define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT)
28#define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT) 28#define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT)
29#define HUGETLB_FLAG_ENCODE_32MB (25 << HUGETLB_FLAG_ENCODE_SHIFT)
29#define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT) 30#define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT)
31#define HUGETLB_FLAG_ENCODE_512MB (29 << HUGETLB_FLAG_ENCODE_SHIFT)
30#define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT) 32#define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT)
31#define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT) 33#define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT)
32#define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT) 34#define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT)
diff --git a/include/uapi/linux/memfd.h b/include/uapi/linux/memfd.h
index 015a4c0bbb47..7a8a26751c23 100644
--- a/include/uapi/linux/memfd.h
+++ b/include/uapi/linux/memfd.h
@@ -25,7 +25,9 @@
25#define MFD_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB 25#define MFD_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
26#define MFD_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB 26#define MFD_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
27#define MFD_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB 27#define MFD_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
28#define MFD_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB
28#define MFD_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB 29#define MFD_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
30#define MFD_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
29#define MFD_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB 31#define MFD_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
30#define MFD_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB 32#define MFD_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
31#define MFD_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB 33#define MFD_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h
index bfd5938fede6..d0f515d53299 100644
--- a/include/uapi/linux/mman.h
+++ b/include/uapi/linux/mman.h
@@ -28,7 +28,9 @@
28#define MAP_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB 28#define MAP_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
29#define MAP_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB 29#define MAP_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
30#define MAP_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB 30#define MAP_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
31#define MAP_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB
31#define MAP_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB 32#define MAP_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
33#define MAP_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
32#define MAP_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB 34#define MAP_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
33#define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB 35#define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
34#define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB 36#define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
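
The new 32MB and 512MB encodings follow the existing scheme: log2 of the huge page size shifted into the top six bits of the flags word (MAP_HUGE_SHIFT is 26, and log2(32MB) = 25). A hedged usage sketch; whether the mmap() actually succeeds depends on the architecture providing 32MB huge pages and on pages being reserved:

    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef MAP_HUGE_SHIFT
    #define MAP_HUGE_SHIFT 26
    #endif
    /* same value as the new HUGETLB_FLAG_ENCODE_32MB / MAP_HUGE_32MB */
    #define MAP_HUGE_32MB_DEMO (25 << MAP_HUGE_SHIFT)

    int main(void)
    {
            size_t len = 32UL << 20;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
                           MAP_HUGE_32MB_DEMO, -1, 0);

            if (p == MAP_FAILED)
                    perror("mmap");   /* expected unless 32MB pages exist */
            else
                    munmap(p, len);
            return 0;
    }
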
diff --git a/include/uapi/linux/shm.h b/include/uapi/linux/shm.h
index dde1344f047c..6507ad0afc81 100644
--- a/include/uapi/linux/shm.h
+++ b/include/uapi/linux/shm.h
@@ -65,7 +65,9 @@ struct shmid_ds {
65#define SHM_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB 65#define SHM_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
66#define SHM_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB 66#define SHM_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
67#define SHM_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB 67#define SHM_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
68#define SHM_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB
68#define SHM_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB 69#define SHM_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
70#define SHM_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
69#define SHM_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB 71#define SHM_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
70#define SHM_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB 72#define SHM_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
71#define SHM_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB 73#define SHM_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
diff --git a/ipc/shm.c b/ipc/shm.c
index 4cd402e4cfeb..1c65fb357395 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -206,7 +206,7 @@ err:
206 * Callers of shm_lock() must validate the status of the returned ipc 206 * Callers of shm_lock() must validate the status of the returned ipc
207 * object pointer and error out as appropriate. 207 * object pointer and error out as appropriate.
208 */ 208 */
209 return (void *)ipcp; 209 return ERR_CAST(ipcp);
210} 210}
211 211
212static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) 212static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 22ad967d1e5f..830d7f095748 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -129,7 +129,7 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
129 struct bpf_cgroup_storage *storage; 129 struct bpf_cgroup_storage *storage;
130 struct bpf_storage_buffer *new; 130 struct bpf_storage_buffer *new;
131 131
132 if (flags & BPF_NOEXIST) 132 if (flags != BPF_ANY && flags != BPF_EXIST)
133 return -EINVAL; 133 return -EINVAL;
134 134
135 storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map, 135 storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
@@ -195,6 +195,9 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
195 if (attr->key_size != sizeof(struct bpf_cgroup_storage_key)) 195 if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
196 return ERR_PTR(-EINVAL); 196 return ERR_PTR(-EINVAL);
197 197
198 if (attr->value_size == 0)
199 return ERR_PTR(-EINVAL);
200
198 if (attr->value_size > PAGE_SIZE) 201 if (attr->value_size > PAGE_SIZE)
199 return ERR_PTR(-E2BIG); 202 return ERR_PTR(-E2BIG);
200 203
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index bb07e74b34a2..465952a8e465 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2896,6 +2896,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2896 u64 umin_val, umax_val; 2896 u64 umin_val, umax_val;
2897 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; 2897 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
2898 2898
2899 if (insn_bitness == 32) {
2900 /* Relevant for 32-bit RSH: Information can propagate towards
2901 * LSB, so it isn't sufficient to only truncate the output to
2902 * 32 bits.
2903 */
2904 coerce_reg_to_size(dst_reg, 4);
2905 coerce_reg_to_size(&src_reg, 4);
2906 }
2907
2899 smin_val = src_reg.smin_value; 2908 smin_val = src_reg.smin_value;
2900 smax_val = src_reg.smax_value; 2909 smax_val = src_reg.smax_value;
2901 umin_val = src_reg.umin_value; 2910 umin_val = src_reg.umin_value;
@@ -3131,7 +3140,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3131 if (BPF_CLASS(insn->code) != BPF_ALU64) { 3140 if (BPF_CLASS(insn->code) != BPF_ALU64) {
3132 /* 32-bit ALU ops are (32,32)->32 */ 3141 /* 32-bit ALU ops are (32,32)->32 */
3133 coerce_reg_to_size(dst_reg, 4); 3142 coerce_reg_to_size(dst_reg, 4);
3134 coerce_reg_to_size(&src_reg, 4);
3135 } 3143 }
3136 3144
3137 __reg_deduce_bounds(dst_reg); 3145 __reg_deduce_bounds(dst_reg);
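
The verifier hunk singles out right shifts: unlike ADD or SUB, where truncating the 64-bit result to 32 bits afterwards is sufficient, RSH moves the untracked upper bits toward the LSB, so both operands must be coerced to 32 bits before the bounds are computed. A two-assert demonstration of why result-only truncation is not enough:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t reg = 0x1ffffffffULL;   /* stale data above bit 31 */

            /* truncating only the result keeps bit 32's influence ... */
            uint32_t after  = (uint32_t)(reg >> 4);
            /* ... truncating the operand first, as the hunk does, does not */
            uint32_t before = (uint32_t)((uint32_t)reg >> 4);

            assert(after  == 0x1fffffff);    /* high bit leaked toward the LSB */
            assert(before == 0x0fffffff);
            return 0;
    }
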
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dcb093e7b377..5a97f34bc14c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8314,6 +8314,8 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
8314 goto unlock; 8314 goto unlock;
8315 8315
8316 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 8316 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
8317 if (event->cpu != smp_processor_id())
8318 continue;
8317 if (event->attr.type != PERF_TYPE_TRACEPOINT) 8319 if (event->attr.type != PERF_TYPE_TRACEPOINT)
8318 continue; 8320 continue;
8319 if (event->attr.config != entry->type) 8321 if (event->attr.config != entry->type)
@@ -9431,9 +9433,7 @@ static void free_pmu_context(struct pmu *pmu)
9431 if (pmu->task_ctx_nr > perf_invalid_context) 9433 if (pmu->task_ctx_nr > perf_invalid_context)
9432 return; 9434 return;
9433 9435
9434 mutex_lock(&pmus_lock);
9435 free_percpu(pmu->pmu_cpu_context); 9436 free_percpu(pmu->pmu_cpu_context);
9436 mutex_unlock(&pmus_lock);
9437} 9437}
9438 9438
9439/* 9439/*
@@ -9689,12 +9689,8 @@ EXPORT_SYMBOL_GPL(perf_pmu_register);
9689 9689
9690void perf_pmu_unregister(struct pmu *pmu) 9690void perf_pmu_unregister(struct pmu *pmu)
9691{ 9691{
9692 int remove_device;
9693
9694 mutex_lock(&pmus_lock); 9692 mutex_lock(&pmus_lock);
9695 remove_device = pmu_bus_running;
9696 list_del_rcu(&pmu->entry); 9693 list_del_rcu(&pmu->entry);
9697 mutex_unlock(&pmus_lock);
9698 9694
9699 /* 9695 /*
9700 * We dereference the pmu list under both SRCU and regular RCU, so 9696 * We dereference the pmu list under both SRCU and regular RCU, so
@@ -9706,13 +9702,14 @@ void perf_pmu_unregister(struct pmu *pmu)
9706 free_percpu(pmu->pmu_disable_count); 9702 free_percpu(pmu->pmu_disable_count);
9707 if (pmu->type >= PERF_TYPE_MAX) 9703 if (pmu->type >= PERF_TYPE_MAX)
9708 idr_remove(&pmu_idr, pmu->type); 9704 idr_remove(&pmu_idr, pmu->type);
9709 if (remove_device) { 9705 if (pmu_bus_running) {
9710 if (pmu->nr_addr_filters) 9706 if (pmu->nr_addr_filters)
9711 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters); 9707 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
9712 device_del(pmu->dev); 9708 device_del(pmu->dev);
9713 put_device(pmu->dev); 9709 put_device(pmu->dev);
9714 } 9710 }
9715 free_pmu_context(pmu); 9711 free_pmu_context(pmu);
9712 mutex_unlock(&pmus_lock);
9716} 9713}
9717EXPORT_SYMBOL_GPL(perf_pmu_unregister); 9714EXPORT_SYMBOL_GPL(perf_pmu_unregister);
9718 9715
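The perf_tp_event() hunk adds a per-CPU filter so tracepoint events bound to another CPU are not counted on the CPU where the tracepoint happened to fire. A rough userspace analogy of the added check, with a plain array standing in for the RCU-protected event list (the struct and names here are illustrative, not the kernel's):

    #include <stdio.h>

    struct event { int cpu; };

    static void deliver(const struct event *evs, int n, int this_cpu)
    {
        for (int i = 0; i < n; i++) {
            /* Mirrors: if (event->cpu != smp_processor_id()) continue; */
            if (evs[i].cpu != this_cpu)
                continue;
            printf("event %d counted on cpu %d\n", i, this_cpu);
        }
    }

    int main(void)
    {
        struct event evs[] = { { .cpu = 0 }, { .cpu = 1 }, { .cpu = 0 } };
        deliver(evs, 3, 0);   /* only the two cpu-0 events are counted */
        return 0;
    }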
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 0be047dbd897..65a3b7e55b9f 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -260,7 +260,7 @@ static void test_cycle_work(struct work_struct *work)
260{ 260{
261 struct test_cycle *cycle = container_of(work, typeof(*cycle), work); 261 struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
262 struct ww_acquire_ctx ctx; 262 struct ww_acquire_ctx ctx;
263 int err; 263 int err, erra = 0;
264 264
265 ww_acquire_init(&ctx, &ww_class); 265 ww_acquire_init(&ctx, &ww_class);
266 ww_mutex_lock(&cycle->a_mutex, &ctx); 266 ww_mutex_lock(&cycle->a_mutex, &ctx);
@@ -270,17 +270,19 @@ static void test_cycle_work(struct work_struct *work)
270 270
271 err = ww_mutex_lock(cycle->b_mutex, &ctx); 271 err = ww_mutex_lock(cycle->b_mutex, &ctx);
272 if (err == -EDEADLK) { 272 if (err == -EDEADLK) {
273 err = 0;
273 ww_mutex_unlock(&cycle->a_mutex); 274 ww_mutex_unlock(&cycle->a_mutex);
274 ww_mutex_lock_slow(cycle->b_mutex, &ctx); 275 ww_mutex_lock_slow(cycle->b_mutex, &ctx);
275 err = ww_mutex_lock(&cycle->a_mutex, &ctx); 276 erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
276 } 277 }
277 278
278 if (!err) 279 if (!err)
279 ww_mutex_unlock(cycle->b_mutex); 280 ww_mutex_unlock(cycle->b_mutex);
280 ww_mutex_unlock(&cycle->a_mutex); 281 if (!erra)
282 ww_mutex_unlock(&cycle->a_mutex);
281 ww_acquire_fini(&ctx); 283 ww_acquire_fini(&ctx);
282 284
283 cycle->result = err; 285 cycle->result = err ?: erra;
284} 286}
285 287
286static int __test_cycle(unsigned int nthreads) 288static int __test_cycle(unsigned int nthreads)
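The test now records the status of each lock attempt separately and combines them with the GNU "?:" (elvis) extension, which the kernel is built with. A small sketch of that combining rule (assumes GCC or Clang):

    #include <stdio.h>

    /* err ?: erra yields err when nonzero, else erra, evaluating
     * err only once; equivalent to err ? err : erra. */
    static int combine(int err, int erra)
    {
        return err ?: erra;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               combine(0, 0),      /* 0: both locks were taken */
               combine(-35, 0),    /* -35: first attempt failed */
               combine(0, -35));   /* -35: relock after EDEADLK failed */
        return 0;
    }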
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 625bc9897f62..ad97f3ba5ec5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1167,7 +1167,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
1167 1167
1168 if (task_cpu(p) != new_cpu) { 1168 if (task_cpu(p) != new_cpu) {
1169 if (p->sched_class->migrate_task_rq) 1169 if (p->sched_class->migrate_task_rq)
1170 p->sched_class->migrate_task_rq(p); 1170 p->sched_class->migrate_task_rq(p, new_cpu);
1171 p->se.nr_migrations++; 1171 p->se.nr_migrations++;
1172 rseq_migrate(p); 1172 rseq_migrate(p);
1173 perf_event_task_migrate(p); 1173 perf_event_task_migrate(p);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 997ea7b839fa..91e4202b0634 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1607,7 +1607,7 @@ out:
1607 return cpu; 1607 return cpu;
1608} 1608}
1609 1609
1610static void migrate_task_rq_dl(struct task_struct *p) 1610static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1611{ 1611{
1612 struct rq *rq; 1612 struct rq *rq;
1613 1613
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f808ddf2a868..7fc4a371bdd2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1392,6 +1392,17 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1392 int last_cpupid, this_cpupid; 1392 int last_cpupid, this_cpupid;
1393 1393
1394 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid); 1394 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
1395 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
1396
1397 /*
1398 * Allow first faults or private faults to migrate immediately early in
1399 * the lifetime of a task. The magic number 4 is based on waiting for
1400 * two full passes of the "multi-stage node selection" test that is
1401 * executed below.
1402 */
1403 if ((p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4) &&
1404 (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
1405 return true;
1395 1406
1396 /* 1407 /*
1397 * Multi-stage node selection is used in conjunction with a periodic 1408 * Multi-stage node selection is used in conjunction with a periodic
@@ -1410,7 +1421,6 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1410 * This quadric squishes small probabilities, making it less likely we 1421 * This quadric squishes small probabilities, making it less likely we
1411 * act on an unlikely task<->page relation. 1422 * act on an unlikely task<->page relation.
1412 */ 1423 */
1413 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
1414 if (!cpupid_pid_unset(last_cpupid) && 1424 if (!cpupid_pid_unset(last_cpupid) &&
1415 cpupid_to_nid(last_cpupid) != dst_nid) 1425 cpupid_to_nid(last_cpupid) != dst_nid)
1416 return false; 1426 return false;
@@ -1514,6 +1524,21 @@ struct task_numa_env {
1514static void task_numa_assign(struct task_numa_env *env, 1524static void task_numa_assign(struct task_numa_env *env,
1515 struct task_struct *p, long imp) 1525 struct task_struct *p, long imp)
1516{ 1526{
1527 struct rq *rq = cpu_rq(env->dst_cpu);
1528
1529 /* Bail out if run-queue part of active NUMA balance. */
1530 if (xchg(&rq->numa_migrate_on, 1))
1531 return;
1532
1533 /*
1534 * Clear previous best_cpu/rq numa-migrate flag, since task now
1535 * found a better CPU to move/swap.
1536 */
1537 if (env->best_cpu != -1) {
1538 rq = cpu_rq(env->best_cpu);
1539 WRITE_ONCE(rq->numa_migrate_on, 0);
1540 }
1541
1517 if (env->best_task) 1542 if (env->best_task)
1518 put_task_struct(env->best_task); 1543 put_task_struct(env->best_task);
1519 if (p) 1544 if (p)
@@ -1553,6 +1578,13 @@ static bool load_too_imbalanced(long src_load, long dst_load,
1553} 1578}
1554 1579
1555/* 1580/*
1581 * Maximum NUMA importance can be 1998 (2*999);
1582 * SMALLIMP @ 30 would be close to 1998/64.
1583 * Used to deter task migration.
1584 */
1585#define SMALLIMP 30
1586
1587/*
1556 * This checks if the overall compute and NUMA accesses of the system would 1588 * This checks if the overall compute and NUMA accesses of the system would
1557 * be improved if the source tasks was migrated to the target dst_cpu taking 1589 * be improved if the source tasks was migrated to the target dst_cpu taking
1558 * into account that it might be best if task running on the dst_cpu should 1590 * into account that it might be best if task running on the dst_cpu should
@@ -1569,6 +1601,9 @@ static void task_numa_compare(struct task_numa_env *env,
1569 long moveimp = imp; 1601 long moveimp = imp;
1570 int dist = env->dist; 1602 int dist = env->dist;
1571 1603
1604 if (READ_ONCE(dst_rq->numa_migrate_on))
1605 return;
1606
1572 rcu_read_lock(); 1607 rcu_read_lock();
1573 cur = task_rcu_dereference(&dst_rq->curr); 1608 cur = task_rcu_dereference(&dst_rq->curr);
1574 if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur))) 1609 if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
@@ -1582,7 +1617,7 @@ static void task_numa_compare(struct task_numa_env *env,
1582 goto unlock; 1617 goto unlock;
1583 1618
1584 if (!cur) { 1619 if (!cur) {
1585 if (maymove || imp > env->best_imp) 1620 if (maymove && moveimp >= env->best_imp)
1586 goto assign; 1621 goto assign;
1587 else 1622 else
1588 goto unlock; 1623 goto unlock;
@@ -1625,16 +1660,22 @@ static void task_numa_compare(struct task_numa_env *env,
1625 task_weight(cur, env->dst_nid, dist); 1660 task_weight(cur, env->dst_nid, dist);
1626 } 1661 }
1627 1662
1628 if (imp <= env->best_imp)
1629 goto unlock;
1630
1631 if (maymove && moveimp > imp && moveimp > env->best_imp) { 1663 if (maymove && moveimp > imp && moveimp > env->best_imp) {
1632 imp = moveimp - 1; 1664 imp = moveimp;
1633 cur = NULL; 1665 cur = NULL;
1634 goto assign; 1666 goto assign;
1635 } 1667 }
1636 1668
1637 /* 1669 /*
1670 * If the NUMA importance is less than SMALLIMP,
1671 * task migration might only result in ping pong
1672 * of tasks and also hurt performance due to cache
1673 * misses.
1674 */
1675 if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
1676 goto unlock;
1677
1678 /*
1638 * In the overloaded case, try and keep the load balanced. 1679 * In the overloaded case, try and keep the load balanced.
1639 */ 1680 */
1640 load = task_h_load(env->p) - task_h_load(cur); 1681 load = task_h_load(env->p) - task_h_load(cur);
@@ -1710,6 +1751,7 @@ static int task_numa_migrate(struct task_struct *p)
1710 .best_cpu = -1, 1751 .best_cpu = -1,
1711 }; 1752 };
1712 struct sched_domain *sd; 1753 struct sched_domain *sd;
1754 struct rq *best_rq;
1713 unsigned long taskweight, groupweight; 1755 unsigned long taskweight, groupweight;
1714 int nid, ret, dist; 1756 int nid, ret, dist;
1715 long taskimp, groupimp; 1757 long taskimp, groupimp;
@@ -1805,20 +1847,17 @@ static int task_numa_migrate(struct task_struct *p)
1805 if (env.best_cpu == -1) 1847 if (env.best_cpu == -1)
1806 return -EAGAIN; 1848 return -EAGAIN;
1807 1849
1808 /* 1850 best_rq = cpu_rq(env.best_cpu);
1809 * Reset the scan period if the task is being rescheduled on an
1810 * alternative node to recheck if the tasks is now properly placed.
1811 */
1812 p->numa_scan_period = task_scan_start(p);
1813
1814 if (env.best_task == NULL) { 1851 if (env.best_task == NULL) {
1815 ret = migrate_task_to(p, env.best_cpu); 1852 ret = migrate_task_to(p, env.best_cpu);
1853 WRITE_ONCE(best_rq->numa_migrate_on, 0);
1816 if (ret != 0) 1854 if (ret != 0)
1817 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu); 1855 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
1818 return ret; 1856 return ret;
1819 } 1857 }
1820 1858
1821 ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu); 1859 ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
1860 WRITE_ONCE(best_rq->numa_migrate_on, 0);
1822 1861
1823 if (ret != 0) 1862 if (ret != 0)
1824 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task)); 1863 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
@@ -2596,6 +2635,39 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr)
2596 } 2635 }
2597} 2636}
2598 2637
2638static void update_scan_period(struct task_struct *p, int new_cpu)
2639{
2640 int src_nid = cpu_to_node(task_cpu(p));
2641 int dst_nid = cpu_to_node(new_cpu);
2642
2643 if (!static_branch_likely(&sched_numa_balancing))
2644 return;
2645
2646 if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
2647 return;
2648
2649 if (src_nid == dst_nid)
2650 return;
2651
2652 /*
2653 * Allow resets if faults have been trapped before one scan
2654 * has completed. This is most likely due to a new task that
2655 * is pulled cross-node due to wakeups or load balancing.
2656 */
2657 if (p->numa_scan_seq) {
2658 /*
2659 * Avoid scan adjustments if moving to the preferred
2660 * node or if the task was not previously running on
2661 * the preferred node.
2662 */
2663 if (dst_nid == p->numa_preferred_nid ||
2664 (p->numa_preferred_nid != -1 && src_nid != p->numa_preferred_nid))
2665 return;
2666 }
2667
2668 p->numa_scan_period = task_scan_start(p);
2669}
2670
2599#else 2671#else
2600static void task_tick_numa(struct rq *rq, struct task_struct *curr) 2672static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2601{ 2673{
@@ -2609,6 +2681,10 @@ static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2609{ 2681{
2610} 2682}
2611 2683
2684static inline void update_scan_period(struct task_struct *p, int new_cpu)
2685{
2686}
2687
2612#endif /* CONFIG_NUMA_BALANCING */ 2688#endif /* CONFIG_NUMA_BALANCING */
2613 2689
2614static void 2690static void
@@ -6275,7 +6351,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
6275 * cfs_rq_of(p) references at time of call are still valid and identify the 6351 * cfs_rq_of(p) references at time of call are still valid and identify the
6276 * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held. 6352 * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
6277 */ 6353 */
6278static void migrate_task_rq_fair(struct task_struct *p) 6354static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
6279{ 6355{
6280 /* 6356 /*
6281 * As blocked tasks retain absolute vruntime the migration needs to 6357 * As blocked tasks retain absolute vruntime the migration needs to
@@ -6328,6 +6404,8 @@ static void migrate_task_rq_fair(struct task_struct *p)
6328 6404
6329 /* We have migrated, no longer consider this task hot */ 6405 /* We have migrated, no longer consider this task hot */
6330 p->se.exec_start = 0; 6406 p->se.exec_start = 0;
6407
6408 update_scan_period(p, new_cpu);
6331} 6409}
6332 6410
6333static void task_dead_fair(struct task_struct *p) 6411static void task_dead_fair(struct task_struct *p)
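The fair.c changes serialize NUMA swaps with a per-runqueue numa_migrate_on flag, claimed with xchg() and cleared with WRITE_ONCE() once the migration attempt finishes. A C11-atomics sketch of the same claim/release pattern (userspace stand-ins, not the kernel primitives):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_uint migrate_on;   /* stands in for rq->numa_migrate_on */

    static bool try_claim(void)
    {
        /* Like: if (xchg(&rq->numa_migrate_on, 1)) return; */
        return atomic_exchange(&migrate_on, 1) == 0;
    }

    static void release(void)
    {
        /* Like: WRITE_ONCE(rq->numa_migrate_on, 0); */
        atomic_store(&migrate_on, 0);
    }

    int main(void)
    {
        printf("first claim:  %d\n", try_claim());   /* 1: claimed */
        printf("second claim: %d\n", try_claim());   /* 0: already busy */
        release();
        printf("third claim:  %d\n", try_claim());   /* 1: claimable again */
        return 0;
    }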
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4a2e8cae63c4..455fa330de04 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -783,6 +783,7 @@ struct rq {
783#ifdef CONFIG_NUMA_BALANCING 783#ifdef CONFIG_NUMA_BALANCING
784 unsigned int nr_numa_running; 784 unsigned int nr_numa_running;
785 unsigned int nr_preferred_running; 785 unsigned int nr_preferred_running;
786 unsigned int numa_migrate_on;
786#endif 787#endif
787 #define CPU_LOAD_IDX_MAX 5 788 #define CPU_LOAD_IDX_MAX 5
788 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; 789 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
@@ -1523,7 +1524,7 @@ struct sched_class {
1523 1524
1524#ifdef CONFIG_SMP 1525#ifdef CONFIG_SMP
1525 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags); 1526 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
1526 void (*migrate_task_rq)(struct task_struct *p); 1527 void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
1527 1528
1528 void (*task_woken)(struct rq *this_rq, struct task_struct *task); 1529 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
1529 1530
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
index 25a5d87e2e4c..912aae5fa09e 100644
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c
@@ -15,7 +15,6 @@
15 * but they are bigger and use more memory for the lookup table. 15 * but they are bigger and use more memory for the lookup table.
16 */ 16 */
17 17
18#include <linux/crc32poly.h>
19#include "xz_private.h" 18#include "xz_private.h"
20 19
21/* 20/*
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
index 482b90f363fe..09360ebb510e 100644
--- a/lib/xz/xz_private.h
+++ b/lib/xz/xz_private.h
@@ -102,6 +102,10 @@
102# endif 102# endif
103#endif 103#endif
104 104
105#ifndef CRC32_POLY_LE
106#define CRC32_POLY_LE 0xedb88320
107#endif
108
105/* 109/*
106 * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used 110 * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
107 * before calling xz_dec_lzma2_run(). 111 * before calling xz_dec_lzma2_run().
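The xz fallback defines the little-endian (bit-reflected) CRC-32 polynomial for builds where <linux/crc32poly.h> is unavailable. A self-contained, table-less reference of the computation that polynomial drives (slower than the kernel's table-driven version, shown only to pin down the math):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CRC32_POLY_LE 0xedb88320

    static uint32_t crc32_le(uint32_t crc, const uint8_t *buf, size_t len)
    {
        crc = ~crc;
        while (len--) {
            crc ^= *buf++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (CRC32_POLY_LE & -(crc & 1));
        }
        return ~crc;
    }

    int main(void)
    {
        const char *s = "123456789";
        /* The standard CRC-32 check value for "123456789". */
        printf("%08x (expect cbf43926)\n",
               crc32_le(0, (const uint8_t *)s, strlen(s)));
        return 0;
    }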
diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
index 6a473709e9b6..7405c9d89d65 100644
--- a/mm/gup_benchmark.c
+++ b/mm/gup_benchmark.c
@@ -19,7 +19,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
19 struct gup_benchmark *gup) 19 struct gup_benchmark *gup)
20{ 20{
21 ktime_t start_time, end_time; 21 ktime_t start_time, end_time;
22 unsigned long i, nr, nr_pages, addr, next; 22 unsigned long i, nr_pages, addr, next;
23 int nr;
23 struct page **pages; 24 struct page **pages;
24 25
25 nr_pages = gup->size / PAGE_SIZE; 26 nr_pages = gup->size / PAGE_SIZE;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 533f9b00147d..00704060b7f7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2931,7 +2931,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
2931 else 2931 else
2932 page_add_file_rmap(new, true); 2932 page_add_file_rmap(new, true);
2933 set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); 2933 set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
2934 if (vma->vm_flags & VM_LOCKED) 2934 if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
2935 mlock_vma_page(new); 2935 mlock_vma_page(new);
2936 update_mmu_cache_pmd(vma, address, pvmw->pmd); 2936 update_mmu_cache_pmd(vma, address, pvmw->pmd);
2937} 2937}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3c21775f196b..5c390f5a5207 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3326,8 +3326,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3326 struct page *page; 3326 struct page *page;
3327 struct hstate *h = hstate_vma(vma); 3327 struct hstate *h = hstate_vma(vma);
3328 unsigned long sz = huge_page_size(h); 3328 unsigned long sz = huge_page_size(h);
3329 const unsigned long mmun_start = start; /* For mmu_notifiers */ 3329 unsigned long mmun_start = start; /* For mmu_notifiers */
3330 const unsigned long mmun_end = end; /* For mmu_notifiers */ 3330 unsigned long mmun_end = end; /* For mmu_notifiers */
3331 3331
3332 WARN_ON(!is_vm_hugetlb_page(vma)); 3332 WARN_ON(!is_vm_hugetlb_page(vma));
3333 BUG_ON(start & ~huge_page_mask(h)); 3333 BUG_ON(start & ~huge_page_mask(h));
@@ -3339,6 +3339,11 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3339 */ 3339 */
3340 tlb_remove_check_page_size_change(tlb, sz); 3340 tlb_remove_check_page_size_change(tlb, sz);
3341 tlb_start_vma(tlb, vma); 3341 tlb_start_vma(tlb, vma);
3342
3343 /*
3344 * If sharing possible, alert mmu notifiers of worst case.
3345 */
3346 adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
3342 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); 3347 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3343 address = start; 3348 address = start;
3344 for (; address < end; address += sz) { 3349 for (; address < end; address += sz) {
@@ -3349,6 +3354,10 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3349 ptl = huge_pte_lock(h, mm, ptep); 3354 ptl = huge_pte_lock(h, mm, ptep);
3350 if (huge_pmd_unshare(mm, &address, ptep)) { 3355 if (huge_pmd_unshare(mm, &address, ptep)) {
3351 spin_unlock(ptl); 3356 spin_unlock(ptl);
3357 /*
3358 * We just unmapped a page of PMDs by clearing a PUD.
3359 * The caller's TLB flush range should cover this area.
3360 */
3352 continue; 3361 continue;
3353 } 3362 }
3354 3363
@@ -3431,12 +3440,23 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3431{ 3440{
3432 struct mm_struct *mm; 3441 struct mm_struct *mm;
3433 struct mmu_gather tlb; 3442 struct mmu_gather tlb;
3443 unsigned long tlb_start = start;
3444 unsigned long tlb_end = end;
3445
3446 /*
3447 * If shared PMDs were possibly used within this vma range, adjust
3448 * start/end for worst case tlb flushing.
3449 * Note that we cannot be sure if PMDs are shared until we try to
3450 * unmap pages. However, we want to make sure TLB flushing covers
3451 * the largest possible range.
3452 */
3453 adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
3434 3454
3435 mm = vma->vm_mm; 3455 mm = vma->vm_mm;
3436 3456
3437 tlb_gather_mmu(&tlb, mm, start, end); 3457 tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
3438 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); 3458 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3439 tlb_finish_mmu(&tlb, start, end); 3459 tlb_finish_mmu(&tlb, tlb_start, tlb_end);
3440} 3460}
3441 3461
3442/* 3462/*
@@ -4298,11 +4318,21 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4298 pte_t pte; 4318 pte_t pte;
4299 struct hstate *h = hstate_vma(vma); 4319 struct hstate *h = hstate_vma(vma);
4300 unsigned long pages = 0; 4320 unsigned long pages = 0;
4321 unsigned long f_start = start;
4322 unsigned long f_end = end;
4323 bool shared_pmd = false;
4324
4325 /*
4326 * In the case of shared PMDs, the area to flush could be beyond
4327 * start/end. Set f_start/f_end to cover the maximum possible
4328 * range if PMD sharing is possible.
4329 */
4330 adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);
4301 4331
4302 BUG_ON(address >= end); 4332 BUG_ON(address >= end);
4303 flush_cache_range(vma, address, end); 4333 flush_cache_range(vma, f_start, f_end);
4304 4334
4305 mmu_notifier_invalidate_range_start(mm, start, end); 4335 mmu_notifier_invalidate_range_start(mm, f_start, f_end);
4306 i_mmap_lock_write(vma->vm_file->f_mapping); 4336 i_mmap_lock_write(vma->vm_file->f_mapping);
4307 for (; address < end; address += huge_page_size(h)) { 4337 for (; address < end; address += huge_page_size(h)) {
4308 spinlock_t *ptl; 4338 spinlock_t *ptl;
@@ -4313,6 +4343,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4313 if (huge_pmd_unshare(mm, &address, ptep)) { 4343 if (huge_pmd_unshare(mm, &address, ptep)) {
4314 pages++; 4344 pages++;
4315 spin_unlock(ptl); 4345 spin_unlock(ptl);
4346 shared_pmd = true;
4316 continue; 4347 continue;
4317 } 4348 }
4318 pte = huge_ptep_get(ptep); 4349 pte = huge_ptep_get(ptep);
@@ -4348,9 +4379,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4348 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare 4379 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4349 * may have cleared our pud entry and done put_page on the page table: 4380 * may have cleared our pud entry and done put_page on the page table:
4350 * once we release i_mmap_rwsem, another task can do the final put_page 4381 * once we release i_mmap_rwsem, another task can do the final put_page
4351 * and that page table be reused and filled with junk. 4382 * and that page table be reused and filled with junk. If we actually
4383 * did unshare a page of pmds, flush the range corresponding to the pud.
4352 */ 4384 */
4353 flush_hugetlb_tlb_range(vma, start, end); 4385 if (shared_pmd)
4386 flush_hugetlb_tlb_range(vma, f_start, f_end);
4387 else
4388 flush_hugetlb_tlb_range(vma, start, end);
4354 /* 4389 /*
4355 * No need to call mmu_notifier_invalidate_range() we are downgrading 4390 * No need to call mmu_notifier_invalidate_range() we are downgrading
4356 * page table protection not changing it to point to a new page. 4391 * page table protection not changing it to point to a new page.
@@ -4358,7 +4393,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4358 * See Documentation/vm/mmu_notifier.rst 4393 * See Documentation/vm/mmu_notifier.rst
4359 */ 4394 */
4360 i_mmap_unlock_write(vma->vm_file->f_mapping); 4395 i_mmap_unlock_write(vma->vm_file->f_mapping);
4361 mmu_notifier_invalidate_range_end(mm, start, end); 4396 mmu_notifier_invalidate_range_end(mm, f_start, f_end);
4362 4397
4363 return pages << h->order; 4398 return pages << h->order;
4364} 4399}
@@ -4545,13 +4580,41 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4545 /* 4580 /*
4546 * check on proper vm_flags and page table alignment 4581 * check on proper vm_flags and page table alignment
4547 */ 4582 */
4548 if (vma->vm_flags & VM_MAYSHARE && 4583 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
4549 vma->vm_start <= base && end <= vma->vm_end)
4550 return true; 4584 return true;
4551 return false; 4585 return false;
4552} 4586}
4553 4587
4554/* 4588/*
4589 * Determine if start,end range within vma could be mapped by shared pmd.
4590 * If yes, adjust start and end to cover range associated with possible
4591 * shared pmd mappings.
4592 */
4593void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4594 unsigned long *start, unsigned long *end)
4595{
4596 unsigned long check_addr = *start;
4597
4598 if (!(vma->vm_flags & VM_MAYSHARE))
4599 return;
4600
4601 for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
4602 unsigned long a_start = check_addr & PUD_MASK;
4603 unsigned long a_end = a_start + PUD_SIZE;
4604
4605 /*
4606 * If sharing is possible, adjust start/end if necessary.
4607 */
4608 if (range_in_vma(vma, a_start, a_end)) {
4609 if (a_start < *start)
4610 *start = a_start;
4611 if (a_end > *end)
4612 *end = a_end;
4613 }
4614 }
4615}
4616
4617/*
4555 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() 4618 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4556 * and returns the corresponding pte. While this is not necessary for the 4619 * and returns the corresponding pte. While this is not necessary for the
4557 * !shared pmd case because we can allocate the pmd later as well, it makes the 4620 * !shared pmd case because we can allocate the pmd later as well, it makes the
@@ -4648,6 +4711,11 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4648{ 4711{
4649 return 0; 4712 return 0;
4650} 4713}
4714
4715void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4716 unsigned long *start, unsigned long *end)
4717{
4718}
4651#define want_pmd_share() (0) 4719#define want_pmd_share() (0)
4652#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 4720#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4653 4721
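Several hugetlb hunks above widen a flush/invalidate range to full PUD-aligned regions because huge_pmd_unshare() can tear down an entire PMD page shared across processes. A simplified userspace sketch of that widening (x86-64 constants assumed; the kernel version additionally checks that each aligned region lies within the VMA):

    #include <stdio.h>

    #define PUD_SHIFT 30                      /* 1 GiB regions on x86-64 */
    #define PUD_SIZE  (1UL << PUD_SHIFT)
    #define PUD_MASK  (~(PUD_SIZE - 1))

    static void adjust_range(unsigned long *start, unsigned long *end)
    {
        unsigned long a_start = *start & PUD_MASK;              /* round down */
        unsigned long a_end = (*end + PUD_SIZE - 1) & PUD_MASK; /* round up */

        if (a_start < *start)
            *start = a_start;
        if (a_end > *end)
            *end = a_end;
    }

    int main(void)
    {
        unsigned long s = 0x40200000UL, e = 0x40600000UL;
        adjust_range(&s, &e);
        printf("%#lx-%#lx\n", s, e);   /* 0x40000000-0x80000000 */
        return 0;
    }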
diff --git a/mm/madvise.c b/mm/madvise.c
index 972a9eaa898b..71d21df2a3f3 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -96,7 +96,7 @@ static long madvise_behavior(struct vm_area_struct *vma,
96 new_flags |= VM_DONTDUMP; 96 new_flags |= VM_DONTDUMP;
97 break; 97 break;
98 case MADV_DODUMP: 98 case MADV_DODUMP:
99 if (new_flags & VM_SPECIAL) { 99 if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
100 error = -EINVAL; 100 error = -EINVAL;
101 goto out; 101 goto out;
102 } 102 }
diff --git a/mm/migrate.c b/mm/migrate.c
index d6a2e89b086a..84381b55b2bd 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -275,6 +275,9 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
275 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new)) 275 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
276 mlock_vma_page(new); 276 mlock_vma_page(new);
277 277
278 if (PageTransHuge(page) && PageMlocked(page))
279 clear_page_mlock(page);
280
278 /* No need to invalidate - it was non-present before */ 281 /* No need to invalidate - it was non-present before */
279 update_mmu_cache(vma, pvmw.address, pvmw.pte); 282 update_mmu_cache(vma, pvmw.address, pvmw.pte);
280 } 283 }
@@ -1411,7 +1414,7 @@ retry:
1411 * we encounter them after the rest of the list 1414 * we encounter them after the rest of the list
1412 * is processed. 1415 * is processed.
1413 */ 1416 */
1414 if (PageTransHuge(page)) { 1417 if (PageTransHuge(page) && !PageHuge(page)) {
1415 lock_page(page); 1418 lock_page(page);
1416 rc = split_huge_page_to_list(page, from); 1419 rc = split_huge_page_to_list(page, from);
1417 unlock_page(page); 1420 unlock_page(page);
@@ -1855,46 +1858,6 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
1855 return newpage; 1858 return newpage;
1856} 1859}
1857 1860
1858/*
1859 * page migration rate limiting control.
1860 * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
1861 * window of time. Default here says do not migrate more than 1280M per second.
1862 */
1863static unsigned int migrate_interval_millisecs __read_mostly = 100;
1864static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
1865
1866/* Returns true if the node is migrate rate-limited after the update */
1867static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
1868 unsigned long nr_pages)
1869{
1870 /*
1871 * Rate-limit the amount of data that is being migrated to a node.
1872 * Optimal placement is no good if the memory bus is saturated and
1873 * all the time is being spent migrating!
1874 */
1875 if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
1876 spin_lock(&pgdat->numabalancing_migrate_lock);
1877 pgdat->numabalancing_migrate_nr_pages = 0;
1878 pgdat->numabalancing_migrate_next_window = jiffies +
1879 msecs_to_jiffies(migrate_interval_millisecs);
1880 spin_unlock(&pgdat->numabalancing_migrate_lock);
1881 }
1882 if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
1883 trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
1884 nr_pages);
1885 return true;
1886 }
1887
1888 /*
1889 * This is an unlocked non-atomic update so errors are possible.
1890 * The consequences are failing to migrate when we potentiall should
1891 * have which is not severe enough to warrant locking. If it is ever
1892 * a problem, it can be converted to a per-cpu counter.
1893 */
1894 pgdat->numabalancing_migrate_nr_pages += nr_pages;
1895 return false;
1896}
1897
1898static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) 1861static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
1899{ 1862{
1900 int page_lru; 1863 int page_lru;
@@ -1967,14 +1930,6 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1967 if (page_is_file_cache(page) && PageDirty(page)) 1930 if (page_is_file_cache(page) && PageDirty(page))
1968 goto out; 1931 goto out;
1969 1932
1970 /*
1971 * Rate-limit the amount of data that is being migrated to a node.
1972 * Optimal placement is no good if the memory bus is saturated and
1973 * all the time is being spent migrating!
1974 */
1975 if (numamigrate_update_ratelimit(pgdat, 1))
1976 goto out;
1977
1978 isolated = numamigrate_isolate_page(pgdat, page); 1933 isolated = numamigrate_isolate_page(pgdat, page);
1979 if (!isolated) 1934 if (!isolated)
1980 goto out; 1935 goto out;
@@ -2021,14 +1976,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
2021 unsigned long mmun_start = address & HPAGE_PMD_MASK; 1976 unsigned long mmun_start = address & HPAGE_PMD_MASK;
2022 unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE; 1977 unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
2023 1978
2024 /*
2025 * Rate-limit the amount of data that is being migrated to a node.
2026 * Optimal placement is no good if the memory bus is saturated and
2027 * all the time is being spent migrating!
2028 */
2029 if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
2030 goto out_dropref;
2031
2032 new_page = alloc_pages_node(node, 1979 new_page = alloc_pages_node(node,
2033 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE), 1980 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
2034 HPAGE_PMD_ORDER); 1981 HPAGE_PMD_ORDER);
@@ -2125,7 +2072,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
2125 2072
2126out_fail: 2073out_fail:
2127 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR); 2074 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
2128out_dropref:
2129 ptl = pmd_lock(mm, pmd); 2075 ptl = pmd_lock(mm, pmd);
2130 if (pmd_same(*pmd, entry)) { 2076 if (pmd_same(*pmd, entry)) {
2131 entry = pmd_modify(entry, vma->vm_page_prot); 2077 entry = pmd_modify(entry, vma->vm_page_prot);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 89d2a2ab3fe6..706a738c0aee 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6197,8 +6197,6 @@ static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
6197static void pgdat_init_numabalancing(struct pglist_data *pgdat) 6197static void pgdat_init_numabalancing(struct pglist_data *pgdat)
6198{ 6198{
6199 spin_lock_init(&pgdat->numabalancing_migrate_lock); 6199 spin_lock_init(&pgdat->numabalancing_migrate_lock);
6200 pgdat->numabalancing_migrate_nr_pages = 0;
6201 pgdat->numabalancing_migrate_next_window = jiffies;
6202} 6200}
6203#else 6201#else
6204static void pgdat_init_numabalancing(struct pglist_data *pgdat) {} 6202static void pgdat_init_numabalancing(struct pglist_data *pgdat) {}
diff --git a/mm/rmap.c b/mm/rmap.c
index eb477809a5c0..1e79fac3186b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1362,11 +1362,21 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1362 } 1362 }
1363 1363
1364 /* 1364 /*
1365 * We have to assume the worse case ie pmd for invalidation. Note that 1365 * For THP, we have to assume the worst case, i.e., pmd for invalidation.
1366 * the page can not be free in this function as call of try_to_unmap() 1366 * For hugetlb, it could be much worse if we need to do pud
1367 * must hold a reference on the page. 1367 * invalidation in the case of pmd sharing.
1368 *
1369 * Note that the page cannot be freed in this function, as the call of
1370 * try_to_unmap() must hold a reference on the page.
1368 */ 1371 */
1369 end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page))); 1372 end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
1373 if (PageHuge(page)) {
1374 /*
1375 * If sharing is possible, start and end will be adjusted
1376 * accordingly.
1377 */
1378 adjust_range_if_pmd_sharing_possible(vma, &start, &end);
1379 }
1370 mmu_notifier_invalidate_range_start(vma->vm_mm, start, end); 1380 mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
1371 1381
1372 while (page_vma_mapped_walk(&pvmw)) { 1382 while (page_vma_mapped_walk(&pvmw)) {
@@ -1409,6 +1419,32 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1409 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); 1419 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
1410 address = pvmw.address; 1420 address = pvmw.address;
1411 1421
1422 if (PageHuge(page)) {
1423 if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
1424 /*
1425 * huge_pmd_unshare unmapped an entire PMD
1426 * page. There is no way of knowing exactly
1427 * which PMDs may be cached for this mm, so
1428 * we must flush them all. start/end were
1429 * already adjusted above to cover this range.
1430 */
1431 flush_cache_range(vma, start, end);
1432 flush_tlb_range(vma, start, end);
1433 mmu_notifier_invalidate_range(mm, start, end);
1434
1435 /*
1436 * The ref count of the PMD page was dropped
1437 * which is part of the way map counting
1438 * is done for shared PMDs. Return 'true'
1439 * here. When there is no other sharing,
1440 * huge_pmd_unshare returns false and we will
1441 * unmap the actual page and drop map count
1442 * to zero.
1443 */
1444 page_vma_mapped_walk_done(&pvmw);
1445 break;
1446 }
1447 }
1412 1448
1413 if (IS_ENABLED(CONFIG_MIGRATION) && 1449 if (IS_ENABLED(CONFIG_MIGRATION) &&
1414 (flags & TTU_MIGRATION) && 1450 (flags & TTU_MIGRATION) &&
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c7ce2c161225..c5ef7240cbcb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -580,8 +580,8 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
580 struct mem_cgroup *memcg, int priority) 580 struct mem_cgroup *memcg, int priority)
581{ 581{
582 struct memcg_shrinker_map *map; 582 struct memcg_shrinker_map *map;
583 unsigned long freed = 0; 583 unsigned long ret, freed = 0;
584 int ret, i; 584 int i;
585 585
586 if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg)) 586 if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
587 return 0; 587 return 0;
@@ -677,9 +677,8 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
677 struct mem_cgroup *memcg, 677 struct mem_cgroup *memcg,
678 int priority) 678 int priority)
679{ 679{
680 unsigned long ret, freed = 0;
680 struct shrinker *shrinker; 681 struct shrinker *shrinker;
681 unsigned long freed = 0;
682 int ret;
683 682
684 if (!mem_cgroup_is_root(memcg)) 683 if (!mem_cgroup_is_root(memcg))
685 return shrink_slab_memcg(gfp_mask, nid, memcg, priority); 684 return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8ba0870ecddd..7878da76abf2 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1275,6 +1275,9 @@ const char * const vmstat_text[] = {
1275#ifdef CONFIG_SMP 1275#ifdef CONFIG_SMP
1276 "nr_tlb_remote_flush", 1276 "nr_tlb_remote_flush",
1277 "nr_tlb_remote_flush_received", 1277 "nr_tlb_remote_flush_received",
1278#else
1279 "", /* nr_tlb_remote_flush */
1280 "", /* nr_tlb_remote_flush_received */
1278#endif /* CONFIG_SMP */ 1281#endif /* CONFIG_SMP */
1279 "nr_tlb_local_flush_all", 1282 "nr_tlb_local_flush_all",
1280 "nr_tlb_local_flush_one", 1283 "nr_tlb_local_flush_one",
@@ -1283,7 +1286,6 @@ const char * const vmstat_text[] = {
1283#ifdef CONFIG_DEBUG_VM_VMACACHE 1286#ifdef CONFIG_DEBUG_VM_VMACACHE
1284 "vmacache_find_calls", 1287 "vmacache_find_calls",
1285 "vmacache_find_hits", 1288 "vmacache_find_hits",
1286 "vmacache_full_flushes",
1287#endif 1289#endif
1288#ifdef CONFIG_SWAP 1290#ifdef CONFIG_SWAP
1289 "swap_ra", 1291 "swap_ra",
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 3bdc8f3ca259..ccce954f8146 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2434,9 +2434,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2434 /* LE address type */ 2434 /* LE address type */
2435 addr_type = le_addr_type(cp->addr.type); 2435 addr_type = le_addr_type(cp->addr.type);
2436 2436
2437 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type); 2437 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2438 2438 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2439 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2440 if (err < 0) { 2439 if (err < 0) {
2441 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 2440 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2442 MGMT_STATUS_NOT_PAIRED, &rp, 2441 MGMT_STATUS_NOT_PAIRED, &rp,
@@ -2450,8 +2449,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2450 goto done; 2449 goto done;
2451 } 2450 }
2452 2451
2453 /* Abort any ongoing SMP pairing */
2454 smp_cancel_pairing(conn);
2455 2452
2456 /* Defer clearing up the connection parameters until closing to 2453 /* Defer clearing up the connection parameters until closing to
2457 * give a chance of keeping them if a repairing happens. 2454 * give a chance of keeping them if a repairing happens.
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 3a7b0773536b..73f7211d0431 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2422,30 +2422,51 @@ unlock:
2422 return ret; 2422 return ret;
2423} 2423}
2424 2424
2425void smp_cancel_pairing(struct hci_conn *hcon) 2425int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
2426 u8 addr_type)
2426{ 2427{
2427 struct l2cap_conn *conn = hcon->l2cap_data; 2428 struct hci_conn *hcon;
2429 struct l2cap_conn *conn;
2428 struct l2cap_chan *chan; 2430 struct l2cap_chan *chan;
2429 struct smp_chan *smp; 2431 struct smp_chan *smp;
2432 int err;
2433
2434 err = hci_remove_ltk(hdev, bdaddr, addr_type);
2435 hci_remove_irk(hdev, bdaddr, addr_type);
2436
2437 hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type);
2438 if (!hcon)
2439 goto done;
2430 2440
2441 conn = hcon->l2cap_data;
2431 if (!conn) 2442 if (!conn)
2432 return; 2443 goto done;
2433 2444
2434 chan = conn->smp; 2445 chan = conn->smp;
2435 if (!chan) 2446 if (!chan)
2436 return; 2447 goto done;
2437 2448
2438 l2cap_chan_lock(chan); 2449 l2cap_chan_lock(chan);
2439 2450
2440 smp = chan->data; 2451 smp = chan->data;
2441 if (smp) { 2452 if (smp) {
2453 /* Set keys to NULL to make sure smp_failure() does not try to
2454 * remove and free already invalidated rcu list entries. */
2455 smp->ltk = NULL;
2456 smp->slave_ltk = NULL;
2457 smp->remote_irk = NULL;
2458
2442 if (test_bit(SMP_FLAG_COMPLETE, &smp->flags)) 2459 if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
2443 smp_failure(conn, 0); 2460 smp_failure(conn, 0);
2444 else 2461 else
2445 smp_failure(conn, SMP_UNSPECIFIED); 2462 smp_failure(conn, SMP_UNSPECIFIED);
2463 err = 0;
2446 } 2464 }
2447 2465
2448 l2cap_chan_unlock(chan); 2466 l2cap_chan_unlock(chan);
2467
2468done:
2469 return err;
2449} 2470}
2450 2471
2451static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb) 2472static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 0ff6247eaa6c..121edadd5f8d 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -181,7 +181,8 @@ enum smp_key_pref {
181}; 181};
182 182
183/* SMP Commands */ 183/* SMP Commands */
184void smp_cancel_pairing(struct hci_conn *hcon); 184int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
185 u8 addr_type);
185bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level, 186bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
186 enum smp_key_pref key_pref); 187 enum smp_key_pref key_pref);
187int smp_conn_security(struct hci_conn *hcon, __u8 sec_level); 188int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c
index f0fc182d3db7..b64e1649993b 100644
--- a/net/bpfilter/bpfilter_kern.c
+++ b/net/bpfilter/bpfilter_kern.c
@@ -59,7 +59,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
59 req.is_set = is_set; 59 req.is_set = is_set;
60 req.pid = current->pid; 60 req.pid = current->pid;
61 req.cmd = optname; 61 req.cmd = optname;
62 req.addr = (long)optval; 62 req.addr = (long __force __user)optval;
63 req.len = optlen; 63 req.len = optlen;
64 mutex_lock(&bpfilter_lock); 64 mutex_lock(&bpfilter_lock);
65 if (!info.pid) 65 if (!info.pid)
@@ -98,7 +98,7 @@ static int __init load_umh(void)
98 pr_info("Loaded bpfilter_umh pid %d\n", info.pid); 98 pr_info("Loaded bpfilter_umh pid %d\n", info.pid);
99 99
100 /* health check that usermode process started correctly */ 100 /* health check that usermode process started correctly */
101 if (__bpfilter_process_sockopt(NULL, 0, 0, 0, 0) != 0) { 101 if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) {
102 stop_umh(); 102 stop_umh();
103 return -EFAULT; 103 return -EFAULT;
104 } 104 }
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 6e0dc6bcd32a..37278dc280eb 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -835,7 +835,8 @@ static unsigned int ip_sabotage_in(void *priv,
835 struct sk_buff *skb, 835 struct sk_buff *skb,
836 const struct nf_hook_state *state) 836 const struct nf_hook_state *state)
837{ 837{
838 if (skb->nf_bridge && !skb->nf_bridge->in_prerouting) { 838 if (skb->nf_bridge && !skb->nf_bridge->in_prerouting &&
839 !netif_is_l3_master(skb->dev)) {
839 state->okfn(state->net, state->sk, skb); 840 state->okfn(state->net, state->sk, skb);
840 return NF_STOLEN; 841 return NF_STOLEN;
841 } 842 }
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 234a0ec2e932..0762aaf8e964 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1483,6 +1483,7 @@ static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
1483static int ethtool_set_wol(struct net_device *dev, char __user *useraddr) 1483static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
1484{ 1484{
1485 struct ethtool_wolinfo wol; 1485 struct ethtool_wolinfo wol;
1486 int ret;
1486 1487
1487 if (!dev->ethtool_ops->set_wol) 1488 if (!dev->ethtool_ops->set_wol)
1488 return -EOPNOTSUPP; 1489 return -EOPNOTSUPP;
@@ -1490,7 +1491,13 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
1490 if (copy_from_user(&wol, useraddr, sizeof(wol))) 1491 if (copy_from_user(&wol, useraddr, sizeof(wol)))
1491 return -EFAULT; 1492 return -EFAULT;
1492 1493
1493 return dev->ethtool_ops->set_wol(dev, &wol); 1494 ret = dev->ethtool_ops->set_wol(dev, &wol);
1495 if (ret)
1496 return ret;
1497
1498 dev->wol_enabled = !!wol.wolopts;
1499
1500 return 0;
1494} 1501}
1495 1502
1496static int ethtool_get_eee(struct net_device *dev, char __user *useraddr) 1503static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 3219a2932463..de1d1ba92f2d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -135,27 +135,9 @@ static void queue_process(struct work_struct *work)
135 } 135 }
136} 136}
137 137
138/*
139 * Check whether delayed processing was scheduled for our NIC. If so,
140 * we attempt to grab the poll lock and use ->poll() to pump the card.
141 * If this fails, either we've recursed in ->poll() or it's already
142 * running on another CPU.
143 *
144 * Note: we don't mask interrupts with this lock because we're using
145 * trylock here and interrupts are already disabled in the softirq
146 * case. Further, we test the poll_owner to avoid recursion on UP
147 * systems where the lock doesn't exist.
148 */
149static void poll_one_napi(struct napi_struct *napi) 138static void poll_one_napi(struct napi_struct *napi)
150{ 139{
151 int work = 0; 140 int work;
152
153 /* net_rx_action's ->poll() invocations and our's are
154 * synchronized by this test which is only made while
155 * holding the napi->poll_lock.
156 */
157 if (!test_bit(NAPI_STATE_SCHED, &napi->state))
158 return;
159 141
160 /* If we set this bit but see that it has already been set, 142 /* If we set this bit but see that it has already been set,
161 * that indicates that napi has been disabled and we need 143 * that indicates that napi has been disabled and we need
@@ -330,6 +312,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
330 /* It is up to the caller to keep npinfo alive. */ 312 /* It is up to the caller to keep npinfo alive. */
331 struct netpoll_info *npinfo; 313 struct netpoll_info *npinfo;
332 314
315 rcu_read_lock_bh();
333 lockdep_assert_irqs_disabled(); 316 lockdep_assert_irqs_disabled();
334 317
335 npinfo = rcu_dereference_bh(np->dev->npinfo); 318 npinfo = rcu_dereference_bh(np->dev->npinfo);
@@ -374,6 +357,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
374 skb_queue_tail(&npinfo->txq, skb); 357 skb_queue_tail(&npinfo->txq, skb);
375 schedule_delayed_work(&npinfo->tx_work,0); 358 schedule_delayed_work(&npinfo->tx_work,0);
376 } 359 }
360 rcu_read_unlock_bh();
377} 361}
378EXPORT_SYMBOL(netpoll_send_skb_on_dev); 362EXPORT_SYMBOL(netpoll_send_skb_on_dev);
379 363
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 63ce2283a456..37c7936124e6 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1898,10 +1898,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1898 if (tb[IFLA_IF_NETNSID]) { 1898 if (tb[IFLA_IF_NETNSID]) {
1899 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]); 1899 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
1900 tgt_net = get_target_net(skb->sk, netnsid); 1900 tgt_net = get_target_net(skb->sk, netnsid);
1901 if (IS_ERR(tgt_net)) { 1901 if (IS_ERR(tgt_net))
1902 tgt_net = net; 1902 return PTR_ERR(tgt_net);
1903 netnsid = -1;
1904 }
1905 } 1903 }
1906 1904
1907 if (tb[IFLA_EXT_MASK]) 1905 if (tb[IFLA_EXT_MASK])
@@ -2837,6 +2835,12 @@ struct net_device *rtnl_create_link(struct net *net,
2837 else if (ops->get_num_rx_queues) 2835 else if (ops->get_num_rx_queues)
2838 num_rx_queues = ops->get_num_rx_queues(); 2836 num_rx_queues = ops->get_num_rx_queues();
2839 2837
2838 if (num_tx_queues < 1 || num_tx_queues > 4096)
2839 return ERR_PTR(-EINVAL);
2840
2841 if (num_rx_queues < 1 || num_rx_queues > 4096)
2842 return ERR_PTR(-EINVAL);
2843
2840 dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type, 2844 dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
2841 ops->setup, num_tx_queues, num_rx_queues); 2845 ops->setup, num_tx_queues, num_rx_queues);
2842 if (!dev) 2846 if (!dev)
@@ -3744,16 +3748,27 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
3744 int err = 0; 3748 int err = 0;
3745 int fidx = 0; 3749 int fidx = 0;
3746 3750
3747 err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, 3751 /* A hack to preserve kernel<->userspace interface.
3748 IFLA_MAX, ifla_policy, NULL); 3752 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
3749 if (err < 0) { 3753 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
3750 return -EINVAL; 3754 * So, check for ndmsg with an optional u32 attribute (not used here).
3751 } else if (err == 0) { 3755 * Fortunately these sizes don't conflict with the size of ifinfomsg
3752 if (tb[IFLA_MASTER]) 3756 * with an optional attribute.
3753 br_idx = nla_get_u32(tb[IFLA_MASTER]); 3757 */
3754 } 3758 if (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) &&
3759 (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) +
3760 nla_attr_size(sizeof(u32)))) {
3761 err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
3762 IFLA_MAX, ifla_policy, NULL);
3763 if (err < 0) {
3764 return -EINVAL;
3765 } else if (err == 0) {
3766 if (tb[IFLA_MASTER])
3767 br_idx = nla_get_u32(tb[IFLA_MASTER]);
3768 }
3755 3769
3756 brport_idx = ifm->ifi_index; 3770 brport_idx = ifm->ifi_index;
3771 }
3757 3772
3758 if (br_idx) { 3773 if (br_idx) {
3759 br_dev = __dev_get_by_index(net, br_idx); 3774 br_dev = __dev_get_by_index(net, br_idx);
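The rtnl_fdb_dump() hunk distinguishes the legacy iproute2 request (an ndmsg header, optionally followed by one u32 attribute) from the ifinfomsg format purely by payload length. A sketch of why those lengths don't collide, using local stand-ins for the uapi structs (real layouts live in <linux/neighbour.h> and <linux/rtnetlink.h>):

    #include <stdint.h>
    #include <stdio.h>

    struct ndmsg_x {                 /* 12 bytes */
        uint8_t ndm_family, pad1; uint16_t pad2;
        int32_t ndm_ifindex; uint16_t ndm_state;
        uint8_t ndm_flags, ndm_type;
    };

    struct ifinfomsg_x {             /* 16 bytes */
        uint8_t ifi_family, pad; uint16_t ifi_type;
        int32_t ifi_index; uint32_t ifi_flags, ifi_change;
    };

    int main(void)
    {
        size_t nd = sizeof(struct ndmsg_x);
        size_t nd_u32 = nd + 8;      /* + nla_attr_size(sizeof(u32)) */
        size_t ifi = sizeof(struct ifinfomsg_x);

        /* 12 and 20 never equal 16 plus attribute sizes, so payload
         * length alone identifies which header the sender used. */
        printf("ndmsg=%zu ndmsg+u32=%zu ifinfomsg=%zu\n", nd, nd_u32, ifi);
        return 0;
    }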
diff --git a/net/dccp/input.c b/net/dccp/input.c
index d28d46bff6ab..85d6c879383d 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -606,11 +606,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
606 if (sk->sk_state == DCCP_LISTEN) { 606 if (sk->sk_state == DCCP_LISTEN) {
607 if (dh->dccph_type == DCCP_PKT_REQUEST) { 607 if (dh->dccph_type == DCCP_PKT_REQUEST) {
608 /* It is possible that we process SYN packets from backlog, 608 /* It is possible that we process SYN packets from backlog,
609 * so we need to make sure to disable BH right there. 609 * so we need to make sure to disable BH and RCU right there.
610 */ 610 */
611 rcu_read_lock();
611 local_bh_disable(); 612 local_bh_disable();
612 acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0; 613 acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
613 local_bh_enable(); 614 local_bh_enable();
615 rcu_read_unlock();
614 if (!acceptable) 616 if (!acceptable)
615 return 1; 617 return 1;
616 consume_skb(skb); 618 consume_skb(skb);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index b08feb219b44..8e08cea6f178 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -493,9 +493,11 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
493 493
494 dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr, 494 dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
495 ireq->ir_rmt_addr); 495 ireq->ir_rmt_addr);
496 rcu_read_lock();
496 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, 497 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
497 ireq->ir_rmt_addr, 498 ireq->ir_rmt_addr,
498 ireq_opt_deref(ireq)); 499 rcu_dereference(ireq->ireq_opt));
500 rcu_read_unlock();
499 err = net_xmit_eval(err); 501 err = net_xmit_eval(err);
500 } 502 }
501 503
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index dfd5009f96ef..15e7f7915a21 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -544,7 +544,8 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
544 struct ip_options_rcu *opt; 544 struct ip_options_rcu *opt;
545 struct rtable *rt; 545 struct rtable *rt;
546 546
547 opt = ireq_opt_deref(ireq); 547 rcu_read_lock();
548 opt = rcu_dereference(ireq->ireq_opt);
548 549
549 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, 550 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
550 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 551 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
@@ -558,11 +559,13 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
558 goto no_route; 559 goto no_route;
559 if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) 560 if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
560 goto route_err; 561 goto route_err;
562 rcu_read_unlock();
561 return &rt->dst; 563 return &rt->dst;
562 564
563route_err: 565route_err:
564 ip_rt_put(rt); 566 ip_rt_put(rt);
565no_route: 567no_route:
568 rcu_read_unlock();
566 __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); 569 __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
567 return NULL; 570 return NULL;
568} 571}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index c0fe5ad996f2..26c36cccabdc 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -149,7 +149,6 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
149static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) 149static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
150{ 150{
151 struct sockaddr_in sin; 151 struct sockaddr_in sin;
152 const struct iphdr *iph = ip_hdr(skb);
153 __be16 *ports; 152 __be16 *ports;
154 int end; 153 int end;
155 154
@@ -164,7 +163,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
164 ports = (__be16 *)skb_transport_header(skb); 163 ports = (__be16 *)skb_transport_header(skb);
165 164
166 sin.sin_family = AF_INET; 165 sin.sin_family = AF_INET;
167 sin.sin_addr.s_addr = iph->daddr; 166 sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
168 sin.sin_port = ports[1]; 167 sin.sin_port = ports[1];
169 memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); 168 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
170 169
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index b92f422f2fa8..891ed2f91467 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -48,6 +48,7 @@ static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
48static int ip_ping_group_range_min[] = { 0, 0 }; 48static int ip_ping_group_range_min[] = { 0, 0 };
49static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; 49static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
50static int comp_sack_nr_max = 255; 50static int comp_sack_nr_max = 255;
51static u32 u32_max_div_HZ = UINT_MAX / HZ;
51 52
52/* obsolete */ 53/* obsolete */
53static int sysctl_tcp_low_latency __read_mostly; 54static int sysctl_tcp_low_latency __read_mostly;
@@ -745,9 +746,10 @@ static struct ctl_table ipv4_net_table[] = {
745 { 746 {
746 .procname = "tcp_probe_interval", 747 .procname = "tcp_probe_interval",
747 .data = &init_net.ipv4.sysctl_tcp_probe_interval, 748 .data = &init_net.ipv4.sysctl_tcp_probe_interval,
748 .maxlen = sizeof(int), 749 .maxlen = sizeof(u32),
749 .mode = 0644, 750 .mode = 0644,
750 .proc_handler = proc_dointvec, 751 .proc_handler = proc_douintvec_minmax,
752 .extra2 = &u32_max_div_HZ,
751 }, 753 },
752 { 754 {
753 .procname = "igmp_link_local_mcast_reports", 755 .procname = "igmp_link_local_mcast_reports",
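The sysctl hunk clamps tcp_probe_interval to UINT_MAX / HZ because the value is later scaled by HZ when converted to jiffies, and anything larger would wrap a u32. A small demonstration of the wrap the new bound prevents (HZ assumed to be 1000 here; CONFIG_HZ varies by build):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HZ 1000u

    int main(void)
    {
        uint32_t limit = UINT_MAX / HZ;    /* largest safe setting */
        uint32_t ok = limit * HZ;          /* 4294967000: no wrap */
        uint32_t bad = (limit + 1) * HZ;   /* 704: wrapped around */

        printf("limit=%u ok=%u wrapped=%u\n", limit, ok, bad);
        return 0;
    }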
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4cf2f7bb2802..47e08c1b5bc3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6009,11 +6009,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
6009 if (th->fin) 6009 if (th->fin)
6010 goto discard; 6010 goto discard;
6011 /* It is possible that we process SYN packets from backlog, 6011 /* It is possible that we process SYN packets from backlog,
6012 * so we need to make sure to disable BH right there. 6012 * so we need to make sure to disable BH and RCU right there.
6013 */ 6013 */
6014 rcu_read_lock();
6014 local_bh_disable(); 6015 local_bh_disable();
6015 acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0; 6016 acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
6016 local_bh_enable(); 6017 local_bh_enable();
6018 rcu_read_unlock();
6017 6019
6018 if (!acceptable) 6020 if (!acceptable)
6019 return 1; 6021 return 1;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 44c09eddbb78..cd426313a298 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -943,9 +943,11 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
943 if (skb) { 943 if (skb) {
944 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr); 944 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
945 945
946 rcu_read_lock();
946 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, 947 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
947 ireq->ir_rmt_addr, 948 ireq->ir_rmt_addr,
948 ireq_opt_deref(ireq)); 949 rcu_dereference(ireq->ireq_opt));
950 rcu_read_unlock();
949 err = net_xmit_eval(err); 951 err = net_xmit_eval(err);
950 } 952 }
951 953
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index bcfc00e88756..f8de2482a529 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -67,6 +67,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
67 67
68 if (xo && (xo->flags & XFRM_GRO)) { 68 if (xo && (xo->flags & XFRM_GRO)) {
69 skb_mac_header_rebuild(skb); 69 skb_mac_header_rebuild(skb);
70 skb_reset_transport_header(skb);
70 return 0; 71 return 0;
71 } 72 }
72 73
diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c
index 3d36644890bb..1ad2c2c4e250 100644
--- a/net/ipv4/xfrm4_mode_transport.c
+++ b/net/ipv4/xfrm4_mode_transport.c
@@ -46,7 +46,6 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
46static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) 46static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
47{ 47{
48 int ihl = skb->data - skb_transport_header(skb); 48 int ihl = skb->data - skb_transport_header(skb);
49 struct xfrm_offload *xo = xfrm_offload(skb);
50 49
51 if (skb->transport_header != skb->network_header) { 50 if (skb->transport_header != skb->network_header) {
52 memmove(skb_transport_header(skb), 51 memmove(skb_transport_header(skb),
@@ -54,8 +53,7 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
 		skb->network_header = skb->transport_header;
 	}
 	ip_hdr(skb)->tot_len = htons(skb->len + ihl);
-	if (!xo || !(xo->flags & XFRM_GRO))
-		skb_reset_transport_header(skb);
+	skb_reset_transport_header(skb);
 	return 0;
 }
 
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 413d98bf24f4..5e0efd3954e9 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -651,8 +651,6 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
 	skb->tstamp = sockc->transmit_time;
-	skb_dst_set(skb, &rt->dst);
-	*dstp = NULL;
 
 	skb_put(skb, length);
 	skb_reset_network_header(skb);
@@ -665,8 +663,14 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
 
 	skb->transport_header = skb->network_header;
 	err = memcpy_from_msg(iph, msg, length);
-	if (err)
-		goto error_fault;
+	if (err) {
+		err = -EFAULT;
+		kfree_skb(skb);
+		goto error;
+	}
+
+	skb_dst_set(skb, &rt->dst);
+	*dstp = NULL;
 
 	/* if egress device is enslaved to an L3 master device pass the
 	 * skb to its handler for processing
@@ -675,21 +679,28 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
 	if (unlikely(!skb))
 		return 0;
 
+	/* Acquire rcu_read_lock() in case we need to use rt->rt6i_idev
+	 * in the error path. Since skb has been freed, the dst could
+	 * have been queued for deletion.
+	 */
+	rcu_read_lock();
 	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
 		      NULL, rt->dst.dev, dst_output);
 	if (err > 0)
 		err = net_xmit_errno(err);
-	if (err)
-		goto error;
+	if (err) {
+		IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+		rcu_read_unlock();
+		goto error_check;
+	}
+	rcu_read_unlock();
 out:
 	return 0;
 
-error_fault:
-	err = -EFAULT;
-	kfree_skb(skb);
 error:
 	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+error_check:
 	if (err == -ENOBUFS && !np->recverr)
 		err = 0;
 	return err;
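The rawv6_send_hdrinc() rework is mostly about ownership ordering: skb_dst_set() hands the route reference to the skb, so it should happen after the last point that frees the skb by hand (the memcpy_from_msg() failure), otherwise the early error path has to remember to release the dst too. The rule in sketch form, with an illustrative helper name:

	err = copy_payload(skb, msg);        /* illustrative name */
	if (err) {
		kfree_skb(skb);              /* dst not attached yet: nothing else to undo */
		return -EFAULT;
	}
	skb_dst_set(skb, &rt->dst);          /* from here, the skb owns the dst reference */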
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 826b14de7dbb..a366c05a239d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -4321,11 +4321,6 @@ static int ip6_route_info_append(struct net *net,
 	if (!nh)
 		return -ENOMEM;
 	nh->fib6_info = rt;
-	err = ip6_convert_metrics(net, rt, r_cfg);
-	if (err) {
-		kfree(nh);
-		return err;
-	}
 	memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
 	list_add_tail(&nh->next, rt6_nh_list);
 
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 841f4a07438e..9ef490dddcea 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -59,6 +59,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
 
 	if (xo && (xo->flags & XFRM_GRO)) {
 		skb_mac_header_rebuild(skb);
+		skb_reset_transport_header(skb);
 		return -1;
 	}
 
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
index 9ad07a91708e..3c29da5defe6 100644
--- a/net/ipv6/xfrm6_mode_transport.c
+++ b/net/ipv6/xfrm6_mode_transport.c
@@ -51,7 +51,6 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int ihl = skb->data - skb_transport_header(skb);
-	struct xfrm_offload *xo = xfrm_offload(skb);
 
 	if (skb->transport_header != skb->network_header) {
 		memmove(skb_transport_header(skb),
@@ -60,8 +59,7 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
 	}
 	ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
 					   sizeof(struct ipv6hdr));
-	if (!xo || !(xo->flags & XFRM_GRO))
-		skb_reset_transport_header(skb);
+	skb_reset_transport_header(skb);
 	return 0;
 }
 
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 5959ce9620eb..6a74080005cf 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -170,9 +170,11 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 
 	if (toobig && xfrm6_local_dontfrag(skb)) {
 		xfrm6_local_rxpmtu(skb, mtu);
+		kfree_skb(skb);
 		return -EMSGSIZE;
 	} else if (!skb->ignore_df && toobig && skb->sk) {
 		xfrm_local_error(skb, mtu);
+		kfree_skb(skb);
 		return -EMSGSIZE;
 	}
 
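Both early returns in __xfrm6_output() previously leaked the packet: on this output path, a callee that decides not to transmit is expected to consume the skb itself, since the caller will not free it on a negative return. The contract, sketched with illustrative names:

	if (too_big_to_send(skb, mtu)) {     /* illustrative condition */
		xfrm_local_error(skb, mtu);  /* report the error to the socket ... */
		kfree_skb(skb);              /* ... and consume the skb ourselves */
		return -EMSGSIZE;
	}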
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d25da0e66da1..5d22eda8a6b1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -427,7 +427,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_AP_VLAN:
 		/* Keys without a station are used for TX only */
-		if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
+		if (sta && test_sta_flag(sta, WLAN_STA_MFP))
 			key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
 		break;
 	case NL80211_IFTYPE_ADHOC:
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5e6cf2cee965..5836ddeac9e3 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1756,7 +1756,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
 	if (local->ops->wake_tx_queue &&
 	    type != NL80211_IFTYPE_AP_VLAN &&
-	    type != NL80211_IFTYPE_MONITOR)
+	    (type != NL80211_IFTYPE_MONITOR ||
+	     (params->flags & MONITOR_FLAG_ACTIVE)))
 		txq_size += sizeof(struct txq_info) +
 			    local->hw.txq_data_size;
 
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index ee56f18cad3f..21526630bf65 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -217,7 +217,8 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_init(void);
 void ieee80211s_update_metric(struct ieee80211_local *local,
-			      struct sta_info *sta, struct sk_buff *skb);
+			      struct sta_info *sta,
+			      struct ieee80211_tx_status *st);
 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
 void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata);
 int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index daf9db3c8f24..6950cd0bf594 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -295,15 +295,12 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
 }
 
 void ieee80211s_update_metric(struct ieee80211_local *local,
-			      struct sta_info *sta, struct sk_buff *skb)
+			      struct sta_info *sta,
+			      struct ieee80211_tx_status *st)
 {
-	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct ieee80211_tx_info *txinfo = st->info;
 	int failed;
 
-	if (!ieee80211_is_data(hdr->frame_control))
-		return;
-
 	failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
 
 	/* moving average, scaled to 100.
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 9a6d7208bf4f..91d7c0cd1882 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -479,11 +479,6 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
 	if (!skb)
 		return;
 
-	if (dropped) {
-		dev_kfree_skb_any(skb);
-		return;
-	}
-
 	if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
 		u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
 		struct ieee80211_sub_if_data *sdata;
@@ -507,6 +502,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
 		rcu_read_unlock();
 
 		dev_kfree_skb_any(skb);
+	} else if (dropped) {
+		dev_kfree_skb_any(skb);
 	} else {
 		/* consumes skb */
 		skb_complete_wifi_ack(skb, acked);
@@ -811,7 +808,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
 
 	rate_control_tx_status(local, sband, status);
 	if (ieee80211_vif_is_mesh(&sta->sdata->vif))
-		ieee80211s_update_metric(local, sta, skb);
+		ieee80211s_update_metric(local, sta, status);
 
 	if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
 		ieee80211_frame_acked(sta, skb);
@@ -972,6 +969,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
 		}
 
 		rate_control_tx_status(local, sband, status);
+		if (ieee80211_vif_is_mesh(&sta->sdata->vif))
+			ieee80211s_update_metric(local, sta, status);
 	}
 
 	if (acked || noack_success) {
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 5cd5e6e5834e..6c647f425e05 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -16,6 +16,7 @@
 #include "ieee80211_i.h"
 #include "driver-ops.h"
 #include "rate.h"
+#include "wme.h"
 
 /* give usermode some time for retries in setting up the TDLS session */
 #define TDLS_PEER_SETUP_TIMEOUT (15 * HZ)
@@ -1010,14 +1011,13 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
 	switch (action_code) {
 	case WLAN_TDLS_SETUP_REQUEST:
 	case WLAN_TDLS_SETUP_RESPONSE:
-		skb_set_queue_mapping(skb, IEEE80211_AC_BK);
-		skb->priority = 2;
+		skb->priority = 256 + 2;
 		break;
 	default:
-		skb_set_queue_mapping(skb, IEEE80211_AC_VI);
-		skb->priority = 5;
+		skb->priority = 256 + 5;
 		break;
 	}
+	skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
 
 	/*
 	 * Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress.
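The TDLS hunk stops pinning frames to a fixed queue and instead encodes the TID into skb->priority, letting ieee80211_select_queue() derive the mapping. The 256 offset appears to be mac80211's convention for "this priority is an explicit TID rather than a DSCP-derived value"; read it roughly as:

	/* Assumed interpretation of the 256..263 range (sketch): */
	if (skb->priority >= 256 && skb->priority <= 263)
		tid = skb->priority - 256;   /* explicit TID request */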
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index f353d9db54bc..25ba24bef8f5 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -214,6 +214,7 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
 {
 	struct ieee80211_local *local = tx->local;
 	struct ieee80211_if_managed *ifmgd;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 
 	/* driver doesn't support power save */
 	if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
@@ -242,6 +243,9 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
 	if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
 		return TX_CONTINUE;
 
+	if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK))
+		return TX_CONTINUE;
+
 	ifmgd = &tx->sdata->u.mgd;
 
 	/*
@@ -1890,7 +1894,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
 			sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
 
 	if (invoke_tx_handlers_early(&tx))
-		return false;
+		return true;
 
 	if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
 		return true;
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index b4bdf9eda7b7..247b89784a6f 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1213,8 +1213,8 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
 #define TCP_NLATTR_SIZE	( \
 	NLA_ALIGN(NLA_HDRLEN + 1) + \
 	NLA_ALIGN(NLA_HDRLEN + 1) + \
-	NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))) + \
-	NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))))
+	NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
+	NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
 
 static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
 {
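The conntrack change fixes a classic C typo: sizeof(sizeof(x)) compiles cleanly but measures the size_t result of the inner sizeof, not x, so the reserved netlink space was sizeof(size_t) (usually 8) rather than the structure size. A standalone illustration, using a stand-in struct:

	#include <stdio.h>

	struct two_bytes { unsigned char flags, mask; };  /* stand-in for nf_ct_tcp_flags */

	int main(void)
	{
		printf("%zu\n", sizeof(struct two_bytes));          /* 2 */
		printf("%zu\n", sizeof(sizeof(struct two_bytes)));  /* 8 on LP64: size of size_t */
		return 0;
	}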
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index 5af74b37f423..a35fb59ace73 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -49,7 +49,7 @@ static int nft_osf_init(const struct nft_ctx *ctx,
 
 	priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]);
 	err = nft_validate_register_store(ctx, priv->dreg, NULL,
-					  NFTA_DATA_VALUE, NFT_OSF_MAXGENRELEN);
+					  NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN);
 	if (err < 0)
 		return err;
 
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 55e2d9215c0d..0e5ec126f6ad 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -355,12 +355,11 @@ cont:
 
 static void nft_rbtree_gc(struct work_struct *work)
 {
+	struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
 	struct nft_set_gc_batch *gcb = NULL;
-	struct rb_node *node, *prev = NULL;
-	struct nft_rbtree_elem *rbe;
 	struct nft_rbtree *priv;
+	struct rb_node *node;
 	struct nft_set *set;
-	int i;
 
 	priv = container_of(work, struct nft_rbtree, gc_work.work);
 	set = nft_set_container_of(priv);
@@ -371,7 +370,7 @@ static void nft_rbtree_gc(struct work_struct *work)
 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
 		if (nft_rbtree_interval_end(rbe)) {
-			prev = node;
+			rbe_end = rbe;
 			continue;
 		}
 		if (!nft_set_elem_expired(&rbe->ext))
@@ -379,29 +378,30 @@ static void nft_rbtree_gc(struct work_struct *work)
 		if (nft_set_elem_mark_busy(&rbe->ext))
 			continue;
 
+		if (rbe_prev) {
+			rb_erase(&rbe_prev->node, &priv->root);
+			rbe_prev = NULL;
+		}
 		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
 		if (!gcb)
 			break;
 
 		atomic_dec(&set->nelems);
 		nft_set_gc_batch_add(gcb, rbe);
+		rbe_prev = rbe;
 
-		if (prev) {
-			rbe = rb_entry(prev, struct nft_rbtree_elem, node);
+		if (rbe_end) {
 			atomic_dec(&set->nelems);
-			nft_set_gc_batch_add(gcb, rbe);
-			prev = NULL;
+			nft_set_gc_batch_add(gcb, rbe_end);
+			rb_erase(&rbe_end->node, &priv->root);
+			rbe_end = NULL;
 		}
 		node = rb_next(node);
 		if (!node)
 			break;
 	}
-	if (gcb) {
-		for (i = 0; i < gcb->head.cnt; i++) {
-			rbe = gcb->elems[i];
-			rb_erase(&rbe->node, &priv->root);
-		}
-	}
+	if (rbe_prev)
+		rb_erase(&rbe_prev->node, &priv->root);
 	write_seqcount_end(&priv->count);
 	write_unlock_bh(&priv->lock);
 
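The nft_rbtree_gc() rewrite applies the usual discipline for deleting while walking an rbtree: never rb_erase() the node the iterator is still standing on; remember the victim and erase it only once rb_next() has moved past it. A compact sketch of the pattern, with illustrative types and predicate:

	struct rb_node *node = rb_first(&root);
	struct elem *victim = NULL;

	while (node) {
		struct elem *e = rb_entry(node, struct elem, node);

		node = rb_next(node);                    /* advance first */
		if (victim) {
			rb_erase(&victim->node, &root);  /* safe: iterator is past it */
			victim = NULL;
		}
		if (expired(e))                          /* illustrative predicate */
			victim = e;                      /* defer the erase */
	}
	if (victim)
		rb_erase(&victim->node, &root);          /* flush the last one */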
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 0472f3472842..ada144e5645b 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -56,7 +56,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 	struct sk_buff *pskb = (struct sk_buff *)skb;
 	struct sock *sk = skb->sk;
 
-	if (!net_eq(xt_net(par), sock_net(sk)))
+	if (sk && !net_eq(xt_net(par), sock_net(sk)))
 		sk = NULL;
 
 	if (!sk)
@@ -117,7 +117,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
 	struct sk_buff *pskb = (struct sk_buff *)skb;
 	struct sock *sk = skb->sk;
 
-	if (!net_eq(xt_net(par), sock_net(sk)))
+	if (sk && !net_eq(xt_net(par), sock_net(sk)))
 		sk = NULL;
 
 	if (!sk)
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 86a75105af1a..35ae64cbef33 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1312,6 +1312,10 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
 
 	rcu_assign_pointer(help->helper, helper);
 	info->helper = helper;
+
+	if (info->nat)
+		request_module("ip_nat_%s", name);
+
 	return 0;
 }
 
@@ -1624,10 +1628,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
 		OVS_NLERR(log, "Failed to allocate conntrack template");
 		return -ENOMEM;
 	}
-
-	__set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
-	nf_conntrack_get(&ct_info.ct->ct_general);
-
 	if (helper) {
 		err = ovs_ct_add_helper(&ct_info, helper, key, log);
 		if (err)
@@ -1639,6 +1639,8 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
 	if (err)
 		goto err_free_ct;
 
+	__set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
+	nf_conntrack_get(&ct_info.ct->ct_general);
 	return 0;
 err_free_ct:
 	__ovs_ct_free_action(&ct_info);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 75c92a87e7b2..d6e94dc7e290 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2715,10 +2715,12 @@ tpacket_error:
 		}
 	}
 
-	if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr,
-						      vio_le())) {
-		tp_len = -EINVAL;
-		goto tpacket_error;
+	if (po->has_vnet_hdr) {
+		if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
+			tp_len = -EINVAL;
+			goto tpacket_error;
+		}
+		virtio_net_hdr_set_proto(skb, vnet_hdr);
 	}
 
 	skb->destructor = tpacket_destruct_skb;
@@ -2915,6 +2917,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 		if (err)
 			goto out_free;
 		len += sizeof(vnet_hdr);
+		virtio_net_hdr_set_proto(skb, &vnet_hdr);
 	}
 
 	skb_probe_transport_header(skb, reserve);
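Both af_packet transmit paths now call virtio_net_hdr_set_proto() after parsing the virtio-net header. Its job is to infer skb->protocol from the GSO type when the sender did not supply an ethertype, which later GSO segmentation requires. Its approximate shape, reconstructed here only for orientation (see include/linux/virtio_net.h for the real definition):

	static inline void virtio_net_hdr_set_proto(struct sk_buff *skb,
						    const struct virtio_net_hdr *hdr)
	{
		/* Map the announced GSO type to an L3 protocol. */
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
		case VIRTIO_NET_HDR_GSO_UDP:
			skb->protocol = cpu_to_be16(ETH_P_IP);
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb->protocol = cpu_to_be16(ETH_P_IPV6);
			break;
		}
	}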
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index c97558710421..ef9554131434 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -40,17 +40,12 @@ struct rxrpc_crypt {
 struct rxrpc_connection;
 
 /*
- * Mark applied to socket buffers.
+ * Mark applied to socket buffers in skb->mark. skb->priority is used
+ * to pass supplementary information.
  */
 enum rxrpc_skb_mark {
-	RXRPC_SKB_MARK_DATA,		/* data message */
-	RXRPC_SKB_MARK_FINAL_ACK,	/* final ACK received message */
-	RXRPC_SKB_MARK_BUSY,		/* server busy message */
-	RXRPC_SKB_MARK_REMOTE_ABORT,	/* remote abort message */
-	RXRPC_SKB_MARK_LOCAL_ABORT,	/* local abort message */
-	RXRPC_SKB_MARK_NET_ERROR,	/* network error message */
-	RXRPC_SKB_MARK_LOCAL_ERROR,	/* local error message */
-	RXRPC_SKB_MARK_NEW_CALL,	/* local error message */
+	RXRPC_SKB_MARK_REJECT_BUSY,	/* Reject with BUSY */
+	RXRPC_SKB_MARK_REJECT_ABORT,	/* Reject with ABORT (code in skb->priority) */
 };
 
 /*
@@ -293,7 +288,6 @@ struct rxrpc_peer {
 	struct hlist_node hash_link;
 	struct rxrpc_local *local;
 	struct hlist_head error_targets;	/* targets for net error distribution */
-	struct work_struct error_distributor;
 	struct rb_root service_conns;		/* Service connections */
 	struct list_head keepalive_link;	/* Link in net->peer_keepalive[] */
 	time64_t last_tx_at;			/* Last time packet sent here */
@@ -304,8 +298,6 @@ struct rxrpc_peer {
 	unsigned int maxdata;		/* data size (MTU - hdrsize) */
 	unsigned short hdrsize;		/* header size (IP + UDP + RxRPC) */
 	int debug_id;			/* debug ID for printks */
-	int error_report;		/* Net (+0) or local (+1000000) to distribute */
-#define RXRPC_LOCAL_ERROR_OFFSET 1000000
 	struct sockaddr_rxrpc srx;	/* remote address */
 
 	/* calculated RTT cache */
@@ -463,6 +455,16 @@ struct rxrpc_connection {
 	u8 out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
 };
 
+static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
+{
+	return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
+}
+
+static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
+{
+	return !rxrpc_to_server(sp);
+}
+
 /*
  * Flags in call->flags.
  */
@@ -717,6 +719,8 @@ extern struct workqueue_struct *rxrpc_workqueue;
 int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
 void rxrpc_discard_prealloc(struct rxrpc_sock *);
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
+					   struct rxrpc_sock *,
+					   struct rxrpc_peer *,
 					   struct rxrpc_connection *,
 					   struct sk_buff *);
 void rxrpc_accept_incoming_calls(struct rxrpc_local *);
@@ -908,7 +912,8 @@ extern unsigned int rxrpc_closed_conn_expiry;
 
 struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
-						   struct sk_buff *);
+						   struct sk_buff *,
+						   struct rxrpc_peer **);
 void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
 void rxrpc_disconnect_call(struct rxrpc_call *);
 void rxrpc_kill_connection(struct rxrpc_connection *);
@@ -1031,7 +1036,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
  * peer_event.c
  */
 void rxrpc_error_report(struct sock *);
-void rxrpc_peer_error_distributor(struct work_struct *);
 void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
 			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
 void rxrpc_peer_keepalive_worker(struct work_struct *);
@@ -1044,13 +1048,11 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
 struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
 				     struct sockaddr_rxrpc *, gfp_t);
 struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
-					      struct rxrpc_peer *);
+void rxrpc_new_incoming_peer(struct rxrpc_local *, struct rxrpc_peer *);
 void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
 void rxrpc_put_peer(struct rxrpc_peer *);
-void __rxrpc_queue_peer_error(struct rxrpc_peer *);
 
 /*
  * proc.c
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 9d1e298b784c..9c7f26d06a52 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -249,11 +249,11 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
  */
 static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
 						    struct rxrpc_local *local,
+						    struct rxrpc_peer *peer,
 						    struct rxrpc_connection *conn,
 						    struct sk_buff *skb)
 {
 	struct rxrpc_backlog *b = rx->backlog;
-	struct rxrpc_peer *peer, *xpeer;
 	struct rxrpc_call *call;
 	unsigned short call_head, conn_head, peer_head;
 	unsigned short call_tail, conn_tail, peer_tail;
@@ -276,21 +276,18 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
 		return NULL;
 
 	if (!conn) {
-		/* No connection. We're going to need a peer to start off
-		 * with. If one doesn't yet exist, use a spare from the
-		 * preallocation set. We dump the address into the spare in
-		 * anticipation - and to save on stack space.
-		 */
-		xpeer = b->peer_backlog[peer_tail];
-		if (rxrpc_extract_addr_from_skb(local, &xpeer->srx, skb) < 0)
-			return NULL;
-
-		peer = rxrpc_lookup_incoming_peer(local, xpeer);
-		if (peer == xpeer) {
+		if (peer && !rxrpc_get_peer_maybe(peer))
+			peer = NULL;
+		if (!peer) {
+			peer = b->peer_backlog[peer_tail];
+			if (rxrpc_extract_addr_from_skb(local, &peer->srx, skb) < 0)
+				return NULL;
 			b->peer_backlog[peer_tail] = NULL;
 			smp_store_release(&b->peer_backlog_tail,
 					  (peer_tail + 1) &
 					  (RXRPC_BACKLOG_MAX - 1));
+
+			rxrpc_new_incoming_peer(local, peer);
 		}
 
 		/* Now allocate and set up the connection */
@@ -335,45 +332,31 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
  * The call is returned with the user access mutex held.
  */
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+					   struct rxrpc_sock *rx,
+					   struct rxrpc_peer *peer,
 					   struct rxrpc_connection *conn,
 					   struct sk_buff *skb)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-	struct rxrpc_sock *rx;
 	struct rxrpc_call *call;
-	u16 service_id = sp->hdr.serviceId;
 
 	_enter("");
 
-	/* Get the socket providing the service */
-	rx = rcu_dereference(local->service);
-	if (rx && (service_id == rx->srx.srx_service ||
-		   service_id == rx->second_service))
-		goto found_service;
-
-	trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-			  RX_INVALID_OPERATION, EOPNOTSUPP);
-	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
-	skb->priority = RX_INVALID_OPERATION;
-	_leave(" = NULL [service]");
-	return NULL;
-
-found_service:
 	spin_lock(&rx->incoming_lock);
 	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
 	    rx->sk.sk_state == RXRPC_CLOSE) {
 		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
 				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
-		skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
 		skb->priority = RX_INVALID_OPERATION;
 		_leave(" = NULL [close]");
 		call = NULL;
 		goto out;
 	}
 
-	call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
+	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
 	if (!call) {
-		skb->mark = RXRPC_SKB_MARK_BUSY;
+		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
 		_leave(" = NULL [busy]");
 		call = NULL;
 		goto out;
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 9486293fef5c..799f75b6900d 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -400,7 +400,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
 	rcu_assign_pointer(conn->channels[chan].call, call);
 
 	spin_lock(&conn->params.peer->lock);
-	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
+	hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
 	spin_unlock(&conn->params.peer->lock);
 
 	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index f8f37188a932..8acf74fe24c0 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -710,8 +710,8 @@ int rxrpc_connect_call(struct rxrpc_call *call,
 	}
 
 	spin_lock_bh(&call->conn->params.peer->lock);
-	hlist_add_head(&call->error_link,
-		       &call->conn->params.peer->error_targets);
+	hlist_add_head_rcu(&call->error_link,
+			   &call->conn->params.peer->error_targets);
 	spin_unlock_bh(&call->conn->params.peer->lock);
 
 out:
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 77440a356b14..885dae829f4a 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -69,10 +69,14 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
  * If successful, a pointer to the connection is returned, but no ref is taken.
  * NULL is returned if there is no match.
  *
+ * When searching for a service call, if we find a peer but no connection, we
+ * return that through *_peer in case we need to create a new service call.
+ *
  * The caller must be holding the RCU read lock.
  */
 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
-						   struct sk_buff *skb)
+						   struct sk_buff *skb,
+						   struct rxrpc_peer **_peer)
 {
 	struct rxrpc_connection *conn;
 	struct rxrpc_conn_proto k;
@@ -85,9 +89,6 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
 	if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0)
 		goto not_found;
 
-	k.epoch = sp->hdr.epoch;
-	k.cid = sp->hdr.cid & RXRPC_CIDMASK;
-
 	/* We may have to handle mixing IPv4 and IPv6 */
 	if (srx.transport.family != local->srx.transport.family) {
 		pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
@@ -99,7 +100,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
 	k.epoch = sp->hdr.epoch;
 	k.cid = sp->hdr.cid & RXRPC_CIDMASK;
 
-	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
+	if (rxrpc_to_server(sp)) {
 		/* We need to look up service connections by the full protocol
 		 * parameter set. We look up the peer first as an intermediate
 		 * step and then the connection from the peer's tree.
@@ -107,6 +108,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
 		peer = rxrpc_lookup_peer_rcu(local, &srx);
 		if (!peer)
 			goto not_found;
+		*_peer = peer;
 		conn = rxrpc_find_service_conn_rcu(peer, skb);
 		if (!conn || atomic_read(&conn->usage) == 0)
 			goto not_found;
@@ -214,7 +216,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
 		call->peer->cong_cwnd = call->cong_cwnd;
 
 	spin_lock_bh(&conn->params.peer->lock);
-	hlist_del_init(&call->error_link);
+	hlist_del_rcu(&call->error_link);
 	spin_unlock_bh(&conn->params.peer->lock);
 
 	if (rxrpc_is_client_call(call))
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index cfdc199c6351..800f5b8a1baa 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -622,13 +622,14 @@ static void rxrpc_input_requested_ack(struct rxrpc_call *call,
 		if (!skb)
 			continue;
 
+		sent_at = skb->tstamp;
+		smp_rmb(); /* Read timestamp before serial. */
 		sp = rxrpc_skb(skb);
 		if (sp->hdr.serial != orig_serial)
 			continue;
-		smp_rmb();
-		sent_at = skb->tstamp;
 		goto found;
 	}
+
 	return;
 
 found:
@@ -1124,12 +1125,14 @@ void rxrpc_data_ready(struct sock *udp_sk)
 {
 	struct rxrpc_connection *conn;
 	struct rxrpc_channel *chan;
-	struct rxrpc_call *call;
+	struct rxrpc_call *call = NULL;
 	struct rxrpc_skb_priv *sp;
 	struct rxrpc_local *local = udp_sk->sk_user_data;
+	struct rxrpc_peer *peer = NULL;
+	struct rxrpc_sock *rx = NULL;
 	struct sk_buff *skb;
 	unsigned int channel;
-	int ret, skew;
+	int ret, skew = 0;
 
 	_enter("%p", udp_sk);
 
@@ -1143,6 +1146,9 @@ void rxrpc_data_ready(struct sock *udp_sk)
 		return;
 	}
 
+	if (skb->tstamp == 0)
+		skb->tstamp = ktime_get_real();
+
 	rxrpc_new_skb(skb, rxrpc_skb_rx_received);
 
 	_net("recv skb %p", skb);
@@ -1177,46 +1183,75 @@ void rxrpc_data_ready(struct sock *udp_sk)
 
 	trace_rxrpc_rx_packet(sp);
 
-	_net("Rx RxRPC %s ep=%x call=%x:%x",
-	     sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
-	     sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);
-
-	if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
-	    !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
-		_proto("Rx Bad Packet Type %u", sp->hdr.type);
-		goto bad_message;
-	}
-
 	switch (sp->hdr.type) {
 	case RXRPC_PACKET_TYPE_VERSION:
-		if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED))
+		if (rxrpc_to_client(sp))
 			goto discard;
 		rxrpc_post_packet_to_local(local, skb);
 		goto out;
 
 	case RXRPC_PACKET_TYPE_BUSY:
-		if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
+		if (rxrpc_to_server(sp))
 			goto discard;
 		/* Fall through */
+	case RXRPC_PACKET_TYPE_ACK:
+	case RXRPC_PACKET_TYPE_ACKALL:
+		if (sp->hdr.callNumber == 0)
+			goto bad_message;
+		/* Fall through */
+	case RXRPC_PACKET_TYPE_ABORT:
+		break;
 
 	case RXRPC_PACKET_TYPE_DATA:
-		if (sp->hdr.callNumber == 0)
+		if (sp->hdr.callNumber == 0 ||
+		    sp->hdr.seq == 0)
 			goto bad_message;
 		if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
 		    !rxrpc_validate_jumbo(skb))
 			goto bad_message;
 		break;
 
+	case RXRPC_PACKET_TYPE_CHALLENGE:
+		if (rxrpc_to_server(sp))
+			goto discard;
+		break;
+	case RXRPC_PACKET_TYPE_RESPONSE:
+		if (rxrpc_to_client(sp))
+			goto discard;
+		break;
+
 	/* Packet types 9-11 should just be ignored. */
 	case RXRPC_PACKET_TYPE_PARAMS:
 	case RXRPC_PACKET_TYPE_10:
 	case RXRPC_PACKET_TYPE_11:
 		goto discard;
+
+	default:
+		_proto("Rx Bad Packet Type %u", sp->hdr.type);
+		goto bad_message;
 	}
 
+	if (sp->hdr.serviceId == 0)
+		goto bad_message;
+
 	rcu_read_lock();
 
-	conn = rxrpc_find_connection_rcu(local, skb);
+	if (rxrpc_to_server(sp)) {
+		/* Weed out packets to services we're not offering. Packets
+		 * that would begin a call are explicitly rejected and the rest
+		 * are just discarded.
+		 */
+		rx = rcu_dereference(local->service);
+		if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
+			    sp->hdr.serviceId != rx->second_service)) {
+			if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
+			    sp->hdr.seq == 1)
+				goto unsupported_service;
+			goto discard_unlock;
+		}
+	}
+
+	conn = rxrpc_find_connection_rcu(local, skb, &peer);
 	if (conn) {
 		if (sp->hdr.securityIndex != conn->security_ix)
 			goto wrong_security;
@@ -1280,7 +1315,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
 		call = rcu_dereference(chan->call);
 
 		if (sp->hdr.callNumber > chan->call_id) {
-			if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) {
+			if (rxrpc_to_client(sp)) {
 				rcu_read_unlock();
 				goto reject_packet;
 			}
@@ -1297,19 +1332,15 @@ void rxrpc_data_ready(struct sock *udp_sk)
 			if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
 				set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
 		}
-	} else {
-		skew = 0;
-		call = NULL;
 	}
 
 	if (!call || atomic_read(&call->usage) == 0) {
-		if (!(sp->hdr.type & RXRPC_CLIENT_INITIATED) ||
-		    sp->hdr.callNumber == 0 ||
+		if (rxrpc_to_client(sp) ||
 		    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
 			goto bad_message_unlock;
 		if (sp->hdr.seq != 1)
 			goto discard_unlock;
-		call = rxrpc_new_incoming_call(local, conn, skb);
+		call = rxrpc_new_incoming_call(local, rx, peer, conn, skb);
 		if (!call) {
 			rcu_read_unlock();
 			goto reject_packet;
@@ -1340,6 +1371,13 @@ wrong_security:
 	skb->priority = RXKADINCONSISTENCY;
 	goto post_abort;
 
+unsupported_service:
+	rcu_read_unlock();
+	trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+			  RX_INVALID_OPERATION, EOPNOTSUPP);
+	skb->priority = RX_INVALID_OPERATION;
+	goto post_abort;
+
 reupgrade:
 	rcu_read_unlock();
 	trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
@@ -1354,7 +1392,7 @@ bad_message:
 protocol_error:
 	skb->priority = RX_PROTOCOL_ERROR;
 post_abort:
-	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+	skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
 reject_packet:
 	trace_rxrpc_rx_done(skb->mark, skb->priority);
 	rxrpc_reject_packet(local, skb);
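One detail worth isolating from the input.c churn: the old liveness check tested sp->hdr.type against RXRPC_CLIENT_INITIATED, which is a flags bit, so the comparison was silently wrong. Funnelling every direction check through the two new inline helpers removes that class of typo:

	/* Before (buggy: 'type' tested against a flags constant): */
	if (!(sp->hdr.type & RXRPC_CLIENT_INITIATED) || ...)

	/* After (direction logic lives in exactly one place): */
	if (rxrpc_to_client(sp) ||
	    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
		goto bad_message_unlock;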
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 777c3ed4cfc0..94d234e9c685 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -135,10 +135,10 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
 	}
 
 	switch (local->srx.transport.family) {
-	case AF_INET:
-		/* we want to receive ICMP errors */
+	case AF_INET6:
+		/* we want to receive ICMPv6 errors */
 		opt = 1;
-		ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
+		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
 					(char *) &opt, sizeof(opt));
 		if (ret < 0) {
 			_debug("setsockopt failed");
@@ -146,19 +146,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
 		}
 
 		/* we want to set the don't fragment bit */
-		opt = IP_PMTUDISC_DO;
-		ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
+		opt = IPV6_PMTUDISC_DO;
+		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
 					(char *) &opt, sizeof(opt));
 		if (ret < 0) {
 			_debug("setsockopt failed");
 			goto error;
 		}
-		break;
 
-	case AF_INET6:
+		/* Fall through and set IPv4 options too otherwise we don't get
+		 * errors from IPv4 packets sent through the IPv6 socket.
+		 */
+
+	case AF_INET:
 		/* we want to receive ICMP errors */
 		opt = 1;
-		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
+		ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
 					(char *) &opt, sizeof(opt));
 		if (ret < 0) {
 			_debug("setsockopt failed");
@@ -166,13 +169,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
 		}
 
 		/* we want to set the don't fragment bit */
-		opt = IPV6_PMTUDISC_DO;
-		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
+		opt = IP_PMTUDISC_DO;
+		ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
 					(char *) &opt, sizeof(opt));
 		if (ret < 0) {
 			_debug("setsockopt failed");
 			goto error;
 		}
+
+		/* We want receive timestamps. */
+		opt = 1;
+		ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS,
+					(char *)&opt, sizeof(opt));
+		if (ret < 0) {
+			_debug("setsockopt failed");
+			goto error;
+		}
 		break;
 
 	default:
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index ccf5de160444..e8fb8922bca8 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -124,7 +124,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
 	struct kvec iov[2];
 	rxrpc_serial_t serial;
 	rxrpc_seq_t hard_ack, top;
-	ktime_t now;
 	size_t len, n;
 	int ret;
 	u8 reason;
@@ -196,9 +195,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
 		/* We need to stick a time in before we send the packet in case
 		 * the reply gets back before kernel_sendmsg() completes - but
 		 * asking UDP to send the packet can take a relatively long
-		 * time, so we update the time after, on the assumption that
-		 * the packet transmission is more likely to happen towards the
-		 * end of the kernel_sendmsg() call.
+		 * time.
 		 */
 		call->ping_time = ktime_get_real();
 		set_bit(RXRPC_CALL_PINGING, &call->flags);
@@ -206,9 +203,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
 	}
 
 	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
-	now = ktime_get_real();
-	if (ping)
-		call->ping_time = now;
 	conn->params.peer->last_tx_at = ktime_get_seconds();
 	if (ret < 0)
 		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
@@ -363,8 +357,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 
 	/* If our RTT cache needs working on, request an ACK. Also request
 	 * ACKs if a DATA packet appears to have been lost.
+	 *
+	 * However, we mustn't request an ACK on the last reply packet of a
+	 * service call, lest OpenAFS incorrectly send us an ACK with some
+	 * soft-ACKs in it and then never follow up with a proper hard ACK.
 	 */
-	if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
+	if ((!(sp->hdr.flags & RXRPC_LAST_PACKET) ||
+	     rxrpc_to_server(sp)
+	    ) &&
 	    (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
 	     retrans ||
 	     call->cong_mode == RXRPC_CALL_SLOW_START ||
@@ -390,6 +390,11 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 		goto send_fragmentable;
 
 	down_read(&conn->params.local->defrag_sem);
+
+	sp->hdr.serial = serial;
+	smp_wmb(); /* Set serial before timestamp */
+	skb->tstamp = ktime_get_real();
+
 	/* send the packet by UDP
 	 * - returns -EMSGSIZE if UDP would have to fragment the packet
 	 *   to go out of the interface
@@ -413,12 +418,8 @@ done:
 	trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
 			    retrans, lost);
 	if (ret >= 0) {
-		ktime_t now = ktime_get_real();
-		skb->tstamp = now;
-		smp_wmb();
-		sp->hdr.serial = serial;
 		if (whdr.flags & RXRPC_REQUEST_ACK) {
-			call->peer->rtt_last_req = now;
+			call->peer->rtt_last_req = skb->tstamp;
 			trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
 			if (call->peer->rtt_usage > 1) {
 				unsigned long nowj = jiffies, ack_lost_at;
@@ -457,6 +458,10 @@ send_fragmentable:
 
 	down_write(&conn->params.local->defrag_sem);
 
+	sp->hdr.serial = serial;
+	smp_wmb(); /* Set serial before timestamp */
+	skb->tstamp = ktime_get_real();
+
 	switch (conn->params.local->srx.transport.family) {
 	case AF_INET:
 		opt = IP_PMTUDISC_DONT;
@@ -519,7 +524,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
 	struct kvec iov[2];
 	size_t size;
 	__be32 code;
-	int ret;
+	int ret, ioc;
 
 	_enter("%d", local->debug_id);
 
@@ -527,7 +532,6 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
 	iov[0].iov_len = sizeof(whdr);
 	iov[1].iov_base = &code;
 	iov[1].iov_len = sizeof(code);
-	size = sizeof(whdr) + sizeof(code);
 
 	msg.msg_name = &srx.transport;
 	msg.msg_control = NULL;
@@ -535,17 +539,31 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
 	msg.msg_flags = 0;
 
 	memset(&whdr, 0, sizeof(whdr));
-	whdr.type = RXRPC_PACKET_TYPE_ABORT;
 
 	while ((skb = skb_dequeue(&local->reject_queue))) {
 		rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
 		sp = rxrpc_skb(skb);
 
+		switch (skb->mark) {
+		case RXRPC_SKB_MARK_REJECT_BUSY:
+			whdr.type = RXRPC_PACKET_TYPE_BUSY;
+			size = sizeof(whdr);
+			ioc = 1;
+			break;
+		case RXRPC_SKB_MARK_REJECT_ABORT:
+			whdr.type = RXRPC_PACKET_TYPE_ABORT;
+			code = htonl(skb->priority);
+			size = sizeof(whdr) + sizeof(code);
+			ioc = 2;
+			break;
+		default:
+			rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+			continue;
+		}
+
 		if (rxrpc_extract_addr_from_skb(local, &srx, skb) == 0) {
 			msg.msg_namelen = srx.transport_len;
 
-			code = htonl(skb->priority);
-
 			whdr.epoch = htonl(sp->hdr.epoch);
 			whdr.cid = htonl(sp->hdr.cid);
 			whdr.callNumber = htonl(sp->hdr.callNumber);
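The serial/timestamp stores added to both transmit paths pair with the reader reordered earlier in rxrpc_input_requested_ack(): the writer publishes the serial before the timestamp (smp_wmb()), while the reader loads the timestamp before the serial (smp_rmb()). As I read it, the guarantee is that a timestamp observed alongside a matching serial can never belong to a later retransmission of the same skb. Side by side:

	/* Writer (rxrpc_send_data_packet):        Reader (rxrpc_input_requested_ack):
	 *
	 *   sp->hdr.serial = serial;               sent_at = skb->tstamp;
	 *   smp_wmb();                             smp_rmb();
	 *   skb->tstamp = ktime_get_real();        if (sp->hdr.serial == orig_serial)
	 *                                                  ... use sent_at ...
	 */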
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 4f9da2f51c69..f3e6fc670da2 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -23,6 +23,8 @@
 #include "ar-internal.h"
 
 static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
+static void rxrpc_distribute_error(struct rxrpc_peer *, int,
+				   enum rxrpc_call_completion);
 
 /*
  * Find the peer associated with an ICMP packet.
@@ -194,8 +196,6 @@ void rxrpc_error_report(struct sock *sk)
 	rcu_read_unlock();
 	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
 
-	/* The ref we obtained is passed off to the work item */
-	__rxrpc_queue_peer_error(peer);
 	_leave("");
 }
 
@@ -205,6 +205,7 @@ void rxrpc_error_report(struct sock *sk)
 static void rxrpc_store_error(struct rxrpc_peer *peer,
 			      struct sock_exterr_skb *serr)
 {
+	enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
 	struct sock_extended_err *ee;
 	int err;
 
@@ -255,7 +256,7 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
 	case SO_EE_ORIGIN_NONE:
 	case SO_EE_ORIGIN_LOCAL:
 		_proto("Rx Received local error { error=%d }", err);
-		err += RXRPC_LOCAL_ERROR_OFFSET;
+		compl = RXRPC_CALL_LOCAL_ERROR;
 		break;
 
 	case SO_EE_ORIGIN_ICMP6:
@@ -264,48 +265,23 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
 		break;
 	}
 
-	peer->error_report = err;
+	rxrpc_distribute_error(peer, err, compl);
 }
 
 /*
- * Distribute an error that occurred on a peer
+ * Distribute an error that occurred on a peer.
  */
-void rxrpc_peer_error_distributor(struct work_struct *work)
+static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
+				   enum rxrpc_call_completion compl)
 {
-	struct rxrpc_peer *peer =
-		container_of(work, struct rxrpc_peer, error_distributor);
 	struct rxrpc_call *call;
-	enum rxrpc_call_completion compl;
-	int error;
-
-	_enter("");
-
-	error = READ_ONCE(peer->error_report);
-	if (error < RXRPC_LOCAL_ERROR_OFFSET) {
-		compl = RXRPC_CALL_NETWORK_ERROR;
-	} else {
-		compl = RXRPC_CALL_LOCAL_ERROR;
-		error -= RXRPC_LOCAL_ERROR_OFFSET;
-	}
 
-	_debug("ISSUE ERROR %s %d", rxrpc_call_completions[compl], error);
-
-	spin_lock_bh(&peer->lock);
-
-	while (!hlist_empty(&peer->error_targets)) {
-		call = hlist_entry(peer->error_targets.first,
-				   struct rxrpc_call, error_link);
-		hlist_del_init(&call->error_link);
+	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
 		rxrpc_see_call(call);
-
-		if (rxrpc_set_call_completion(call, compl, 0, -error))
+		if (call->state < RXRPC_CALL_COMPLETE &&
+		    rxrpc_set_call_completion(call, compl, 0, -error))
 			rxrpc_notify_socket(call);
 	}
-
-	spin_unlock_bh(&peer->lock);
-
-	rxrpc_put_peer(peer);
-	_leave("");
 }
 
 /*
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 1dc7648e3eff..01a9febfa367 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -124,11 +124,9 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
 	struct rxrpc_net *rxnet = local->rxnet;
 
 	hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
-		if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) {
-			if (atomic_read(&peer->usage) == 0)
-				return NULL;
+		if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
+		    atomic_read(&peer->usage) > 0)
 			return peer;
-		}
 	}
 
 	return NULL;
@@ -222,8 +220,6 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
 		atomic_set(&peer->usage, 1);
 		peer->local = local;
 		INIT_HLIST_HEAD(&peer->error_targets);
-		INIT_WORK(&peer->error_distributor,
-			  &rxrpc_peer_error_distributor);
 		peer->service_conns = RB_ROOT;
 		seqlock_init(&peer->service_conn_lock);
 		spin_lock_init(&peer->lock);
@@ -299,34 +295,23 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
 }
 
 /*
- * Set up a new incoming peer. The address is prestored in the preallocated
- * peer.
+ * Set up a new incoming peer. There shouldn't be any other matching peers
+ * since we've already done a search in the list from the non-reentrant context
+ * (the data_ready handler) that is the only place we can add new peers.
  */
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
-					      struct rxrpc_peer *prealloc)
+void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
 {
-	struct rxrpc_peer *peer;
 	struct rxrpc_net *rxnet = local->rxnet;
 	unsigned long hash_key;
 
-	hash_key = rxrpc_peer_hash_key(local, &prealloc->srx);
-	prealloc->local = local;
-	rxrpc_init_peer(prealloc, hash_key);
+	hash_key = rxrpc_peer_hash_key(local, &peer->srx);
+	peer->local = local;
+	rxrpc_init_peer(peer, hash_key);
 
 	spin_lock(&rxnet->peer_hash_lock);
-
-	/* Need to check that we aren't racing with someone else */
-	peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key);
-	if (peer && !rxrpc_get_peer_maybe(peer))
-		peer = NULL;
-	if (!peer) {
-		peer = prealloc;
-		hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
-		list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
-	}
-
+	hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
+	list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
 	spin_unlock(&rxnet->peer_hash_lock);
-	return peer;
 }
 
 /*
@@ -416,21 +401,6 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
 }
 
 /*
- * Queue a peer record. This passes the caller's ref to the workqueue.
- */
-void __rxrpc_queue_peer_error(struct rxrpc_peer *peer)
-{
-	const void *here = __builtin_return_address(0);
-	int n;
-
-	n = atomic_read(&peer->usage);
-	if (rxrpc_queue_work(&peer->error_distributor))
-		trace_rxrpc_peer(peer, rxrpc_peer_queued_error, n, here);
-	else
-		rxrpc_put_peer(peer);
-}
-
-/*
  * Discard a peer record.
  */
 static void __rxrpc_put_peer(struct rxrpc_peer *peer)
diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h
index 93da73bf7098..f9cb83c938f3 100644
--- a/net/rxrpc/protocol.h
+++ b/net/rxrpc/protocol.h
@@ -50,7 +50,6 @@ struct rxrpc_wire_header {
 #define RXRPC_PACKET_TYPE_10		10	/* Ignored */
 #define RXRPC_PACKET_TYPE_11		11	/* Ignored */
 #define RXRPC_PACKET_TYPE_VERSION	13	/* version string request */
-#define RXRPC_N_PACKET_TYPES		14	/* number of packet types (incl type 0) */
 
 	uint8_t		flags;		/* packet flags */
 #define RXRPC_CLIENT_INITIATED	0x01	/* signifies a packet generated by a client */
@@ -72,20 +71,6 @@ struct rxrpc_wire_header {
 
 } __packed;
 
-#define RXRPC_SUPPORTED_PACKET_TYPES ( \
-	(1 << RXRPC_PACKET_TYPE_DATA) | \
-	(1 << RXRPC_PACKET_TYPE_ACK) | \
-	(1 << RXRPC_PACKET_TYPE_BUSY) | \
-	(1 << RXRPC_PACKET_TYPE_ABORT) | \
-	(1 << RXRPC_PACKET_TYPE_ACKALL) | \
-	(1 << RXRPC_PACKET_TYPE_CHALLENGE) | \
-	(1 << RXRPC_PACKET_TYPE_RESPONSE) | \
-	/*(1 << RXRPC_PACKET_TYPE_DEBUG) | */ \
-	(1 << RXRPC_PACKET_TYPE_PARAMS) | \
-	(1 << RXRPC_PACKET_TYPE_10) | \
-	(1 << RXRPC_PACKET_TYPE_11) | \
-	(1 << RXRPC_PACKET_TYPE_VERSION))
-
 /*****************************************************************************/
 /*
  * jumbo packet secondary header
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 23273b5303fd..8525de811616 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -135,7 +135,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
 	}
 
 	td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
-	if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
+	if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
 		if (exists)
 			tcf_idr_release(*a, bind);
 		else
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 98541c6399db..85e73f48e48f 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1311,6 +1311,18 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
  * Delete/get qdisc.
  */
 
+const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
+	[TCA_KIND]		= { .type = NLA_STRING },
+	[TCA_OPTIONS]		= { .type = NLA_NESTED },
+	[TCA_RATE]		= { .type = NLA_BINARY,
+				    .len = sizeof(struct tc_estimator) },
+	[TCA_STAB]		= { .type = NLA_NESTED },
+	[TCA_DUMP_INVISIBLE]	= { .type = NLA_FLAG },
+	[TCA_CHAIN]		= { .type = NLA_U32 },
+	[TCA_INGRESS_BLOCK]	= { .type = NLA_U32 },
+	[TCA_EGRESS_BLOCK]	= { .type = NLA_U32 },
+};
+
 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 			struct netlink_ext_ack *extack)
 {
@@ -1327,7 +1339,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
-	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+			  extack);
 	if (err < 0)
 		return err;
 
@@ -1411,7 +1424,8 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 
 replay:
 	/* Reinit, just in case something touches this. */
-	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+			  extack);
 	if (err < 0)
 		return err;
 
@@ -1645,7 +1659,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 	idx = 0;
 	ASSERT_RTNL();
 
-	err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX, NULL, NULL);
+	err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
+			  rtm_tca_policy, NULL);
 	if (err < 0)
 		return err;
 
@@ -1864,7 +1879,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
-	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+			  extack);
 	if (err < 0)
 		return err;
 
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index d74d00b29942..42191ed9902b 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1048,7 +1048,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 		if (!ctx->packet || !ctx->packet->has_cookie_echo)
 			return;
 
-		/* fallthru */
+		/* fall through */
 	case SCTP_STATE_ESTABLISHED:
 	case SCTP_STATE_SHUTDOWN_PENDING:
 	case SCTP_STATE_SHUTDOWN_RECEIVED:
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 418f03d0be90..645c16052052 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -609,16 +609,18 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
 
 	switch (evt) {
 	case NETDEV_CHANGE:
-		if (netif_carrier_ok(dev))
+		if (netif_carrier_ok(dev) && netif_oper_up(dev)) {
+			test_and_set_bit_lock(0, &b->up);
 			break;
-		/* else: fall through */
-	case NETDEV_UP:
-		test_and_set_bit_lock(0, &b->up);
-		break;
+		}
+		/* fall through */
 	case NETDEV_GOING_DOWN:
 		clear_bit_unlock(0, &b->up);
 		tipc_reset_bearer(net, b);
 		break;
+	case NETDEV_UP:
+		test_and_set_bit_lock(0, &b->up);
+		break;
 	case NETDEV_CHANGEMTU:
 		if (tipc_mtu_bad(dev, 0)) {
 			bearer_disable(net, b);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b1f0bee54eac..fb886b525d95 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -410,6 +410,11 @@ char *tipc_link_name(struct tipc_link *l)
 	return l->name;
 }
 
+u32 tipc_link_state(struct tipc_link *l)
+{
+	return l->state;
+}
+
 /**
  * tipc_link_create - create a new link
  * @n: pointer to associated node
@@ -841,9 +846,14 @@ void tipc_link_reset(struct tipc_link *l)
 	l->in_session = false;
 	l->session++;
 	l->mtu = l->advertised_mtu;
+	spin_lock_bh(&l->wakeupq.lock);
+	spin_lock_bh(&l->inputq->lock);
+	skb_queue_splice_init(&l->wakeupq, l->inputq);
+	spin_unlock_bh(&l->inputq->lock);
+	spin_unlock_bh(&l->wakeupq.lock);
+
 	__skb_queue_purge(&l->transmq);
 	__skb_queue_purge(&l->deferdq);
-	skb_queue_splice_init(&l->wakeupq, l->inputq);
 	__skb_queue_purge(&l->backlogq);
 	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
 	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
@@ -1380,6 +1390,36 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
 	__skb_queue_tail(xmitq, skb);
 }
 
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
+				    struct sk_buff_head *xmitq)
+{
+	u32 onode = tipc_own_addr(l->net);
+	struct tipc_msg *hdr, *ihdr;
+	struct sk_buff_head tnlq;
+	struct sk_buff *skb;
+	u32 dnode = l->addr;
+
+	skb_queue_head_init(&tnlq);
+	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
+			      INT_H_SIZE, BASIC_H_SIZE,
+			      dnode, onode, 0, 0, 0);
+	if (!skb) {
+		pr_warn("%sunable to create tunnel packet\n", link_co_err);
+		return;
+	}
+
+	hdr = buf_msg(skb);
+	msg_set_msgcnt(hdr, 1);
+	msg_set_bearer_id(hdr, l->peer_bearer_id);
+
+	ihdr = (struct tipc_msg *)msg_data(hdr);
+	tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+		      BASIC_H_SIZE, dnode);
+	msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
+	__skb_queue_tail(&tnlq, skb);
+	tipc_link_xmit(l, &tnlq, xmitq);
+}
+
 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
  * with contents of the link's transmit and backlog queues.
  */
@@ -1476,6 +1516,9 @@ bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
 		return false;
 	if (session != curr_session)
 		return false;
+	/* Extra sanity check */
+	if (!link_is_up(l) && msg_ack(hdr))
+		return false;
 	if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
 		return true;
 	/* Accept only STATE with new sequence number */
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 7bc494a33fdf..90488c538a4e 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -88,6 +88,8 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
 		      struct tipc_link **link);
 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
 			   int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *tnl,
+				    struct sk_buff_head *xmitq);
 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
 int tipc_link_fsm_evt(struct tipc_link *l, int evt);
 bool tipc_link_is_up(struct tipc_link *l);
@@ -107,6 +109,7 @@ u16 tipc_link_rcv_nxt(struct tipc_link *l);
 u16 tipc_link_acked(struct tipc_link *l);
 u32 tipc_link_id(struct tipc_link *l);
 char *tipc_link_name(struct tipc_link *l);
+u32 tipc_link_state(struct tipc_link *l);
 char tipc_link_plane(struct tipc_link *l);
 int tipc_link_prio(struct tipc_link *l);
 int tipc_link_window(struct tipc_link *l);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 68014f1b6976..2afc4f8c37a7 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -111,6 +111,7 @@ struct tipc_node {
 	int action_flags;
 	struct list_head list;
 	int state;
+	bool failover_sent;
 	u16 sync_point;
 	int link_cnt;
 	u16 working_links;
@@ -680,6 +681,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
 	*slot0 = bearer_id;
 	*slot1 = bearer_id;
 	tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
+	n->failover_sent = false;
 	n->action_flags |= TIPC_NOTIFY_NODE_UP;
 	tipc_link_set_active(nl, true);
 	tipc_bcast_add_peer(n->net, nl, xmitq);
@@ -911,6 +913,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
 	bool reset = true;
 	char *if_name;
 	unsigned long intv;
+	u16 session;
 
 	*dupl_addr = false;
 	*respond = false;
@@ -997,9 +1000,10 @@ void tipc_node_check_dest(struct net *net, u32 addr,
 			goto exit;
 
 		if_name = strchr(b->name, ':') + 1;
+		get_random_bytes(&session, sizeof(u16));
 		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
 				      b->net_plane, b->mtu, b->priority,
-				      b->window, mod(tipc_net(net)->random),
+				      b->window, session,
 				      tipc_own_addr(net), addr, peer_id,
 				      n->capabilities,
 				      tipc_bc_sndlink(n->net), n->bc_entry.link,
@@ -1615,6 +1619,14 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
 			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
 							tipc_link_inputq(l));
 		}
+		/* If parallel link was already down, and this happened before
+		 * the tunnel link came up, FAILOVER was never sent. Ensure that
+		 * FAILOVER is sent to get peer out of NODE_FAILINGOVER state.
+		 */
+		if (n->state != NODE_FAILINGOVER && !n->failover_sent) {
+			tipc_link_create_dummy_tnl_msg(l, xmitq);
+			n->failover_sent = true;
+		}
 		/* If pkts arrive out of order, use lowest calculated syncpt */
 		if (less(syncpt, n->sync_point))
 			n->sync_point = syncpt;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3f03ddd0e35b..b6f99b021d09 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1419,8 +1419,10 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
 	/* Handle implicit connection setup */
 	if (unlikely(dest)) {
 		rc = __tipc_sendmsg(sock, m, dlen);
-		if (dlen && (dlen == rc))
+		if (dlen && dlen == rc) {
+			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
 			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
+		}
 		return rc;
 	}
 
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4b8ec659e797..176edfefcbaa 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3756,6 +3756,7 @@ static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
 			return false;
 
 		/* check availability */
+		ridx = array_index_nospec(ridx, IEEE80211_HT_MCS_MASK_LEN);
 		if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
 			mcs[ridx] |= rbit;
 		else
@@ -10230,7 +10231,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	s32 last, low, high;
 	u32 hyst;
-	int i, n;
+	int i, n, low_index;
 	int err;
 
 	/* RSSI reporting disabled? */
@@ -10267,10 +10268,19 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
 		if (last < wdev->cqm_config->rssi_thresholds[i])
 			break;
 
-	low = i > 0 ?
-		(wdev->cqm_config->rssi_thresholds[i - 1] - hyst) : S32_MIN;
-	high = i < n ?
-		(wdev->cqm_config->rssi_thresholds[i] + hyst - 1) : S32_MAX;
+	low_index = i - 1;
+	if (low_index >= 0) {
+		low_index = array_index_nospec(low_index, n);
+		low = wdev->cqm_config->rssi_thresholds[low_index] - hyst;
+	} else {
+		low = S32_MIN;
+	}
+	if (i < n) {
+		i = array_index_nospec(i, n);
+		high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1;
+	} else {
+		high = S32_MAX;
+	}
 
 	return rdev_set_cqm_rssi_range_config(rdev, dev, low, high);
 }
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2f702adf2912..24cfa2776f50 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2661,11 +2661,12 @@ static void reg_process_hint(struct regulatory_request *reg_request)
 {
 	struct wiphy *wiphy = NULL;
 	enum reg_request_treatment treatment;
+	enum nl80211_reg_initiator initiator = reg_request->initiator;
 
 	if (reg_request->wiphy_idx != WIPHY_IDX_INVALID)
 		wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
 
-	switch (reg_request->initiator) {
+	switch (initiator) {
 	case NL80211_REGDOM_SET_BY_CORE:
 		treatment = reg_process_hint_core(reg_request);
 		break;
@@ -2683,7 +2684,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
 		treatment = reg_process_hint_country_ie(wiphy, reg_request);
 		break;
 	default:
-		WARN(1, "invalid initiator %d\n", reg_request->initiator);
+		WARN(1, "invalid initiator %d\n", initiator);
 		goto out_free;
 	}
 
@@ -2698,7 +2699,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
 	 */
 	if (treatment == REG_REQ_ALREADY_SET && wiphy &&
 	    wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
-		wiphy_update_regulatory(wiphy, reg_request->initiator);
+		wiphy_update_regulatory(wiphy, initiator);
 		wiphy_all_share_dfs_chan_state(wiphy);
 		reg_check_channels();
 	}
@@ -2867,6 +2868,7 @@ static int regulatory_hint_core(const char *alpha2)
 	request->alpha2[0] = alpha2[0];
 	request->alpha2[1] = alpha2[1];
 	request->initiator = NL80211_REGDOM_SET_BY_CORE;
+	request->wiphy_idx = WIPHY_IDX_INVALID;
 
 	queue_regulatory_request(request);
 
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index d36c3eb7b931..d0e7472dd9fd 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1058,13 +1058,23 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
 		return NULL;
 }
 
+/*
+ * Update RX channel information based on the available frame payload
+ * information. This is mainly for the 2.4 GHz band where frames can be received
+ * from neighboring channels and the Beacon frames use the DSSS Parameter Set
+ * element to indicate the current (transmitting) channel, but this might also
+ * be needed on other bands if RX frequency does not match with the actual
+ * operating channel of a BSS.
+ */
 static struct ieee80211_channel *
 cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
-			 struct ieee80211_channel *channel)
+			 struct ieee80211_channel *channel,
+			 enum nl80211_bss_scan_width scan_width)
 {
 	const u8 *tmp;
 	u32 freq;
 	int channel_number = -1;
+	struct ieee80211_channel *alt_channel;
 
 	tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
 	if (tmp && tmp[1] == 1) {
@@ -1078,16 +1088,45 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
 		}
 	}
 
-	if (channel_number < 0)
+	if (channel_number < 0) {
+		/* No channel information in frame payload */
 		return channel;
+	}
 
 	freq = ieee80211_channel_to_frequency(channel_number, channel->band);
-	channel = ieee80211_get_channel(wiphy, freq);
-	if (!channel)
-		return NULL;
-	if (channel->flags & IEEE80211_CHAN_DISABLED)
+	alt_channel = ieee80211_get_channel(wiphy, freq);
+	if (!alt_channel) {
+		if (channel->band == NL80211_BAND_2GHZ) {
+			/*
+			 * Better not allow unexpected channels when that could
+			 * be going beyond the 1-11 range (e.g., discovering
+			 * BSS on channel 12 when radio is configured for
+			 * channel 11.
+			 */
+			return NULL;
+		}
+
+		/* No match for the payload channel number - ignore it */
+		return channel;
+	}
+
+	if (scan_width == NL80211_BSS_CHAN_WIDTH_10 ||
+	    scan_width == NL80211_BSS_CHAN_WIDTH_5) {
+		/*
+		 * Ignore channel number in 5 and 10 MHz channels where there
+		 * may not be an n:1 or 1:n mapping between frequencies and
+		 * channel numbers.
+		 */
+		return channel;
+	}
+
+	/*
+	 * Use the channel determined through the payload channel number
+	 * instead of the RX channel reported by the driver.
+	 */
+	if (alt_channel->flags & IEEE80211_CHAN_DISABLED)
 		return NULL;
-	return channel;
+	return alt_channel;
 }
 
 /* Returned bss is reference counted and must be cleaned up appropriately. */
@@ -1112,7 +1151,8 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
 	    (data->signal < 0 || data->signal > 100)))
 		return NULL;
 
-	channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan);
+	channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan,
+					   data->scan_width);
 	if (!channel)
 		return NULL;
 
@@ -1210,7 +1250,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
 		return NULL;
 
 	channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
-					   ielen, data->chan);
+					   ielen, data->chan, data->scan_width);
 	if (!channel)
 		return NULL;
 
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 167f7025ac98..06943d9c9835 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1278,12 +1278,16 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
 	if (err)
 		return err;
 
-	if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)))
-		return -EOPNOTSUPP;
+	if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) {
+		err = -EOPNOTSUPP;
+		goto free;
+	}
 
 	rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
 
-	return 0;
+free:
+	cfg80211_sinfo_release_content(&sinfo);
+	return err;
 }
 
 /* Get wireless statistics. Called by /proc/net/wireless and by SIOCGIWSTATS */
@@ -1293,7 +1297,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	/* we are under RTNL - globally locked - so can use static structs */
 	static struct iw_statistics wstats;
-	static struct station_info sinfo;
+	static struct station_info sinfo = {};
 	u8 bssid[ETH_ALEN];
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)
@@ -1352,6 +1356,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
 	if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))
 		wstats.discard.retries = sinfo.tx_failed;
 
+	cfg80211_sinfo_release_content(&sinfo);
+
 	return &wstats;
 }
 
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index b89c9c7f8c5c..be3520e429c9 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -458,6 +458,7 @@ resume:
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
 			goto drop;
 		}
+		crypto_done = false;
 	} while (!err);
 
 	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 45ba07ab3e4f..261995d37ced 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -100,6 +100,10 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
 		spin_unlock_bh(&x->lock);
 
 		skb_dst_force(skb);
+		if (!skb_dst(skb)) {
+			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+			goto error_nolock;
+		}
 
 		if (xfrm_offload(skb)) {
 			x->type_offload->encap(x, skb);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 3110c3fbee20..f094d4b3520d 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2491,6 +2491,10 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
 	}
 
 	skb_dst_force(skb);
+	if (!skb_dst(skb)) {
+		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
+		return 0;
+	}
 
 	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
 	if (IS_ERR(dst)) {
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 4791aa8b8185..df7ca2dabc48 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -151,10 +151,16 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
 	err = -EINVAL;
 	switch (p->family) {
 	case AF_INET:
+		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+			goto out;
+
 		break;
 
 	case AF_INET6:
 #if IS_ENABLED(CONFIG_IPV6)
+		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+			goto out;
+
 		break;
 #else
 		err = -EAFNOSUPPORT;
@@ -1396,10 +1402,16 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
 
 	switch (p->sel.family) {
 	case AF_INET:
+		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+			return -EINVAL;
+
 		break;
 
 	case AF_INET6:
 #if IS_ENABLED(CONFIG_IPV6)
+		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+			return -EINVAL;
+
 		break;
 #else
 		return -EAFNOSUPPORT;
@@ -1480,6 +1492,9 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
 		    (ut[i].family != prev_family))
 			return -EINVAL;
 
+		if (ut[i].mode >= XFRM_MODE_MAX)
+			return -EINVAL;
+
 		prev_family = ut[i].family;
 
 		switch (ut[i].family) {
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index b5282cbbe489..617ff1aa818f 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -145,9 +145,11 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
 	if (!acomp->ops) {
 		request_module("i915");
 		/* 10s timeout */
-		wait_for_completion_timeout(&bind_complete, 10 * 1000);
+		wait_for_completion_timeout(&bind_complete,
+					    msecs_to_jiffies(10 * 1000));
 	}
 	if (!acomp->ops) {
+		dev_info(bus->dev, "couldn't bind with audio component\n");
 		snd_hdac_acomp_exit(bus);
 		return -ENODEV;
 	}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 1d117f00d04d..3ac7ba9b342d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6409,6 +6409,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
 	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+	SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
 	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
 	SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
 	SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index d78aed86af09..8ff8cb1a11f4 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -234,6 +234,7 @@ int main(int argc, char *argv[])
 			break;
 
 		default:
+			error = HV_E_FAIL;
 			syslog(LOG_ERR, "Unknown operation: %d",
 			       buffer.hdr.operation);
 
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 439b8a27488d..195ba486640f 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -1325,7 +1325,7 @@ class Tui(object):
         msg = ''
         while True:
             self.screen.erase()
-            self.screen.addstr(0, 0, 'Set update interval (defaults to %fs).' %
+            self.screen.addstr(0, 0, 'Set update interval (defaults to %.1fs).' %
                                DELAY_DEFAULT, curses.A_BOLD)
             self.screen.addstr(4, 0, msg)
             self.screen.addstr(2, 0, 'Change delay from %.1fs to ' %
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
index 642d4e12abea..eec2663261f2 100644
--- a/tools/testing/selftests/rseq/param_test.c
+++ b/tools/testing/selftests/rseq/param_test.c
@@ -56,15 +56,13 @@ unsigned int yield_mod_cnt, nr_abort;
 		printf(fmt, ## __VA_ARGS__); \
 	} while (0)
 
-#if defined(__x86_64__) || defined(__i386__)
+#ifdef __i386__
 
 #define INJECT_ASM_REG	"eax"
 
 #define RSEQ_INJECT_CLOBBER \
 	, INJECT_ASM_REG
 
-#ifdef __i386__
-
 #define RSEQ_INJECT_ASM(n) \
 	"mov asm_loop_cnt_" #n ", %%" INJECT_ASM_REG "\n\t" \
 	"test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
@@ -76,9 +74,16 @@ unsigned int yield_mod_cnt, nr_abort;
 
 #elif defined(__x86_64__)
 
+#define INJECT_ASM_REG_P	"rax"
+#define INJECT_ASM_REG		"eax"
+
+#define RSEQ_INJECT_CLOBBER \
+	, INJECT_ASM_REG_P \
+	, INJECT_ASM_REG
+
 #define RSEQ_INJECT_ASM(n) \
-	"lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG "\n\t" \
-	"mov (%%" INJECT_ASM_REG "), %%" INJECT_ASM_REG "\n\t" \
+	"lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG_P "\n\t" \
+	"mov (%%" INJECT_ASM_REG_P "), %%" INJECT_ASM_REG "\n\t" \
 	"test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
 	"jz 333f\n\t" \
 	"222:\n\t" \
@@ -86,10 +91,6 @@ unsigned int yield_mod_cnt, nr_abort;
86 "jnz 222b\n\t" \ 91 "jnz 222b\n\t" \
87 "333:\n\t" 92 "333:\n\t"
88 93
89#else
90#error "Unsupported architecture"
91#endif
92
93#elif defined(__s390__) 94#elif defined(__s390__)
94 95
95#define RSEQ_INJECT_INPUT \ 96#define RSEQ_INJECT_INPUT \
diff --git a/tools/testing/selftests/x86/test_vdso.c b/tools/testing/selftests/x86/test_vdso.c
index 235259011704..35edd61d1663 100644
--- a/tools/testing/selftests/x86/test_vdso.c
+++ b/tools/testing/selftests/x86/test_vdso.c
@@ -17,6 +17,7 @@
 #include <errno.h>
 #include <sched.h>
 #include <stdbool.h>
+#include <limits.h>
 
 #ifndef SYS_getcpu
 # ifdef __x86_64__
@@ -31,6 +32,14 @@
 
 int nerrs = 0;
 
+typedef int (*vgettime_t)(clockid_t, struct timespec *);
+
+vgettime_t vdso_clock_gettime;
+
+typedef long (*vgtod_t)(struct timeval *tv, struct timezone *tz);
+
+vgtod_t vdso_gettimeofday;
+
 typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
 
 getcpu_t vgetcpu;
@@ -95,6 +104,15 @@ static void fill_function_pointers()
 		printf("Warning: failed to find getcpu in vDSO\n");
 
 	vgetcpu = (getcpu_t) vsyscall_getcpu();
+
+	vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
+	if (!vdso_clock_gettime)
+		printf("Warning: failed to find clock_gettime in vDSO\n");
+
+	vdso_gettimeofday = (vgtod_t)dlsym(vdso, "__vdso_gettimeofday");
+	if (!vdso_gettimeofday)
+		printf("Warning: failed to find gettimeofday in vDSO\n");
+
 }
 
 static long sys_getcpu(unsigned * cpu, unsigned * node,
@@ -103,6 +121,16 @@ static long sys_getcpu(unsigned * cpu, unsigned * node,
 	return syscall(__NR_getcpu, cpu, node, cache);
 }
 
+static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
+{
+	return syscall(__NR_clock_gettime, id, ts);
+}
+
+static inline int sys_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+	return syscall(__NR_gettimeofday, tv, tz);
+}
+
 static void test_getcpu(void)
 {
 	printf("[RUN]\tTesting getcpu...\n");
108 printf("[RUN]\tTesting getcpu...\n"); 136 printf("[RUN]\tTesting getcpu...\n");
@@ -155,10 +183,154 @@ static void test_getcpu(void)
155 } 183 }
156} 184}
157 185
186static bool ts_leq(const struct timespec *a, const struct timespec *b)
187{
188 if (a->tv_sec != b->tv_sec)
189 return a->tv_sec < b->tv_sec;
190 else
191 return a->tv_nsec <= b->tv_nsec;
192}
193
194static bool tv_leq(const struct timeval *a, const struct timeval *b)
195{
196 if (a->tv_sec != b->tv_sec)
197 return a->tv_sec < b->tv_sec;
198 else
199 return a->tv_usec <= b->tv_usec;
200}
201
202static char const * const clocknames[] = {
203 [0] = "CLOCK_REALTIME",
204 [1] = "CLOCK_MONOTONIC",
205 [2] = "CLOCK_PROCESS_CPUTIME_ID",
206 [3] = "CLOCK_THREAD_CPUTIME_ID",
207 [4] = "CLOCK_MONOTONIC_RAW",
208 [5] = "CLOCK_REALTIME_COARSE",
209 [6] = "CLOCK_MONOTONIC_COARSE",
210 [7] = "CLOCK_BOOTTIME",
211 [8] = "CLOCK_REALTIME_ALARM",
212 [9] = "CLOCK_BOOTTIME_ALARM",
213 [10] = "CLOCK_SGI_CYCLE",
214 [11] = "CLOCK_TAI",
215};
216
217static void test_one_clock_gettime(int clock, const char *name)
218{
219 struct timespec start, vdso, end;
220 int vdso_ret, end_ret;
221
222 printf("[RUN]\tTesting clock_gettime for clock %s (%d)...\n", name, clock);
223
224 if (sys_clock_gettime(clock, &start) < 0) {
225 if (errno == EINVAL) {
226 vdso_ret = vdso_clock_gettime(clock, &vdso);
227 if (vdso_ret == -EINVAL) {
228 printf("[OK]\tNo such clock.\n");
229 } else {
230 printf("[FAIL]\tNo such clock, but __vdso_clock_gettime returned %d\n", vdso_ret);
231 nerrs++;
232 }
233 } else {
234 printf("[WARN]\t clock_gettime(%d) syscall returned error %d\n", clock, errno);
235 }
236 return;
237 }
238
239 vdso_ret = vdso_clock_gettime(clock, &vdso);
240 end_ret = sys_clock_gettime(clock, &end);
241
242 if (vdso_ret != 0 || end_ret != 0) {
243 printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
244 vdso_ret, errno);
245 nerrs++;
246 return;
247 }
248
249 printf("\t%llu.%09ld %llu.%09ld %llu.%09ld\n",
250 (unsigned long long)start.tv_sec, start.tv_nsec,
251 (unsigned long long)vdso.tv_sec, vdso.tv_nsec,
252 (unsigned long long)end.tv_sec, end.tv_nsec);
253
254 if (!ts_leq(&start, &vdso) || !ts_leq(&vdso, &end)) {
255 printf("[FAIL]\tTimes are out of sequence\n");
256 nerrs++;
257 }
258}
259
260static void test_clock_gettime(void)
261{
262 for (int clock = 0; clock < sizeof(clocknames) / sizeof(clocknames[0]);
263 clock++) {
264 test_one_clock_gettime(clock, clocknames[clock]);
265 }
266
267 /* Also test some invalid clock ids */
268 test_one_clock_gettime(-1, "invalid");
269 test_one_clock_gettime(INT_MIN, "invalid");
270 test_one_clock_gettime(INT_MAX, "invalid");
271}
272
273static void test_gettimeofday(void)
274{
275 struct timeval start, vdso, end;
276 struct timezone sys_tz, vdso_tz;
277 int vdso_ret, end_ret;
278
279 if (!vdso_gettimeofday)
280 return;
281
282 printf("[RUN]\tTesting gettimeofday...\n");
283
284 if (sys_gettimeofday(&start, &sys_tz) < 0) {
285 printf("[FAIL]\tsys_gettimeofday failed (%d)\n", errno);
286 nerrs++;
287 return;
288 }
289
290 vdso_ret = vdso_gettimeofday(&vdso, &vdso_tz);
291 end_ret = sys_gettimeofday(&end, NULL);
292
293 if (vdso_ret != 0 || end_ret != 0) {
294 printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
295 vdso_ret, errno);
296 nerrs++;
297 return;
298 }
299
300 printf("\t%llu.%06ld %llu.%06ld %llu.%06ld\n",
301 (unsigned long long)start.tv_sec, start.tv_usec,
302 (unsigned long long)vdso.tv_sec, vdso.tv_usec,
303 (unsigned long long)end.tv_sec, end.tv_usec);
304
305 if (!tv_leq(&start, &vdso) || !tv_leq(&vdso, &end)) {
306 printf("[FAIL]\tTimes are out of sequence\n");
307 nerrs++;
308 }
309
310 if (sys_tz.tz_minuteswest == vdso_tz.tz_minuteswest &&
311 sys_tz.tz_dsttime == vdso_tz.tz_dsttime) {
312 printf("[OK]\ttimezones match: minuteswest=%d, dsttime=%d\n",
313 sys_tz.tz_minuteswest, sys_tz.tz_dsttime);
314 } else {
315 printf("[FAIL]\ttimezones do not match\n");
316 nerrs++;
317 }
318
319 /* And make sure that passing NULL for tz doesn't crash. */
320 vdso_gettimeofday(&vdso, NULL);
321}
322
158int main(int argc, char **argv) 323int main(int argc, char **argv)
159{ 324{
160 fill_function_pointers(); 325 fill_function_pointers();
161 326
327 test_clock_gettime();
328 test_gettimeofday();
329
330 /*
331 * Test getcpu() last so that, if something goes wrong setting affinity,
332 * we still run the other tests.
333 */
162 test_getcpu(); 334 test_getcpu();
163 335
164 return nerrs ? 1 : 0; 336 return nerrs ? 1 : 0;