commit 68aaf4459556b1f9370c259fd486aecad2257552
tree   99d92536a3263634969be6b70a96facea85a0df1
parent d00ee466a07eb9182ad3caf6140c7ebb527b4c64
parent 9e8312f5e160ade069e131d54ab8652cf0e86e1a
Author:    David S. Miller <davem@davemloft.net>  2019-08-27 17:23:31 -0400
Committer: David S. Miller <davem@davemloft.net>  2019-08-27 17:23:31 -0400

    Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

    Minor conflict in r8169, bug fix had two versions in net and net-next,
    take the net-next hunks.

    Signed-off-by: David S. Miller <davem@davemloft.net>
 Documentation/PCI/index.rst | 2
 Documentation/PCI/pciebus-howto.rst (renamed from Documentation/PCI/picebus-howto.rst) | 0
 Documentation/admin-guide/kernel-parameters.txt | 7
 Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt | 30
 MAINTAINERS | 31
 Makefile | 2
 arch/arc/boot/dts/Makefile | 3
 arch/arc/include/asm/entry-arcv2.h | 2
 arch/arc/include/asm/linkage.h | 8
 arch/arc/include/asm/mach_desc.h | 3
 arch/arc/kernel/mcip.c | 60
 arch/arc/kernel/unwind.c | 4
 arch/arc/mm/dma.c | 2
 arch/arc/plat-hsdk/platform.c | 87
 arch/arm/Kconfig | 4
 arch/arm/mach-rpc/riscpc.c | 1
 arch/arm/mm/Kconfig | 4
 arch/mips/include/asm/octeon/cvmx-sli-defs.h | 1
 arch/parisc/include/asm/pgtable.h | 3
 arch/s390/net/bpf_jit_comp.c | 12
 arch/um/include/shared/timer-internal.h | 14
 arch/um/kernel/process.c | 2
 arch/um/kernel/time.c | 16
 arch/x86/boot/compressed/pgtable_64.c | 13
 arch/x86/events/core.c | 2
 arch/x86/include/asm/bootparam_utils.h | 2
 arch/x86/include/asm/intel-family.h | 15
 arch/x86/include/asm/msr-index.h | 1
 arch/x86/include/asm/nospec-branch.h | 2
 arch/x86/kernel/apic/apic.c | 68
 arch/x86/kernel/cpu/amd.c | 66
 arch/x86/kvm/lapic.c | 5
 arch/x86/kvm/mmu.c | 33
 arch/x86/kvm/svm.c | 1
 arch/x86/power/cpu.c | 86
 drivers/atm/Kconfig | 2
 drivers/auxdisplay/ht16k33.c | 4
 drivers/block/drbd/drbd_main.c | 2
 drivers/clk/clk.c | 49
 drivers/clk/samsung/clk-exynos5-subcmu.c | 16
 drivers/clk/samsung/clk-exynos5-subcmu.h | 2
 drivers/clk/samsung/clk-exynos5250.c | 7
 drivers/clk/samsung/clk-exynos5420.c | 162
 drivers/clk/socfpga/clk-periph-s10.c | 2
 drivers/dma/fsldma.c | 1
 drivers/gpio/gpiolib-of.c | 2
 drivers/gpio/gpiolib.c | 36
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 9
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4
 drivers/gpu/drm/amd/amdgpu/nv.c | 1
 drivers/gpu/drm/amd/amdgpu/soc15.c | 5
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 16
 drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 2
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 6
 drivers/gpu/drm/arm/display/komeda/komeda_dev.c | 9
 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c | 19
 drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h | 3
 drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c | 5
 drivers/gpu/drm/arm/display/komeda/komeda_kms.c | 5
 drivers/gpu/drm/i915/display/intel_ddi.c | 4
 drivers/gpu/drm/i915/intel_drv.h | 2
 drivers/gpu/drm/mediatek/mtk_drm_drv.c | 55
 drivers/gpu/drm/mediatek/mtk_drm_drv.h | 2
 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c | 24
 drivers/gpu/drm/omapdrm/omap_drv.c | 2
 drivers/gpu/drm/rcar-du/rcar_lvds.c | 6
 drivers/gpu/drm/sun4i/sun4i_tcon.c | 1
 drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 1
 drivers/hid/hid-cp2112.c | 8
 drivers/hid/hid-logitech-hidpp.c | 22
 drivers/hid/intel-ish-hid/ipc/hw-ish.h | 1
 drivers/hid/intel-ish-hid/ipc/pci-ish.c | 1
 drivers/hid/wacom_wac.c | 7
 drivers/hv/channel.c | 2
 drivers/hv/hyperv_vmbus.h | 2
 drivers/infiniband/core/cma.c | 6
 drivers/infiniband/core/counters.c | 10
 drivers/infiniband/core/nldev.c | 3
 drivers/infiniband/core/restrack.c | 15
 drivers/infiniband/core/umem.c | 7
 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 8
 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 11
 drivers/infiniband/hw/hfi1/fault.c | 12
 drivers/infiniband/hw/hfi1/tid_rdma.c | 76
 drivers/infiniband/hw/mlx4/mad.c | 4
 drivers/infiniband/hw/mlx5/main.c | 6
 drivers/infiniband/hw/mlx5/mem.c | 5
 drivers/infiniband/hw/mlx5/mlx5_ib.h | 14
 drivers/infiniband/hw/mlx5/mr.c | 7
 drivers/infiniband/hw/mlx5/odp.c | 17
 drivers/infiniband/hw/mlx5/qp.c | 24
 drivers/infiniband/sw/siw/siw.h | 8
 drivers/infiniband/sw/siw/siw_cm.c | 82
 drivers/infiniband/sw/siw/siw_cq.c | 5
 drivers/infiniband/sw/siw/siw_mem.c | 14
 drivers/infiniband/sw/siw/siw_mem.h | 2
 drivers/infiniband/sw/siw/siw_qp.c | 2
 drivers/infiniband/sw/siw/siw_qp_rx.c | 26
 drivers/infiniband/sw/siw/siw_qp_tx.c | 80
 drivers/infiniband/sw/siw/siw_verbs.c | 40
 drivers/input/serio/hyperv-keyboard.c | 35
 drivers/iommu/dma-iommu.c | 3
 drivers/md/dm-bufio.c | 4
 drivers/md/dm-dust.c | 11
 drivers/md/dm-integrity.c | 15
 drivers/md/dm-kcopyd.c | 5
 drivers/md/dm-raid.c | 2
 drivers/md/dm-table.c | 5
 drivers/md/dm-zoned-metadata.c | 68
 drivers/md/dm-zoned-reclaim.c | 47
 drivers/md/dm-zoned-target.c | 68
 drivers/md/dm-zoned.h | 11
 drivers/md/persistent-data/dm-btree.c | 31
 drivers/md/persistent-data/dm-space-map-metadata.c | 2
 drivers/mfd/rk808.c | 6
 drivers/mtd/maps/sa1100-flash.c | 1
 drivers/net/dsa/bcm_sf2.c | 10
 drivers/net/ethernet/google/gve/gve_main.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 38
 drivers/net/ethernet/mellanox/mlx5/core/health.c | 22
 drivers/net/ethernet/mscc/ocelot_ace.c | 2
 drivers/net/ethernet/netronome/nfp/flower/offload.c | 7
 drivers/net/ethernet/qlogic/qed/qed_main.c | 4
 drivers/net/ethernet/ti/cpsw.c | 2
 drivers/net/ieee802154/mac802154_hwsim.c | 8
 drivers/net/usb/r8152.c | 5
 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 33
 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 10
 drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 17
 drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 1
 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 20
 drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | 8
 drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 9
 drivers/net/wireless/ralink/rt2x00/rt2x00.h | 1
 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 13
 drivers/nvme/host/core.c | 14
 drivers/nvme/host/multipath.c | 1
 drivers/nvme/host/nvme.h | 5
 drivers/nvme/host/pci.c | 3
 drivers/pci/quirks.c | 2
 drivers/platform/chrome/cros_ec_ishtp.c | 4
 drivers/power/supply/ab8500_charger.c | 1
 drivers/s390/net/qeth_core_main.c | 4
 drivers/scsi/libsas/sas_discover.c | 1
 drivers/scsi/lpfc/lpfc.h | 1
 drivers/scsi/lpfc/lpfc_attr.c | 15
 drivers/scsi/lpfc/lpfc_init.c | 10
 drivers/scsi/lpfc/lpfc_sli4.h | 5
 drivers/scsi/qla2xxx/qla_attr.c | 2
 drivers/scsi/qla2xxx/qla_os.c | 11
 drivers/scsi/ufs/ufshcd.c | 3
 drivers/target/target_core_user.c | 9
 drivers/video/fbdev/acornfb.c | 1
 drivers/watchdog/wdt285.c | 2
 fs/afs/cell.c | 4
 fs/afs/dir.c | 3
 fs/afs/yfsclient.c | 2
 fs/ceph/addr.c | 5
 fs/ceph/caps.c | 5
 fs/ceph/inode.c | 7
 fs/ceph/locks.c | 3
 fs/ceph/snap.c | 4
 fs/ceph/super.h | 2
 fs/ceph/xattr.c | 19
 fs/cifs/connect.c | 2
 fs/io_uring.c | 66
 fs/nfs/dir.c | 2
 fs/nfs/direct.c | 27
 fs/nfs/flexfilelayout/flexfilelayout.c | 28
 fs/nfs/inode.c | 33
 fs/nfs/internal.h | 10
 fs/nfs/nfs4file.c | 12
 fs/nfs/pagelist.c | 19
 fs/nfs/pnfs_nfs.c | 15
 fs/nfs/proc.c | 7
 fs/nfs/read.c | 35
 fs/nfs/write.c | 38
 fs/nfsd/nfscache.c | 2
 fs/nfsd/nfsctl.c | 19
 fs/read_write.c | 49
 fs/ubifs/budget.c | 2
 fs/ubifs/orphan.c | 2
 fs/ubifs/super.c | 4
 fs/userfaultfd.c | 25
 fs/xfs/xfs_ioctl32.c | 56
 fs/xfs/xfs_iops.c | 1
 fs/xfs/xfs_pnfs.c | 2
 fs/xfs/xfs_reflink.c | 63
 include/linux/ceph/buffer.h | 3
 include/linux/dma-contiguous.h | 5
 include/linux/gpio.h | 24
 include/linux/netfilter/nf_conntrack_h323_types.h | 5
 include/linux/signal.h | 15
 include/linux/sunrpc/sched.h | 1
 include/linux/timekeeper_internal.h | 5
 include/net/addrconf.h | 2
 include/net/net_namespace.h | 2
 include/net/nexthop.h | 6
 include/net/route.h | 2
 include/rdma/restrack.h | 3
 include/soc/arc/mcip.h | 11
 include/uapi/linux/jffs2.h | 5
 include/uapi/linux/netfilter/xt_nfacct.h | 5
 include/uapi/linux/rds.h | 2
 kernel/bpf/syscall.c | 30
 kernel/bpf/verifier.c | 9
 kernel/dma/contiguous.c | 8
 kernel/dma/direct.c | 10
 kernel/irq/irqdesc.c | 15
 kernel/kprobes.c | 8
 kernel/module.c | 4
 kernel/sched/core.c | 5
 kernel/sched/psi.c | 8
 kernel/signal.c | 5
 kernel/time/timekeeping.c | 5
 kernel/time/vsyscall.c | 22
 mm/huge_memory.c | 4
 mm/kasan/common.c | 10
 mm/memcontrol.c | 60
 mm/page_alloc.c | 19
 mm/z3fold.c | 89
 mm/zsmalloc.c | 78
 net/batman-adv/netlink.c | 2
 net/bridge/netfilter/ebtables.c | 8
 net/ceph/osd_client.c | 9
 net/core/filter.c | 8
 net/core/flow_dissector.c | 2
 net/core/sock.c | 31
 net/core/stream.c | 16
 net/ieee802154/socket.c | 2
 net/ipv4/fib_trie.c | 2
 net/ipv4/icmp.c | 10
 net/ipv4/igmp.c | 4
 net/ipv4/route.c | 17
 net/ipv6/addrconf.c | 19
 net/mac80211/cfg.c | 9
 net/mpls/mpls_iptunnel.c | 8
 net/ncsi/ncsi-cmd.c | 13
 net/ncsi/ncsi-rsp.c | 9
 net/netfilter/nft_flow_offload.c | 6
 net/netfilter/xt_nfacct.c | 36
 net/openvswitch/conntrack.c | 15
 net/rds/ib.c | 16
 net/rds/ib.h | 1
 net/rds/ib_cm.c | 3
 net/rds/rdma_transport.c | 10
 net/smc/smc_tx.c | 6
 net/sunrpc/clnt.c | 47
 net/sunrpc/xprt.c | 7
 net/wireless/reg.c | 2
 net/wireless/util.c | 23
 net/xdp/xdp_umem.c | 4
 net/xfrm/xfrm_policy.c | 4
 sound/core/seq/seq_clientmgr.c | 3
 sound/core/seq/seq_fifo.c | 17
 sound/core/seq/seq_fifo.h | 2
 sound/firewire/oxfw/oxfw-pcm.c | 2
 sound/pci/hda/patch_ca0132.c | 1
 sound/pci/hda/patch_conexant.c | 17
 sound/usb/line6/pcm.c | 18
 sound/usb/mixer.c | 36
 sound/usb/mixer_quirks.c | 8
 sound/usb/pcm.c | 1
 tools/bpf/bpftool/prog.c | 4
 tools/hv/hv_kvp_daemon.c | 2
 tools/testing/selftests/bpf/Makefile | 6
 tools/testing/selftests/bpf/config | 1
 tools/testing/selftests/bpf/test_btf_dump.c | 7
 tools/testing/selftests/bpf/test_cgroup_storage.c | 6
 tools/testing/selftests/bpf/test_sock.c | 7
 tools/testing/selftests/kvm/include/evmcs.h | 2
 tools/testing/selftests/kvm/lib/x86_64/processor.c | 16
 tools/testing/selftests/kvm/lib/x86_64/vmx.c | 20
 tools/testing/selftests/kvm/x86_64/evmcs_test.c | 15
 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c | 12
 tools/testing/selftests/kvm/x86_64/platform_info_test.c | 2
 tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c | 32
 virt/kvm/arm/mmio.c | 7
 virt/kvm/arm/vgic/vgic-init.c | 30
 279 files changed, 2599 insertions(+), 1287 deletions(-)
diff --git a/Documentation/PCI/index.rst b/Documentation/PCI/index.rst
index f4c6121868c3..6768305e4c26 100644
--- a/Documentation/PCI/index.rst
+++ b/Documentation/PCI/index.rst
@@ -9,7 +9,7 @@ Linux PCI Bus Subsystem
    :numbered:
 
    pci
-   picebus-howto
+   pciebus-howto
    pci-iov-howto
    msi-howto
    acpi-info
diff --git a/Documentation/PCI/picebus-howto.rst b/Documentation/PCI/pciebus-howto.rst
index f882ff62c51f..f882ff62c51f 100644
--- a/Documentation/PCI/picebus-howto.rst
+++ b/Documentation/PCI/pciebus-howto.rst
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 47d981a86e2f..4c1971960afa 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4090,6 +4090,13 @@
 			Run specified binary instead of /init from the ramdisk,
 			used for early userspace startup. See initrd.
 
+	rdrand=		[X86]
+			force - Override the decision by the kernel to hide the
+			advertisement of RDRAND support (this affects
+			certain AMD processors because of buggy BIOS
+			support, specifically around the suspend/resume
+			path).
+
 	rdt=		[HW,X86,RDT]
 			Turn on/off individual RDT features. List is:
 			cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
index 09fc02b99845..a5c1db95b3ec 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
@@ -1,20 +1,30 @@
 * ARC-HS Interrupt Distribution Unit
 
-This optional 2nd level interrupt controller can be used in SMP configurations for
-dynamic IRQ routing, load balancing of common/external IRQs towards core intc.
+This optional 2nd level interrupt controller can be used in SMP configurations
+for dynamic IRQ routing, load balancing of common/external IRQs towards core
+intc.
 
 Properties:
 
 - compatible: "snps,archs-idu-intc"
 - interrupt-controller: This is an interrupt controller.
-- #interrupt-cells: Must be <1>.
+- #interrupt-cells: Must be <1> or <2>.
 
-  Value of the cell specifies the "common" IRQ from peripheral to IDU. Number N
-  of the particular interrupt line of IDU corresponds to the line N+24 of the
-  core interrupt controller.
+  Value of the first cell specifies the "common" IRQ from peripheral to IDU.
+  Number N of the particular interrupt line of IDU corresponds to the line N+24
+  of the core interrupt controller.
 
-  intc accessed via the special ARC AUX register interface, hence "reg" property
-  is not specified.
+  The (optional) second cell specifies any of the following flags:
+    - bits[3:0] trigger type and level flags
+        1 = low-to-high edge triggered
+        2 = NOT SUPPORTED (high-to-low edge triggered)
+        4 = active high level-sensitive <<< DEFAULT
+        8 = NOT SUPPORTED (active low level-sensitive)
+    When no second cell is specified, the interrupt is assumed to be level
+    sensitive.
+
+  The interrupt controller is accessed via the special ARC AUX register
+  interface, hence "reg" property is not specified.
 
 Example:
 	core_intc: core-interrupt-controller {
diff --git a/MAINTAINERS b/MAINTAINERS
index 986085351d79..818f2a17699a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8468,11 +8468,6 @@ S: Maintained
 F: fs/io_uring.c
 F: include/uapi/linux/io_uring.h
 
-IP MASQUERADING
-M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
-S: Maintained
-F: net/ipv4/netfilter/ipt_MASQUERADE.c
-
 IPMI SUBSYSTEM
 M: Corey Minyard <minyard@acm.org>
 L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
@@ -8846,14 +8841,6 @@ F: virt/kvm/*
 F: tools/kvm/
 F: tools/testing/selftests/kvm/
 
-KERNEL VIRTUAL MACHINE FOR AMD-V (KVM/amd)
-M: Joerg Roedel <joro@8bytes.org>
-L: kvm@vger.kernel.org
-W: http://www.linux-kvm.org/
-S: Maintained
-F: arch/x86/include/asm/svm.h
-F: arch/x86/kvm/svm.c
-
 KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
 M: Marc Zyngier <maz@kernel.org>
 R: James Morse <james.morse@arm.com>
@@ -8896,7 +8883,7 @@ M: Christian Borntraeger <borntraeger@de.ibm.com>
 M: Janosch Frank <frankja@linux.ibm.com>
 R: David Hildenbrand <david@redhat.com>
 R: Cornelia Huck <cohuck@redhat.com>
-L: linux-s390@vger.kernel.org
+L: kvm@vger.kernel.org
 W: http://www.ibm.com/developerworks/linux/linux390/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
 S: Supported
@@ -8911,6 +8898,11 @@ F: tools/testing/selftests/kvm/*/s390x/
 KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
 M: Paolo Bonzini <pbonzini@redhat.com>
 M: Radim Krčmář <rkrcmar@redhat.com>
+R: Sean Christopherson <sean.j.christopherson@intel.com>
+R: Vitaly Kuznetsov <vkuznets@redhat.com>
+R: Wanpeng Li <wanpengli@tencent.com>
+R: Jim Mattson <jmattson@google.com>
+R: Joerg Roedel <joro@8bytes.org>
 L: kvm@vger.kernel.org
 W: http://www.linux-kvm.org
 T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
@@ -8918,8 +8910,12 @@ S: Supported
 F: arch/x86/kvm/
 F: arch/x86/kvm/*/
 F: arch/x86/include/uapi/asm/kvm*
+F: arch/x86/include/uapi/asm/vmx.h
+F: arch/x86/include/uapi/asm/svm.h
 F: arch/x86/include/asm/kvm*
 F: arch/x86/include/asm/pvclock-abi.h
+F: arch/x86/include/asm/svm.h
+F: arch/x86/include/asm/vmx.h
 F: arch/x86/kernel/kvm.c
 F: arch/x86/kernel/kvmclock.c
 
@@ -11099,7 +11095,7 @@ NET_FAILOVER MODULE
 M: Sridhar Samudrala <sridhar.samudrala@intel.com>
 L: netdev@vger.kernel.org
 S: Supported
-F: driver/net/net_failover.c
+F: drivers/net/net_failover.c
 F: include/net/net_failover.h
 F: Documentation/networking/net_failover.rst
 
@@ -14491,6 +14487,7 @@ F: drivers/net/phy/phylink.c
 F: drivers/net/phy/sfp*
 F: include/linux/phylink.h
 F: include/linux/sfp.h
+K: phylink
 
 SGI GRU DRIVER
 M: Dimitri Sivanich <sivanich@sgi.com>
@@ -14896,9 +14893,9 @@ F: include/linux/arm_sdei.h
 F: include/uapi/linux/arm_sdei.h
 
 SOFTWARE RAID (Multiple Disks) SUPPORT
-M: Shaohua Li <shli@kernel.org>
+M: Song Liu <song@kernel.org>
 L: linux-raid@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/shli/md.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git
 S: Supported
 F: drivers/md/Makefile
 F: drivers/md/Kconfig
diff --git a/Makefile b/Makefile
index 9fa18613566f..f125625efd60 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Bobtail Squid
 
 # *DOCUMENTATION*
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
index a83c4f5e928b..8483a86c743d 100644
--- a/arch/arc/boot/dts/Makefile
+++ b/arch/arc/boot/dts/Makefile
@@ -12,3 +12,6 @@ dtb-y := $(builtindtb-y).dtb
 # for CONFIG_OF_ALL_DTBS test
 dtstree := $(srctree)/$(src)
 dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
+
+# board-specific dtc flags
+DTC_FLAGS_hsdk += --pad 20
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index f5ae394ebe06..41b16f21beec 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -256,7 +256,7 @@
 
 .macro FAKE_RET_FROM_EXCPN
 	lr      r9, [status32]
-	bic     r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK)
+	bic     r9, r9, STATUS_AE_MASK
 	or      r9, r9, STATUS_IE_MASK
 	kflag   r9
 .endm
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index a0eeb9f8f0a9..d9ee43c6b7db 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -62,15 +62,15 @@
 #else	/* !__ASSEMBLY__ */
 
 #ifdef CONFIG_ARC_HAS_ICCM
-#define __arcfp_code __attribute__((__section__(".text.arcfp")))
+#define __arcfp_code __section(.text.arcfp)
 #else
-#define __arcfp_code __attribute__((__section__(".text")))
+#define __arcfp_code __section(.text)
 #endif
 
 #ifdef CONFIG_ARC_HAS_DCCM
-#define __arcfp_data __attribute__((__section__(".data.arcfp")))
+#define __arcfp_data __section(.data.arcfp)
 #else
-#define __arcfp_data __attribute__((__section__(".data")))
+#define __arcfp_data __section(.data)
 #endif
 
 #endif	/* __ASSEMBLY__ */
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index 8ac0e2ac3e70..73746ed5b834 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -53,8 +53,7 @@ extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
  */
 #define MACHINE_START(_type, _name)			\
 static const struct machine_desc __mach_desc_##_type	\
-__used							\
-__attribute__((__section__(".arch.info.init"))) = {	\
+__used __section(.arch.info.init) = {			\
 	.name = _name,
 
 #define MACHINE_END				\
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 18b493dfb3a8..abf9398cc333 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -202,8 +202,8 @@ static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
 	__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
 }
 
-static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
-			 unsigned int distr)
+static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
+			 bool set_distr, unsigned int distr)
 {
 	union {
 		unsigned int word;
@@ -212,8 +212,11 @@ static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
 	};
 	} data;
 
-	data.distr = distr;
-	data.lvl = lvl;
+	data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
+	if (set_distr)
+		data.distr = distr;
+	if (set_lvl)
+		data.lvl = lvl;
 	__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
 }
 
@@ -240,6 +243,25 @@ static void idu_irq_unmask(struct irq_data *data)
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }
 
+static void idu_irq_ack(struct irq_data *data)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&mcip_lock, flags);
+	__mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
+	raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void idu_irq_mask_ack(struct irq_data *data)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&mcip_lock, flags);
+	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
+	__mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
+	raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
 static int
 idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 		     bool force)
@@ -263,13 +285,36 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 	else
 		distribution_mode = IDU_M_DISTRI_RR;
 
-	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);
+	idu_set_mode(data->hwirq, false, 0, true, distribution_mode);
 
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 
 	return IRQ_SET_MASK_OK;
 }
 
+static int idu_irq_set_type(struct irq_data *data, u32 type)
+{
+	unsigned long flags;
+
+	/*
+	 * ARCv2 IDU HW does not support inverse polarity, so these are the
+	 * only interrupt types supported.
+	 */
+	if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
+		return -EINVAL;
+
+	raw_spin_lock_irqsave(&mcip_lock, flags);
+
+	idu_set_mode(data->hwirq, true,
+		     type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
+						   IDU_M_TRIG_LEVEL,
+		     false, 0);
+
+	raw_spin_unlock_irqrestore(&mcip_lock, flags);
+
+	return 0;
+}
+
 static void idu_irq_enable(struct irq_data *data)
 {
 	/*
@@ -289,7 +334,10 @@ static struct irq_chip idu_irq_chip = {
 	.name			= "MCIP IDU Intc",
 	.irq_mask		= idu_irq_mask,
 	.irq_unmask		= idu_irq_unmask,
+	.irq_ack		= idu_irq_ack,
+	.irq_mask_ack		= idu_irq_mask_ack,
 	.irq_enable		= idu_irq_enable,
+	.irq_set_type		= idu_irq_set_type,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= idu_irq_set_affinity,
 #endif
@@ -317,7 +365,7 @@ static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t
 }
 
 static const struct irq_domain_ops idu_irq_ops = {
-	.xlate	= irq_domain_xlate_onecell,
+	.xlate	= irq_domain_xlate_onetwocell,
 	.map	= idu_irq_map,
 };
 
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index c2663fce7f6c..445e4d702f43 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -826,7 +826,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
 		case DW_CFA_def_cfa:
 			state->cfa.reg = get_uleb128(&ptr.p8, end);
 			unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
-			/*nobreak*/
+			/* fall through */
 		case DW_CFA_def_cfa_offset:
 			state->cfa.offs = get_uleb128(&ptr.p8, end);
 			unw_debug("cfa_def_cfa_offset: 0x%lx ",
@@ -834,7 +834,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
 			break;
 		case DW_CFA_def_cfa_sf:
 			state->cfa.reg = get_uleb128(&ptr.p8, end);
-			/*nobreak */
+			/* fall through */
 		case DW_CFA_def_cfa_offset_sf:
 			state->cfa.offs = get_sleb128(&ptr.p8, end)
 				* state->dataAlign;
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 62c210e7ee4c..70a3fbe79fba 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -101,7 +101,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 	if (is_isa_arcv2() && ioc_enable && coherent)
 		dev->dma_coherent = true;
 
-	dev_info(dev, "use %sncoherent DMA ops\n",
+	dev_info(dev, "use %scoherent DMA ops\n",
 		 dev->dma_coherent ? "" : "non");
 }
 
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index 7dd2dd335cf6..0b961a2a10b8 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -6,11 +6,15 @@
  */
 
 #include <linux/init.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
 #include <linux/smp.h>
 #include <asm/arcregs.h>
 #include <asm/io.h>
 #include <asm/mach_desc.h>
 
+int arc_hsdk_axi_dmac_coherent __section(.data) = 0;
+
 #define ARC_CCM_UNUSED_ADDR	0x60000000
 
 static void __init hsdk_init_per_cpu(unsigned int cpu)
@@ -97,6 +101,42 @@ static void __init hsdk_enable_gpio_intc_wire(void)
 	iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN);
 }
 
+static int __init hsdk_tweak_node_coherency(const char *path, bool coherent)
+{
+	void *fdt = initial_boot_params;
+	const void *prop;
+	int node, ret;
+	bool dt_coh_set;
+
+	node = fdt_path_offset(fdt, path);
+	if (node < 0)
+		goto tweak_fail;
+
+	prop = fdt_getprop(fdt, node, "dma-coherent", &ret);
+	if (!prop && ret != -FDT_ERR_NOTFOUND)
+		goto tweak_fail;
+
+	dt_coh_set = ret != -FDT_ERR_NOTFOUND;
+	ret = 0;
+
+	/* need to remove "dma-coherent" property */
+	if (dt_coh_set && !coherent)
+		ret = fdt_delprop(fdt, node, "dma-coherent");
+
+	/* need to set "dma-coherent" property */
+	if (!dt_coh_set && coherent)
+		ret = fdt_setprop(fdt, node, "dma-coherent", NULL, 0);
+
+	if (ret < 0)
+		goto tweak_fail;
+
+	return 0;
+
+tweak_fail:
+	pr_err("failed to tweak %s to %scoherent\n", path, coherent ? "" : "non");
+	return -EFAULT;
+}
+
 enum hsdk_axi_masters {
 	M_HS_CORE = 0,
 	M_HS_RTT,
@@ -162,6 +202,39 @@ enum hsdk_axi_masters {
 #define CREG_PAE		((void __iomem *)(CREG_BASE + 0x180))
 #define CREG_PAE_UPDT		((void __iomem *)(CREG_BASE + 0x194))
 
+static void __init hsdk_init_memory_bridge_axi_dmac(void)
+{
+	bool coherent = !!arc_hsdk_axi_dmac_coherent;
+	u32 axi_m_slv1, axi_m_oft1;
+
+	/*
+	 * Don't tweak memory bridge configuration if we failed to tweak DTB
+	 * as we will end up in a inconsistent state.
+	 */
+	if (hsdk_tweak_node_coherency("/soc/dmac@80000", coherent))
+		return;
+
+	if (coherent) {
+		axi_m_slv1 = 0x77999999;
+		axi_m_oft1 = 0x76DCBA98;
+	} else {
+		axi_m_slv1 = 0x77777777;
+		axi_m_oft1 = 0x76543210;
+	}
+
+	writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
+	writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
+	writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_0));
+	writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_0));
+	writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
+
+	writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
+	writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
+	writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_1));
+	writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_1));
+	writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
+}
+
 static void __init hsdk_init_memory_bridge(void)
 {
 	u32 reg;
@@ -227,24 +300,14 @@ static void __init hsdk_init_memory_bridge(void)
 	writel(0x76543210, CREG_AXI_M_OFT1(M_GPU));
 	writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_GPU));
 
-	writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
-	writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_0));
-	writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
-	writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_0));
-	writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
-
-	writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
-	writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_1));
-	writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
-	writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_1));
-	writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
-
 	writel(0x00000000, CREG_AXI_M_SLV0(M_DVFS));
 	writel(0x60000000, CREG_AXI_M_SLV1(M_DVFS));
 	writel(0x00000000, CREG_AXI_M_OFT0(M_DVFS));
 	writel(0x00000000, CREG_AXI_M_OFT1(M_DVFS));
 	writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DVFS));
 
+	hsdk_init_memory_bridge_axi_dmac();
+
 	/*
 	 * PAE remapping for DMA clients does not work due to an RTL bug, so
 	 * CREG_PAE register must be programmed to all zeroes, otherwise it
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 33b00579beff..24360211534a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -7,6 +7,8 @@ config ARM
 	select ARCH_HAS_BINFMT_FLAT
 	select ARCH_HAS_DEBUG_VIRTUAL if MMU
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
+	select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
+	select ARCH_HAS_DMA_MMAP_PGPROT if SWIOTLB
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_KEEPINITRD
@@ -18,6 +20,8 @@ config ARM
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
 	select ARCH_HAS_STRICT_MODULE_RWX if MMU
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB
+	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
 	select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index 0ce56ad754ce..ea2c84214bac 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -46,6 +46,7 @@ static int __init parse_tag_acorn(const struct tag *tag)
 	switch (tag->u.acorn.vram_pages) {
 	case 512:
 		vram_size += PAGE_SIZE * 256;
+		/* Fall through - ??? */
 	case 256:
 		vram_size += PAGE_SIZE * 256;
 	default:
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c54cd7ed90ba..c1222c0e9fd3 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -664,10 +664,6 @@ config ARM_LPAE
 		!CPU_32v4 && !CPU_32v3
 	select PHYS_ADDR_T_64BIT
 	select SWIOTLB
-	select ARCH_HAS_DMA_COHERENT_TO_PFN
-	select ARCH_HAS_DMA_MMAP_PGPROT
-	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	help
 	  Say Y if you have an ARMv7 processor supporting the LPAE page
 	  table format and you would like to access memory beyond the
diff --git a/arch/mips/include/asm/octeon/cvmx-sli-defs.h b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
index 52cf96ea43e5..cbc7cdae1c6a 100644
--- a/arch/mips/include/asm/octeon/cvmx-sli-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
@@ -46,6 +46,7 @@ static inline uint64_t CVMX_SLI_PCIE_MSI_RCV_FUNC(void)
 	case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
 		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
 			return 0x0000000000003CB0ull;
+		/* Else, fall through */
 	default:
 		return 0x0000000000023CB0ull;
 	}
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index a39b079e73f2..6d58c1739b42 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -2,6 +2,7 @@
 #ifndef _PARISC_PGTABLE_H
 #define _PARISC_PGTABLE_H
 
+#include <asm/page.h>
 #include <asm-generic/4level-fixup.h>
 
 #include <asm/fixmap.h>
@@ -98,8 +99,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 
 #endif /* !__ASSEMBLY__ */
 
-#include <asm/page.h>
-
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e636728ab452..955eb355c2fd 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -863,7 +863,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		break;
 	case BPF_ALU64 | BPF_NEG: /* dst = -dst */
 		/* lcgr %dst,%dst */
-		EMIT4(0xb9130000, dst_reg, dst_reg);
+		EMIT4(0xb9030000, dst_reg, dst_reg);
 		break;
 	/*
 	 * BPF_FROM_BE/LE
@@ -1049,8 +1049,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		/* llgf %w1,map.max_entries(%b2) */
 		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
 			      offsetof(struct bpf_array, map.max_entries));
-		/* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
-		EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
+		/* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
+		EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
 				  REG_W1, 0, 0xa);
 
 		/*
@@ -1076,8 +1076,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		 * goto out;
 		 */
 
-		/* sllg %r1,%b3,3: %r1 = index * 8 */
-		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
+		/* llgfr %r1,%b3: %r1 = (u32) index */
+		EMIT4(0xb9160000, REG_1, BPF_REG_3);
+		/* sllg %r1,%r1,3: %r1 *= 8 */
+		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
 		/* lg %r1,prog(%b2,%r1) */
 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
 			      REG_1, offsetof(struct bpf_array, ptrs));
diff --git a/arch/um/include/shared/timer-internal.h b/arch/um/include/shared/timer-internal.h
index 8574338bf23b..9991ec2371e4 100644
--- a/arch/um/include/shared/timer-internal.h
+++ b/arch/um/include/shared/timer-internal.h
@@ -34,10 +34,13 @@ static inline void time_travel_set_time(unsigned long long ns)
 	time_travel_time = ns;
 }
 
-static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
-					 unsigned long long expiry)
+static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode)
 {
 	time_travel_timer_mode = mode;
+}
+
+static inline void time_travel_set_timer_expiry(unsigned long long expiry)
+{
 	time_travel_timer_expiry = expiry;
 }
 #else
@@ -50,8 +53,11 @@ static inline void time_travel_set_time(unsigned long long ns)
 {
 }
 
-static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
-					 unsigned long long expiry)
+static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode)
+{
+}
+
+static inline void time_travel_set_timer_expiry(unsigned long long expiry)
 {
 }
 
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 67c0d1a860e9..6bede7888fc2 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -213,7 +213,7 @@ static void time_travel_sleep(unsigned long long duration)
 	if (time_travel_timer_mode != TT_TMR_DISABLED ||
 	    time_travel_timer_expiry < next) {
 		if (time_travel_timer_mode == TT_TMR_ONESHOT)
-			time_travel_set_timer(TT_TMR_DISABLED, 0);
+			time_travel_set_timer_mode(TT_TMR_DISABLED);
 		/*
 		 * time_travel_time will be adjusted in the timer
 		 * IRQ handler so it works even when the signal
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 6a051b078359..234757233355 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -50,7 +50,7 @@ void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 static int itimer_shutdown(struct clock_event_device *evt)
 {
 	if (time_travel_mode != TT_MODE_OFF)
-		time_travel_set_timer(TT_TMR_DISABLED, 0);
+		time_travel_set_timer_mode(TT_TMR_DISABLED);
 
 	if (time_travel_mode != TT_MODE_INFCPU)
 		os_timer_disable();
@@ -62,9 +62,10 @@ static int itimer_set_periodic(struct clock_event_device *evt)
 {
 	unsigned long long interval = NSEC_PER_SEC / HZ;
 
-	if (time_travel_mode != TT_MODE_OFF)
-		time_travel_set_timer(TT_TMR_PERIODIC,
-				      time_travel_time + interval);
+	if (time_travel_mode != TT_MODE_OFF) {
+		time_travel_set_timer_mode(TT_TMR_PERIODIC);
+		time_travel_set_timer_expiry(time_travel_time + interval);
+	}
 
 	if (time_travel_mode != TT_MODE_INFCPU)
 		os_timer_set_interval(interval);
@@ -77,9 +78,10 @@ static int itimer_next_event(unsigned long delta,
 {
 	delta += 1;
 
-	if (time_travel_mode != TT_MODE_OFF)
-		time_travel_set_timer(TT_TMR_ONESHOT,
-				      time_travel_time + delta);
+	if (time_travel_mode != TT_MODE_OFF) {
+		time_travel_set_timer_mode(TT_TMR_ONESHOT);
+		time_travel_set_timer_expiry(time_travel_time + delta);
+	}
 
 	if (time_travel_mode != TT_MODE_INFCPU)
 		return os_timer_one_shot(delta);
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index 5f2d03067ae5..2faddeb0398a 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -72,6 +72,8 @@ static unsigned long find_trampoline_placement(void)
 
 	/* Find the first usable memory region under bios_start. */
 	for (i = boot_params->e820_entries - 1; i >= 0; i--) {
+		unsigned long new;
+
 		entry = &boot_params->e820_table[i];
 
 		/* Skip all entries above bios_start. */
@@ -84,15 +86,20 @@ static unsigned long find_trampoline_placement(void)
 
 		/* Adjust bios_start to the end of the entry if needed. */
 		if (bios_start > entry->addr + entry->size)
-			bios_start = entry->addr + entry->size;
+			new = entry->addr + entry->size;
 
 		/* Keep bios_start page-aligned. */
-		bios_start = round_down(bios_start, PAGE_SIZE);
+		new = round_down(new, PAGE_SIZE);
 
 		/* Skip the entry if it's too small. */
-		if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr)
+		if (new - TRAMPOLINE_32BIT_SIZE < entry->addr)
 			continue;
 
+		/* Protect against underflow. */
+		if (new - TRAMPOLINE_32BIT_SIZE > bios_start)
+			break;
+
+		bios_start = new;
 		break;
 	}
 
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 81b005e4c7d9..325959d19d9a 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1236,7 +1236,7 @@ void x86_pmu_enable_event(struct perf_event *event)
  * Add a single event to the PMU.
  *
  * The event is added to the group of enabled events
- * but only if it can be scehduled with existing events.
+ * but only if it can be scheduled with existing events.
  */
 static int x86_pmu_add(struct perf_event *event, int flags)
 {
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index f5e90a849bca..9e5f3c722c33 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -59,7 +59,6 @@ static void sanitize_boot_params(struct boot_params *boot_params)
 			BOOT_PARAM_PRESERVE(apm_bios_info),
 			BOOT_PARAM_PRESERVE(tboot_addr),
 			BOOT_PARAM_PRESERVE(ist_info),
-			BOOT_PARAM_PRESERVE(acpi_rsdp_addr),
 			BOOT_PARAM_PRESERVE(hd0_info),
 			BOOT_PARAM_PRESERVE(hd1_info),
 			BOOT_PARAM_PRESERVE(sys_desc_table),
@@ -71,6 +70,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
 			BOOT_PARAM_PRESERVE(eddbuf_entries),
 			BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
 			BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
+			BOOT_PARAM_PRESERVE(hdr),
 			BOOT_PARAM_PRESERVE(e820_table),
 			BOOT_PARAM_PRESERVE(eddbuf),
 		};
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 0278aa66ef62..fe7c205233f1 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -11,6 +11,21 @@
  * While adding a new CPUID for a new microarchitecture, add a new
  * group to keep logically sorted out in chronological order. Within
  * that group keep the CPUID for the variants sorted by model number.
+ *
+ * The defined symbol names have the following form:
+ *	INTEL_FAM6{OPTFAMILY}_{MICROARCH}{OPTDIFF}
+ * where:
+ * OPTFAMILY	Describes the family of CPUs that this belongs to. Default
+ *		is assumed to be "_CORE" (and should be omitted). Other values
+ *		currently in use are _ATOM and _XEON_PHI
+ * MICROARCH	Is the code name for the micro-architecture for this core.
+ *		N.B. Not the platform name.
+ * OPTDIFF	If needed, a short string to differentiate by market segment.
+ *		Exact strings here will vary over time. _DESKTOP, _MOBILE, and
+ *		_X (short for Xeon server) should be used when they are
+ *		appropriate.
+ *
+ * The #define line may optionally include a comment including platform names.
  */
 
 #define INTEL_FAM6_CORE_YONAH		0x0E
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 6b4fc2788078..271d837d69a8 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -381,6 +381,7 @@
 #define MSR_AMD64_PATCH_LEVEL		0x0000008b
 #define MSR_AMD64_TSC_RATIO		0xc0000104
 #define MSR_AMD64_NB_CFG		0xc001001f
+#define MSR_AMD64_CPUID_FN_1		0xc0011004
 #define MSR_AMD64_PATCH_LOADER		0xc0010020
 #define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
 #define MSR_AMD64_OSVW_STATUS		0xc0010141
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 109f974f9835..80bc209c0708 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -192,7 +192,7 @@
 	" lfence;\n"					\
 	" jmp 902b;\n"					\
 	" .align 16\n"					\
-	"903: addl $4, %%esp;\n"			\
+	"903: lea 4(%%esp), %%esp;\n"			\
 	" pushl %[thunk_target];\n"			\
 	" ret;\n"					\
 	" .align 16\n"					\
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f5291362da1a..aa5495d0f478 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -722,7 +722,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
 static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
 
 /*
- * Temporary interrupt handler.
+ * Temporary interrupt handler and polled calibration function.
  */
 static void __init lapic_cal_handler(struct clock_event_device *dev)
 {
@@ -851,7 +851,8 @@ bool __init apic_needs_pit(void)
 static int __init calibrate_APIC_clock(void)
 {
 	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
-	void (*real_handler)(struct clock_event_device *dev);
+	u64 tsc_perj = 0, tsc_start = 0;
+	unsigned long jif_start;
 	unsigned long deltaj;
 	long delta, deltatsc;
 	int pm_referenced = 0;
@@ -878,28 +879,64 @@ static int __init calibrate_APIC_clock(void)
 	apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
 		    "calibrating APIC timer ...\n");
 
+	/*
+	 * There are platforms w/o global clockevent devices. Instead of
+	 * making the calibration conditional on that, use a polling based
+	 * approach everywhere.
+	 */
 	local_irq_disable();
 
-	/* Replace the global interrupt handler */
-	real_handler = global_clock_event->event_handler;
-	global_clock_event->event_handler = lapic_cal_handler;
-
 	/*
 	 * Setup the APIC counter to maximum. There is no way the lapic
 	 * can underflow in the 100ms detection time frame
 	 */
 	__setup_APIC_LVTT(0xffffffff, 0, 0);
 
-	/* Let the interrupts run */
+	/*
+	 * Methods to terminate the calibration loop:
+	 *  1) Global clockevent if available (jiffies)
+	 *  2) TSC if available and frequency is known
+	 */
+	jif_start = READ_ONCE(jiffies);
+
+	if (tsc_khz) {
+		tsc_start = rdtsc();
+		tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
+	}
+
+	/*
+	 * Enable interrupts so the tick can fire, if a global
+	 * clockevent device is available
+	 */
 	local_irq_enable();
 
-	while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
-		cpu_relax();
+	while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
+		/* Wait for a tick to elapse */
+		while (1) {
+			if (tsc_khz) {
+				u64 tsc_now = rdtsc();
+				if ((tsc_now - tsc_start) >= tsc_perj) {
+					tsc_start += tsc_perj;
+					break;
+				}
+			} else {
+				unsigned long jif_now = READ_ONCE(jiffies);
 
-	local_irq_disable();
+				if (time_after(jif_now, jif_start)) {
+					jif_start = jif_now;
+					break;
+				}
+			}
+			cpu_relax();
+		}
 
-	/* Restore the real event handler */
-	global_clock_event->event_handler = real_handler;
+		/* Invoke the calibration routine */
+		local_irq_disable();
+		lapic_cal_handler(NULL);
+		local_irq_enable();
+	}
+
+	local_irq_disable();
 
 	/* Build delta t1-t2 as apic timer counts down */
 	delta = lapic_cal_t1 - lapic_cal_t2;
@@ -943,10 +980,11 @@ static int __init calibrate_APIC_clock(void)
 	levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
 
 	/*
-	 * PM timer calibration failed or not turned on
-	 * so lets try APIC timer based calibration
+	 * PM timer calibration failed or not turned on so lets try APIC
+	 * timer based calibration, if a global clockevent device is
+	 * available.
 	 */
-	if (!pm_referenced) {
+	if (!pm_referenced && global_clock_event) {
 		apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
 
 		/*
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 8d4e50428b68..68c363c341bf 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -804,6 +804,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
804 msr_set_bit(MSR_AMD64_DE_CFG, 31); 804 msr_set_bit(MSR_AMD64_DE_CFG, 31);
805} 805}
806 806
807static bool rdrand_force;
808
809static int __init rdrand_cmdline(char *str)
810{
811 if (!str)
812 return -EINVAL;
813
814 if (!strcmp(str, "force"))
815 rdrand_force = true;
816 else
817 return -EINVAL;
818
819 return 0;
820}
821early_param("rdrand", rdrand_cmdline);
822
823static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
824{
825 /*
826 * Saving of the MSR used to hide the RDRAND support during
827 * suspend/resume is done by arch/x86/power/cpu.c, which is
828 * dependent on CONFIG_PM_SLEEP.
829 */
830 if (!IS_ENABLED(CONFIG_PM_SLEEP))
831 return;
832
833 /*
834 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
835 * RDRAND support using the CPUID function directly.
836 */
837 if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
838 return;
839
840 msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
841
842 /*
843 * Verify that the CPUID change has occurred in case the kernel is
844 * running virtualized and the hypervisor doesn't support the MSR.
845 */
846 if (cpuid_ecx(1) & BIT(30)) {
847 pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
848 return;
849 }
850
851 clear_cpu_cap(c, X86_FEATURE_RDRAND);
852 pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
853}
854
855static void init_amd_jg(struct cpuinfo_x86 *c)
856{
857 /*
858 * Some BIOS implementations do not restore proper RDRAND support
859 * across suspend and resume. Check on whether to hide the RDRAND
860 * instruction support via CPUID.
861 */
862 clear_rdrand_cpuid_bit(c);
863}
864
807static void init_amd_bd(struct cpuinfo_x86 *c) 865static void init_amd_bd(struct cpuinfo_x86 *c)
808{ 866{
809 u64 value; 867 u64 value;
@@ -818,6 +876,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
818 wrmsrl_safe(MSR_F15H_IC_CFG, value); 876 wrmsrl_safe(MSR_F15H_IC_CFG, value);
819 } 877 }
820 } 878 }
879
880 /*
881 * Some BIOS implementations do not restore proper RDRAND support
882 * across suspend and resume. Check on whether to hide the RDRAND
883 * instruction support via CPUID.
884 */
885 clear_rdrand_cpuid_bit(c);
821} 886}
822 887
823static void init_amd_zn(struct cpuinfo_x86 *c) 888static void init_amd_zn(struct cpuinfo_x86 *c)
@@ -860,6 +925,7 @@ static void init_amd(struct cpuinfo_x86 *c)
860 case 0x10: init_amd_gh(c); break; 925 case 0x10: init_amd_gh(c); break;
861 case 0x12: init_amd_ln(c); break; 926 case 0x12: init_amd_ln(c); break;
862 case 0x15: init_amd_bd(c); break; 927 case 0x15: init_amd_bd(c); break;
928 case 0x16: init_amd_jg(c); break;
863 case 0x17: init_amd_zn(c); break; 929 case 0x17: init_amd_zn(c); break;
864 } 930 }
865 931
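
[Note] The amd.c hunks add a quirk that hides RDRAND (CPUID leaf 1, ECX bit 30) via MSR_AMD64_CPUID_FN_1 on families 0x15/0x16 unless rdrand=force is passed, because some BIOSes do not reinitialize RDRAND after resume. A small sketch, assuming x86 and GCC's <cpuid.h>, that reads the same CPUID bit from userspace (it can only observe the bit, not toggle the MSR):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	/* ECX bit 30 advertises RDRAND support */
	printf("RDRAND %s\n", (ecx & (1u << 30)) ? "advertised" : "hidden");
	return 0;
}
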
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 685d17c11461..e904ff06a83d 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -216,6 +216,9 @@ static void recalculate_apic_map(struct kvm *kvm)
216 if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id]) 216 if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
217 new->phys_map[xapic_id] = apic; 217 new->phys_map[xapic_id] = apic;
218 218
219 if (!kvm_apic_sw_enabled(apic))
220 continue;
221
219 ldr = kvm_lapic_get_reg(apic, APIC_LDR); 222 ldr = kvm_lapic_get_reg(apic, APIC_LDR);
220 223
221 if (apic_x2apic_mode(apic)) { 224 if (apic_x2apic_mode(apic)) {
@@ -258,6 +261,8 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
258 static_key_slow_dec_deferred(&apic_sw_disabled); 261 static_key_slow_dec_deferred(&apic_sw_disabled);
259 else 262 else
260 static_key_slow_inc(&apic_sw_disabled.key); 263 static_key_slow_inc(&apic_sw_disabled.key);
264
265 recalculate_apic_map(apic->vcpu->kvm);
261 } 266 }
262} 267}
263 268
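
[Note] The two lapic.c hunks establish one invariant: the logical APIC map is built only from software-enabled APICs, and it is rebuilt whenever an APIC's enable bit flips in SPIV. A toy sketch of that invariant with plain C stand-ins (struct apic and the map array are illustrative, not KVM types):

#include <stdbool.h>
#include <stdio.h>

#define NR_APICS 4

struct apic { int id; bool sw_enabled; };

static struct apic apics[NR_APICS];
static struct apic *map[NR_APICS];

static void recalculate_map(void)
{
	for (int i = 0; i < NR_APICS; i++) {
		map[i] = NULL;
		if (!apics[i].sw_enabled)
			continue;	/* skip disabled, as the patch does */
		map[i] = &apics[i];
	}
}

static void set_enabled(struct apic *a, bool on)
{
	a->sw_enabled = on;
	recalculate_map();	/* keep the map coherent on every flip */
}

int main(void)
{
	for (int i = 0; i < NR_APICS; i++)
		apics[i].id = i;
	set_enabled(&apics[2], true);
	printf("slot 2 %s\n", map[2] ? "mapped" : "unmapped");
	set_enabled(&apics[2], false);
	printf("slot 2 %s\n", map[2] ? "mapped" : "unmapped");
	return 0;
}
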
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 24843cf49579..218b277bfda3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5653,38 +5653,7 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5653 struct kvm_memory_slot *slot, 5653 struct kvm_memory_slot *slot,
5654 struct kvm_page_track_notifier_node *node) 5654 struct kvm_page_track_notifier_node *node)
5655{ 5655{
5656 struct kvm_mmu_page *sp; 5656 kvm_mmu_zap_all(kvm);
5657 LIST_HEAD(invalid_list);
5658 unsigned long i;
5659 bool flush;
5660 gfn_t gfn;
5661
5662 spin_lock(&kvm->mmu_lock);
5663
5664 if (list_empty(&kvm->arch.active_mmu_pages))
5665 goto out_unlock;
5666
5667 flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
5668
5669 for (i = 0; i < slot->npages; i++) {
5670 gfn = slot->base_gfn + i;
5671
5672 for_each_valid_sp(kvm, sp, gfn) {
5673 if (sp->gfn != gfn)
5674 continue;
5675
5676 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
5677 }
5678 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
5679 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
5680 flush = false;
5681 cond_resched_lock(&kvm->mmu_lock);
5682 }
5683 }
5684 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
5685
5686out_unlock:
5687 spin_unlock(&kvm->mmu_lock);
5688} 5657}
5689 5658
5690void kvm_mmu_init_vm(struct kvm *kvm) 5659void kvm_mmu_init_vm(struct kvm *kvm)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d685491fce4d..e3d3b2128f2b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1714,7 +1714,6 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
1714 if (!entry) 1714 if (!entry)
1715 return -EINVAL; 1715 return -EINVAL;
1716 1716
1717 new_entry = READ_ONCE(*entry);
1718 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) & 1717 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
1719 AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) | 1718 AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
1720 AVIC_PHYSICAL_ID_ENTRY_VALID_MASK); 1719 AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 24b079e94bc2..c9ef6a7a4a1a 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -12,6 +12,7 @@
12#include <linux/smp.h> 12#include <linux/smp.h>
13#include <linux/perf_event.h> 13#include <linux/perf_event.h>
14#include <linux/tboot.h> 14#include <linux/tboot.h>
15#include <linux/dmi.h>
15 16
16#include <asm/pgtable.h> 17#include <asm/pgtable.h>
17#include <asm/proto.h> 18#include <asm/proto.h>
@@ -23,7 +24,7 @@
23#include <asm/debugreg.h> 24#include <asm/debugreg.h>
24#include <asm/cpu.h> 25#include <asm/cpu.h>
25#include <asm/mmu_context.h> 26#include <asm/mmu_context.h>
26#include <linux/dmi.h> 27#include <asm/cpu_device_id.h>
27 28
28#ifdef CONFIG_X86_32 29#ifdef CONFIG_X86_32
29__visible unsigned long saved_context_ebx; 30__visible unsigned long saved_context_ebx;
@@ -397,15 +398,14 @@ static int __init bsp_pm_check_init(void)
397 398
398core_initcall(bsp_pm_check_init); 399core_initcall(bsp_pm_check_init);
399 400
400static int msr_init_context(const u32 *msr_id, const int total_num) 401static int msr_build_context(const u32 *msr_id, const int num)
401{ 402{
402 int i = 0; 403 struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
403 struct saved_msr *msr_array; 404 struct saved_msr *msr_array;
405 int total_num;
406 int i, j;
404 407
405 if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) { 408 total_num = saved_msrs->num + num;
406 pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
407 return -EINVAL;
408 }
409 409
410 msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL); 410 msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
411 if (!msr_array) { 411 if (!msr_array) {
@@ -413,19 +413,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
413 return -ENOMEM; 413 return -ENOMEM;
414 } 414 }
415 415
416 for (i = 0; i < total_num; i++) { 416 if (saved_msrs->array) {
417 msr_array[i].info.msr_no = msr_id[i]; 417 /*
418 * Multiple callbacks can invoke this function, so copy any
419 * MSR save requests from previous invocations.
420 */
421 memcpy(msr_array, saved_msrs->array,
422 sizeof(struct saved_msr) * saved_msrs->num);
423
424 kfree(saved_msrs->array);
425 }
426
427 for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
428 msr_array[i].info.msr_no = msr_id[j];
418 msr_array[i].valid = false; 429 msr_array[i].valid = false;
419 msr_array[i].info.reg.q = 0; 430 msr_array[i].info.reg.q = 0;
420 } 431 }
421 saved_context.saved_msrs.num = total_num; 432 saved_msrs->num = total_num;
422 saved_context.saved_msrs.array = msr_array; 433 saved_msrs->array = msr_array;
423 434
424 return 0; 435 return 0;
425} 436}
426 437
427/* 438/*
428 * The following section is a quirk framework for problematic BIOSen: 439 * The following sections are a quirk framework for problematic BIOSen:
429 * Sometimes MSRs are modified by the BIOSen after suspended to 440 * Sometimes MSRs are modified by the BIOSen after suspended to
430 * RAM, this might cause unexpected behavior after wakeup. 441 * RAM, this might cause unexpected behavior after wakeup.
431 * Thus we save/restore these specified MSRs across suspend/resume 442 * Thus we save/restore these specified MSRs across suspend/resume
@@ -440,7 +451,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
440 u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL }; 451 u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
441 452
442 pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident); 453 pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
443 return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id)); 454 return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
444} 455}
445 456
446static const struct dmi_system_id msr_save_dmi_table[] = { 457static const struct dmi_system_id msr_save_dmi_table[] = {
@@ -455,9 +466,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = {
455 {} 466 {}
456}; 467};
457 468
469static int msr_save_cpuid_features(const struct x86_cpu_id *c)
470{
471 u32 cpuid_msr_id[] = {
472 MSR_AMD64_CPUID_FN_1,
473 };
474
475 pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
476 c->family);
477
478 return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
479}
480
481static const struct x86_cpu_id msr_save_cpu_table[] = {
482 {
483 .vendor = X86_VENDOR_AMD,
484 .family = 0x15,
485 .model = X86_MODEL_ANY,
486 .feature = X86_FEATURE_ANY,
487 .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
488 },
489 {
490 .vendor = X86_VENDOR_AMD,
491 .family = 0x16,
492 .model = X86_MODEL_ANY,
493 .feature = X86_FEATURE_ANY,
494 .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
495 },
496 {}
497};
498
499typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
500static int pm_cpu_check(const struct x86_cpu_id *c)
501{
502 const struct x86_cpu_id *m;
503 int ret = 0;
504
505 m = x86_match_cpu(msr_save_cpu_table);
506 if (m) {
507 pm_cpu_match_t fn;
508
509 fn = (pm_cpu_match_t)m->driver_data;
510 ret = fn(m);
511 }
512
513 return ret;
514}
515
458static int pm_check_save_msr(void) 516static int pm_check_save_msr(void)
459{ 517{
460 dmi_check_system(msr_save_dmi_table); 518 dmi_check_system(msr_save_dmi_table);
519 pm_cpu_check(msr_save_cpu_table);
520
461 return 0; 521 return 0;
462} 522}
463 523
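
[Note] msr_build_context() now appends to one shared array instead of erroring out when a second caller (DMI quirk plus CPU-match quirk) registers MSRs. A userspace model of that append semantics, assuming malloc/free in place of kmalloc_array/kfree and illustrative MSR indices:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned int *saved;
static int saved_num;

static int build_context(const unsigned int *ids, int num)
{
	int total = saved_num + num;
	unsigned int *arr = malloc(sizeof(*arr) * total);

	if (!arr)
		return -1;

	if (saved) {
		/* copy requests from previous invocations, then free */
		memcpy(arr, saved, sizeof(*arr) * saved_num);
		free(saved);
	}
	for (int i = saved_num, j = 0; i < total; i++, j++)
		arr[i] = ids[j];

	saved = arr;
	saved_num = total;
	return 0;
}

int main(void)
{
	unsigned int a[] = { 0x10 };	/* illustrative ids, not real MSRs */
	unsigned int b[] = { 0x20 };

	build_context(a, 1);
	build_context(b, 1);
	printf("%d MSRs queued for save/restore\n", saved_num);
	return 0;
}
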
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 2e2efa577437..8c37294f1d1e 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -200,7 +200,7 @@ config ATM_NICSTAR_USE_SUNI
200 make the card work). 200 make the card work).
201 201
202config ATM_NICSTAR_USE_IDT77105 202config ATM_NICSTAR_USE_IDT77105
203 bool "Use IDT77015 PHY driver (25Mbps)" 203 bool "Use IDT77105 PHY driver (25Mbps)"
204 depends on ATM_NICSTAR 204 depends on ATM_NICSTAR
205 help 205 help
206 Support for the PHYsical layer chip in ForeRunner LE25 cards. In 206 Support for the PHYsical layer chip in ForeRunner LE25 cards. In
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 9c0bb771751d..a2fcde582e2a 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -74,7 +74,7 @@ struct ht16k33_priv {
74 struct ht16k33_fbdev fbdev; 74 struct ht16k33_fbdev fbdev;
75}; 75};
76 76
77static struct fb_fix_screeninfo ht16k33_fb_fix = { 77static const struct fb_fix_screeninfo ht16k33_fb_fix = {
78 .id = DRIVER_NAME, 78 .id = DRIVER_NAME,
79 .type = FB_TYPE_PACKED_PIXELS, 79 .type = FB_TYPE_PACKED_PIXELS,
80 .visual = FB_VISUAL_MONO10, 80 .visual = FB_VISUAL_MONO10,
@@ -85,7 +85,7 @@ static struct fb_fix_screeninfo ht16k33_fb_fix = {
85 .accel = FB_ACCEL_NONE, 85 .accel = FB_ACCEL_NONE,
86}; 86};
87 87
88static struct fb_var_screeninfo ht16k33_fb_var = { 88static const struct fb_var_screeninfo ht16k33_fb_var = {
89 .xres = HT16K33_MATRIX_LED_MAX_ROWS, 89 .xres = HT16K33_MATRIX_LED_MAX_ROWS,
90 .yres = HT16K33_MATRIX_LED_MAX_COLS, 90 .yres = HT16K33_MATRIX_LED_MAX_COLS,
91 .xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS, 91 .xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS,
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9bd4ddd12b25..5b248763a672 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -322,6 +322,8 @@ static int drbd_thread_setup(void *arg)
322 thi->name[0], 322 thi->name[0],
323 resource->name); 323 resource->name);
324 324
325 allow_kernel_signal(DRBD_SIGKILL);
326 allow_kernel_signal(SIGXCPU);
325restart: 327restart:
326 retval = thi->function(thi); 328 retval = thi->function(thi);
327 329
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index c0990703ce54..1c46babeb093 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -324,6 +324,25 @@ static struct clk_core *clk_core_lookup(const char *name)
324 return NULL; 324 return NULL;
325} 325}
326 326
327#ifdef CONFIG_OF
328static int of_parse_clkspec(const struct device_node *np, int index,
329 const char *name, struct of_phandle_args *out_args);
330static struct clk_hw *
331of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
332#else
333static inline int of_parse_clkspec(const struct device_node *np, int index,
334 const char *name,
335 struct of_phandle_args *out_args)
336{
337 return -ENOENT;
338}
339static inline struct clk_hw *
340of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
341{
342 return ERR_PTR(-ENOENT);
343}
344#endif
345
327/** 346/**
328 * clk_core_get - Find the clk_core parent of a clk 347 * clk_core_get - Find the clk_core parent of a clk
329 * @core: clk to find parent of 348 * @core: clk to find parent of
@@ -355,8 +374,9 @@ static struct clk_core *clk_core_lookup(const char *name)
355 * }; 374 * };
356 * 375 *
357 * Returns: -ENOENT when the provider can't be found or the clk doesn't 376 * Returns: -ENOENT when the provider can't be found or the clk doesn't
358 * exist in the provider. -EINVAL when the name can't be found. NULL when the 377 * exist in the provider or the name can't be found in the DT node or
359 * provider knows about the clk but it isn't provided on this system. 378 * in a clkdev lookup. NULL when the provider knows about the clk but it
379 * isn't provided on this system.
360 * A valid clk_core pointer when the clk can be found in the provider. 380 * A valid clk_core pointer when the clk can be found in the provider.
361 */ 381 */
362static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index) 382static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
@@ -367,17 +387,19 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
367 struct device *dev = core->dev; 387 struct device *dev = core->dev;
368 const char *dev_id = dev ? dev_name(dev) : NULL; 388 const char *dev_id = dev ? dev_name(dev) : NULL;
369 struct device_node *np = core->of_node; 389 struct device_node *np = core->of_node;
390 struct of_phandle_args clkspec;
370 391
371 if (np && (name || index >= 0)) 392 if (np && (name || index >= 0) &&
372 hw = of_clk_get_hw(np, index, name); 393 !of_parse_clkspec(np, index, name, &clkspec)) {
373 394 hw = of_clk_get_hw_from_clkspec(&clkspec);
374 /* 395 of_node_put(clkspec.np);
375 * If the DT search above couldn't find the provider or the provider 396 } else if (name) {
376 * didn't know about this clk, fallback to looking up via clkdev based 397 /*
377 * clk_lookups 398 * If the DT search above couldn't find the provider fallback to
378 */ 399 * looking up via clkdev based clk_lookups.
379 if (PTR_ERR(hw) == -ENOENT && name) 400 */
380 hw = clk_find_hw(dev_id, name); 401 hw = clk_find_hw(dev_id, name);
402 }
381 403
382 if (IS_ERR(hw)) 404 if (IS_ERR(hw))
383 return ERR_CAST(hw); 405 return ERR_CAST(hw);
@@ -401,7 +423,7 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
401 parent = ERR_PTR(-EPROBE_DEFER); 423 parent = ERR_PTR(-EPROBE_DEFER);
402 } else { 424 } else {
403 parent = clk_core_get(core, index); 425 parent = clk_core_get(core, index);
404 if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT) 426 if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
405 parent = clk_core_lookup(entry->name); 427 parent = clk_core_lookup(entry->name);
406 } 428 }
407 429
@@ -1632,7 +1654,8 @@ static int clk_fetch_parent_index(struct clk_core *core,
1632 break; 1654 break;
1633 1655
1634 /* Fallback to comparing globally unique names */ 1656 /* Fallback to comparing globally unique names */
1635 if (!strcmp(parent->name, core->parents[i].name)) 1657 if (core->parents[i].name &&
1658 !strcmp(parent->name, core->parents[i].name))
1636 break; 1659 break;
1637 } 1660 }
1638 1661
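
[Note] The clk.c hunks reshape parent lookup: resolve via the DT clkspec first, fall back to a clkdev name lookup only when a name exists, and never strcmp() a NULL entry name. A toy sketch of that fallback shape (the tables and lookup_by_dt() are illustrative, not the real provider API):

#include <stdio.h>
#include <string.h>

struct entry { const char *name; int hw; };

static struct entry table[] = {
	{ "osc", 1 },
	{ NULL,  0 },	/* index-only entry: no global name */
};

static int lookup_by_dt(int index)
{
	return index == 0 ? 1 : -1;	/* pretend only index 0 resolves */
}

static int lookup_parent(int index, const char *name)
{
	int hw = lookup_by_dt(index);

	if (hw < 0 && name) {
		/* clkdev-style fallback, guarded on name != NULL */
		for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
			if (table[i].name && !strcmp(table[i].name, name))
				return table[i].hw;
	}
	return hw;
}

int main(void)
{
	printf("parent hw = %d\n", lookup_parent(1, "osc"));
	return 0;
}
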
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 91db7894125d..65c82d922b05 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -14,7 +14,7 @@
14#include "clk-exynos5-subcmu.h" 14#include "clk-exynos5-subcmu.h"
15 15
16static struct samsung_clk_provider *ctx; 16static struct samsung_clk_provider *ctx;
17static const struct exynos5_subcmu_info *cmu; 17static const struct exynos5_subcmu_info **cmu;
18static int nr_cmus; 18static int nr_cmus;
19 19
20static void exynos5_subcmu_clk_save(void __iomem *base, 20static void exynos5_subcmu_clk_save(void __iomem *base,
@@ -56,17 +56,17 @@ static void exynos5_subcmu_defer_gate(struct samsung_clk_provider *ctx,
56 * when OF-core populates all device-tree nodes. 56 * when OF-core populates all device-tree nodes.
57 */ 57 */
58void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus, 58void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus,
59 const struct exynos5_subcmu_info *_cmu) 59 const struct exynos5_subcmu_info **_cmu)
60{ 60{
61 ctx = _ctx; 61 ctx = _ctx;
62 cmu = _cmu; 62 cmu = _cmu;
63 nr_cmus = _nr_cmus; 63 nr_cmus = _nr_cmus;
64 64
65 for (; _nr_cmus--; _cmu++) { 65 for (; _nr_cmus--; _cmu++) {
66 exynos5_subcmu_defer_gate(ctx, _cmu->gate_clks, 66 exynos5_subcmu_defer_gate(ctx, (*_cmu)->gate_clks,
67 _cmu->nr_gate_clks); 67 (*_cmu)->nr_gate_clks);
68 exynos5_subcmu_clk_save(ctx->reg_base, _cmu->suspend_regs, 68 exynos5_subcmu_clk_save(ctx->reg_base, (*_cmu)->suspend_regs,
69 _cmu->nr_suspend_regs); 69 (*_cmu)->nr_suspend_regs);
70 } 70 }
71} 71}
72 72
@@ -163,9 +163,9 @@ static int __init exynos5_clk_probe(struct platform_device *pdev)
163 if (of_property_read_string(np, "label", &name) < 0) 163 if (of_property_read_string(np, "label", &name) < 0)
164 continue; 164 continue;
165 for (i = 0; i < nr_cmus; i++) 165 for (i = 0; i < nr_cmus; i++)
166 if (strcmp(cmu[i].pd_name, name) == 0) 166 if (strcmp(cmu[i]->pd_name, name) == 0)
167 exynos5_clk_register_subcmu(&pdev->dev, 167 exynos5_clk_register_subcmu(&pdev->dev,
168 &cmu[i], np); 168 cmu[i], np);
169 } 169 }
170 return 0; 170 return 0;
171} 171}
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.h b/drivers/clk/samsung/clk-exynos5-subcmu.h
index 755ee8aaa3de..9ae5356f25aa 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.h
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.h
@@ -21,6 +21,6 @@ struct exynos5_subcmu_info {
21}; 21};
22 22
23void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus, 23void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus,
24 const struct exynos5_subcmu_info *cmu); 24 const struct exynos5_subcmu_info **cmu);
25 25
26#endif 26#endif
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index f2b896881768..931c70a4da19 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -681,6 +681,10 @@ static const struct exynos5_subcmu_info exynos5250_disp_subcmu = {
681 .pd_name = "DISP1", 681 .pd_name = "DISP1",
682}; 682};
683 683
684static const struct exynos5_subcmu_info *exynos5250_subcmus[] = {
685 &exynos5250_disp_subcmu,
686};
687
684static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = { 688static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = {
685 /* sorted in descending order */ 689 /* sorted in descending order */
686 /* PLL_36XX_RATE(rate, m, p, s, k) */ 690 /* PLL_36XX_RATE(rate, m, p, s, k) */
@@ -843,7 +847,8 @@ static void __init exynos5250_clk_init(struct device_node *np)
843 847
844 samsung_clk_sleep_init(reg_base, exynos5250_clk_regs, 848 samsung_clk_sleep_init(reg_base, exynos5250_clk_regs,
845 ARRAY_SIZE(exynos5250_clk_regs)); 849 ARRAY_SIZE(exynos5250_clk_regs));
846 exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu); 850 exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5250_subcmus),
851 exynos5250_subcmus);
847 852
848 samsung_clk_of_add_provider(np, ctx); 853 samsung_clk_of_add_provider(np, ctx);
849 854
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 01bca5a498b2..7670cc596c74 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -534,8 +534,6 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
534 GATE_BUS_TOP, 24, 0, 0), 534 GATE_BUS_TOP, 24, 0, 0),
535 GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", 535 GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
536 GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0), 536 GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
537 GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
538 SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
539}; 537};
540 538
541static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = { 539static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -577,8 +575,13 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = {
577 575
578static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = { 576static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = {
579 GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0), 577 GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
578 /* Maudio Block */
580 GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk", 579 GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
581 SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0), 580 SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
581 GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
582 GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
583 GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
584 GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
582}; 585};
583 586
584static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = { 587static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
@@ -890,9 +893,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
890 /* GSCL Block */ 893 /* GSCL Block */
891 DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2), 894 DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2),
892 895
893 /* MSCL Block */
894 DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
895
896 /* PSGEN */ 896 /* PSGEN */
897 DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1), 897 DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1),
898 DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1), 898 DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1),
@@ -1017,12 +1017,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
1017 GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1", 1017 GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1",
1018 GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0), 1018 GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0),
1019 1019
1020 /* Maudio Block */
1021 GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
1022 GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
1023 GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
1024 GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
1025
1026 /* FSYS Block */ 1020 /* FSYS Block */
1027 GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0), 1021 GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0),
1028 GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0), 1022 GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0),
@@ -1162,17 +1156,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
1162 GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl", 1156 GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
1163 GATE_IP_GSCL1, 17, 0, 0), 1157 GATE_IP_GSCL1, 17, 0, 0),
1164 1158
1165 /* MSCL Block */
1166 GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
1167 GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
1168 GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
1169 GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
1170 GATE_IP_MSCL, 8, 0, 0),
1171 GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
1172 GATE_IP_MSCL, 9, 0, 0),
1173 GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
1174 GATE_IP_MSCL, 10, 0, 0),
1175
1176 /* ISP */ 1159 /* ISP */
1177 GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp", 1160 GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp",
1178 GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0), 1161 GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0),
@@ -1281,32 +1264,103 @@ static struct exynos5_subcmu_reg_dump exynos5x_mfc_suspend_regs[] = {
1281 { DIV4_RATIO, 0, 0x3 }, /* DIV dout_mfc_blk */ 1264 { DIV4_RATIO, 0, 0x3 }, /* DIV dout_mfc_blk */
1282}; 1265};
1283 1266
1284static const struct exynos5_subcmu_info exynos5x_subcmus[] = { 1267static const struct samsung_gate_clock exynos5x_mscl_gate_clks[] __initconst = {
1285 { 1268 /* MSCL Block */
1286 .div_clks = exynos5x_disp_div_clks, 1269 GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
1287 .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks), 1270 GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
1288 .gate_clks = exynos5x_disp_gate_clks, 1271 GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
1289 .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks), 1272 GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
1290 .suspend_regs = exynos5x_disp_suspend_regs, 1273 GATE_IP_MSCL, 8, 0, 0),
1291 .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs), 1274 GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
1292 .pd_name = "DISP", 1275 GATE_IP_MSCL, 9, 0, 0),
1293 }, { 1276 GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
1294 .div_clks = exynos5x_gsc_div_clks, 1277 GATE_IP_MSCL, 10, 0, 0),
1295 .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks), 1278};
1296 .gate_clks = exynos5x_gsc_gate_clks, 1279
1297 .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks), 1280static const struct samsung_div_clock exynos5x_mscl_div_clks[] __initconst = {
1298 .suspend_regs = exynos5x_gsc_suspend_regs, 1281 DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
1299 .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs), 1282};
1300 .pd_name = "GSC", 1283
1301 }, { 1284static struct exynos5_subcmu_reg_dump exynos5x_mscl_suspend_regs[] = {
1302 .div_clks = exynos5x_mfc_div_clks, 1285 { GATE_IP_MSCL, 0xffffffff, 0xffffffff }, /* MSCL gates */
1303 .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks), 1286 { SRC_TOP3, 0, BIT(4) }, /* MUX mout_user_aclk400_mscl */
1304 .gate_clks = exynos5x_mfc_gate_clks, 1287 { DIV2_RATIO0, 0, 0x30000000 }, /* DIV dout_mscl_blk */
1305 .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks), 1288};
1306 .suspend_regs = exynos5x_mfc_suspend_regs, 1289
1307 .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs), 1290static const struct samsung_gate_clock exynos5800_mau_gate_clks[] __initconst = {
1308 .pd_name = "MFC", 1291 GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
1309 }, 1292 SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
1293 GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
1294 GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
1295 GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
1296 GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
1297};
1298
1299static struct exynos5_subcmu_reg_dump exynos5800_mau_suspend_regs[] = {
1300 { SRC_TOP9, 0, BIT(8) }, /* MUX mout_user_mau_epll */
1301};
1302
1303static const struct exynos5_subcmu_info exynos5x_disp_subcmu = {
1304 .div_clks = exynos5x_disp_div_clks,
1305 .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks),
1306 .gate_clks = exynos5x_disp_gate_clks,
1307 .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks),
1308 .suspend_regs = exynos5x_disp_suspend_regs,
1309 .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
1310 .pd_name = "DISP",
1311};
1312
1313static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = {
1314 .div_clks = exynos5x_gsc_div_clks,
1315 .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks),
1316 .gate_clks = exynos5x_gsc_gate_clks,
1317 .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks),
1318 .suspend_regs = exynos5x_gsc_suspend_regs,
1319 .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
1320 .pd_name = "GSC",
1321};
1322
1323static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
1324 .div_clks = exynos5x_mfc_div_clks,
1325 .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
1326 .gate_clks = exynos5x_mfc_gate_clks,
1327 .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks),
1328 .suspend_regs = exynos5x_mfc_suspend_regs,
1329 .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
1330 .pd_name = "MFC",
1331};
1332
1333static const struct exynos5_subcmu_info exynos5x_mscl_subcmu = {
1334 .div_clks = exynos5x_mscl_div_clks,
1335 .nr_div_clks = ARRAY_SIZE(exynos5x_mscl_div_clks),
1336 .gate_clks = exynos5x_mscl_gate_clks,
1337 .nr_gate_clks = ARRAY_SIZE(exynos5x_mscl_gate_clks),
1338 .suspend_regs = exynos5x_mscl_suspend_regs,
1339 .nr_suspend_regs = ARRAY_SIZE(exynos5x_mscl_suspend_regs),
1340 .pd_name = "MSC",
1341};
1342
1343static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
1344 .gate_clks = exynos5800_mau_gate_clks,
1345 .nr_gate_clks = ARRAY_SIZE(exynos5800_mau_gate_clks),
1346 .suspend_regs = exynos5800_mau_suspend_regs,
1347 .nr_suspend_regs = ARRAY_SIZE(exynos5800_mau_suspend_regs),
1348 .pd_name = "MAU",
1349};
1350
1351static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
1352 &exynos5x_disp_subcmu,
1353 &exynos5x_gsc_subcmu,
1354 &exynos5x_mfc_subcmu,
1355 &exynos5x_mscl_subcmu,
1356};
1357
1358static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
1359 &exynos5x_disp_subcmu,
1360 &exynos5x_gsc_subcmu,
1361 &exynos5x_mfc_subcmu,
1362 &exynos5x_mscl_subcmu,
1363 &exynos5800_mau_subcmu,
1310}; 1364};
1311 1365
1312static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = { 1366static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = {
@@ -1539,11 +1593,17 @@ static void __init exynos5x_clk_init(struct device_node *np,
1539 samsung_clk_extended_sleep_init(reg_base, 1593 samsung_clk_extended_sleep_init(reg_base,
1540 exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs), 1594 exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs),
1541 exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc)); 1595 exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc));
1542 if (soc == EXYNOS5800) 1596
1597 if (soc == EXYNOS5800) {
1543 samsung_clk_sleep_init(reg_base, exynos5800_clk_regs, 1598 samsung_clk_sleep_init(reg_base, exynos5800_clk_regs,
1544 ARRAY_SIZE(exynos5800_clk_regs)); 1599 ARRAY_SIZE(exynos5800_clk_regs));
1545 exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus), 1600
1546 exynos5x_subcmus); 1601 exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5800_subcmus),
1602 exynos5800_subcmus);
1603 } else {
1604 exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
1605 exynos5x_subcmus);
1606 }
1547 1607
1548 samsung_clk_of_add_provider(np, ctx); 1608 samsung_clk_of_add_provider(np, ctx);
1549} 1609}
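
[Note] The Samsung subcmu changes turn the tables into arrays of pointers so the Exynos5800 list can reuse every Exynos542x entry and append its MAU block without duplicating struct bodies. A minimal stand-in showing the sharing pattern:

#include <stdio.h>

struct subcmu { const char *pd_name; };

static const struct subcmu disp = { "DISP" };
static const struct subcmu gsc  = { "GSC" };
static const struct subcmu mau  = { "MAU" };	/* 5800-only block */

static const struct subcmu *exynos5x[]   = { &disp, &gsc };
static const struct subcmu *exynos5800[] = { &disp, &gsc, &mau };

static void init(const struct subcmu **cmu, int n)
{
	for (int i = 0; i < n; i++)
		printf("registering %s\n", cmu[i]->pd_name);
}

int main(void)
{
	init(exynos5800, sizeof(exynos5800) / sizeof(exynos5800[0]));
	return 0;
}
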
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 5c50e723ecae..1a191eeeebba 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -38,7 +38,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
38 if (socfpgaclk->fixed_div) { 38 if (socfpgaclk->fixed_div) {
39 div = socfpgaclk->fixed_div; 39 div = socfpgaclk->fixed_div;
40 } else { 40 } else {
41 if (!socfpgaclk->bypass_reg) 41 if (socfpgaclk->hw.reg)
42 div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1); 42 div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
43 } 43 }
44 44
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 23e0a356f167..ad72b3f42ffa 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1163,6 +1163,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
1163 switch (chan->feature & FSL_DMA_IP_MASK) { 1163 switch (chan->feature & FSL_DMA_IP_MASK) {
1164 case FSL_DMA_IP_85XX: 1164 case FSL_DMA_IP_85XX:
1165 chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; 1165 chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
1166 /* Fall through */
1166 case FSL_DMA_IP_83XX: 1167 case FSL_DMA_IP_83XX:
1167 chan->toggle_ext_start = fsl_chan_toggle_ext_start; 1168 chan->toggle_ext_start = fsl_chan_toggle_ext_start;
1168 chan->set_src_loop_size = fsl_chan_set_src_loop_size; 1169 chan->set_src_loop_size = fsl_chan_set_src_loop_size;
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 567fb98c0892..9762dd6d99fa 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -363,7 +363,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
363 /* Special handling for SPI GPIOs if used */ 363 /* Special handling for SPI GPIOs if used */
364 if (IS_ERR(desc)) 364 if (IS_ERR(desc))
365 desc = of_find_spi_gpio(dev, con_id, &of_flags); 365 desc = of_find_spi_gpio(dev, con_id, &of_flags);
366 if (IS_ERR(desc)) { 366 if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) {
367 /* This quirk looks up flags and all */ 367 /* This quirk looks up flags and all */
368 desc = of_find_spi_cs_gpio(dev, con_id, idx, flags); 368 desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
369 if (!IS_ERR(desc)) 369 if (!IS_ERR(desc))
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f497003f119c..cca749010cd0 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1091,9 +1091,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1091 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) 1091 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
1092 lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW; 1092 lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
1093 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) 1093 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
1094 lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN; 1094 lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
1095 GPIOLINE_FLAG_IS_OUT);
1095 if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) 1096 if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
1096 lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE; 1097 lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
1098 GPIOLINE_FLAG_IS_OUT);
1097 1099
1098 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) 1100 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
1099 return -EFAULT; 1101 return -EFAULT;
@@ -1371,21 +1373,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
1371 if (status) 1373 if (status)
1372 goto err_remove_from_list; 1374 goto err_remove_from_list;
1373 1375
1374 status = gpiochip_irqchip_init_valid_mask(chip);
1375 if (status)
1376 goto err_remove_from_list;
1377
1378 status = gpiochip_alloc_valid_mask(chip); 1376 status = gpiochip_alloc_valid_mask(chip);
1379 if (status) 1377 if (status)
1380 goto err_remove_irqchip_mask; 1378 goto err_remove_from_list;
1381
1382 status = gpiochip_add_irqchip(chip, lock_key, request_key);
1383 if (status)
1384 goto err_free_gpiochip_mask;
1385 1379
1386 status = of_gpiochip_add(chip); 1380 status = of_gpiochip_add(chip);
1387 if (status) 1381 if (status)
1388 goto err_remove_chip; 1382 goto err_free_gpiochip_mask;
1389 1383
1390 status = gpiochip_init_valid_mask(chip); 1384 status = gpiochip_init_valid_mask(chip);
1391 if (status) 1385 if (status)
@@ -1411,6 +1405,14 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
1411 1405
1412 machine_gpiochip_add(chip); 1406 machine_gpiochip_add(chip);
1413 1407
1408 status = gpiochip_irqchip_init_valid_mask(chip);
1409 if (status)
1410 goto err_remove_acpi_chip;
1411
1412 status = gpiochip_add_irqchip(chip, lock_key, request_key);
1413 if (status)
1414 goto err_remove_irqchip_mask;
1415
1414 /* 1416 /*
1415 * By first adding the chardev, and then adding the device, 1417 * By first adding the chardev, and then adding the device,
1416 * we get a device node entry in sysfs under 1418 * we get a device node entry in sysfs under
@@ -1422,21 +1424,21 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
1422 if (gpiolib_initialized) { 1424 if (gpiolib_initialized) {
1423 status = gpiochip_setup_dev(gdev); 1425 status = gpiochip_setup_dev(gdev);
1424 if (status) 1426 if (status)
1425 goto err_remove_acpi_chip; 1427 goto err_remove_irqchip;
1426 } 1428 }
1427 return 0; 1429 return 0;
1428 1430
1431err_remove_irqchip:
1432 gpiochip_irqchip_remove(chip);
1433err_remove_irqchip_mask:
1434 gpiochip_irqchip_free_valid_mask(chip);
1429err_remove_acpi_chip: 1435err_remove_acpi_chip:
1430 acpi_gpiochip_remove(chip); 1436 acpi_gpiochip_remove(chip);
1431err_remove_of_chip: 1437err_remove_of_chip:
1432 gpiochip_free_hogs(chip); 1438 gpiochip_free_hogs(chip);
1433 of_gpiochip_remove(chip); 1439 of_gpiochip_remove(chip);
1434err_remove_chip:
1435 gpiochip_irqchip_remove(chip);
1436err_free_gpiochip_mask: 1440err_free_gpiochip_mask:
1437 gpiochip_free_valid_mask(chip); 1441 gpiochip_free_valid_mask(chip);
1438err_remove_irqchip_mask:
1439 gpiochip_irqchip_free_valid_mask(chip);
1440err_remove_from_list: 1442err_remove_from_list:
1441 spin_lock_irqsave(&gpio_lock, flags); 1443 spin_lock_irqsave(&gpio_lock, flags);
1442 list_del(&gdev->list); 1444 list_del(&gdev->list);
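
[Note] The gpiolib.c reorder moves irqchip setup later and, with it, keeps the error unwind a mirror image of the setup order. A generic sketch of that goto-unwind pattern (step_a/b/c and undo_a/b are placeholders, not gpiolib functions):

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* force the unwind path */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int setup(void)
{
	int err;

	err = step_a();
	if (err)
		return err;
	err = step_b();
	if (err)
		goto err_undo_a;
	err = step_c();
	if (err)
		goto err_undo_b;
	return 0;

err_undo_b:		/* labels run in reverse setup order */
	undo_b();
err_undo_a:
	undo_a();
	return err;
}

int main(void)
{
	printf("setup: %d\n", setup());
	return 0;
}
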
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4e4094f842e7..8b26c970a3cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1143,6 +1143,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
1143 num_deps = chunk->length_dw * 4 / 1143 num_deps = chunk->length_dw * 4 /
1144 sizeof(struct drm_amdgpu_cs_chunk_sem); 1144 sizeof(struct drm_amdgpu_cs_chunk_sem);
1145 1145
1146 if (p->post_deps)
1147 return -EINVAL;
1148
1146 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), 1149 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1147 GFP_KERNEL); 1150 GFP_KERNEL);
1148 p->num_post_deps = 0; 1151 p->num_post_deps = 0;
@@ -1166,8 +1169,7 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
1166 1169
1167 1170
1168static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p, 1171static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
1169 struct amdgpu_cs_chunk 1172 struct amdgpu_cs_chunk *chunk)
1170 *chunk)
1171{ 1173{
1172 struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; 1174 struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
1173 unsigned num_deps; 1175 unsigned num_deps;
@@ -1177,6 +1179,9 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
1177 num_deps = chunk->length_dw * 4 / 1179 num_deps = chunk->length_dw * 4 /
1178 sizeof(struct drm_amdgpu_cs_chunk_syncobj); 1180 sizeof(struct drm_amdgpu_cs_chunk_syncobj);
1179 1181
1182 if (p->post_deps)
1183 return -EINVAL;
1184
1180 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), 1185 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1181 GFP_KERNEL); 1186 GFP_KERNEL);
1182 p->num_post_deps = 0; 1187 p->num_post_deps = 0;
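
[Note] The two -EINVAL hunks in amdgpu_cs.c enforce "at most one syncobj-out chunk per submission": a second chunk would otherwise leak the first post_deps allocation and clobber the array. A toy version of that guard:

#include <stdio.h>
#include <stdlib.h>

struct parser { int *post_deps; int num; };

static int process_out_chunk(struct parser *p, int num)
{
	if (p->post_deps)
		return -1;	/* duplicate chunk: reject, don't leak */

	p->post_deps = calloc(num, sizeof(*p->post_deps));
	if (!p->post_deps)
		return -1;
	p->num = num;
	return 0;
}

int main(void)
{
	struct parser p = { 0 };

	printf("first:  %d\n", process_out_chunk(&p, 4));
	printf("second: %d\n", process_out_chunk(&p, 4));
	free(p.post_deps);
	return 0;
}
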
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 04b8ac4432c7..4ea67f94cae2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -604,6 +604,10 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
604 (adev->gfx.rlc_feature_version < 1) || 604 (adev->gfx.rlc_feature_version < 1) ||
605 !adev->gfx.rlc.is_rlc_v2_1) 605 !adev->gfx.rlc.is_rlc_v2_1)
606 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 606 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
607 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
608 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
609 AMD_PG_SUPPORT_CP |
610 AMD_PG_SUPPORT_RLC_SMU_HS;
607 break; 611 break;
608 default: 612 default:
609 break; 613 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 662612f89c70..9922bce3fd89 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -552,7 +552,6 @@ static int nv_common_early_init(void *handle)
552 AMD_CG_SUPPORT_BIF_LS; 552 AMD_CG_SUPPORT_BIF_LS;
553 adev->pg_flags = AMD_PG_SUPPORT_VCN | 553 adev->pg_flags = AMD_PG_SUPPORT_VCN |
554 AMD_PG_SUPPORT_VCN_DPG | 554 AMD_PG_SUPPORT_VCN_DPG |
555 AMD_PG_SUPPORT_MMHUB |
556 AMD_PG_SUPPORT_ATHUB; 555 AMD_PG_SUPPORT_ATHUB;
557 adev->external_rev_id = adev->rev_id + 0x1; 556 adev->external_rev_id = adev->rev_id + 0x1;
558 break; 557 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 23265414d448..04fbf05d7176 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -992,11 +992,6 @@ static int soc15_common_early_init(void *handle)
992 992
993 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; 993 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
994 } 994 }
995
996 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
997 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
998 AMD_PG_SUPPORT_CP |
999 AMD_PG_SUPPORT_RLC_SMU_HS;
1000 break; 995 break;
1001 default: 996 default:
1002 /* FIXME: not supported yet */ 997 /* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4a29f72334d0..45be7a2132bb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3131,13 +3131,25 @@ static enum dc_color_depth
3131convert_color_depth_from_display_info(const struct drm_connector *connector, 3131convert_color_depth_from_display_info(const struct drm_connector *connector,
3132 const struct drm_connector_state *state) 3132 const struct drm_connector_state *state)
3133{ 3133{
3134 uint32_t bpc = connector->display_info.bpc; 3134 uint8_t bpc = (uint8_t)connector->display_info.bpc;
3135
3136 /* Assume 8 bpc by default if no bpc is specified. */
3137 bpc = bpc ? bpc : 8;
3135 3138
3136 if (!state) 3139 if (!state)
3137 state = connector->state; 3140 state = connector->state;
3138 3141
3139 if (state) { 3142 if (state) {
3140 bpc = state->max_bpc; 3143 /*
3144 * Cap display bpc based on the user requested value.
3145 *
3146 * The value for state->max_bpc may not correctly updated
3147 * depending on when the connector gets added to the state
3148 * or if this was called outside of atomic check, so it
3149 * can't be used directly.
3150 */
3151 bpc = min(bpc, state->max_requested_bpc);
3152
3141 /* Round down to the nearest even number. */ 3153 /* Round down to the nearest even number. */
3142 bpc = bpc - (bpc & 1); 3154 bpc = bpc - (bpc & 1);
3143 } 3155 }
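
[Note] The amdgpu_dm hunk boils down to one computation: default to 8 bpc when the display reports none, cap at the user's max_requested_bpc, then round down to even. Standalone arithmetic, no DRM types:

#include <stdio.h>

static unsigned int effective_bpc(unsigned int info_bpc,
				  unsigned int max_requested_bpc)
{
	unsigned int bpc = info_bpc ? info_bpc : 8;	/* assume 8 if unset */

	if (bpc > max_requested_bpc)
		bpc = max_requested_bpc;
	return bpc - (bpc & 1);		/* round down to even */
}

int main(void)
{
	printf("%u\n", effective_bpc(0, 16));	/* 8  */
	printf("%u\n", effective_bpc(12, 10));	/* 10 */
	printf("%u\n", effective_bpc(12, 11));	/* 10 */
	return 0;
}
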
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index a0f52c86d8c7..a78b2e295895 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -907,8 +907,6 @@ struct smu_funcs
907 ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0) 907 ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0)
908#define smu_set_azalia_d3_pme(smu) \ 908#define smu_set_azalia_d3_pme(smu) \
909 ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0) 909 ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0)
910#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
911 ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
912#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \ 910#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
913 ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0) 911 ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
914#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \ 912#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 5fde5cf65b42..53097961bf2b 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -326,7 +326,8 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
326 struct amdgpu_device *adev = smu->adev; 326 struct amdgpu_device *adev = smu->adev;
327 const struct smc_firmware_header_v1_0 *hdr; 327 const struct smc_firmware_header_v1_0 *hdr;
328 int ret, index; 328 int ret, index;
329 uint32_t size; 329 uint32_t size = 0;
330 uint16_t atom_table_size;
330 uint8_t frev, crev; 331 uint8_t frev, crev;
331 void *table; 332 void *table;
332 uint16_t version_major, version_minor; 333 uint16_t version_major, version_minor;
@@ -354,10 +355,11 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
354 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 355 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
355 powerplayinfo); 356 powerplayinfo);
356 357
357 ret = smu_get_atom_data_table(smu, index, (uint16_t *)&size, &frev, &crev, 358 ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
358 (uint8_t **)&table); 359 (uint8_t **)&table);
359 if (ret) 360 if (ret)
360 return ret; 361 return ret;
362 size = atom_table_size;
361 } 363 }
362 364
363 if (!smu->smu_table.power_play_table) 365 if (!smu->smu_table.power_play_table)
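
[Note] The smu_v11_0.c hunk fixes a classic out-parameter width bug: casting &size (a uint32_t) to uint16_t * lets the callee write only the low 16 bits, leaving the upper bits stale. The fix reads into a correctly sized temporary and widens afterwards. Reduced to its essence (little-endian layout assumed for the "broken" output):

#include <stdint.h>
#include <stdio.h>

static void get_table_size(uint16_t *out)
{
	*out = 0x1234;		/* callee only ever writes 16 bits */
}

int main(void)
{
	uint32_t size = 0xdeadbeef;		/* stale garbage */

	get_table_size((uint16_t *)&size);	/* broken: high bits survive */
	printf("broken: 0x%x\n", size);		/* 0xdead1234 on LE */

	uint16_t atom_table_size;
	get_table_size(&atom_table_size);	/* fixed pattern */
	size = atom_table_size;
	printf("fixed:  0x%x\n", size);		/* 0x1234 */
	return 0;
}
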
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 5a118984de33..a0eabc134dd6 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -8,6 +8,7 @@
8#include <linux/iommu.h> 8#include <linux/iommu.h>
9#include <linux/of_device.h> 9#include <linux/of_device.h>
10#include <linux/of_graph.h> 10#include <linux/of_graph.h>
11#include <linux/of_reserved_mem.h>
11#include <linux/platform_device.h> 12#include <linux/platform_device.h>
12#include <linux/dma-mapping.h> 13#include <linux/dma-mapping.h>
13#ifdef CONFIG_DEBUG_FS 14#ifdef CONFIG_DEBUG_FS
@@ -143,6 +144,12 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
143 return mdev->irq; 144 return mdev->irq;
144 } 145 }
145 146
147 /* Get the optional framebuffer memory resource */
148 ret = of_reserved_mem_device_init(dev);
149 if (ret && ret != -ENODEV)
150 return ret;
151 ret = 0;
152
146 for_each_available_child_of_node(np, child) { 153 for_each_available_child_of_node(np, child) {
147 if (of_node_cmp(child->name, "pipeline") == 0) { 154 if (of_node_cmp(child->name, "pipeline") == 0) {
148 ret = komeda_parse_pipe_dt(mdev, child); 155 ret = komeda_parse_pipe_dt(mdev, child);
@@ -289,6 +296,8 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
289 296
290 mdev->n_pipelines = 0; 297 mdev->n_pipelines = 0;
291 298
299 of_reserved_mem_device_release(dev);
300
292 if (funcs && funcs->cleanup) 301 if (funcs && funcs->cleanup)
293 funcs->cleanup(mdev); 302 funcs->cleanup(mdev);
294 303
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
index cd4d9f53ddef..c9a1edb9a000 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
@@ -35,6 +35,25 @@ komeda_get_format_caps(struct komeda_format_caps_table *table,
35 return NULL; 35 return NULL;
36} 36}
37 37
38u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info, u64 modifier)
39{
40 u32 bpp;
41
42 switch (info->format) {
43 case DRM_FORMAT_YUV420_8BIT:
44 bpp = 12;
45 break;
46 case DRM_FORMAT_YUV420_10BIT:
47 bpp = 15;
48 break;
49 default:
50 bpp = info->cpp[0] * 8;
51 break;
52 }
53
54 return bpp;
55}
56
38/* Two assumptions 57/* Two assumptions
39 * 1. RGB always has YTR 58 * 1. RGB always has YTR
40 * 2. Tiled RGB always has SC 59 * 2. Tiled RGB always has SC
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
index 3631910d33b5..32273cf18f7c 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
@@ -97,6 +97,9 @@ const struct komeda_format_caps *
97komeda_get_format_caps(struct komeda_format_caps_table *table, 97komeda_get_format_caps(struct komeda_format_caps_table *table,
98 u32 fourcc, u64 modifier); 98 u32 fourcc, u64 modifier);
99 99
100u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info,
101 u64 modifier);
102
100u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table, 103u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table,
101 u32 layer_type, u32 *n_fmts); 104 u32 layer_type, u32 *n_fmts);
102 105
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
index 3b0a70ed6aa0..1b01a625f40e 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
@@ -43,7 +43,7 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
43 struct drm_framebuffer *fb = &kfb->base; 43 struct drm_framebuffer *fb = &kfb->base;
44 const struct drm_format_info *info = fb->format; 44 const struct drm_format_info *info = fb->format;
45 struct drm_gem_object *obj; 45 struct drm_gem_object *obj;
46 u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks; 46 u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks, bpp;
47 u64 min_size; 47 u64 min_size;
48 48
49 obj = drm_gem_object_lookup(file, mode_cmd->handles[0]); 49 obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
@@ -88,8 +88,9 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
88 kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE, 88 kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE,
89 alignment_header); 89 alignment_header);
90 90
91 bpp = komeda_get_afbc_format_bpp(info, fb->modifier);
91 kfb->afbc_size = kfb->offset_payload + n_blocks * 92 kfb->afbc_size = kfb->offset_payload + n_blocks *
92 ALIGN(info->cpp[0] * AFBC_SUPERBLK_PIXELS, 93 ALIGN(bpp * AFBC_SUPERBLK_PIXELS / 8,
93 AFBC_SUPERBLK_ALIGNMENT); 94 AFBC_SUPERBLK_ALIGNMENT);
94 min_size = kfb->afbc_size + fb->offsets[0]; 95 min_size = kfb->afbc_size + fb->offsets[0];
95 if (min_size > obj->size) { 96 if (min_size > obj->size) {
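
[Note] The komeda helper exists because cpp[0] does not describe the packed AFBC YUV420 formats, so the payload size must come from true bits-per-pixel (12 for YUV420 8-bit, 15 for 10-bit). A toy recomputation of the size formula; the superblock constants here are illustrative, only the 12/15 bpp values mirror the patch:

#include <stdio.h>

#define SUPERBLK_PIXELS    256
#define SUPERBLK_ALIGNMENT 128
#define ALIGN_UP(x, a)     (((x) + (a) - 1) / (a) * (a))

static unsigned int afbc_bpp(unsigned int cpp0, int yuv420_10bit)
{
	if (cpp0)
		return cpp0 * 8;	/* normal formats: bytes -> bits */
	return yuv420_10bit ? 15 : 12;	/* packed YUV420 special cases */
}

int main(void)
{
	unsigned int n_blocks = 100;
	unsigned int bpp = afbc_bpp(0, 0);	/* e.g. YUV420 8-bit */
	unsigned long payload =
		(unsigned long)n_blocks *
		ALIGN_UP(bpp * SUPERBLK_PIXELS / 8, SUPERBLK_ALIGNMENT);

	printf("bpp=%u payload=%lu\n", bpp, payload);
	return 0;
}
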
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 419a8b0e5de8..d50e75f0b2bd 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -15,6 +15,7 @@
15#include <drm/drm_gem_framebuffer_helper.h> 15#include <drm/drm_gem_framebuffer_helper.h>
16#include <drm/drm_irq.h> 16#include <drm/drm_irq.h>
17#include <drm/drm_vblank.h> 17#include <drm/drm_vblank.h>
18#include <drm/drm_probe_helper.h>
18 19
19#include "komeda_dev.h" 20#include "komeda_dev.h"
20#include "komeda_framebuffer.h" 21#include "komeda_framebuffer.h"
@@ -315,6 +316,8 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
315 316
316 drm->irq_enabled = true; 317 drm->irq_enabled = true;
317 318
319 drm_kms_helper_poll_init(drm);
320
318 err = drm_dev_register(drm, 0); 321 err = drm_dev_register(drm, 0);
319 if (err) 322 if (err)
320 goto cleanup_mode_config; 323 goto cleanup_mode_config;
@@ -322,6 +325,7 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
322 return kms; 325 return kms;
323 326
324cleanup_mode_config: 327cleanup_mode_config:
328 drm_kms_helper_poll_fini(drm);
325 drm->irq_enabled = false; 329 drm->irq_enabled = false;
326 drm_mode_config_cleanup(drm); 330 drm_mode_config_cleanup(drm);
327 komeda_kms_cleanup_private_objs(kms); 331 komeda_kms_cleanup_private_objs(kms);
@@ -338,6 +342,7 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
338 drm->irq_enabled = false; 342 drm->irq_enabled = false;
339 mdev->funcs->disable_irq(mdev); 343 mdev->funcs->disable_irq(mdev);
340 drm_dev_unregister(drm); 344 drm_dev_unregister(drm);
345 drm_kms_helper_poll_fini(drm);
341 component_unbind_all(mdev->dev, drm); 346 component_unbind_all(mdev->dev, drm);
342 komeda_kms_cleanup_private_objs(kms); 347 komeda_kms_cleanup_private_objs(kms);
343 drm_mode_config_cleanup(drm); 348 drm_mode_config_cleanup(drm);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 7925a176f900..1cb1fa74cfbc 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -1465,8 +1465,8 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
1465 else if (intel_crtc_has_dp_encoder(pipe_config)) 1465 else if (intel_crtc_has_dp_encoder(pipe_config))
1466 dotclock = intel_dotclock_calculate(pipe_config->port_clock, 1466 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
1467 &pipe_config->dp_m_n); 1467 &pipe_config->dp_m_n);
1468 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36) 1468 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
1469 dotclock = pipe_config->port_clock * 2 / 3; 1469 dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp;
1470 else 1470 else
1471 dotclock = pipe_config->port_clock; 1471 dotclock = pipe_config->port_clock;
1472 1472
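
[Note] The i915 hunk generalizes deep-color HDMI dotclock recovery: instead of special-casing only 36 bpp (the old * 2 / 3), it applies dotclock = port_clock * 24 / bpp for any bpp above 24, which also covers 10 bpc. As plain arithmetic:

#include <stdio.h>

static int dotclock_khz(int port_clock, int pipe_bpp, int hdmi)
{
	if (hdmi && pipe_bpp > 24)
		return port_clock * 24 / pipe_bpp;
	return port_clock;
}

int main(void)
{
	printf("%d\n", dotclock_khz(445500, 36, 1));	/* old 12bpc case */
	printf("%d\n", dotclock_khz(371250, 30, 1));	/* newly covered 10bpc */
	printf("%d\n", dotclock_khz(297000, 24, 1));	/* pass-through */
	return 0;
}
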
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1d58f7ec5d84..f11979879e7b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -829,7 +829,7 @@ struct intel_crtc_state {
829 829
830 /* 830 /*
831 * Frequence the dpll for the port should run at. Differs from the 831 * Frequence the dpll for the port should run at. Differs from the
832 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also 832 * adjusted dotclock e.g. for DP or 10/12bpc hdmi mode. This is also
833 * already multiplied by pixel_multiplier. 833 * already multiplied by pixel_multiplier.
834 */ 834 */
835 int port_clock; 835 int port_clock;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 95fdbd0fbcac..945bc20f1d33 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -17,6 +17,7 @@
17#include <linux/of_address.h> 17#include <linux/of_address.h>
18#include <linux/of_platform.h> 18#include <linux/of_platform.h>
19#include <linux/pm_runtime.h> 19#include <linux/pm_runtime.h>
20#include <linux/dma-mapping.h>
20 21
21#include "mtk_drm_crtc.h" 22#include "mtk_drm_crtc.h"
22#include "mtk_drm_ddp.h" 23#include "mtk_drm_ddp.h"
@@ -213,6 +214,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
213 struct mtk_drm_private *private = drm->dev_private; 214 struct mtk_drm_private *private = drm->dev_private;
214 struct platform_device *pdev; 215 struct platform_device *pdev;
215 struct device_node *np; 216 struct device_node *np;
217 struct device *dma_dev;
216 int ret; 218 int ret;
217 219
218 if (!iommu_present(&platform_bus_type)) 220 if (!iommu_present(&platform_bus_type))
@@ -275,7 +277,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
275 goto err_component_unbind; 277 goto err_component_unbind;
276 } 278 }
277 279
278 private->dma_dev = &pdev->dev; 280 dma_dev = &pdev->dev;
281 private->dma_dev = dma_dev;
282
283 /*
284 * Configure the DMA segment size to make sure we get contiguous IOVA
285 * when importing PRIME buffers.
286 */
287 if (!dma_dev->dma_parms) {
288 private->dma_parms_allocated = true;
289 dma_dev->dma_parms =
290 devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
291 GFP_KERNEL);
292 }
293 if (!dma_dev->dma_parms) {
294 ret = -ENOMEM;
295 goto err_component_unbind;
296 }
297
298 ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
299 if (ret) {
300 dev_err(dma_dev, "Failed to set DMA segment size\n");
301 goto err_unset_dma_parms;
302 }
279 303
280 /* 304 /*
281 * We don't use the drm_irq_install() helpers provided by the DRM 305 * We don't use the drm_irq_install() helpers provided by the DRM
@@ -285,13 +309,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
285 drm->irq_enabled = true; 309 drm->irq_enabled = true;
286 ret = drm_vblank_init(drm, MAX_CRTC); 310 ret = drm_vblank_init(drm, MAX_CRTC);
287 if (ret < 0) 311 if (ret < 0)
288 goto err_component_unbind; 312 goto err_unset_dma_parms;
289 313
290 drm_kms_helper_poll_init(drm); 314 drm_kms_helper_poll_init(drm);
291 drm_mode_config_reset(drm); 315 drm_mode_config_reset(drm);
292 316
293 return 0; 317 return 0;
294 318
319err_unset_dma_parms:
320 if (private->dma_parms_allocated)
321 dma_dev->dma_parms = NULL;
295err_component_unbind: 322err_component_unbind:
296 component_unbind_all(drm->dev, drm); 323 component_unbind_all(drm->dev, drm);
297err_config_cleanup: 324err_config_cleanup:
@@ -302,9 +329,14 @@ err_config_cleanup:
302 329
303static void mtk_drm_kms_deinit(struct drm_device *drm) 330static void mtk_drm_kms_deinit(struct drm_device *drm)
304{ 331{
332 struct mtk_drm_private *private = drm->dev_private;
333
305 drm_kms_helper_poll_fini(drm); 334 drm_kms_helper_poll_fini(drm);
306 drm_atomic_helper_shutdown(drm); 335 drm_atomic_helper_shutdown(drm);
307 336
337 if (private->dma_parms_allocated)
338 private->dma_dev->dma_parms = NULL;
339
308 component_unbind_all(drm->dev, drm); 340 component_unbind_all(drm->dev, drm);
309 drm_mode_config_cleanup(drm); 341 drm_mode_config_cleanup(drm);
310} 342}
@@ -320,6 +352,18 @@ static const struct file_operations mtk_drm_fops = {
320 .compat_ioctl = drm_compat_ioctl, 352 .compat_ioctl = drm_compat_ioctl,
321}; 353};
322 354
355/*
356 * We need to override this because the device used to import the memory is
357 * not dev->dev, as drm_gem_prime_import() expects.
358 */
359struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
360 struct dma_buf *dma_buf)
361{
362 struct mtk_drm_private *private = dev->dev_private;
363
364 return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
365}
366
323static struct drm_driver mtk_drm_driver = { 367static struct drm_driver mtk_drm_driver = {
324 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | 368 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
325 DRIVER_ATOMIC, 369 DRIVER_ATOMIC,
@@ -331,7 +375,7 @@ static struct drm_driver mtk_drm_driver = {
331 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 375 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
332 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 376 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
333 .gem_prime_export = drm_gem_prime_export, 377 .gem_prime_export = drm_gem_prime_export,
334 .gem_prime_import = drm_gem_prime_import, 378 .gem_prime_import = mtk_drm_gem_prime_import,
335 .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table, 379 .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
336 .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, 380 .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
337 .gem_prime_mmap = mtk_drm_gem_mmap_buf, 381 .gem_prime_mmap = mtk_drm_gem_mmap_buf,
@@ -524,12 +568,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
524 comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL); 568 comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
525 if (!comp) { 569 if (!comp) {
526 ret = -ENOMEM; 570 ret = -ENOMEM;
571 of_node_put(node);
527 goto err_node; 572 goto err_node;
528 } 573 }
529 574
530 ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL); 575 ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
531 if (ret) 576 if (ret) {
577 of_node_put(node);
532 goto err_node; 578 goto err_node;
579 }
533 580
534 private->ddp_comp[comp_id] = comp; 581 private->ddp_comp[comp_id] = comp;
535 } 582 }
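
Three independent fixes land in mtk_drm_drv.c: PRIME imports are redirected to the DMA-capable device via drm_gem_prime_import_dev(), the component loop's error paths stop leaking device-node references thanks to of_node_put(), and the DMA device gets a maximum segment size so imported buffers map to contiguous IOVAs. A kernel-style sketch of the segment-size setup, assuming a provider device whose dma_parms may not yet exist (names are illustrative):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	/* Sketch only: dma_set_max_seg_size() stores the limit in
	 * dev->dma_parms, so that structure must exist first;
	 * devm_kzalloc() ties its lifetime to @provider. */
	static int example_set_seg_size(struct device *provider,
					struct device *dma_dev)
	{
		if (!dma_dev->dma_parms) {
			dma_dev->dma_parms =
				devm_kzalloc(provider,
					     sizeof(*dma_dev->dma_parms),
					     GFP_KERNEL);
			if (!dma_dev->dma_parms)
				return -ENOMEM;
		}
		return dma_set_max_seg_size(dma_dev,
					    (unsigned int)DMA_BIT_MASK(32));
	}
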
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 598ff3e70446..e03fea12ff59 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -51,6 +51,8 @@ struct mtk_drm_private {
51 } commit; 51 } commit;
52 52
53 struct drm_atomic_state *suspend_state; 53 struct drm_atomic_state *suspend_state;
54
55 bool dma_parms_allocated;
54}; 56};
55 57
56extern struct platform_driver mtk_ddp_driver; 58extern struct platform_driver mtk_ddp_driver;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index b4e7404fe660..a11637b0f6cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
40 u8 *ptr = msg->buf; 40 u8 *ptr = msg->buf;
41 41
42 while (remaining) { 42 while (remaining) {
43 u8 cnt = (remaining > 16) ? 16 : remaining; 43 u8 cnt, retries, cmd;
44 u8 cmd;
45 44
46 if (msg->flags & I2C_M_RD) 45 if (msg->flags & I2C_M_RD)
47 cmd = 1; 46 cmd = 1;
@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
51 if (mcnt || remaining > 16) 50 if (mcnt || remaining > 16)
52 cmd |= 4; /* MOT */ 51 cmd |= 4; /* MOT */
53 52
54 ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt); 53 for (retries = 0, cnt = 0;
55 if (ret < 0) { 54 retries < 32 && !cnt;
56 nvkm_i2c_aux_release(aux); 55 retries++) {
57 return ret; 56 cnt = min_t(u8, remaining, 16);
57 ret = aux->func->xfer(aux, true, cmd,
58 msg->addr, ptr, &cnt);
59 if (ret < 0)
60 goto out;
61 }
62 if (!cnt) {
63 AUX_TRACE(aux, "no data after 32 retries");
64 ret = -EIO;
65 goto out;
58 } 66 }
59 67
60 ptr += cnt; 68 ptr += cnt;
@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
64 msg++; 72 msg++;
65 } 73 }
66 74
75 ret = num;
76out:
67 nvkm_i2c_aux_release(aux); 77 nvkm_i2c_aux_release(aux);
68 return num; 78 return ret;
69} 79}
70 80
71static u32 81static u32
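
The nouveau rework above turns a single AUX transfer into a bounded retry loop: a transfer may legitimately complete with zero bytes moved, but after 32 zero-progress attempts the adapter gives up with -EIO, and every exit now funnels through one release point. A standalone sketch of that loop shape, with a stub standing in for aux->func->xfer():

	#include <stdio.h>

	/* Stub transfer; reports bytes moved via *cnt, negative on error. */
	static int xfer_stub(unsigned char *cnt) { *cnt = 16; return 0; }

	int main(void)
	{
		unsigned char cnt, retries;
		int ret = -1;

		for (retries = 0, cnt = 0; retries < 32 && !cnt; retries++) {
			if (xfer_stub(&cnt) < 0)
				goto out;	/* hard error: release and bail */
		}
		if (!cnt) {
			fprintf(stderr, "no data after 32 retries\n");
			goto out;		/* persistent zero progress: -EIO */
		}
		ret = 0;
	out:
		/* single release point, mirroring nvkm_i2c_aux_release() */
		return ret ? 1 : 0;
	}
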
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 288c59dae56a..1bad0a2cc5c6 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -669,7 +669,7 @@ static int pdev_probe(struct platform_device *pdev)
669 if (omapdss_is_initialized() == false) 669 if (omapdss_is_initialized() == false)
670 return -EPROBE_DEFER; 670 return -EPROBE_DEFER;
671 671
672 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 672 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
673 if (ret) { 673 if (ret) {
674 dev_err(&pdev->dev, "Failed to set the DMA mask\n"); 674 dev_err(&pdev->dev, "Failed to set the DMA mask\n");
675 return ret; 675 return ret;
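
The omapdrm change swaps dma_set_coherent_mask() for dma_coerce_mask_and_coherent(), which also sets the streaming mask and forcibly points dev->dma_mask at the coherent mask when a platform device was never given one. A kernel-style sketch, assuming a probe context:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	static int example_probe_dma(struct device *dev)
	{
		/* Sets both dev->dma_mask (coercing it to exist) and
		 * dev->coherent_dma_mask in one call. */
		return dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
	}
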
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 1c62578590f4..082d02c84024 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -673,10 +673,8 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)
673 673
674 /* Locate the companion LVDS encoder for dual-link operation, if any. */ 674 /* Locate the companion LVDS encoder for dual-link operation, if any. */
675 companion = of_parse_phandle(dev->of_node, "renesas,companion", 0); 675 companion = of_parse_phandle(dev->of_node, "renesas,companion", 0);
676 if (!companion) { 676 if (!companion)
677 dev_err(dev, "Companion LVDS encoder not found\n"); 677 return 0;
678 return -ENXIO;
679 }
680 678
681 /* 679 /*
682 * Sanity check: the companion encoder must have the same compatible 680 * Sanity check: the companion encoder must have the same compatible
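
The rcar-lvds fix demotes a missing "renesas,companion" phandle from a hard -ENXIO to a valid single-link configuration. A kernel-style sketch of the optional-phandle pattern, with an illustrative property name:

	#include <linux/of.h>

	static int example_parse_companion(struct device_node *np)
	{
		struct device_node *companion;

		companion = of_parse_phandle(np, "example,companion", 0);
		if (!companion)
			return 0;	/* property absent: optional, not an error */

		/* ... validate and use the companion node here ... */

		of_node_put(companion);
		return 0;
	}
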
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 64c43ee6bd92..df0cc8f46d7b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -314,6 +314,7 @@ static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon,
314 /* R and B components are only 5 bits deep */ 314 /* R and B components are only 5 bits deep */
315 val |= SUN4I_TCON0_FRM_CTL_MODE_R; 315 val |= SUN4I_TCON0_FRM_CTL_MODE_R;
316 val |= SUN4I_TCON0_FRM_CTL_MODE_B; 316 val |= SUN4I_TCON0_FRM_CTL_MODE_B;
317 /* Fall through */
317 case MEDIA_BUS_FMT_RGB666_1X18: 318 case MEDIA_BUS_FMT_RGB666_1X18:
318 case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: 319 case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
319 /* Fall through: enable dithering */ 320 /* Fall through: enable dithering */
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index a1fc8b520985..b889ad3e86e1 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -993,6 +993,7 @@ static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host,
993 ret = sun6i_dsi_dcs_read(dsi, msg); 993 ret = sun6i_dsi_dcs_read(dsi, msg);
994 break; 994 break;
995 } 995 }
996 /* Else, fall through */
996 997
997 default: 998 default:
998 ret = -EINVAL; 999 ret = -EINVAL;
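
Both sunxi hunks only add comments: GCC's -Wimplicit-fallthrough treats a /* Fall through */ comment immediately before the next case label as an explicit annotation, so intentional fall-throughs stop warning. A standalone sketch:

	#include <stdio.h>

	static int depth_bits(int fmt)
	{
		int bits = 0;

		switch (fmt) {
		case 0:
			bits += 2;	/* extra work for this format only */
			/* Fall through */
		case 1:
			bits += 16;
			break;
		default:
			bits = -1;
		}
		return bits;
	}

	int main(void)
	{
		printf("%d %d\n", depth_bits(0), depth_bits(1));	/* 18 16 */
		return 0;
	}
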
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 2310c96ccf4a..db1b55df0d13 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -1153,8 +1153,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
1153 1153
1154 INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback); 1154 INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
1155 1155
1156 cp2112_gpio_direction_input(gc, d->hwirq);
1157
1158 if (!dev->gpio_poll) { 1156 if (!dev->gpio_poll) {
1159 dev->gpio_poll = true; 1157 dev->gpio_poll = true;
1160 schedule_delayed_work(&dev->gpio_poll_worker, 0); 1158 schedule_delayed_work(&dev->gpio_poll_worker, 0);
@@ -1204,6 +1202,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
1204 return PTR_ERR(dev->desc[pin]); 1202 return PTR_ERR(dev->desc[pin]);
1205 } 1203 }
1206 1204
1205 ret = cp2112_gpio_direction_input(&dev->gc, pin);
1206 if (ret < 0) {
1207 dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n");
1208 goto err_desc;
1209 }
1210
1207 ret = gpiochip_lock_as_irq(&dev->gc, pin); 1211 ret = gpiochip_lock_as_irq(&dev->gc, pin);
1208 if (ret) { 1212 if (ret) {
1209 dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n"); 1213 dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 21268c9fa71a..0179f7ed77e5 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -3749,30 +3749,8 @@ static const struct hid_device_id hidpp_devices[] = {
3749 3749
3750 { L27MHZ_DEVICE(HID_ANY_ID) }, 3750 { L27MHZ_DEVICE(HID_ANY_ID) },
3751 3751
3752 { /* Logitech G203/Prodigy Gaming Mouse */
3753 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC084) },
3754 { /* Logitech G302 Gaming Mouse */
3755 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07F) },
3756 { /* Logitech G303 Gaming Mouse */
3757 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC080) },
3758 { /* Logitech G400 Gaming Mouse */
3759 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07E) },
3760 { /* Logitech G403 Wireless Gaming Mouse over USB */ 3752 { /* Logitech G403 Wireless Gaming Mouse over USB */
3761 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) }, 3753 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) },
3762 { /* Logitech G403 Gaming Mouse */
3763 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC083) },
3764 { /* Logitech G403 Hero Gaming Mouse over USB */
3765 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08F) },
3766 { /* Logitech G502 Proteus Core Gaming Mouse */
3767 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07D) },
3768 { /* Logitech G502 Proteus Spectrum Gaming Mouse over USB */
3769 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC332) },
3770 { /* Logitech G502 Hero Gaming Mouse over USB */
3771 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08B) },
3772 { /* Logitech G700 Gaming Mouse over USB */
3773 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) },
3774 { /* Logitech G700s Gaming Mouse over USB */
3775 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07C) },
3776 { /* Logitech G703 Gaming Mouse over USB */ 3754 { /* Logitech G703 Gaming Mouse over USB */
3777 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) }, 3755 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) },
3778 { /* Logitech G703 Hero Gaming Mouse over USB */ 3756 { /* Logitech G703 Hero Gaming Mouse over USB */
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 1065692f90e2..5792a104000a 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -24,6 +24,7 @@
24#define ICL_MOBILE_DEVICE_ID 0x34FC 24#define ICL_MOBILE_DEVICE_ID 0x34FC
25#define SPT_H_DEVICE_ID 0xA135 25#define SPT_H_DEVICE_ID 0xA135
26#define CML_LP_DEVICE_ID 0x02FC 26#define CML_LP_DEVICE_ID 0x02FC
27#define EHL_Ax_DEVICE_ID 0x4BB3
27 28
28#define REVISION_ID_CHT_A0 0x6 29#define REVISION_ID_CHT_A0 0x6
29#define REVISION_ID_CHT_Ax_SI 0x0 30#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index aa80b4d3b740..279567baca3d 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -33,6 +33,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
33 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)}, 33 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
34 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)}, 34 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
35 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)}, 35 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
36 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
36 {0, } 37 {0, }
37}; 38};
38MODULE_DEVICE_TABLE(pci, ish_pci_tbl); 39MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
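
Enabling the new EHL platform in intel-ish-hid takes exactly two lines: a device-ID #define and an entry in the driver's PCI match table, which is what binds the driver at probe time. A kernel-style sketch of the pattern; the table name is illustrative:

	#include <linux/module.h>
	#include <linux/pci.h>

	#define EXAMPLE_EHL_Ax_DEVICE_ID 0x4BB3

	static const struct pci_device_id example_pci_tbl[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, EXAMPLE_EHL_Ax_DEVICE_ID) },
		{ 0, }
	};
	MODULE_DEVICE_TABLE(pci, example_pci_tbl);
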
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 7a8ddc999a8e..1713235d28cb 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -846,6 +846,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
846 y >>= 1; 846 y >>= 1;
847 distance >>= 1; 847 distance >>= 1;
848 } 848 }
849 if (features->type == INTUOSHT2)
850 distance = features->distance_max - distance;
849 input_report_abs(input, ABS_X, x); 851 input_report_abs(input, ABS_X, x);
850 input_report_abs(input, ABS_Y, y); 852 input_report_abs(input, ABS_Y, y);
851 input_report_abs(input, ABS_DISTANCE, distance); 853 input_report_abs(input, ABS_DISTANCE, distance);
@@ -1059,7 +1061,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
1059 input_report_key(input, BTN_BASE2, (data[11] & 0x02)); 1061 input_report_key(input, BTN_BASE2, (data[11] & 0x02));
1060 1062
1061 if (data[12] & 0x80) 1063 if (data[12] & 0x80)
1062 input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f)); 1064 input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
1063 else 1065 else
1064 input_report_abs(input, ABS_WHEEL, 0); 1066 input_report_abs(input, ABS_WHEEL, 0);
1065 1067
@@ -1290,7 +1292,8 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
1290 } 1292 }
1291 if (wacom->tool[0]) { 1293 if (wacom->tool[0]) {
1292 input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); 1294 input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
1293 if (wacom->features.type == INTUOSP2_BT) { 1295 if (wacom->features.type == INTUOSP2_BT ||
1296 wacom->features.type == INTUOSP2S_BT) {
1294 input_report_abs(pen_input, ABS_DISTANCE, 1297 input_report_abs(pen_input, ABS_DISTANCE,
1295 range ? frame[13] : wacom->features.distance_max); 1298 range ? frame[13] : wacom->features.distance_max);
1296 } else { 1299 } else {
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 5f9505a087f6..23f358cb7f49 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -26,7 +26,7 @@
26 26
27static unsigned long virt_to_hvpfn(void *addr) 27static unsigned long virt_to_hvpfn(void *addr)
28{ 28{
29 unsigned long paddr; 29 phys_addr_t paddr;
30 30
31 if (is_vmalloc_addr(addr)) 31 if (is_vmalloc_addr(addr))
32 paddr = page_to_phys(vmalloc_to_page(addr)) + 32 paddr = page_to_phys(vmalloc_to_page(addr)) +
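
The hv fix matters on 32-bit kernels with 64-bit physical addressing (PAE): page_to_phys() returns phys_addr_t, and storing it in unsigned long drops the upper bits before the PFN shift. A standalone sketch of the truncation:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t phys_addr_t;	/* CONFIG_PHYS_ADDR_T_64BIT */

	int main(void)
	{
		phys_addr_t paddr = 0x123456789000ULL;	/* above 4 GiB */
		uint32_t as_ulong = (uint32_t)paddr;	/* 32-bit unsigned long */

		printf("pfn correct:   %#llx\n",
		       (unsigned long long)(paddr >> 12));
		printf("pfn truncated: %#x\n", as_ulong >> 12);
		return 0;
	}
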
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 362e70e9d145..fb16a622e8ab 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -146,8 +146,6 @@ struct hv_context {
146 */ 146 */
147 u64 guestid; 147 u64 guestid;
148 148
149 void *tsc_page;
150
151 struct hv_per_cpu_context __percpu *cpu_context; 149 struct hv_per_cpu_context __percpu *cpu_context;
152 150
153 /* 151 /*
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 19f1730a4f24..a68d0ccf67a4 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4724,10 +4724,14 @@ static int __init cma_init(void)
4724 if (ret) 4724 if (ret)
4725 goto err; 4725 goto err;
4726 4726
4727 cma_configfs_init(); 4727 ret = cma_configfs_init();
4728 if (ret)
4729 goto err_ib;
4728 4730
4729 return 0; 4731 return 0;
4730 4732
4733err_ib:
4734 ib_unregister_client(&cma_client);
4731err: 4735err:
4732 unregister_netdevice_notifier(&cma_nb); 4736 unregister_netdevice_notifier(&cma_nb);
4733 ib_sa_unregister_client(&sa_client); 4737 ib_sa_unregister_client(&sa_client);
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index b79890739a2c..af8c85d18e62 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -149,13 +149,11 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
149 struct auto_mode_param *param = &counter->mode.param; 149 struct auto_mode_param *param = &counter->mode.param;
150 bool match = true; 150 bool match = true;
151 151
152 if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res)) 152 if (!rdma_is_visible_in_pid_ns(&qp->res))
153 return false; 153 return false;
154 154
155 /* Ensure that counter belong to right PID */ 155 /* Ensure that counter belongs to the right PID */
156 if (!rdma_is_kernel_res(&counter->res) && 156 if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task))
157 !rdma_is_kernel_res(&qp->res) &&
158 (task_pid_vnr(counter->res.task) != current->pid))
159 return false; 157 return false;
160 158
161 if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE) 159 if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)
@@ -424,7 +422,7 @@ static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num)
424 return qp; 422 return qp;
425 423
426err: 424err:
427 rdma_restrack_put(&qp->res); 425 rdma_restrack_put(res);
428 return NULL; 426 return NULL;
429} 427}
430 428
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 87d40d1ecdde..020c26976558 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -382,8 +382,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
382 for (i = 0; i < RDMA_RESTRACK_MAX; i++) { 382 for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
383 if (!names[i]) 383 if (!names[i])
384 continue; 384 continue;
385 curr = rdma_restrack_count(device, i, 385 curr = rdma_restrack_count(device, i);
386 task_active_pid_ns(current));
387 ret = fill_res_info_entry(msg, names[i], curr); 386 ret = fill_res_info_entry(msg, names[i], curr);
388 if (ret) 387 if (ret)
389 goto err; 388 goto err;
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index bddff426ee0f..a07665f7ef8c 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -107,10 +107,8 @@ void rdma_restrack_clean(struct ib_device *dev)
107 * rdma_restrack_count() - the current usage of specific object 107 * rdma_restrack_count() - the current usage of specific object
108 * @dev: IB device 108 * @dev: IB device
109 * @type: actual type of object to operate 109 * @type: actual type of object to operate
110 * @ns: PID namespace
111 */ 110 */
112int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type, 111int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
113 struct pid_namespace *ns)
114{ 112{
115 struct rdma_restrack_root *rt = &dev->res[type]; 113 struct rdma_restrack_root *rt = &dev->res[type];
116 struct rdma_restrack_entry *e; 114 struct rdma_restrack_entry *e;
@@ -119,10 +117,9 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
119 117
120 xa_lock(&rt->xa); 118 xa_lock(&rt->xa);
121 xas_for_each(&xas, e, U32_MAX) { 119 xas_for_each(&xas, e, U32_MAX) {
122 if (ns == &init_pid_ns || 120 if (!rdma_is_visible_in_pid_ns(e))
123 (!rdma_is_kernel_res(e) && 121 continue;
124 ns == task_active_pid_ns(e->task))) 122 cnt++;
125 cnt++;
126 } 123 }
127 xa_unlock(&rt->xa); 124 xa_unlock(&rt->xa);
128 return cnt; 125 return cnt;
@@ -360,5 +357,7 @@ bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res)
360 */ 357 */
361 if (rdma_is_kernel_res(res)) 358 if (rdma_is_kernel_res(res))
362 return task_active_pid_ns(current) == &init_pid_ns; 359 return task_active_pid_ns(current) == &init_pid_ns;
363 return task_active_pid_ns(current) == task_active_pid_ns(res->task); 360
361 /* PID 0 means that resource is not found in current namespace */
362 return task_pid_vnr(res->task);
364} 363}
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 08da840ed7ee..56553668256f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -379,14 +379,9 @@ EXPORT_SYMBOL(ib_umem_release);
379 379
380int ib_umem_page_count(struct ib_umem *umem) 380int ib_umem_page_count(struct ib_umem *umem)
381{ 381{
382 int i; 382 int i, n = 0;
383 int n;
384 struct scatterlist *sg; 383 struct scatterlist *sg;
385 384
386 if (umem->is_odp)
387 return ib_umem_num_pages(umem);
388
389 n = 0;
390 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) 385 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
391 n += sg_dma_len(sg) >> PAGE_SHIFT; 386 n += sg_dma_len(sg) >> PAGE_SHIFT;
392 387
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 48b04d2f175f..60c8f76aab33 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -136,6 +136,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
136 spin_unlock_irqrestore(&cmdq->lock, flags); 136 spin_unlock_irqrestore(&cmdq->lock, flags);
137 return -EBUSY; 137 return -EBUSY;
138 } 138 }
139
140 size = req->cmd_size;
141 /* change the cmd_size to the number of 16byte cmdq unit.
142 * req->cmd_size is modified here
143 */
144 bnxt_qplib_set_cmd_slots(req);
145
139 memset(resp, 0, sizeof(*resp)); 146 memset(resp, 0, sizeof(*resp));
140 crsqe->resp = (struct creq_qp_event *)resp; 147 crsqe->resp = (struct creq_qp_event *)resp;
141 crsqe->resp->cookie = req->cookie; 148 crsqe->resp->cookie = req->cookie;
@@ -150,7 +157,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
150 157
151 cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; 158 cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
152 preq = (u8 *)req; 159 preq = (u8 *)req;
153 size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
154 do { 160 do {
155 /* Locate the next cmdq slot */ 161 /* Locate the next cmdq slot */
156 sw_prod = HWQ_CMP(cmdq->prod, cmdq); 162 sw_prod = HWQ_CMP(cmdq->prod, cmdq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 2138533bb642..dfeadc192e17 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -55,9 +55,7 @@
55 do { \ 55 do { \
56 memset(&(req), 0, sizeof((req))); \ 56 memset(&(req), 0, sizeof((req))); \
57 (req).opcode = CMDQ_BASE_OPCODE_##CMD; \ 57 (req).opcode = CMDQ_BASE_OPCODE_##CMD; \
58 (req).cmd_size = (sizeof((req)) + \ 58 (req).cmd_size = sizeof((req)); \
59 BNXT_QPLIB_CMDQE_UNITS - 1) / \
60 BNXT_QPLIB_CMDQE_UNITS; \
61 (req).flags = cpu_to_le16(cmd_flags); \ 59 (req).flags = cpu_to_le16(cmd_flags); \
62 } while (0) 60 } while (0)
63 61
@@ -95,6 +93,13 @@ static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
95 BNXT_QPLIB_CMDQE_UNITS); 93 BNXT_QPLIB_CMDQE_UNITS);
96} 94}
97 95
96/* Set the cmd_size to a factor of CMDQE unit */
97static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
98{
99 req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
100 BNXT_QPLIB_CMDQE_UNITS;
101}
102
98#define MAX_CMDQ_IDX(depth) ((depth) - 1) 103#define MAX_CMDQ_IDX(depth) ((depth) - 1)
99 104
100static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth) 105static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
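
The bnxt_re change keeps req->cmd_size in bytes until the queue-full check has passed, then converts it once to 16-byte cmdq slots via bnxt_qplib_set_cmd_slots(), instead of pre-rounding in the INIT macro. A standalone sketch of the conversion:

	#include <stdio.h>

	#define CMDQE_UNITS 16	/* bytes per cmdq slot */

	static unsigned int cmd_slots(unsigned int cmd_size_bytes)
	{
		/* round up to whole 16-byte units */
		return (cmd_size_bytes + CMDQE_UNITS - 1) / CMDQE_UNITS;
	}

	int main(void)
	{
		printf("%u %u %u\n",
		       cmd_slots(16), cmd_slots(17), cmd_slots(32));
		/* prints: 1 2 2 */
		return 0;
	}
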
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index 93613e5def9b..986c12153e62 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
141 if (!data) 141 if (!data)
142 return -ENOMEM; 142 return -ENOMEM;
143 copy = min(len, datalen - 1); 143 copy = min(len, datalen - 1);
144 if (copy_from_user(data, buf, copy)) 144 if (copy_from_user(data, buf, copy)) {
145 return -EFAULT; 145 ret = -EFAULT;
146 goto free_data;
147 }
146 148
147 ret = debugfs_file_get(file->f_path.dentry); 149 ret = debugfs_file_get(file->f_path.dentry);
148 if (unlikely(ret)) 150 if (unlikely(ret))
149 return ret; 151 goto free_data;
150 ptr = data; 152 ptr = data;
151 token = ptr; 153 token = ptr;
152 for (ptr = data; *ptr; ptr = end + 1, token = ptr) { 154 for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
@@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
195 ret = len; 197 ret = len;
196 198
197 debugfs_file_put(file->f_path.dentry); 199 debugfs_file_put(file->f_path.dentry);
200free_data:
198 kfree(data); 201 kfree(data);
199 return ret; 202 return ret;
200} 203}
@@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
214 return -ENOMEM; 217 return -ENOMEM;
215 ret = debugfs_file_get(file->f_path.dentry); 218 ret = debugfs_file_get(file->f_path.dentry);
216 if (unlikely(ret)) 219 if (unlikely(ret))
217 return ret; 220 goto free_data;
218 bit = find_first_bit(fault->opcodes, bitsize); 221 bit = find_first_bit(fault->opcodes, bitsize);
219 while (bit < bitsize) { 222 while (bit < bitsize) {
220 zero = find_next_zero_bit(fault->opcodes, bitsize, bit); 223 zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
@@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
232 data[size - 1] = '\n'; 235 data[size - 1] = '\n';
233 data[size] = '\0'; 236 data[size] = '\0';
234 ret = simple_read_from_buffer(buf, len, pos, data, size); 237 ret = simple_read_from_buffer(buf, len, pos, data, size);
238free_data:
235 kfree(data); 239 kfree(data);
236 return ret; 240 return ret;
237} 241}
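
Both hfi1 debugfs paths leaked the scratch buffer: once data is allocated, every early return has to run kfree(), so the fix reroutes the copy_from_user() and debugfs_file_get() failures through a free_data label. A standalone sketch of the same shape:

	#include <stdlib.h>
	#include <string.h>

	/* Stub standing in for copy_from_user(); 0 means success. */
	static int copy_stub(char *dst, const char *src, size_t n)
	{
		memcpy(dst, src, n);
		return 0;
	}

	static long parse_buf(const char *buf, size_t len)
	{
		long ret;
		char *data = calloc(1, len + 1);

		if (!data)
			return -1;		/* nothing allocated yet */
		if (copy_stub(data, buf, len)) {
			ret = -2;
			goto free_data;		/* every later exit frees data */
		}
		/* ... tokenize and parse data ... */
		ret = (long)len;
	free_data:
		free(data);
		return ret;
	}

	int main(void)
	{
		return parse_buf("opcode", 6) == 6 ? 0 : 1;
	}
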
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 996fc298207e..6141f4edc6bf 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -2574,18 +2574,9 @@ void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
2574 hfi1_kern_clear_hw_flow(priv->rcd, qp); 2574 hfi1_kern_clear_hw_flow(priv->rcd, qp);
2575} 2575}
2576 2576
2577static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd, 2577static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type)
2578 struct hfi1_packet *packet, u8 rcv_type,
2579 u8 opcode)
2580{ 2578{
2581 struct rvt_qp *qp = packet->qp; 2579 struct rvt_qp *qp = packet->qp;
2582 struct hfi1_qp_priv *qpriv = qp->priv;
2583 u32 ipsn;
2584 struct ib_other_headers *ohdr = packet->ohdr;
2585 struct rvt_ack_entry *e;
2586 struct tid_rdma_request *req;
2587 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2588 u32 i;
2589 2580
2590 if (rcv_type >= RHF_RCV_TYPE_IB) 2581 if (rcv_type >= RHF_RCV_TYPE_IB)
2591 goto done; 2582 goto done;
@@ -2602,41 +2593,9 @@ static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
2602 if (rcv_type == RHF_RCV_TYPE_EAGER) { 2593 if (rcv_type == RHF_RCV_TYPE_EAGER) {
2603 hfi1_restart_rc(qp, qp->s_last_psn + 1, 1); 2594 hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
2604 hfi1_schedule_send(qp); 2595 hfi1_schedule_send(qp);
2605 goto done_unlock;
2606 }
2607
2608 /*
2609 * For TID READ response, error out QP after freeing the tid
2610 * resources.
2611 */
2612 if (opcode == TID_OP(READ_RESP)) {
2613 ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
2614 if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
2615 cmp_psn(ipsn, qp->s_psn) < 0) {
2616 hfi1_kern_read_tid_flow_free(qp);
2617 spin_unlock(&qp->s_lock);
2618 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2619 goto done;
2620 }
2621 goto done_unlock;
2622 }
2623
2624 /*
2625 * Error out the qp for TID RDMA WRITE
2626 */
2627 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
2628 for (i = 0; i < rvt_max_atomic(rdi); i++) {
2629 e = &qp->s_ack_queue[i];
2630 if (e->opcode == TID_OP(WRITE_REQ)) {
2631 req = ack_to_tid_req(e);
2632 hfi1_kern_exp_rcv_clear_all(req);
2633 }
2634 } 2596 }
2635 spin_unlock(&qp->s_lock);
2636 rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
2637 goto done;
2638 2597
2639done_unlock: 2598 /* Since no payload is delivered, just drop the packet */
2640 spin_unlock(&qp->s_lock); 2599 spin_unlock(&qp->s_lock);
2641done: 2600done:
2642 return true; 2601 return true;
@@ -2687,12 +2646,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2687 u32 fpsn; 2646 u32 fpsn;
2688 2647
2689 lockdep_assert_held(&qp->r_lock); 2648 lockdep_assert_held(&qp->r_lock);
2649 spin_lock(&qp->s_lock);
2690 /* If the psn is out of valid range, drop the packet */ 2650 /* If the psn is out of valid range, drop the packet */
2691 if (cmp_psn(ibpsn, qp->s_last_psn) < 0 || 2651 if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
2692 cmp_psn(ibpsn, qp->s_psn) > 0) 2652 cmp_psn(ibpsn, qp->s_psn) > 0)
2693 return ret; 2653 goto s_unlock;
2694 2654
2695 spin_lock(&qp->s_lock);
2696 /* 2655 /*
2697 * Note that NAKs implicitly ACK outstanding SEND and RDMA write 2656 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
2698 * requests and implicitly NAK RDMA read and atomic requests issued 2657 * requests and implicitly NAK RDMA read and atomic requests issued
@@ -2740,9 +2699,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2740 2699
2741 wqe = do_rc_completion(qp, wqe, ibp); 2700 wqe = do_rc_completion(qp, wqe, ibp);
2742 if (qp->s_acked == qp->s_tail) 2701 if (qp->s_acked == qp->s_tail)
2743 break; 2702 goto s_unlock;
2744 } 2703 }
2745 2704
2705 if (qp->s_acked == qp->s_tail)
2706 goto s_unlock;
2707
2746 /* Handle the eflags for the request */ 2708 /* Handle the eflags for the request */
2747 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) 2709 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
2748 goto s_unlock; 2710 goto s_unlock;
@@ -2922,7 +2884,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2922 if (lnh == HFI1_LRH_GRH) 2884 if (lnh == HFI1_LRH_GRH)
2923 goto r_unlock; 2885 goto r_unlock;
2924 2886
2925 if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode)) 2887 if (tid_rdma_tid_err(packet, rcv_type))
2926 goto r_unlock; 2888 goto r_unlock;
2927 } 2889 }
2928 2890
@@ -2942,8 +2904,15 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2942 */ 2904 */
2943 spin_lock(&qp->s_lock); 2905 spin_lock(&qp->s_lock);
2944 qpriv = qp->priv; 2906 qpriv = qp->priv;
2907 if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID ||
2908 qpriv->r_tid_tail == qpriv->r_tid_head)
2909 goto unlock;
2945 e = &qp->s_ack_queue[qpriv->r_tid_tail]; 2910 e = &qp->s_ack_queue[qpriv->r_tid_tail];
2911 if (e->opcode != TID_OP(WRITE_REQ))
2912 goto unlock;
2946 req = ack_to_tid_req(e); 2913 req = ack_to_tid_req(e);
2914 if (req->comp_seg == req->cur_seg)
2915 goto unlock;
2947 flow = &req->flows[req->clear_tail]; 2916 flow = &req->flows[req->clear_tail];
2948 trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn); 2917 trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
2949 trace_hfi1_rsp_handle_kdeth_eflags(qp, psn); 2918 trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
@@ -4509,7 +4478,7 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4509 struct rvt_swqe *wqe; 4478 struct rvt_swqe *wqe;
4510 struct tid_rdma_request *req; 4479 struct tid_rdma_request *req;
4511 struct tid_rdma_flow *flow; 4480 struct tid_rdma_flow *flow;
4512 u32 aeth, psn, req_psn, ack_psn, resync_psn, ack_kpsn; 4481 u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn;
4513 unsigned long flags; 4482 unsigned long flags;
4514 u16 fidx; 4483 u16 fidx;
4515 4484
@@ -4538,6 +4507,9 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4538 ack_kpsn--; 4507 ack_kpsn--;
4539 } 4508 }
4540 4509
4510 if (unlikely(qp->s_acked == qp->s_tail))
4511 goto ack_op_err;
4512
4541 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 4513 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4542 4514
4543 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) 4515 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
@@ -4550,7 +4522,8 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4550 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); 4522 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4551 4523
4552 /* Drop stale ACK/NAK */ 4524 /* Drop stale ACK/NAK */
4553 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0) 4525 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
4526 cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
4554 goto ack_op_err; 4527 goto ack_op_err;
4555 4528
4556 while (cmp_psn(ack_kpsn, 4529 while (cmp_psn(ack_kpsn,
@@ -4712,7 +4685,12 @@ done:
4712 switch ((aeth >> IB_AETH_CREDIT_SHIFT) & 4685 switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
4713 IB_AETH_CREDIT_MASK) { 4686 IB_AETH_CREDIT_MASK) {
4714 case 0: /* PSN sequence error */ 4687 case 0: /* PSN sequence error */
4688 if (!req->flows)
4689 break;
4715 flow = &req->flows[req->acked_tail]; 4690 flow = &req->flows[req->acked_tail];
4691 flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
4692 if (cmp_psn(psn, flpsn) > 0)
4693 break;
4716 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, 4694 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
4717 flow); 4695 flow);
4718 req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2])); 4696 req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 68c951491a08..57079110af9b 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1677,8 +1677,6 @@ tx_err:
1677 tx_buf_size, DMA_TO_DEVICE); 1677 tx_buf_size, DMA_TO_DEVICE);
1678 kfree(tun_qp->tx_ring[i].buf.addr); 1678 kfree(tun_qp->tx_ring[i].buf.addr);
1679 } 1679 }
1680 kfree(tun_qp->tx_ring);
1681 tun_qp->tx_ring = NULL;
1682 i = MLX4_NUM_TUNNEL_BUFS; 1680 i = MLX4_NUM_TUNNEL_BUFS;
1683err: 1681err:
1684 while (i > 0) { 1682 while (i > 0) {
@@ -1687,6 +1685,8 @@ err:
1687 rx_buf_size, DMA_FROM_DEVICE); 1685 rx_buf_size, DMA_FROM_DEVICE);
1688 kfree(tun_qp->ring[i].addr); 1686 kfree(tun_qp->ring[i].addr);
1689 } 1687 }
1688 kfree(tun_qp->tx_ring);
1689 tun_qp->tx_ring = NULL;
1690 kfree(tun_qp->ring); 1690 kfree(tun_qp->ring);
1691 tun_qp->ring = NULL; 1691 tun_qp->ring = NULL;
1692 return -ENOMEM; 1692 return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e12a4404096b..0569bcab02d4 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1023,7 +1023,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
1023 props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL; 1023 props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
1024 1024
1025 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { 1025 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
1026 if (MLX5_CAP_GEN(mdev, pg)) 1026 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
1027 props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING; 1027 props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
1028 props->odp_caps = dev->odp_caps; 1028 props->odp_caps = dev->odp_caps;
1029 } 1029 }
@@ -6139,6 +6139,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
6139 dev->port[i].roce.last_port_state = IB_PORT_DOWN; 6139 dev->port[i].roce.last_port_state = IB_PORT_DOWN;
6140 } 6140 }
6141 6141
6142 mlx5_ib_internal_fill_odp_caps(dev);
6143
6142 err = mlx5_ib_init_multiport_master(dev); 6144 err = mlx5_ib_init_multiport_master(dev);
6143 if (err) 6145 if (err)
6144 return err; 6146 return err;
@@ -6563,8 +6565,6 @@ static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
6563 6565
6564static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev) 6566static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
6565{ 6567{
6566 mlx5_ib_internal_fill_odp_caps(dev);
6567
6568 return mlx5_ib_odp_init_one(dev); 6568 return mlx5_ib_odp_init_one(dev);
6569} 6569}
6570 6570
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index fe1a76d8531c..a40e0abf2338 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -57,9 +57,10 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
57 int entry; 57 int entry;
58 58
59 if (umem->is_odp) { 59 if (umem->is_odp) {
60 unsigned int page_shift = to_ib_umem_odp(umem)->page_shift; 60 struct ib_umem_odp *odp = to_ib_umem_odp(umem);
61 unsigned int page_shift = odp->page_shift;
61 62
62 *ncont = ib_umem_page_count(umem); 63 *ncont = ib_umem_odp_num_pages(odp);
63 *count = *ncont << (page_shift - PAGE_SHIFT); 64 *count = *ncont << (page_shift - PAGE_SHIFT);
64 *shift = page_shift; 65 *shift = page_shift;
65 if (order) 66 if (order)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f6a53455bf8b..9ae587b74b12 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1475,4 +1475,18 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
1475 bool dyn_bfreg); 1475 bool dyn_bfreg);
1476 1476
1477int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter); 1477int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
1478
1479static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
1480 bool do_modify_atomic)
1481{
1482 if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
1483 return false;
1484
1485 if (do_modify_atomic &&
1486 MLX5_CAP_GEN(dev->mdev, atomic) &&
1487 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
1488 return false;
1489
1490 return true;
1491}
1478#endif /* MLX5_IB_H */ 1492#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index b74fad08412f..3401f5f6792e 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1293,9 +1293,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1293 if (err < 0) 1293 if (err < 0)
1294 return ERR_PTR(err); 1294 return ERR_PTR(err);
1295 1295
1296 use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) && 1296 use_umr = mlx5_ib_can_use_umr(dev, true);
1297 (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
1298 !MLX5_CAP_GEN(dev->mdev, atomic));
1299 1297
1300 if (order <= mr_cache_max_order(dev) && use_umr) { 1298 if (order <= mr_cache_max_order(dev) && use_umr) {
1301 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, 1299 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
@@ -1448,7 +1446,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1448 goto err; 1446 goto err;
1449 } 1447 }
1450 1448
1451 if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) { 1449 if (!mlx5_ib_can_use_umr(dev, true) ||
1450 (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
1452 /* 1451 /*
1453 * UMR can't be used - MKey needs to be replaced. 1452 * UMR can't be used - MKey needs to be replaced.
1454 */ 1453 */
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 1d257d1b3b0d..0a59912a4cef 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -301,7 +301,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
301 301
302 memset(caps, 0, sizeof(*caps)); 302 memset(caps, 0, sizeof(*caps));
303 303
304 if (!MLX5_CAP_GEN(dev->mdev, pg)) 304 if (!MLX5_CAP_GEN(dev->mdev, pg) ||
305 !mlx5_ib_can_use_umr(dev, true))
305 return; 306 return;
306 307
307 caps->general_caps = IB_ODP_SUPPORT; 308 caps->general_caps = IB_ODP_SUPPORT;
@@ -355,7 +356,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
355 356
356 if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) && 357 if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
357 MLX5_CAP_GEN(dev->mdev, null_mkey) && 358 MLX5_CAP_GEN(dev->mdev, null_mkey) &&
358 MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) 359 MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
360 !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
359 caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT; 361 caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
360 362
361 return; 363 return;
@@ -1622,8 +1624,10 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1622{ 1624{
1623 int ret = 0; 1625 int ret = 0;
1624 1626
1625 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT) 1627 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1626 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops); 1628 return ret;
1629
1630 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
1627 1631
1628 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) { 1632 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
1629 ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey); 1633 ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
@@ -1633,9 +1637,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1633 } 1637 }
1634 } 1638 }
1635 1639
1636 if (!MLX5_CAP_GEN(dev->mdev, pg))
1637 return ret;
1638
1639 ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq); 1640 ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
1640 1641
1641 return ret; 1642 return ret;
@@ -1643,7 +1644,7 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1643 1644
1644void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev) 1645void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
1645{ 1646{
1646 if (!MLX5_CAP_GEN(dev->mdev, pg)) 1647 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1647 return; 1648 return;
1648 1649
1649 mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq); 1650 mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 379328b2598f..72869ff4a334 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4162,7 +4162,7 @@ static u64 get_xlt_octo(u64 bytes)
4162 MLX5_IB_UMR_OCTOWORD; 4162 MLX5_IB_UMR_OCTOWORD;
4163} 4163}
4164 4164
4165static __be64 frwr_mkey_mask(void) 4165static __be64 frwr_mkey_mask(bool atomic)
4166{ 4166{
4167 u64 result; 4167 u64 result;
4168 4168
@@ -4175,10 +4175,12 @@ static __be64 frwr_mkey_mask(void)
4175 MLX5_MKEY_MASK_LW | 4175 MLX5_MKEY_MASK_LW |
4176 MLX5_MKEY_MASK_RR | 4176 MLX5_MKEY_MASK_RR |
4177 MLX5_MKEY_MASK_RW | 4177 MLX5_MKEY_MASK_RW |
4178 MLX5_MKEY_MASK_A |
4179 MLX5_MKEY_MASK_SMALL_FENCE | 4178 MLX5_MKEY_MASK_SMALL_FENCE |
4180 MLX5_MKEY_MASK_FREE; 4179 MLX5_MKEY_MASK_FREE;
4181 4180
4181 if (atomic)
4182 result |= MLX5_MKEY_MASK_A;
4183
4182 return cpu_to_be64(result); 4184 return cpu_to_be64(result);
4183} 4185}
4184 4186
@@ -4204,7 +4206,7 @@ static __be64 sig_mkey_mask(void)
4204} 4206}
4205 4207
4206static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr, 4208static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
4207 struct mlx5_ib_mr *mr, u8 flags) 4209 struct mlx5_ib_mr *mr, u8 flags, bool atomic)
4208{ 4210{
4209 int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; 4211 int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
4210 4212
@@ -4212,7 +4214,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
4212 4214
4213 umr->flags = flags; 4215 umr->flags = flags;
4214 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size)); 4216 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
4215 umr->mkey_mask = frwr_mkey_mask(); 4217 umr->mkey_mask = frwr_mkey_mask(atomic);
4216} 4218}
4217 4219
4218static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr) 4220static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
@@ -4811,10 +4813,22 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
4811{ 4813{
4812 struct mlx5_ib_mr *mr = to_mmr(wr->mr); 4814 struct mlx5_ib_mr *mr = to_mmr(wr->mr);
4813 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); 4815 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
4816 struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
4814 int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; 4817 int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
4815 bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD; 4818 bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
4819 bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
4816 u8 flags = 0; 4820 u8 flags = 0;
4817 4821
4822 if (!mlx5_ib_can_use_umr(dev, atomic)) {
4823 mlx5_ib_warn(to_mdev(qp->ibqp.device),
4824 "Fast update of %s for MR is disabled\n",
4825 (MLX5_CAP_GEN(dev->mdev,
4826 umr_modify_entity_size_disabled)) ?
4827 "entity size" :
4828 "atomic access");
4829 return -EINVAL;
4830 }
4831
4818 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) { 4832 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
4819 mlx5_ib_warn(to_mdev(qp->ibqp.device), 4833 mlx5_ib_warn(to_mdev(qp->ibqp.device),
4820 "Invalid IB_SEND_INLINE send flag\n"); 4834 "Invalid IB_SEND_INLINE send flag\n");
@@ -4826,7 +4840,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
4826 if (umr_inline) 4840 if (umr_inline)
4827 flags |= MLX5_UMR_INLINE; 4841 flags |= MLX5_UMR_INLINE;
4828 4842
4829 set_reg_umr_seg(*seg, mr, flags); 4843 set_reg_umr_seg(*seg, mr, flags, atomic);
4830 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 4844 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
4831 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 4845 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
4832 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); 4846 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 77b1aabf6ff3..dba4535494ab 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -138,9 +138,9 @@ struct siw_umem {
138}; 138};
139 139
140struct siw_pble { 140struct siw_pble {
141 u64 addr; /* Address of assigned user buffer */ 141 dma_addr_t addr; /* Address of assigned buffer */
142 u64 size; /* Size of this entry */ 142 unsigned int size; /* Size of this entry */
143 u64 pbl_off; /* Total offset from start of PBL */ 143 unsigned long pbl_off; /* Total offset from start of PBL */
144}; 144};
145 145
146struct siw_pbl { 146struct siw_pbl {
@@ -734,7 +734,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
734 "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__) 734 "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
735 735
736#define siw_dbg_cep(cep, fmt, ...) \ 736#define siw_dbg_cep(cep, fmt, ...) \
737 ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \ 737 ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
738 cep, __func__, ##__VA_ARGS__) 738 cep, __func__, ##__VA_ARGS__)
739 739
740void siw_cq_flush(struct siw_cq *cq); 740void siw_cq_flush(struct siw_cq *cq);
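
The siw cleanups drop cm_id and socket pointers from debug output entirely and switch the remaining CEP address from %p to %pK, which honours kptr_restrict instead of exposing a kernel address. A kernel-style sketch of such a macro; names are illustrative:

	#include <linux/printk.h>

	/* Prints the object address with %pK so kptr_restrict can censor it. */
	#define example_dbg(obj, fmt, ...) \
		pr_debug("OBJ[0x%pK] %s: " fmt, (obj), __func__, ##__VA_ARGS__)
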
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 9ce8a1b925d2..1db5ad3d9580 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -355,8 +355,8 @@ static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
355 getname_local(cep->sock, &event.local_addr); 355 getname_local(cep->sock, &event.local_addr);
356 getname_peer(cep->sock, &event.remote_addr); 356 getname_peer(cep->sock, &event.remote_addr);
357 } 357 }
358 siw_dbg_cep(cep, "[QP %u]: id 0x%p, reason=%d, status=%d\n", 358 siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n",
359 cep->qp ? qp_id(cep->qp) : -1, id, reason, status); 359 cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status);
360 360
361 return id->event_handler(id, &event); 361 return id->event_handler(id, &event);
362} 362}
@@ -947,8 +947,6 @@ static void siw_accept_newconn(struct siw_cep *cep)
947 siw_cep_get(new_cep); 947 siw_cep_get(new_cep);
948 new_s->sk->sk_user_data = new_cep; 948 new_s->sk->sk_user_data = new_cep;
949 949
950 siw_dbg_cep(cep, "listen socket 0x%p, new 0x%p\n", s, new_s);
951
952 if (siw_tcp_nagle == false) { 950 if (siw_tcp_nagle == false) {
953 int val = 1; 951 int val = 1;
954 952
@@ -1011,7 +1009,8 @@ static void siw_cm_work_handler(struct work_struct *w)
1011 cep = work->cep; 1009 cep = work->cep;
1012 1010
1013 siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n", 1011 siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n",
1014 cep->qp ? qp_id(cep->qp) : -1, work->type, cep->state); 1012 cep->qp ? qp_id(cep->qp) : UINT_MAX,
1013 work->type, cep->state);
1015 1014
1016 siw_cep_set_inuse(cep); 1015 siw_cep_set_inuse(cep);
1017 1016
@@ -1145,9 +1144,9 @@ static void siw_cm_work_handler(struct work_struct *w)
1145 } 1144 }
1146 if (release_cep) { 1145 if (release_cep) {
1147 siw_dbg_cep(cep, 1146 siw_dbg_cep(cep,
1148 "release: timer=%s, QP[%u], id 0x%p\n", 1147 "release: timer=%s, QP[%u]\n",
1149 cep->mpa_timer ? "y" : "n", 1148 cep->mpa_timer ? "y" : "n",
1150 cep->qp ? qp_id(cep->qp) : -1, cep->cm_id); 1149 cep->qp ? qp_id(cep->qp) : UINT_MAX);
1151 1150
1152 siw_cancel_mpatimer(cep); 1151 siw_cancel_mpatimer(cep);
1153 1152
@@ -1211,8 +1210,8 @@ int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type)
1211 else 1210 else
1212 delay = MPAREP_TIMEOUT; 1211 delay = MPAREP_TIMEOUT;
1213 } 1212 }
1214 siw_dbg_cep(cep, "[QP %u]: work type: %d, work 0x%p, timeout %lu\n", 1213 siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n",
1215 cep->qp ? qp_id(cep->qp) : -1, type, work, delay); 1214 cep->qp ? qp_id(cep->qp) : -1, type, delay);
1216 1215
1217 queue_delayed_work(siw_cm_wq, &work->work, delay); 1216 queue_delayed_work(siw_cm_wq, &work->work, delay);
1218 1217
@@ -1376,16 +1375,16 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1376 } 1375 }
1377 if (v4) 1376 if (v4)
1378 siw_dbg_qp(qp, 1377 siw_dbg_qp(qp,
1379 "id 0x%p, pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n", 1378 "pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
1380 id, pd_len, 1379 pd_len,
1381 &((struct sockaddr_in *)(laddr))->sin_addr, 1380 &((struct sockaddr_in *)(laddr))->sin_addr,
1382 ntohs(((struct sockaddr_in *)(laddr))->sin_port), 1381 ntohs(((struct sockaddr_in *)(laddr))->sin_port),
1383 &((struct sockaddr_in *)(raddr))->sin_addr, 1382 &((struct sockaddr_in *)(raddr))->sin_addr,
1384 ntohs(((struct sockaddr_in *)(raddr))->sin_port)); 1383 ntohs(((struct sockaddr_in *)(raddr))->sin_port));
1385 else 1384 else
1386 siw_dbg_qp(qp, 1385 siw_dbg_qp(qp,
1387 "id 0x%p, pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n", 1386 "pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
1388 id, pd_len, 1387 pd_len,
1389 &((struct sockaddr_in6 *)(laddr))->sin6_addr, 1388 &((struct sockaddr_in6 *)(laddr))->sin6_addr,
1390 ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port), 1389 ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port),
1391 &((struct sockaddr_in6 *)(raddr))->sin6_addr, 1390 &((struct sockaddr_in6 *)(raddr))->sin6_addr,
@@ -1508,14 +1507,13 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1508 if (rv >= 0) { 1507 if (rv >= 0) {
1509 rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT); 1508 rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT);
1510 if (!rv) { 1509 if (!rv) {
1511 siw_dbg_cep(cep, "id 0x%p, [QP %u]: exit\n", id, 1510 siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp));
1512 qp_id(qp));
1513 siw_cep_set_free(cep); 1511 siw_cep_set_free(cep);
1514 return 0; 1512 return 0;
1515 } 1513 }
1516 } 1514 }
1517error: 1515error:
1518 siw_dbg_qp(qp, "failed: %d\n", rv); 1516 siw_dbg(id->device, "failed: %d\n", rv);
1519 1517
1520 if (cep) { 1518 if (cep) {
1521 siw_socket_disassoc(s); 1519 siw_socket_disassoc(s);
@@ -1540,7 +1538,8 @@ error:
1540 } else if (s) { 1538 } else if (s) {
1541 sock_release(s); 1539 sock_release(s);
1542 } 1540 }
1543 siw_qp_put(qp); 1541 if (qp)
1542 siw_qp_put(qp);
1544 1543
1545 return rv; 1544 return rv;
1546} 1545}
@@ -1580,7 +1579,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1580 siw_cancel_mpatimer(cep); 1579 siw_cancel_mpatimer(cep);
1581 1580
1582 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) { 1581 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
1583 siw_dbg_cep(cep, "id 0x%p: out of state\n", id); 1582 siw_dbg_cep(cep, "out of state\n");
1584 1583
1585 siw_cep_set_free(cep); 1584 siw_cep_set_free(cep);
1586 siw_cep_put(cep); 1585 siw_cep_put(cep);
@@ -1601,7 +1600,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1601 up_write(&qp->state_lock); 1600 up_write(&qp->state_lock);
1602 goto error; 1601 goto error;
1603 } 1602 }
1604 siw_dbg_cep(cep, "id 0x%p\n", id); 1603 siw_dbg_cep(cep, "[QP %d]\n", params->qpn);
1605 1604
1606 if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) { 1605 if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) {
1607 siw_dbg_cep(cep, "peer allows GSO on TX\n"); 1606 siw_dbg_cep(cep, "peer allows GSO on TX\n");
@@ -1611,8 +1610,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1611 params->ird > sdev->attrs.max_ird) { 1610 params->ird > sdev->attrs.max_ird) {
1612 siw_dbg_cep( 1611 siw_dbg_cep(
1613 cep, 1612 cep,
1614 "id 0x%p, [QP %u]: ord %d (max %d), ird %d (max %d)\n", 1613 "[QP %u]: ord %d (max %d), ird %d (max %d)\n",
1615 id, qp_id(qp), params->ord, sdev->attrs.max_ord, 1614 qp_id(qp), params->ord, sdev->attrs.max_ord,
1616 params->ird, sdev->attrs.max_ird); 1615 params->ird, sdev->attrs.max_ird);
1617 rv = -EINVAL; 1616 rv = -EINVAL;
1618 up_write(&qp->state_lock); 1617 up_write(&qp->state_lock);
@@ -1624,8 +1623,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1624 if (params->private_data_len > max_priv_data) { 1623 if (params->private_data_len > max_priv_data) {
1625 siw_dbg_cep( 1624 siw_dbg_cep(
1626 cep, 1625 cep,
1627 "id 0x%p, [QP %u]: private data length: %d (max %d)\n", 1626 "[QP %u]: private data length: %d (max %d)\n",
1628 id, qp_id(qp), params->private_data_len, max_priv_data); 1627 qp_id(qp), params->private_data_len, max_priv_data);
1629 rv = -EINVAL; 1628 rv = -EINVAL;
1630 up_write(&qp->state_lock); 1629 up_write(&qp->state_lock);
1631 goto error; 1630 goto error;
@@ -1679,7 +1678,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1679 qp_attrs.flags = SIW_MPA_CRC; 1678 qp_attrs.flags = SIW_MPA_CRC;
1680 qp_attrs.state = SIW_QP_STATE_RTS; 1679 qp_attrs.state = SIW_QP_STATE_RTS;
1681 1680
1682 siw_dbg_cep(cep, "id 0x%p, [QP%u]: moving to rts\n", id, qp_id(qp)); 1681 siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp));
1683 1682
1684 /* Associate QP with CEP */ 1683 /* Associate QP with CEP */
1685 siw_cep_get(cep); 1684 siw_cep_get(cep);
@@ -1700,8 +1699,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1700 if (rv) 1699 if (rv)
1701 goto error; 1700 goto error;
1702 1701
1703 siw_dbg_cep(cep, "id 0x%p, [QP %u]: send mpa reply, %d byte pdata\n", 1702 siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n",
1704 id, qp_id(qp), params->private_data_len); 1703 qp_id(qp), params->private_data_len);
1705 1704
1706 rv = siw_send_mpareqrep(cep, params->private_data, 1705 rv = siw_send_mpareqrep(cep, params->private_data,
1707 params->private_data_len); 1706 params->private_data_len);
@@ -1759,14 +1758,14 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
1759 siw_cancel_mpatimer(cep); 1758 siw_cancel_mpatimer(cep);
1760 1759
1761 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) { 1760 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
1762 siw_dbg_cep(cep, "id 0x%p: out of state\n", id); 1761 siw_dbg_cep(cep, "out of state\n");
1763 1762
1764 siw_cep_set_free(cep); 1763 siw_cep_set_free(cep);
1765 siw_cep_put(cep); /* put last reference */ 1764 siw_cep_put(cep); /* put last reference */
1766 1765
1767 return -ECONNRESET; 1766 return -ECONNRESET;
1768 } 1767 }
1769 siw_dbg_cep(cep, "id 0x%p, cep->state %d, pd_len %d\n", id, cep->state, 1768 siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state,
1770 pd_len); 1769 pd_len);
1771 1770
1772 if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) { 1771 if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) {
@@ -1804,14 +1803,14 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
1804 rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val, 1803 rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val,
1805 sizeof(s_val)); 1804 sizeof(s_val));
1806 if (rv) { 1805 if (rv) {
1807 siw_dbg(id->device, "id 0x%p: setsockopt error: %d\n", id, rv); 1806 siw_dbg(id->device, "setsockopt error: %d\n", rv);
1808 goto error; 1807 goto error;
1809 } 1808 }
1810 rv = s->ops->bind(s, laddr, addr_family == AF_INET ? 1809 rv = s->ops->bind(s, laddr, addr_family == AF_INET ?
1811 sizeof(struct sockaddr_in) : 1810 sizeof(struct sockaddr_in) :
1812 sizeof(struct sockaddr_in6)); 1811 sizeof(struct sockaddr_in6));
1813 if (rv) { 1812 if (rv) {
1814 siw_dbg(id->device, "id 0x%p: socket bind error: %d\n", id, rv); 1813 siw_dbg(id->device, "socket bind error: %d\n", rv);
1815 goto error; 1814 goto error;
1816 } 1815 }
1817 cep = siw_cep_alloc(sdev); 1816 cep = siw_cep_alloc(sdev);
@@ -1824,13 +1823,13 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
1824 rv = siw_cm_alloc_work(cep, backlog); 1823 rv = siw_cm_alloc_work(cep, backlog);
1825 if (rv) { 1824 if (rv) {
1826 siw_dbg(id->device, 1825 siw_dbg(id->device,
1827 "id 0x%p: alloc_work error %d, backlog %d\n", id, 1826 "alloc_work error %d, backlog %d\n",
1828 rv, backlog); 1827 rv, backlog);
1829 goto error; 1828 goto error;
1830 } 1829 }
1831 rv = s->ops->listen(s, backlog); 1830 rv = s->ops->listen(s, backlog);
1832 if (rv) { 1831 if (rv) {
1833 siw_dbg(id->device, "id 0x%p: listen error %d\n", id, rv); 1832 siw_dbg(id->device, "listen error %d\n", rv);
1834 goto error; 1833 goto error;
1835 } 1834 }
1836 cep->cm_id = id; 1835 cep->cm_id = id;
@@ -1914,8 +1913,7 @@ static void siw_drop_listeners(struct iw_cm_id *id)
1914 1913
1915 list_del(p); 1914 list_del(p);
1916 1915
1917 siw_dbg_cep(cep, "id 0x%p: drop cep, state %d\n", id, 1916 siw_dbg_cep(cep, "drop cep, state %d\n", cep->state);
1918 cep->state);
1919 1917
1920 siw_cep_set_inuse(cep); 1918 siw_cep_set_inuse(cep);
1921 1919
@@ -1952,7 +1950,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
1952 struct net_device *dev = to_siw_dev(id->device)->netdev; 1950 struct net_device *dev = to_siw_dev(id->device)->netdev;
1953 int rv = 0, listeners = 0; 1951 int rv = 0, listeners = 0;
1954 1952
1955 siw_dbg(id->device, "id 0x%p: backlog %d\n", id, backlog); 1953 siw_dbg(id->device, "backlog %d\n", backlog);
1956 1954
1957 /* 1955 /*
1958 * For each attached address of the interface, create a 1956 * For each attached address of the interface, create a
@@ -1968,8 +1966,8 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
1968 s_raddr = (struct sockaddr_in *)&id->remote_addr; 1966 s_raddr = (struct sockaddr_in *)&id->remote_addr;
1969 1967
1970 siw_dbg(id->device, 1968 siw_dbg(id->device,
1971 "id 0x%p: laddr %pI4:%d, raddr %pI4:%d\n", 1969 "laddr %pI4:%d, raddr %pI4:%d\n",
1972 id, &s_laddr.sin_addr, ntohs(s_laddr.sin_port), 1970 &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
1973 &s_raddr->sin_addr, ntohs(s_raddr->sin_port)); 1971 &s_raddr->sin_addr, ntohs(s_raddr->sin_port));
1974 1972
1975 rtnl_lock(); 1973 rtnl_lock();
@@ -1994,8 +1992,8 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
1994 *s_raddr = &to_sockaddr_in6(id->remote_addr); 1992 *s_raddr = &to_sockaddr_in6(id->remote_addr);
1995 1993
1996 siw_dbg(id->device, 1994 siw_dbg(id->device,
1997 "id 0x%p: laddr %pI6:%d, raddr %pI6:%d\n", 1995 "laddr %pI6:%d, raddr %pI6:%d\n",
1998 id, &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port), 1996 &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
1999 &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port)); 1997 &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));
2000 1998
2001 read_lock_bh(&in6_dev->lock); 1999 read_lock_bh(&in6_dev->lock);
@@ -2028,17 +2026,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
2028 else if (!rv) 2026 else if (!rv)
2029 rv = -EINVAL; 2027 rv = -EINVAL;
2030 2028
2031 siw_dbg(id->device, "id 0x%p: %s\n", id, rv ? "FAIL" : "OK"); 2029 siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK");
2032 2030
2033 return rv; 2031 return rv;
2034} 2032}
2035 2033
2036int siw_destroy_listen(struct iw_cm_id *id) 2034int siw_destroy_listen(struct iw_cm_id *id)
2037{ 2035{
2038 siw_dbg(id->device, "id 0x%p\n", id);
2039
2040 if (!id->provider_data) { 2036 if (!id->provider_data) {
2041 siw_dbg(id->device, "id 0x%p: no cep(s)\n", id); 2037 siw_dbg(id->device, "no cep(s)\n");
2042 return 0; 2038 return 0;
2043 } 2039 }
2044 siw_drop_listeners(id); 2040 siw_drop_listeners(id);
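
Note on the siw_cm.c hunks above: every siw_dbg/siw_dbg_cep format string
loses its "id 0x%p" prefix. Printing raw kernel addresses leaks address-space
layout into the log, and the cm_id pointer adds nothing that the cep context
of siw_dbg_cep() does not already carry. Where an identifier is still useful,
the messages switch to stable values such as the QP number; later hunks in
this series use %pK, which honors the kptr_restrict sysctl, in the few spots
where an address is genuinely wanted. Sketch of the preferred forms:

	siw_dbg_cep(cep, "[QP %u]: out of state\n", qp_id(qp)); /* stable id */
	siw_dbg_cep(cep, "id 0x%pK\n", id);			/* restricted */
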
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index e381ae9b7d62..d8db3bee9da7 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -71,9 +71,10 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
71 wc->wc_flags = IB_WC_WITH_INVALIDATE; 71 wc->wc_flags = IB_WC_WITH_INVALIDATE;
72 } 72 }
73 wc->qp = cqe->base_qp; 73 wc->qp = cqe->base_qp;
74 siw_dbg_cq(cq, "idx %u, type %d, flags %2x, id 0x%p\n", 74 siw_dbg_cq(cq,
75 "idx %u, type %d, flags %2x, id 0x%pK\n",
75 cq->cq_get % cq->num_cqe, cqe->opcode, 76 cq->cq_get % cq->num_cqe, cqe->opcode,
76 cqe->flags, (void *)cqe->id); 77 cqe->flags, (void *)(uintptr_t)cqe->id);
77 } 78 }
78 WRITE_ONCE(cqe->flags, 0); 79 WRITE_ONCE(cqe->flags, 0);
79 cq->cq_get++; 80 cq->cq_get++;
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index 67171c82b0c4..87a56039f0ef 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -197,12 +197,12 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
197 */ 197 */
198 if (addr < mem->va || addr + len > mem->va + mem->len) { 198 if (addr < mem->va || addr + len > mem->va + mem->len) {
199 siw_dbg_pd(pd, "MEM interval len %d\n", len); 199 siw_dbg_pd(pd, "MEM interval len %d\n", len);
200 siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] out of bounds\n", 200 siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n",
201 (unsigned long long)addr, 201 (void *)(uintptr_t)addr,
202 (unsigned long long)(addr + len)); 202 (void *)(uintptr_t)(addr + len));
203 siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] STag=0x%08x\n", 203 siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n",
204 (unsigned long long)mem->va, 204 (void *)(uintptr_t)mem->va,
205 (unsigned long long)(mem->va + mem->len), 205 (void *)(uintptr_t)(mem->va + mem->len),
206 mem->stag); 206 mem->stag);
207 207
208 return -E_BASE_BOUNDS; 208 return -E_BASE_BOUNDS;
@@ -330,7 +330,7 @@ out:
330 * Optionally, provides remaining len within current element, and 330 * Optionally, provides remaining len within current element, and
331 * current PBL index for later resume at same element. 331 * current PBL index for later resume at same element.
332 */ 332 */
333u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx) 333dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
334{ 334{
335 int i = idx ? *idx : 0; 335 int i = idx ? *idx : 0;
336 336
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index f43daf280891..db138c8423da 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -9,7 +9,7 @@
9struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable); 9struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable);
10void siw_umem_release(struct siw_umem *umem, bool dirty); 10void siw_umem_release(struct siw_umem *umem, bool dirty);
11struct siw_pbl *siw_pbl_alloc(u32 num_buf); 11struct siw_pbl *siw_pbl_alloc(u32 num_buf);
12u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx); 12dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
13struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index); 13struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
14int siw_mem_add(struct siw_device *sdev, struct siw_mem *m); 14int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
15int siw_invalidate_stag(struct ib_pd *pd, u32 stag); 15int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
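
Note on the siw_mem changes: siw_pbl_get_buffer() now returns dma_addr_t
instead of u64. The two are not interchangeable; on 32-bit configurations
without 64-bit DMA addressing, dma_addr_t is 32 bits wide, so carrying the
value in u64 hides truncation and defeats type checking at the call sites
(the matching callers in siw_qp_rx.c and siw_qp_tx.c are updated below).
Usage with the corrected signature, names as in the patch:

	int bytes, idx = 0;
	dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, &bytes, &idx);

	if (paddr)		/* zero means no translation at this offset */
		page = virt_to_page(paddr);
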
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 0990307c5d2c..430314c8abd9 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -949,7 +949,7 @@ skip_irq:
949 rv = -EINVAL; 949 rv = -EINVAL;
950 goto out; 950 goto out;
951 } 951 }
952 wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1]; 952 wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
953 wqe->sqe.sge[0].lkey = 0; 953 wqe->sqe.sge[0].lkey = 0;
954 wqe->sqe.num_sge = 1; 954 wqe->sqe.num_sge = 1;
955 } 955 }
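
Note on the (u64) to (uintptr_t) cast above, a pattern repeated throughout
the series: casting a pointer straight to an integer of a different width
draws pointer-to-int-cast warnings on 32-bit builds. uintptr_t is defined to
round-trip a pointer, so widening to u64 or narrowing back should always go
through it. A small compilable illustration (plain user-space C, not driver
code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int x = 42;

		/* pointer -> uintptr_t -> u64: widening is well defined */
		uint64_t laddr = (uintptr_t)&x;

		/* u64 -> uintptr_t -> pointer: narrow back the same way */
		int *p = (int *)(uintptr_t)laddr;

		printf("%d\n", *p);
		return 0;
	}
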
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index f87657a11657..c0a887240325 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -38,9 +38,10 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
38 38
39 p = siw_get_upage(umem, dest_addr); 39 p = siw_get_upage(umem, dest_addr);
40 if (unlikely(!p)) { 40 if (unlikely(!p)) {
41 pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n", 41 pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n",
42 __func__, qp_id(rx_qp(srx)), 42 __func__, qp_id(rx_qp(srx)),
43 (void *)dest_addr, (void *)umem->fp_addr); 43 (void *)(uintptr_t)dest_addr,
44 (void *)(uintptr_t)umem->fp_addr);
44 /* siw internal error */ 45 /* siw internal error */
45 srx->skb_copied += copied; 46 srx->skb_copied += copied;
46 srx->skb_new -= copied; 47 srx->skb_new -= copied;
@@ -50,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
50 pg_off = dest_addr & ~PAGE_MASK; 51 pg_off = dest_addr & ~PAGE_MASK;
51 bytes = min(len, (int)PAGE_SIZE - pg_off); 52 bytes = min(len, (int)PAGE_SIZE - pg_off);
52 53
53 siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes); 54 siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
54 55
55 dest = kmap_atomic(p); 56 dest = kmap_atomic(p);
56 rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off, 57 rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
@@ -104,11 +105,11 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
104{ 105{
105 int rv; 106 int rv;
106 107
107 siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len); 108 siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len);
108 109
109 rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len); 110 rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
110 if (unlikely(rv)) { 111 if (unlikely(rv)) {
111 pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n", 112 pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n",
112 qp_id(rx_qp(srx)), __func__, len, kva, rv); 113 qp_id(rx_qp(srx)), __func__, len, kva, rv);
113 114
114 return rv; 115 return rv;
@@ -132,7 +133,7 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
132 133
133 while (len) { 134 while (len) {
134 int bytes; 135 int bytes;
135 u64 buf_addr = 136 dma_addr_t buf_addr =
136 siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx); 137 siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx);
137 if (!buf_addr) 138 if (!buf_addr)
138 break; 139 break;
@@ -485,8 +486,8 @@ int siw_proc_send(struct siw_qp *qp)
485 mem_p = *mem; 486 mem_p = *mem;
486 if (mem_p->mem_obj == NULL) 487 if (mem_p->mem_obj == NULL)
487 rv = siw_rx_kva(srx, 488 rv = siw_rx_kva(srx,
488 (void *)(sge->laddr + frx->sge_off), 489 (void *)(uintptr_t)(sge->laddr + frx->sge_off),
489 sge_bytes); 490 sge_bytes);
490 else if (!mem_p->is_pbl) 491 else if (!mem_p->is_pbl)
491 rv = siw_rx_umem(srx, mem_p->umem, 492 rv = siw_rx_umem(srx, mem_p->umem,
492 sge->laddr + frx->sge_off, sge_bytes); 493 sge->laddr + frx->sge_off, sge_bytes);
@@ -598,8 +599,8 @@ int siw_proc_write(struct siw_qp *qp)
598 599
599 if (mem->mem_obj == NULL) 600 if (mem->mem_obj == NULL)
600 rv = siw_rx_kva(srx, 601 rv = siw_rx_kva(srx,
601 (void *)(srx->ddp_to + srx->fpdu_part_rcvd), 602 (void *)(uintptr_t)(srx->ddp_to + srx->fpdu_part_rcvd),
602 bytes); 603 bytes);
603 else if (!mem->is_pbl) 604 else if (!mem->is_pbl)
604 rv = siw_rx_umem(srx, mem->umem, 605 rv = siw_rx_umem(srx, mem->umem,
605 srx->ddp_to + srx->fpdu_part_rcvd, bytes); 606 srx->ddp_to + srx->fpdu_part_rcvd, bytes);
@@ -841,8 +842,9 @@ int siw_proc_rresp(struct siw_qp *qp)
841 bytes = min(srx->fpdu_part_rem, srx->skb_new); 842 bytes = min(srx->fpdu_part_rem, srx->skb_new);
842 843
843 if (mem_p->mem_obj == NULL) 844 if (mem_p->mem_obj == NULL)
844 rv = siw_rx_kva(srx, (void *)(sge->laddr + wqe->processed), 845 rv = siw_rx_kva(srx,
845 bytes); 846 (void *)(uintptr_t)(sge->laddr + wqe->processed),
847 bytes);
846 else if (!mem_p->is_pbl) 848 else if (!mem_p->is_pbl)
847 rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed, 849 rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
848 bytes); 850 bytes);
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 43020d2040fc..438a2917a47c 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -26,7 +26,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
26{ 26{
27 struct siw_pbl *pbl = mem->pbl; 27 struct siw_pbl *pbl = mem->pbl;
28 u64 offset = addr - mem->va; 28 u64 offset = addr - mem->va;
29 u64 paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); 29 dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
30 30
31 if (paddr) 31 if (paddr)
32 return virt_to_page(paddr); 32 return virt_to_page(paddr);
@@ -37,7 +37,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
37/* 37/*
38 * Copy short payload at provided destination payload address 38 * Copy short payload at provided destination payload address
39 */ 39 */
40static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr) 40static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
41{ 41{
42 struct siw_wqe *wqe = &c_tx->wqe_active; 42 struct siw_wqe *wqe = &c_tx->wqe_active;
43 struct siw_sge *sge = &wqe->sqe.sge[0]; 43 struct siw_sge *sge = &wqe->sqe.sge[0];
@@ -50,16 +50,16 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
50 return 0; 50 return 0;
51 51
52 if (tx_flags(wqe) & SIW_WQE_INLINE) { 52 if (tx_flags(wqe) & SIW_WQE_INLINE) {
53 memcpy((void *)paddr, &wqe->sqe.sge[1], bytes); 53 memcpy(paddr, &wqe->sqe.sge[1], bytes);
54 } else { 54 } else {
55 struct siw_mem *mem = wqe->mem[0]; 55 struct siw_mem *mem = wqe->mem[0];
56 56
57 if (!mem->mem_obj) { 57 if (!mem->mem_obj) {
58 /* Kernel client using kva */ 58 /* Kernel client using kva */
59 memcpy((void *)paddr, (void *)sge->laddr, bytes); 59 memcpy(paddr,
60 (const void *)(uintptr_t)sge->laddr, bytes);
60 } else if (c_tx->in_syscall) { 61 } else if (c_tx->in_syscall) {
61 if (copy_from_user((void *)paddr, 62 if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
62 (const void __user *)sge->laddr,
63 bytes)) 63 bytes))
64 return -EFAULT; 64 return -EFAULT;
65 } else { 65 } else {
@@ -79,12 +79,12 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
79 buffer = kmap_atomic(p); 79 buffer = kmap_atomic(p);
80 80
81 if (likely(PAGE_SIZE - off >= bytes)) { 81 if (likely(PAGE_SIZE - off >= bytes)) {
82 memcpy((void *)paddr, buffer + off, bytes); 82 memcpy(paddr, buffer + off, bytes);
83 kunmap_atomic(buffer); 83 kunmap_atomic(buffer);
84 } else { 84 } else {
85 unsigned long part = bytes - (PAGE_SIZE - off); 85 unsigned long part = bytes - (PAGE_SIZE - off);
86 86
87 memcpy((void *)paddr, buffer + off, part); 87 memcpy(paddr, buffer + off, part);
88 kunmap_atomic(buffer); 88 kunmap_atomic(buffer);
89 89
90 if (!mem->is_pbl) 90 if (!mem->is_pbl)
@@ -98,7 +98,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
98 return -EFAULT; 98 return -EFAULT;
99 99
100 buffer = kmap_atomic(p); 100 buffer = kmap_atomic(p);
101 memcpy((void *)(paddr + part), buffer, 101 memcpy(paddr + part, buffer,
102 bytes - part); 102 bytes - part);
103 kunmap_atomic(buffer); 103 kunmap_atomic(buffer);
104 } 104 }
@@ -166,7 +166,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
166 c_tx->ctrl_len = sizeof(struct iwarp_send); 166 c_tx->ctrl_len = sizeof(struct iwarp_send);
167 167
168 crc = (char *)&c_tx->pkt.send_pkt.crc; 168 crc = (char *)&c_tx->pkt.send_pkt.crc;
169 data = siw_try_1seg(c_tx, (u64)crc); 169 data = siw_try_1seg(c_tx, crc);
170 break; 170 break;
171 171
172 case SIW_OP_SEND_REMOTE_INV: 172 case SIW_OP_SEND_REMOTE_INV:
@@ -189,7 +189,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
189 c_tx->ctrl_len = sizeof(struct iwarp_send_inv); 189 c_tx->ctrl_len = sizeof(struct iwarp_send_inv);
190 190
191 crc = (char *)&c_tx->pkt.send_pkt.crc; 191 crc = (char *)&c_tx->pkt.send_pkt.crc;
192 data = siw_try_1seg(c_tx, (u64)crc); 192 data = siw_try_1seg(c_tx, crc);
193 break; 193 break;
194 194
195 case SIW_OP_WRITE: 195 case SIW_OP_WRITE:
@@ -201,7 +201,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
201 c_tx->ctrl_len = sizeof(struct iwarp_rdma_write); 201 c_tx->ctrl_len = sizeof(struct iwarp_rdma_write);
202 202
203 crc = (char *)&c_tx->pkt.write_pkt.crc; 203 crc = (char *)&c_tx->pkt.write_pkt.crc;
204 data = siw_try_1seg(c_tx, (u64)crc); 204 data = siw_try_1seg(c_tx, crc);
205 break; 205 break;
206 206
207 case SIW_OP_READ_RESPONSE: 207 case SIW_OP_READ_RESPONSE:
@@ -216,7 +216,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
216 c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp); 216 c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp);
217 217
218 crc = (char *)&c_tx->pkt.write_pkt.crc; 218 crc = (char *)&c_tx->pkt.write_pkt.crc;
219 data = siw_try_1seg(c_tx, (u64)crc); 219 data = siw_try_1seg(c_tx, crc);
220 break; 220 break;
221 221
222 default: 222 default:
@@ -398,15 +398,13 @@ static int siw_0copy_tx(struct socket *s, struct page **page,
398 398
399#define MAX_TRAILER (MPA_CRC_SIZE + 4) 399#define MAX_TRAILER (MPA_CRC_SIZE + 4)
400 400
401static void siw_unmap_pages(struct page **pages, int hdr_len, int num_maps) 401static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
402{ 402{
403 if (hdr_len) { 403 while (kmap_mask) {
404 ++pages; 404 if (kmap_mask & BIT(0))
405 --num_maps; 405 kunmap(*pp);
406 } 406 pp++;
407 while (num_maps-- > 0) { 407 kmap_mask >>= 1;
408 kunmap(*pages);
409 pages++;
410 } 408 }
411} 409}
412 410
@@ -437,6 +435,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
437 unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0, 435 unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
438 sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx, 436 sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
439 pbl_idx = c_tx->pbl_idx; 437 pbl_idx = c_tx->pbl_idx;
438 unsigned long kmap_mask = 0L;
440 439
441 if (c_tx->state == SIW_SEND_HDR) { 440 if (c_tx->state == SIW_SEND_HDR) {
442 if (c_tx->use_sendpage) { 441 if (c_tx->use_sendpage) {
@@ -463,8 +462,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
463 462
464 if (!(tx_flags(wqe) & SIW_WQE_INLINE)) { 463 if (!(tx_flags(wqe) & SIW_WQE_INLINE)) {
465 mem = wqe->mem[sge_idx]; 464 mem = wqe->mem[sge_idx];
466 if (!mem->mem_obj) 465 is_kva = mem->mem_obj == NULL ? 1 : 0;
467 is_kva = 1;
468 } else { 466 } else {
469 is_kva = 1; 467 is_kva = 1;
470 } 468 }
@@ -473,7 +471,8 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
473 * tx from kernel virtual address: either inline data 471 * tx from kernel virtual address: either inline data
474 * or memory region with assigned kernel buffer 472 * or memory region with assigned kernel buffer
475 */ 473 */
476 iov[seg].iov_base = (void *)(sge->laddr + sge_off); 474 iov[seg].iov_base =
475 (void *)(uintptr_t)(sge->laddr + sge_off);
477 iov[seg].iov_len = sge_len; 476 iov[seg].iov_len = sge_len;
478 477
479 if (do_crc) 478 if (do_crc)
@@ -500,12 +499,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
500 p = siw_get_upage(mem->umem, 499 p = siw_get_upage(mem->umem,
501 sge->laddr + sge_off); 500 sge->laddr + sge_off);
502 if (unlikely(!p)) { 501 if (unlikely(!p)) {
503 if (hdr_len) 502 siw_unmap_pages(page_array, kmap_mask);
504 seg--;
505 if (!c_tx->use_sendpage && seg) {
506 siw_unmap_pages(page_array,
507 hdr_len, seg);
508 }
509 wqe->processed -= c_tx->bytes_unsent; 503 wqe->processed -= c_tx->bytes_unsent;
510 rv = -EFAULT; 504 rv = -EFAULT;
511 goto done_crc; 505 goto done_crc;
@@ -515,6 +509,10 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
515 if (!c_tx->use_sendpage) { 509 if (!c_tx->use_sendpage) {
516 iov[seg].iov_base = kmap(p) + fp_off; 510 iov[seg].iov_base = kmap(p) + fp_off;
517 iov[seg].iov_len = plen; 511 iov[seg].iov_len = plen;
512
513 /* Remember for later kunmap() */
514 kmap_mask |= BIT(seg);
515
518 if (do_crc) 516 if (do_crc)
519 crypto_shash_update( 517 crypto_shash_update(
520 c_tx->mpa_crc_hd, 518 c_tx->mpa_crc_hd,
@@ -526,13 +524,13 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
526 page_address(p) + fp_off, 524 page_address(p) + fp_off,
527 plen); 525 plen);
528 } else { 526 } else {
529 u64 pa = ((sge->laddr + sge_off) & PAGE_MASK); 527 u64 va = sge->laddr + sge_off;
530 528
531 page_array[seg] = virt_to_page(pa); 529 page_array[seg] = virt_to_page(va & PAGE_MASK);
532 if (do_crc) 530 if (do_crc)
533 crypto_shash_update( 531 crypto_shash_update(
534 c_tx->mpa_crc_hd, 532 c_tx->mpa_crc_hd,
535 (void *)(sge->laddr + sge_off), 533 (void *)(uintptr_t)va,
536 plen); 534 plen);
537 } 535 }
538 536
@@ -543,10 +541,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
543 541
544 if (++seg > (int)MAX_ARRAY) { 542 if (++seg > (int)MAX_ARRAY) {
545 siw_dbg_qp(tx_qp(c_tx), "to many fragments\n"); 543 siw_dbg_qp(tx_qp(c_tx), "to many fragments\n");
546 if (!is_kva && !c_tx->use_sendpage) { 544 siw_unmap_pages(page_array, kmap_mask);
547 siw_unmap_pages(page_array, hdr_len,
548 seg - 1);
549 }
550 wqe->processed -= c_tx->bytes_unsent; 545 wqe->processed -= c_tx->bytes_unsent;
551 rv = -EMSGSIZE; 546 rv = -EMSGSIZE;
552 goto done_crc; 547 goto done_crc;
@@ -597,8 +592,7 @@ sge_done:
597 } else { 592 } else {
598 rv = kernel_sendmsg(s, &msg, iov, seg + 1, 593 rv = kernel_sendmsg(s, &msg, iov, seg + 1,
599 hdr_len + data_len + trl_len); 594 hdr_len + data_len + trl_len);
600 if (!is_kva) 595 siw_unmap_pages(page_array, kmap_mask);
601 siw_unmap_pages(page_array, hdr_len, seg);
602 } 596 }
603 if (rv < (int)hdr_len) { 597 if (rv < (int)hdr_len) {
604 /* Not even complete hdr pushed or negative rv */ 598 /* Not even complete hdr pushed or negative rv */
@@ -829,7 +823,8 @@ static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
829 rv = -EINVAL; 823 rv = -EINVAL;
830 goto tx_error; 824 goto tx_error;
831 } 825 }
832 wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1]; 826 wqe->sqe.sge[0].laddr =
827 (u64)(uintptr_t)&wqe->sqe.sge[1];
833 } 828 }
834 } 829 }
835 wqe->wr_status = SIW_WR_INPROGRESS; 830 wqe->wr_status = SIW_WR_INPROGRESS;
@@ -924,7 +919,7 @@ tx_error:
924 919
925static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe) 920static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
926{ 921{
927 struct ib_mr *base_mr = (struct ib_mr *)sqe->base_mr; 922 struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
928 struct siw_device *sdev = to_siw_dev(pd->device); 923 struct siw_device *sdev = to_siw_dev(pd->device);
929 struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); 924 struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
930 int rv = 0; 925 int rv = 0;
@@ -954,8 +949,7 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
954 mem->stag = sqe->rkey; 949 mem->stag = sqe->rkey;
955 mem->perms = sqe->access; 950 mem->perms = sqe->access;
956 951
957 siw_dbg_mem(mem, "STag now valid, MR va: 0x%016llx -> 0x%016llx\n", 952 siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey);
958 mem->va, base_mr->iova);
959 mem->va = base_mr->iova; 953 mem->va = base_mr->iova;
960 mem->stag_valid = 1; 954 mem->stag_valid = 1;
961out: 955out:
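
Note on the siw_qp_tx.c rework above: instead of reconstructing which iov
slots were kmap()ed from hdr_len and the running segment count, the code now
sets one bit per mapped slot in kmap_mask, and siw_unmap_pages() walks that
mask. The error paths (bogus page, -EMSGSIZE) and the normal kernel_sendmsg()
path thereby share a single, always-correct cleanup call. A compilable
stand-in for the walk, with kunmap() stubbed out:

	#include <stdio.h>

	#define BIT(n) (1UL << (n))

	static void kunmap_stub(int slot)	/* stands in for kunmap(*pp) */
	{
		printf("kunmap slot %d\n", slot);
	}

	static void unmap_pages(unsigned long kmap_mask)
	{
		int slot = 0;

		while (kmap_mask) {
			if (kmap_mask & BIT(0))
				kunmap_stub(slot);
			slot++;
			kmap_mask >>= 1;
		}
	}

	int main(void)
	{
		unmap_pages(BIT(1) | BIT(3));	/* slots 1 and 3 were mapped */
		return 0;
	}
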
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index e7f3a2379d9d..da52c90e06d4 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -424,8 +424,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
424 */ 424 */
425 qp->srq = to_siw_srq(attrs->srq); 425 qp->srq = to_siw_srq(attrs->srq);
426 qp->attrs.rq_size = 0; 426 qp->attrs.rq_size = 0;
427 siw_dbg(base_dev, "QP [%u]: [SRQ 0x%p] attached\n", 427 siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num);
428 qp->qp_num, qp->srq);
429 } else if (num_rqe) { 428 } else if (num_rqe) {
430 if (qp->kernel_verbs) 429 if (qp->kernel_verbs)
431 qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe)); 430 qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
@@ -610,7 +609,7 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
610 base_ucontext); 609 base_ucontext);
611 struct siw_qp_attrs qp_attrs; 610 struct siw_qp_attrs qp_attrs;
612 611
613 siw_dbg_qp(qp, "state %d, cep 0x%p\n", qp->attrs.state, qp->cep); 612 siw_dbg_qp(qp, "state %d\n", qp->attrs.state);
614 613
615 /* 614 /*
616 * Mark QP as in process of destruction to prevent from 615 * Mark QP as in process of destruction to prevent from
@@ -662,7 +661,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
662 void *kbuf = &sqe->sge[1]; 661 void *kbuf = &sqe->sge[1];
663 int num_sge = core_wr->num_sge, bytes = 0; 662 int num_sge = core_wr->num_sge, bytes = 0;
664 663
665 sqe->sge[0].laddr = (u64)kbuf; 664 sqe->sge[0].laddr = (uintptr_t)kbuf;
666 sqe->sge[0].lkey = 0; 665 sqe->sge[0].lkey = 0;
667 666
668 while (num_sge--) { 667 while (num_sge--) {
@@ -825,7 +824,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
825 break; 824 break;
826 825
827 case IB_WR_REG_MR: 826 case IB_WR_REG_MR:
828 sqe->base_mr = (uint64_t)reg_wr(wr)->mr; 827 sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
829 sqe->rkey = reg_wr(wr)->key; 828 sqe->rkey = reg_wr(wr)->key;
830 sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK; 829 sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
831 sqe->opcode = SIW_OP_REG_MR; 830 sqe->opcode = SIW_OP_REG_MR;
@@ -842,8 +841,9 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
842 rv = -EINVAL; 841 rv = -EINVAL;
843 break; 842 break;
844 } 843 }
845 siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n", 844 siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
846 sqe->opcode, sqe->flags, (void *)sqe->id); 845 sqe->opcode, sqe->flags,
846 (void *)(uintptr_t)sqe->id);
847 847
848 if (unlikely(rv < 0)) 848 if (unlikely(rv < 0))
849 break; 849 break;
@@ -1205,8 +1205,8 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
1205 unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK); 1205 unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
1206 int rv; 1206 int rv;
1207 1207
1208 siw_dbg_pd(pd, "start: 0x%016llx, va: 0x%016llx, len: %llu\n", 1208 siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
1209 (unsigned long long)start, (unsigned long long)rnic_va, 1209 (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
1210 (unsigned long long)len); 1210 (unsigned long long)len);
1211 1211
1212 if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) { 1212 if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
@@ -1363,7 +1363,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
1363 struct siw_mem *mem = mr->mem; 1363 struct siw_mem *mem = mr->mem;
1364 struct siw_pbl *pbl = mem->pbl; 1364 struct siw_pbl *pbl = mem->pbl;
1365 struct siw_pble *pble; 1365 struct siw_pble *pble;
1366 u64 pbl_size; 1366 unsigned long pbl_size;
1367 int i, rv; 1367 int i, rv;
1368 1368
1369 if (!pbl) { 1369 if (!pbl) {
@@ -1402,16 +1402,18 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
1402 pbl_size += sg_dma_len(slp); 1402 pbl_size += sg_dma_len(slp);
1403 } 1403 }
1404 siw_dbg_mem(mem, 1404 siw_dbg_mem(mem,
1405 "sge[%d], size %llu, addr 0x%016llx, total %llu\n", 1405 "sge[%d], size %u, addr 0x%p, total %lu\n",
1406 i, pble->size, pble->addr, pbl_size); 1406 i, pble->size, (void *)(uintptr_t)pble->addr,
1407 pbl_size);
1407 } 1408 }
1408 rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page); 1409 rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
1409 if (rv > 0) { 1410 if (rv > 0) {
1410 mem->len = base_mr->length; 1411 mem->len = base_mr->length;
1411 mem->va = base_mr->iova; 1412 mem->va = base_mr->iova;
1412 siw_dbg_mem(mem, 1413 siw_dbg_mem(mem,
1413 "%llu bytes, start 0x%016llx, %u SLE to %u entries\n", 1414 "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
1414 mem->len, mem->va, num_sle, pbl->num_buf); 1415 mem->len, (void *)(uintptr_t)mem->va, num_sle,
1416 pbl->num_buf);
1415 } 1417 }
1416 return rv; 1418 return rv;
1417} 1419}
@@ -1529,7 +1531,7 @@ int siw_create_srq(struct ib_srq *base_srq,
1529 } 1531 }
1530 spin_lock_init(&srq->lock); 1532 spin_lock_init(&srq->lock);
1531 1533
1532 siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: success\n", srq); 1534 siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");
1533 1535
1534 return 0; 1536 return 0;
1535 1537
@@ -1650,8 +1652,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1650 1652
1651 if (unlikely(!srq->kernel_verbs)) { 1653 if (unlikely(!srq->kernel_verbs)) {
1652 siw_dbg_pd(base_srq->pd, 1654 siw_dbg_pd(base_srq->pd,
1653 "[SRQ 0x%p]: no kernel post_recv for mapped srq\n", 1655 "[SRQ]: no kernel post_recv for mapped srq\n");
1654 srq);
1655 rv = -EINVAL; 1656 rv = -EINVAL;
1656 goto out; 1657 goto out;
1657 } 1658 }
@@ -1673,8 +1674,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1673 } 1674 }
1674 if (unlikely(wr->num_sge > srq->max_sge)) { 1675 if (unlikely(wr->num_sge > srq->max_sge)) {
1675 siw_dbg_pd(base_srq->pd, 1676 siw_dbg_pd(base_srq->pd,
1676 "[SRQ 0x%p]: too many sge's: %d\n", srq, 1677 "[SRQ]: too many sge's: %d\n", wr->num_sge);
1677 wr->num_sge);
1678 rv = -EINVAL; 1678 rv = -EINVAL;
1679 break; 1679 break;
1680 } 1680 }
@@ -1693,7 +1693,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1693 spin_unlock_irqrestore(&srq->lock, flags); 1693 spin_unlock_irqrestore(&srq->lock, flags);
1694out: 1694out:
1695 if (unlikely(rv < 0)) { 1695 if (unlikely(rv < 0)) {
1696 siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: error %d\n", srq, rv); 1696 siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
1697 *bad_wr = wr; 1697 *bad_wr = wr;
1698 } 1698 }
1699 return rv; 1699 return rv;
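
Note on the siw_verbs.c hunks: besides dropping raw pointers, the debug
format strings are brought into agreement with their argument types, e.g.
pble->size now prints with %u and the retyped unsigned long pbl_size with
%lu, instead of a blanket %llu propped up by casts. Matching specifiers let
the compiler's printf format checking do its job. Schematically:

	unsigned long pbl_size = 0;

	/* specifier matches the declared type, no cast required */
	siw_dbg_mem(mem, "sge[%d], size %u, total %lu\n",
		    i, pble->size, pbl_size);
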
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 88ae7c2ac3c8..e486a8a74c40 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -237,40 +237,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
237 237
238static void hv_kbd_on_channel_callback(void *context) 238static void hv_kbd_on_channel_callback(void *context)
239{ 239{
240 struct vmpacket_descriptor *desc;
240 struct hv_device *hv_dev = context; 241 struct hv_device *hv_dev = context;
241 void *buffer;
242 int bufferlen = 0x100; /* Start with sensible size */
243 u32 bytes_recvd; 242 u32 bytes_recvd;
244 u64 req_id; 243 u64 req_id;
245 int error;
246 244
247 buffer = kmalloc(bufferlen, GFP_ATOMIC); 245 foreach_vmbus_pkt(desc, hv_dev->channel) {
248 if (!buffer) 246 bytes_recvd = desc->len8 * 8;
249 return; 247 req_id = desc->trans_id;
250
251 while (1) {
252 error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
253 &bytes_recvd, &req_id);
254 switch (error) {
255 case 0:
256 if (bytes_recvd == 0) {
257 kfree(buffer);
258 return;
259 }
260
261 hv_kbd_handle_received_packet(hv_dev, buffer,
262 bytes_recvd, req_id);
263 break;
264 248
265 case -ENOBUFS: 249 hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
266 kfree(buffer); 250 req_id);
267 /* Handle large packet */
268 bufferlen = bytes_recvd;
269 buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
270 if (!buffer)
271 return;
272 break;
273 }
274 } 251 }
275} 252}
276 253
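
Note on the hyperv-keyboard rewrite above: the open-coded
vmbus_recvpacket_raw() loop needed a GFP_ATOMIC buffer plus a grow-and-retry
dance on -ENOBUFS. foreach_vmbus_pkt() iterates the descriptors in place in
the channel ring buffer, so the callback no longer allocates at all. Shape of
the new loop, as in the hunk (desc->len8 counts 8-byte units):

	struct vmpacket_descriptor *desc;

	foreach_vmbus_pkt(desc, hv_dev->channel) {
		u32 bytes_recvd = desc->len8 * 8;

		hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
					      desc->trans_id);
	}
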
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d991d40f797f..f68a62c3c32b 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -965,11 +965,14 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
965{ 965{
966 bool coherent = dev_is_dma_coherent(dev); 966 bool coherent = dev_is_dma_coherent(dev);
967 size_t alloc_size = PAGE_ALIGN(size); 967 size_t alloc_size = PAGE_ALIGN(size);
968 int node = dev_to_node(dev);
968 struct page *page = NULL; 969 struct page *page = NULL;
969 void *cpu_addr; 970 void *cpu_addr;
970 971
971 page = dma_alloc_contiguous(dev, alloc_size, gfp); 972 page = dma_alloc_contiguous(dev, alloc_size, gfp);
972 if (!page) 973 if (!page)
974 page = alloc_pages_node(node, gfp, get_order(alloc_size));
975 if (!page)
973 return NULL; 976 return NULL;
974 977
975 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { 978 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
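
Note on the dma-iommu hunk: dma_alloc_contiguous() can return NULL when no
CMA area is usable, and previously that failed the whole allocation. The
added fallback retries on the regular page allocator, preferring the device's
NUMA node so that streaming DMA buffers stay local to the device. Fallback
chain as applied:

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)	/* CMA exhausted or absent: plain node-local pages */
		page = alloc_pages_node(dev_to_node(dev), gfp,
					get_order(alloc_size));
	if (!page)
		return NULL;
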
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b6b5acc92ca2..2a48ea3f1b30 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1599,7 +1599,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1599 unsigned long freed; 1599 unsigned long freed;
1600 1600
1601 c = container_of(shrink, struct dm_bufio_client, shrinker); 1601 c = container_of(shrink, struct dm_bufio_client, shrinker);
1602 if (!dm_bufio_trylock(c)) 1602 if (sc->gfp_mask & __GFP_FS)
1603 dm_bufio_lock(c);
1604 else if (!dm_bufio_trylock(c))
1603 return SHRINK_STOP; 1605 return SHRINK_STOP;
1604 1606
1605 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask); 1607 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
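
Note on the dm-bufio hunk: a shrinker invoked from a reclaim context that
allows __GFP_FS may sleep, so it can afford to wait for the client lock;
callers that cannot block keep the old trylock-or-SHRINK_STOP behavior. The
distinction matters because an unconditional trylock left the shrinker unable
to free anything whenever the lock happened to be contended. Decision as
applied:

	if (sc->gfp_mask & __GFP_FS)
		dm_bufio_lock(c);	/* may sleep: blocking is allowed */
	else if (!dm_bufio_trylock(c))
		return SHRINK_STOP;	/* must not block: retry later */
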
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 845f376a72d9..8288887b7f94 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -25,6 +25,7 @@ struct dust_device {
25 unsigned long long badblock_count; 25 unsigned long long badblock_count;
26 spinlock_t dust_lock; 26 spinlock_t dust_lock;
27 unsigned int blksz; 27 unsigned int blksz;
28 int sect_per_block_shift;
28 unsigned int sect_per_block; 29 unsigned int sect_per_block;
29 sector_t start; 30 sector_t start;
30 bool fail_read_on_bb:1; 31 bool fail_read_on_bb:1;
@@ -79,7 +80,7 @@ static int dust_remove_block(struct dust_device *dd, unsigned long long block)
79 unsigned long flags; 80 unsigned long flags;
80 81
81 spin_lock_irqsave(&dd->dust_lock, flags); 82 spin_lock_irqsave(&dd->dust_lock, flags);
82 bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block); 83 bblock = dust_rb_search(&dd->badblocklist, block);
83 84
84 if (bblock == NULL) { 85 if (bblock == NULL) {
85 if (!dd->quiet_mode) { 86 if (!dd->quiet_mode) {
@@ -113,7 +114,7 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block)
113 } 114 }
114 115
115 spin_lock_irqsave(&dd->dust_lock, flags); 116 spin_lock_irqsave(&dd->dust_lock, flags);
116 bblock->bb = block * dd->sect_per_block; 117 bblock->bb = block;
117 if (!dust_rb_insert(&dd->badblocklist, bblock)) { 118 if (!dust_rb_insert(&dd->badblocklist, bblock)) {
118 if (!dd->quiet_mode) { 119 if (!dd->quiet_mode) {
119 DMERR("%s: block %llu already in badblocklist", 120 DMERR("%s: block %llu already in badblocklist",
@@ -138,7 +139,7 @@ static int dust_query_block(struct dust_device *dd, unsigned long long block)
138 unsigned long flags; 139 unsigned long flags;
139 140
140 spin_lock_irqsave(&dd->dust_lock, flags); 141 spin_lock_irqsave(&dd->dust_lock, flags);
141 bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block); 142 bblock = dust_rb_search(&dd->badblocklist, block);
142 if (bblock != NULL) 143 if (bblock != NULL)
143 DMINFO("%s: block %llu found in badblocklist", __func__, block); 144 DMINFO("%s: block %llu found in badblocklist", __func__, block);
144 else 145 else
@@ -165,6 +166,7 @@ static int dust_map_read(struct dust_device *dd, sector_t thisblock,
165 int ret = DM_MAPIO_REMAPPED; 166 int ret = DM_MAPIO_REMAPPED;
166 167
167 if (fail_read_on_bb) { 168 if (fail_read_on_bb) {
169 thisblock >>= dd->sect_per_block_shift;
168 spin_lock_irqsave(&dd->dust_lock, flags); 170 spin_lock_irqsave(&dd->dust_lock, flags);
169 ret = __dust_map_read(dd, thisblock); 171 ret = __dust_map_read(dd, thisblock);
170 spin_unlock_irqrestore(&dd->dust_lock, flags); 172 spin_unlock_irqrestore(&dd->dust_lock, flags);
@@ -195,6 +197,7 @@ static int dust_map_write(struct dust_device *dd, sector_t thisblock,
195 unsigned long flags; 197 unsigned long flags;
196 198
197 if (fail_read_on_bb) { 199 if (fail_read_on_bb) {
200 thisblock >>= dd->sect_per_block_shift;
198 spin_lock_irqsave(&dd->dust_lock, flags); 201 spin_lock_irqsave(&dd->dust_lock, flags);
199 __dust_map_write(dd, thisblock); 202 __dust_map_write(dd, thisblock);
200 spin_unlock_irqrestore(&dd->dust_lock, flags); 203 spin_unlock_irqrestore(&dd->dust_lock, flags);
@@ -331,6 +334,8 @@ static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
331 dd->blksz = blksz; 334 dd->blksz = blksz;
332 dd->start = tmp; 335 dd->start = tmp;
333 336
337 dd->sect_per_block_shift = __ffs(sect_per_block);
338
334 /* 339 /*
335 * Whether to fail a read on a "bad" block. 340 * Whether to fail a read on a "bad" block.
336 * Defaults to false; enabled later by message. 341 * Defaults to false; enabled later by message.
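
Note on the dm-dust changes: bad blocks are now stored in the rb-tree in
block units rather than pre-scaled to sectors, and the I/O paths convert an
incoming sector with a shift. Since sect_per_block is a power of two here,
__ffs() yields its log2, turning a 64-bit division per request into
"thisblock >>= dd->sect_per_block_shift". A compilable illustration of the
arithmetic, with log2 open-coded as a stand-in for __ffs():

	#include <stdio.h>

	static int ilog2_pow2(unsigned int v)	/* v must be a power of two */
	{
		int s = 0;

		while (!(v & 1)) {
			v >>= 1;
			s++;
		}
		return s;
	}

	int main(void)
	{
		unsigned int sect_per_block = 8; /* 4096B blocks, 512B sectors */
		int shift = ilog2_pow2(sect_per_block);
		unsigned long long sector = 12345;

		/* block = sector / sect_per_block, done as a shift */
		printf("block %llu\n", sector >> shift);
		return 0;
	}
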
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index b1b0de402dfc..9118ab85cb3a 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1943,7 +1943,22 @@ offload_to_thread:
1943 queue_work(ic->wait_wq, &dio->work); 1943 queue_work(ic->wait_wq, &dio->work);
1944 return; 1944 return;
1945 } 1945 }
1946 if (journal_read_pos != NOT_FOUND)
1947 dio->range.n_sectors = ic->sectors_per_block;
1946 wait_and_add_new_range(ic, &dio->range); 1948 wait_and_add_new_range(ic, &dio->range);
1949 /*
1950 * wait_and_add_new_range drops the spinlock, so the journal
1951 * may have been changed arbitrarily. We need to recheck.
1952 * To simplify the code, we restrict I/O size to just one block.
1953 */
1954 if (journal_read_pos != NOT_FOUND) {
1955 sector_t next_sector;
1956 unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
1957 if (unlikely(new_pos != journal_read_pos)) {
1958 remove_range_unlocked(ic, &dio->range);
1959 goto retry;
1960 }
1961 }
1947 } 1962 }
1948 spin_unlock_irq(&ic->endio_wait.lock); 1963 spin_unlock_irq(&ic->endio_wait.lock);
1949 1964
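
Note on the dm-integrity hunk: wait_and_add_new_range() can drop and retake
endio_wait.lock, so a journal position sampled before the wait may be stale
afterwards. The fix narrows the range to a single block when a journal hit
was found, then revalidates the position after the wait and retries from the
top on a mismatch. Condensed, this is the usual sample/block/revalidate
pattern:

	wait_and_add_new_range(ic, &dio->range);	/* may drop the spinlock */
	if (journal_read_pos != NOT_FOUND) {
		sector_t next_sector;
		unsigned new_pos = find_journal_node(ic,
				dio->range.logical_sector, &next_sector);

		if (unlikely(new_pos != journal_read_pos)) {
			remove_range_unlocked(ic, &dio->range);
			goto retry;	/* journal moved: start over */
		}
	}
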
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index df2011de7be2..1bbe4a34ef4c 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -566,8 +566,10 @@ static int run_io_job(struct kcopyd_job *job)
566 * no point in continuing. 566 * no point in continuing.
567 */ 567 */
568 if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) && 568 if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
569 job->master_job->write_err) 569 job->master_job->write_err) {
570 job->write_err = job->master_job->write_err;
570 return -EIO; 571 return -EIO;
572 }
571 573
572 io_job_start(job->kc->throttle); 574 io_job_start(job->kc->throttle);
573 575
@@ -619,6 +621,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
619 else 621 else
620 job->read_err = 1; 622 job->read_err = 1;
621 push(&kc->complete_jobs, job); 623 push(&kc->complete_jobs, job);
624 wake(kc);
622 break; 625 break;
623 } 626 }
624 627
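
Note on the two dm-kcopyd fixes: a sequential-write sub-job aborted because
the master job already failed now records that error in its own write_err, so
completion accounting sees a failed rather than a silently skipped write; and
a job pushed onto complete_jobs from within process_jobs() now kicks the
worker with wake(), closing a window where the completion could sit queued
with nobody scheduled to run it. The first change as applied:

	if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
	    job->master_job->write_err) {
		job->write_err = job->master_job->write_err;
		return -EIO;
	}
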
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 8a60a4a070ac..1f933dd197cd 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3194,7 +3194,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3194 */ 3194 */
3195 r = rs_prepare_reshape(rs); 3195 r = rs_prepare_reshape(rs);
3196 if (r) 3196 if (r)
3197 return r; 3197 goto bad;
3198 3198
3199 /* Reshaping ain't recovery, so disable recovery */ 3199 /* Reshaping ain't recovery, so disable recovery */
3200 rs_setup_recovery(rs, MaxSector); 3200 rs_setup_recovery(rs, MaxSector);
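
Note on the one-line dm-raid fix: returning directly from raid_ctr() when
rs_prepare_reshape() fails skips the "bad:" teardown and leaks the raid_set;
every failure after the allocation has to funnel through the cleanup label.
A compilable miniature of the idiom (user-space stand-ins, not the driver's
types):

	#include <stdlib.h>

	static int ctr_example(int fail_late)
	{
		char *rs = malloc(64);	/* stands in for raid_set_alloc() */
		int r;

		if (!rs)
			return -1;
		if (fail_late) {
			r = -1;
			goto bad;	/* "return r;" here would leak rs */
		}
		return 0;		/* success: caller now owns rs */
	bad:
		free(rs);
		return r;
	}

	int main(void)
	{
		return ctr_example(1) == -1 ? 0 : 1;
	}
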
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 7b6c3ee9e755..8820931ec7d2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1342,7 +1342,7 @@ void dm_table_event(struct dm_table *t)
1342} 1342}
1343EXPORT_SYMBOL(dm_table_event); 1343EXPORT_SYMBOL(dm_table_event);
1344 1344
1345sector_t dm_table_get_size(struct dm_table *t) 1345inline sector_t dm_table_get_size(struct dm_table *t)
1346{ 1346{
1347 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; 1347 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1348} 1348}
@@ -1367,6 +1367,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1367 unsigned int l, n = 0, k = 0; 1367 unsigned int l, n = 0, k = 0;
1368 sector_t *node; 1368 sector_t *node;
1369 1369
1370 if (unlikely(sector >= dm_table_get_size(t)))
1371 return &t->targets[t->num_targets];
1372
1370 for (l = 0; l < t->depth; l++) { 1373 for (l = 0; l < t->depth; l++) {
1371 n = get_child(n, k); 1374 n = get_child(n, k);
1372 node = get_node(t, l, n); 1375 node = get_node(t, l, n);
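
Note on the dm-table hunk: dm_table_find_target() used to walk the btree even
for a sector beyond the end of the table and could hand back a bogus target.
It now returns the one-past-the-end slot, &t->targets[t->num_targets], as an
explicit out-of-range sentinel for callers to check before dereferencing
(dm_table_get_size() is marked inline since the bounds test puts it on the
per-I/O path). Illustrative caller-side check, not taken from this diff:

	struct dm_target *ti = dm_table_find_target(t, sector);

	/* one past the last target means "sector out of range" */
	if (ti == &t->targets[t->num_targets])
		return -EIO;
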
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 8545dcee9fd0..595a73110e17 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -34,7 +35,7 @@
34 * (1) Super block (1 block) 35 * (1) Super block (1 block)
35 * (2) Chunk mapping table (nr_map_blocks) 36 * (2) Chunk mapping table (nr_map_blocks)
36 * (3) Bitmap blocks (nr_bitmap_blocks) 37 * (3) Bitmap blocks (nr_bitmap_blocks)
37 * All metadata blocks are stored in conventional zones, starting from the 38 * All metadata blocks are stored in conventional zones, starting from
38 * the first conventional zone found on disk. 39 * the first conventional zone found on disk.
39 */ 40 */
40struct dmz_super { 41struct dmz_super {
@@ -233,7 +234,7 @@ void dmz_unlock_map(struct dmz_metadata *zmd)
233 * Lock/unlock metadata access. This is a "read" lock on a semaphore 234 * Lock/unlock metadata access. This is a "read" lock on a semaphore
234 * that prevents metadata flush from running while metadata are being 235 * that prevents metadata flush from running while metadata are being
235 * modified. The actual metadata write mutual exclusion is achieved with 236 * modified. The actual metadata write mutual exclusion is achieved with
236 * the map lock and zone styate management (active and reclaim state are 237 * the map lock and zone state management (active and reclaim state are
237 * mutually exclusive). 238 * mutually exclusive).
238 */ 239 */
239void dmz_lock_metadata(struct dmz_metadata *zmd) 240void dmz_lock_metadata(struct dmz_metadata *zmd)
@@ -402,15 +403,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
402 sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no; 403 sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
403 struct bio *bio; 404 struct bio *bio;
404 405
406 if (dmz_bdev_is_dying(zmd->dev))
407 return ERR_PTR(-EIO);
408
405 /* Get a new block and a BIO to read it */ 409 /* Get a new block and a BIO to read it */
406 mblk = dmz_alloc_mblock(zmd, mblk_no); 410 mblk = dmz_alloc_mblock(zmd, mblk_no);
407 if (!mblk) 411 if (!mblk)
408 return NULL; 412 return ERR_PTR(-ENOMEM);
409 413
410 bio = bio_alloc(GFP_NOIO, 1); 414 bio = bio_alloc(GFP_NOIO, 1);
411 if (!bio) { 415 if (!bio) {
412 dmz_free_mblock(zmd, mblk); 416 dmz_free_mblock(zmd, mblk);
413 return NULL; 417 return ERR_PTR(-ENOMEM);
414 } 418 }
415 419
416 spin_lock(&zmd->mblk_lock); 420 spin_lock(&zmd->mblk_lock);
@@ -541,8 +545,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
541 if (!mblk) { 545 if (!mblk) {
542 /* Cache miss: read the block from disk */ 546 /* Cache miss: read the block from disk */
543 mblk = dmz_get_mblock_slow(zmd, mblk_no); 547 mblk = dmz_get_mblock_slow(zmd, mblk_no);
544 if (!mblk) 548 if (IS_ERR(mblk))
545 return ERR_PTR(-ENOMEM); 549 return mblk;
546 } 550 }
547 551
548 /* Wait for on-going read I/O and check for error */ 552 /* Wait for on-going read I/O and check for error */
@@ -570,16 +574,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
570/* 574/*
571 * Issue a metadata block write BIO. 575 * Issue a metadata block write BIO.
572 */ 576 */
573static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, 577static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
574 unsigned int set) 578 unsigned int set)
575{ 579{
576 sector_t block = zmd->sb[set].block + mblk->no; 580 sector_t block = zmd->sb[set].block + mblk->no;
577 struct bio *bio; 581 struct bio *bio;
578 582
583 if (dmz_bdev_is_dying(zmd->dev))
584 return -EIO;
585
579 bio = bio_alloc(GFP_NOIO, 1); 586 bio = bio_alloc(GFP_NOIO, 1);
580 if (!bio) { 587 if (!bio) {
581 set_bit(DMZ_META_ERROR, &mblk->state); 588 set_bit(DMZ_META_ERROR, &mblk->state);
582 return; 589 return -ENOMEM;
583 } 590 }
584 591
585 set_bit(DMZ_META_WRITING, &mblk->state); 592 set_bit(DMZ_META_WRITING, &mblk->state);
@@ -591,6 +598,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
591 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO); 598 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
592 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); 599 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
593 submit_bio(bio); 600 submit_bio(bio);
601
602 return 0;
594} 603}
595 604
596/* 605/*
@@ -602,6 +611,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
602 struct bio *bio; 611 struct bio *bio;
603 int ret; 612 int ret;
604 613
614 if (dmz_bdev_is_dying(zmd->dev))
615 return -EIO;
616
605 bio = bio_alloc(GFP_NOIO, 1); 617 bio = bio_alloc(GFP_NOIO, 1);
606 if (!bio) 618 if (!bio)
607 return -ENOMEM; 619 return -ENOMEM;
@@ -659,22 +671,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
659{ 671{
660 struct dmz_mblock *mblk; 672 struct dmz_mblock *mblk;
661 struct blk_plug plug; 673 struct blk_plug plug;
662 int ret = 0; 674 int ret = 0, nr_mblks_submitted = 0;
663 675
664 /* Issue writes */ 676 /* Issue writes */
665 blk_start_plug(&plug); 677 blk_start_plug(&plug);
666 list_for_each_entry(mblk, write_list, link) 678 list_for_each_entry(mblk, write_list, link) {
667 dmz_write_mblock(zmd, mblk, set); 679 ret = dmz_write_mblock(zmd, mblk, set);
680 if (ret)
681 break;
682 nr_mblks_submitted++;
683 }
668 blk_finish_plug(&plug); 684 blk_finish_plug(&plug);
669 685
670 /* Wait for completion */ 686 /* Wait for completion */
671 list_for_each_entry(mblk, write_list, link) { 687 list_for_each_entry(mblk, write_list, link) {
688 if (!nr_mblks_submitted)
689 break;
672 wait_on_bit_io(&mblk->state, DMZ_META_WRITING, 690 wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
673 TASK_UNINTERRUPTIBLE); 691 TASK_UNINTERRUPTIBLE);
674 if (test_bit(DMZ_META_ERROR, &mblk->state)) { 692 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
675 clear_bit(DMZ_META_ERROR, &mblk->state); 693 clear_bit(DMZ_META_ERROR, &mblk->state);
676 ret = -EIO; 694 ret = -EIO;
677 } 695 }
696 nr_mblks_submitted--;
678 } 697 }
679 698
680 /* Flush drive cache (this will also sync data) */ 699 /* Flush drive cache (this will also sync data) */
@@ -736,6 +755,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
736 */ 755 */
737 dmz_lock_flush(zmd); 756 dmz_lock_flush(zmd);
738 757
758 if (dmz_bdev_is_dying(zmd->dev)) {
759 ret = -EIO;
760 goto out;
761 }
762
739 /* Get dirty blocks */ 763 /* Get dirty blocks */
740 spin_lock(&zmd->mblk_lock); 764 spin_lock(&zmd->mblk_lock);
741 list_splice_init(&zmd->mblk_dirty_list, &write_list); 765 list_splice_init(&zmd->mblk_dirty_list, &write_list);
@@ -1542,7 +1566,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1542 struct dm_zone *zone; 1566 struct dm_zone *zone;
1543 1567
1544 if (list_empty(&zmd->map_rnd_list)) 1568 if (list_empty(&zmd->map_rnd_list))
1545 return NULL; 1569 return ERR_PTR(-EBUSY);
1546 1570
1547 list_for_each_entry(zone, &zmd->map_rnd_list, link) { 1571 list_for_each_entry(zone, &zmd->map_rnd_list, link) {
1548 if (dmz_is_buf(zone)) 1572 if (dmz_is_buf(zone))
@@ -1553,7 +1577,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1553 return dzone; 1577 return dzone;
1554 } 1578 }
1555 1579
1556 return NULL; 1580 return ERR_PTR(-EBUSY);
1557} 1581}
1558 1582
1559/* 1583/*
@@ -1564,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1564 struct dm_zone *zone; 1588 struct dm_zone *zone;
1565 1589
1566 if (list_empty(&zmd->map_seq_list)) 1590 if (list_empty(&zmd->map_seq_list))
1567 return NULL; 1591 return ERR_PTR(-EBUSY);
1568 1592
1569 list_for_each_entry(zone, &zmd->map_seq_list, link) { 1593 list_for_each_entry(zone, &zmd->map_seq_list, link) {
1570 if (!zone->bzone) 1594 if (!zone->bzone)
@@ -1573,7 +1597,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1573 return zone; 1597 return zone;
1574 } 1598 }
1575 1599
1576 return NULL; 1600 return ERR_PTR(-EBUSY);
1577} 1601}
1578 1602
1579/* 1603/*
@@ -1628,9 +1652,13 @@ again:
1628 if (op != REQ_OP_WRITE) 1652 if (op != REQ_OP_WRITE)
1629 goto out; 1653 goto out;
1630 1654
1631 /* Alloate a random zone */ 1655 /* Allocate a random zone */
1632 dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); 1656 dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1633 if (!dzone) { 1657 if (!dzone) {
1658 if (dmz_bdev_is_dying(zmd->dev)) {
1659 dzone = ERR_PTR(-EIO);
1660 goto out;
1661 }
1634 dmz_wait_for_free_zones(zmd); 1662 dmz_wait_for_free_zones(zmd);
1635 goto again; 1663 goto again;
1636 } 1664 }
@@ -1725,9 +1753,13 @@ again:
1725 if (bzone) 1753 if (bzone)
1726 goto out; 1754 goto out;
1727 1755
1728 /* Alloate a random zone */ 1756 /* Allocate a random zone */
1729 bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); 1757 bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1730 if (!bzone) { 1758 if (!bzone) {
1759 if (dmz_bdev_is_dying(zmd->dev)) {
1760 bzone = ERR_PTR(-EIO);
1761 goto out;
1762 }
1731 dmz_wait_for_free_zones(zmd); 1763 dmz_wait_for_free_zones(zmd);
1732 goto again; 1764 goto again;
1733 } 1765 }
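
Note on the dm-zoned-metadata conversion: lookup and allocation helpers that
used to return NULL on any failure now return ERR_PTR() codes, so callers can
tell a dying backend (-EIO) from memory pressure (-ENOMEM) or a temporary
shortage of reclaimable zones (-EBUSY) and react accordingly. The encoding
packs a negative errno into the top page of the address space, where no valid
pointer can live. A self-contained, compilable model of the convention:

	#include <errno.h>
	#include <stdio.h>

	#define MAX_ERRNO 4095	/* errnos occupy the top 4095 pointer values */

	static inline void *ERR_PTR(long err) { return (void *)err; }
	static inline long PTR_ERR(const void *p) { return (long)p; }
	static inline int IS_ERR(const void *p)
	{
		return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
	}

	static void *get_mblock(int dying)
	{
		if (dying)
			return ERR_PTR(-EIO);	/* distinct from -ENOMEM */
		return ERR_PTR(-ENOMEM);
	}

	int main(void)
	{
		void *mblk = get_mblock(1);

		if (IS_ERR(mblk))
			printf("error %ld\n", PTR_ERR(mblk));
		return 0;
	}
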
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index edf4b95eb075..d240d7ca8a8a 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -37,7 +38,7 @@ enum {
37/* 38/*
38 * Number of seconds of target BIO inactivity to consider the target idle. 39 * Number of seconds of target BIO inactivity to consider the target idle.
39 */ 40 */
40#define DMZ_IDLE_PERIOD (10UL * HZ) 41#define DMZ_IDLE_PERIOD (10UL * HZ)
41 42
42/* 43/*
43 * Percentage of unmapped (free) random zones below which reclaim starts 44 * Percentage of unmapped (free) random zones below which reclaim starts
@@ -134,6 +135,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
134 set_bit(DM_KCOPYD_WRITE_SEQ, &flags); 135 set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
135 136
136 while (block < end_block) { 137 while (block < end_block) {
138 if (dev->flags & DMZ_BDEV_DYING)
139 return -EIO;
140
137 /* Get a valid region from the source zone */ 141 /* Get a valid region from the source zone */
138 ret = dmz_first_valid_block(zmd, src_zone, &block); 142 ret = dmz_first_valid_block(zmd, src_zone, &block);
139 if (ret <= 0) 143 if (ret <= 0)
@@ -215,7 +219,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
215 219
216 dmz_unlock_flush(zmd); 220 dmz_unlock_flush(zmd);
217 221
218 return 0; 222 return ret;
219} 223}
220 224
221/* 225/*
@@ -259,7 +263,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
259 263
260 dmz_unlock_flush(zmd); 264 dmz_unlock_flush(zmd);
261 265
262 return 0; 266 return ret;
263} 267}
264 268
265/* 269/*
@@ -312,7 +316,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
312 316
313 dmz_unlock_flush(zmd); 317 dmz_unlock_flush(zmd);
314 318
315 return 0; 319 return ret;
316} 320}
317 321
318/* 322/*
@@ -334,7 +338,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
334/* 338/*
335 * Find a candidate zone for reclaim and process it. 339 * Find a candidate zone for reclaim and process it.
336 */ 340 */
337static void dmz_reclaim(struct dmz_reclaim *zrc) 341static int dmz_do_reclaim(struct dmz_reclaim *zrc)
338{ 342{
339 struct dmz_metadata *zmd = zrc->metadata; 343 struct dmz_metadata *zmd = zrc->metadata;
340 struct dm_zone *dzone; 344 struct dm_zone *dzone;
@@ -344,8 +348,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
344 348
345 /* Get a data zone */ 349 /* Get a data zone */
346 dzone = dmz_get_zone_for_reclaim(zmd); 350 dzone = dmz_get_zone_for_reclaim(zmd);
347 if (!dzone) 351 if (IS_ERR(dzone))
348 return; 352 return PTR_ERR(dzone);
349 353
350 start = jiffies; 354 start = jiffies;
351 355
@@ -391,13 +395,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
391out: 395out:
392 if (ret) { 396 if (ret) {
393 dmz_unlock_zone_reclaim(dzone); 397 dmz_unlock_zone_reclaim(dzone);
394 return; 398 return ret;
395 } 399 }
396 400
397 (void) dmz_flush_metadata(zrc->metadata); 401 ret = dmz_flush_metadata(zrc->metadata);
402 if (ret) {
403 dmz_dev_debug(zrc->dev,
404 "Metadata flush for zone %u failed, err %d\n",
405 dmz_id(zmd, rzone), ret);
406 return ret;
407 }
398 408
399 dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms", 409 dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
400 dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start)); 410 dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
411 return 0;
401} 412}
402 413
403/* 414/*
@@ -427,7 +438,7 @@ static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
427 return false; 438 return false;
428 439
429 /* 440 /*
430 * If the percentage of unmappped random zones is low, 441 * If the percentage of unmapped random zones is low,
431 * reclaim even if the target is busy. 442 * reclaim even if the target is busy.
432 */ 443 */
433 return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND; 444 return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
@@ -442,6 +453,10 @@ static void dmz_reclaim_work(struct work_struct *work)
442 struct dmz_metadata *zmd = zrc->metadata; 453 struct dmz_metadata *zmd = zrc->metadata;
443 unsigned int nr_rnd, nr_unmap_rnd; 454 unsigned int nr_rnd, nr_unmap_rnd;
444 unsigned int p_unmap_rnd; 455 unsigned int p_unmap_rnd;
456 int ret;
457
458 if (dmz_bdev_is_dying(zrc->dev))
459 return;
445 460
446 if (!dmz_should_reclaim(zrc)) { 461 if (!dmz_should_reclaim(zrc)) {
447 mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD); 462 mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
@@ -471,7 +486,17 @@ static void dmz_reclaim_work(struct work_struct *work)
471 (dmz_target_idle(zrc) ? "Idle" : "Busy"), 486 (dmz_target_idle(zrc) ? "Idle" : "Busy"),
472 p_unmap_rnd, nr_unmap_rnd, nr_rnd); 487 p_unmap_rnd, nr_unmap_rnd, nr_rnd);
473 488
474 dmz_reclaim(zrc); 489 ret = dmz_do_reclaim(zrc);
490 if (ret) {
491 dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
492 if (ret == -EIO)
493 /*
494 * LLD might be performing some error handling sequence
495 * at the underlying device. To not interfere, do not
496 * attempt to schedule the next reclaim run immediately.
497 */
498 return;
499 }
475 500
476 dmz_schedule_reclaim(zrc); 501 dmz_schedule_reclaim(zrc);
477} 502}
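
The dm-zoned-reclaim hunks above convert the reclaim path from fire-and-forget to full error propagation: dmz_do_reclaim() now returns the first failure it sees, and the work function skips the usual reschedule on -EIO so it does not get in the way of error handling at the lower layer. A minimal sketch of that delayed-work pattern, with hypothetical example_* names rather than the dm-zoned code itself:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

struct example_ctx {
	struct workqueue_struct *wq;
	struct delayed_work dwork;
};

/* Hypothetical worker body; may return -EIO on device failure. */
int example_do_one_pass(struct example_ctx *ctx);

static void example_work_fn(struct work_struct *work)
{
	struct example_ctx *ctx =
		container_of(work, struct example_ctx, dwork.work);

	/*
	 * On -EIO the device may be mid error recovery: back off and
	 * let whoever detects recovery requeue us, instead of polling.
	 */
	if (example_do_one_pass(ctx) == -EIO)
		return;

	queue_delayed_work(ctx->wq, &ctx->dwork, HZ);
}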
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 51d029bbb740..31478fef6032 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -133,6 +134,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
133 134
134 refcount_inc(&bioctx->ref); 135 refcount_inc(&bioctx->ref);
135 generic_make_request(clone); 136 generic_make_request(clone);
137 if (clone->bi_status == BLK_STS_IOERR)
138 return -EIO;
136 139
137 if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) 140 if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
138 zone->wp_block += nr_blocks; 141 zone->wp_block += nr_blocks;
@@ -277,8 +280,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
277 280
278 /* Get the buffer zone. One will be allocated if needed */ 281 /* Get the buffer zone. One will be allocated if needed */
279 bzone = dmz_get_chunk_buffer(zmd, zone); 282 bzone = dmz_get_chunk_buffer(zmd, zone);
280 if (!bzone) 283 if (IS_ERR(bzone))
281 return -ENOSPC; 284 return PTR_ERR(bzone);
282 285
283 if (dmz_is_readonly(bzone)) 286 if (dmz_is_readonly(bzone))
284 return -EROFS; 287 return -EROFS;
@@ -389,6 +392,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
389 392
390 dmz_lock_metadata(zmd); 393 dmz_lock_metadata(zmd);
391 394
395 if (dmz->dev->flags & DMZ_BDEV_DYING) {
396 ret = -EIO;
397 goto out;
398 }
399
392 /* 400 /*
393 * Get the data zone mapping the chunk. There may be no 401 * Get the data zone mapping the chunk. There may be no
394 * mapping for read and discard. If a mapping is obtained, 402 * mapping for read and discard. If a mapping is obtained,
@@ -493,6 +501,8 @@ static void dmz_flush_work(struct work_struct *work)
493 501
494 /* Flush dirty metadata blocks */ 502 /* Flush dirty metadata blocks */
495 ret = dmz_flush_metadata(dmz->metadata); 503 ret = dmz_flush_metadata(dmz->metadata);
504 if (ret)
505 dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
496 506
497 /* Process queued flush requests */ 507 /* Process queued flush requests */
498 while (1) { 508 while (1) {
@@ -513,22 +523,24 @@ static void dmz_flush_work(struct work_struct *work)
513 * Get a chunk work and start it to process a new BIO. 523 * Get a chunk work and start it to process a new BIO.
514 * If the BIO chunk has no work yet, create one. 524 * If the BIO chunk has no work yet, create one.
515 */ 525 */
516static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) 526static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
517{ 527{
518 unsigned int chunk = dmz_bio_chunk(dmz->dev, bio); 528 unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
519 struct dm_chunk_work *cw; 529 struct dm_chunk_work *cw;
530 int ret = 0;
520 531
521 mutex_lock(&dmz->chunk_lock); 532 mutex_lock(&dmz->chunk_lock);
522 533
523 /* Get the BIO chunk work. If one is not active yet, create one */ 534 /* Get the BIO chunk work. If one is not active yet, create one */
524 cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk); 535 cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
525 if (!cw) { 536 if (!cw) {
526 int ret;
527 537
528 /* Create a new chunk work */ 538 /* Create a new chunk work */
529 cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO); 539 cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
530 if (!cw) 540 if (unlikely(!cw)) {
541 ret = -ENOMEM;
531 goto out; 542 goto out;
543 }
532 544
533 INIT_WORK(&cw->work, dmz_chunk_work); 545 INIT_WORK(&cw->work, dmz_chunk_work);
534 refcount_set(&cw->refcount, 0); 546 refcount_set(&cw->refcount, 0);
@@ -539,7 +551,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
539 ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw); 551 ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
540 if (unlikely(ret)) { 552 if (unlikely(ret)) {
541 kfree(cw); 553 kfree(cw);
542 cw = NULL;
543 goto out; 554 goto out;
544 } 555 }
545 } 556 }
@@ -547,10 +558,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
547 bio_list_add(&cw->bio_list, bio); 558 bio_list_add(&cw->bio_list, bio);
548 dmz_get_chunk_work(cw); 559 dmz_get_chunk_work(cw);
549 560
561 dmz_reclaim_bio_acc(dmz->reclaim);
550 if (queue_work(dmz->chunk_wq, &cw->work)) 562 if (queue_work(dmz->chunk_wq, &cw->work))
551 dmz_get_chunk_work(cw); 563 dmz_get_chunk_work(cw);
552out: 564out:
553 mutex_unlock(&dmz->chunk_lock); 565 mutex_unlock(&dmz->chunk_lock);
566 return ret;
567}
568
569/*
570 * Check the backing device availability. If it's on the way out,
571 * start failing I/O. Reclaim and metadata components also call this
572 * function to cleanly abort operation in the event of such failure.
573 */
574bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
575{
576 struct gendisk *disk;
577
578 if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
579 disk = dmz_dev->bdev->bd_disk;
580 if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
581 dmz_dev_warn(dmz_dev, "Backing device queue dying");
582 dmz_dev->flags |= DMZ_BDEV_DYING;
583 } else if (disk->fops->check_events) {
584 if (disk->fops->check_events(disk, 0) &
585 DISK_EVENT_MEDIA_CHANGE) {
586 dmz_dev_warn(dmz_dev, "Backing device offline");
587 dmz_dev->flags |= DMZ_BDEV_DYING;
588 }
589 }
590 }
591
592 return dmz_dev->flags & DMZ_BDEV_DYING;
554} 593}
555 594
556/* 595/*
@@ -564,6 +603,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
564 sector_t sector = bio->bi_iter.bi_sector; 603 sector_t sector = bio->bi_iter.bi_sector;
565 unsigned int nr_sectors = bio_sectors(bio); 604 unsigned int nr_sectors = bio_sectors(bio);
566 sector_t chunk_sector; 605 sector_t chunk_sector;
606 int ret;
607
608 if (dmz_bdev_is_dying(dmz->dev))
609 return DM_MAPIO_KILL;
567 610
568 dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks", 611 dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
569 bio_op(bio), (unsigned long long)sector, nr_sectors, 612 bio_op(bio), (unsigned long long)sector, nr_sectors,
@@ -601,8 +644,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
601 dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector); 644 dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
602 645
603 /* Now ready to handle this BIO */ 646 /* Now ready to handle this BIO */
604 dmz_reclaim_bio_acc(dmz->reclaim); 647 ret = dmz_queue_chunk_work(dmz, bio);
605 dmz_queue_chunk_work(dmz, bio); 648 if (ret) {
649 dmz_dev_debug(dmz->dev,
650 "BIO op %d, can't process chunk %llu, err %i\n",
651 bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
652 ret);
653 return DM_MAPIO_REQUEUE;
654 }
606 655
607 return DM_MAPIO_SUBMITTED; 656 return DM_MAPIO_SUBMITTED;
608} 657}
@@ -855,6 +904,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
855{ 904{
856 struct dmz_target *dmz = ti->private; 905 struct dmz_target *dmz = ti->private;
857 906
907 if (dmz_bdev_is_dying(dmz->dev))
908 return -ENODEV;
909
858 *bdev = dmz->dev->bdev; 910 *bdev = dmz->dev->bdev;
859 911
860 return 0; 912 return 0;
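
Several hunks above switch the zone getters (dmz_get_zone_for_reclaim(), dmz_get_chunk_buffer()) from returning NULL on failure to returning ERR_PTR() values, so callers can propagate the real errno instead of guessing -ENOSPC. A minimal sketch of the calling convention, with a hypothetical get_zone(); this is the generic <linux/err.h> idiom, not the dm-zoned code:

#include <linux/err.h>

struct zone;

/* Hypothetical allocator: NULL is no longer used to signal errors. */
struct zone *get_zone(void);

int use_zone(void)
{
	struct zone *z = get_zone();

	if (IS_ERR(z))
		return PTR_ERR(z);	/* e.g. -EIO from a dying device */

	/* ... operate on z ... */
	return 0;
}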
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index ed8de49c9a08..d8e70b0ade35 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -56,6 +57,8 @@ struct dmz_dev {
56 57
57 unsigned int nr_zones; 58 unsigned int nr_zones;
58 59
60 unsigned int flags;
61
59 sector_t zone_nr_sectors; 62 sector_t zone_nr_sectors;
60 unsigned int zone_nr_sectors_shift; 63 unsigned int zone_nr_sectors_shift;
61 64
@@ -67,6 +70,9 @@ struct dmz_dev {
67 (dev)->zone_nr_sectors_shift) 70 (dev)->zone_nr_sectors_shift)
68#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1)) 71#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
69 72
73/* Device flags. */
74#define DMZ_BDEV_DYING (1 << 0)
75
70/* 76/*
71 * Zone descriptor. 77 * Zone descriptor.
72 */ 78 */
@@ -245,4 +251,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc);
245void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc); 251void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
246void dmz_schedule_reclaim(struct dmz_reclaim *zrc); 252void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
247 253
254/*
255 * Functions defined in dm-zoned-target.c
256 */
257bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
258
248#endif /* DM_ZONED_H */ 259#endif /* DM_ZONED_H */
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 58b319757b1e..8aae0624a297 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
628 628
629 new_parent = shadow_current(s); 629 new_parent = shadow_current(s);
630 630
631 pn = dm_block_data(new_parent);
632 size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
633 sizeof(__le64) : s->info->value_type.size;
634
635 /* create & init the left block */
631 r = new_block(s->info, &left); 636 r = new_block(s->info, &left);
632 if (r < 0) 637 if (r < 0)
633 return r; 638 return r;
634 639
640 ln = dm_block_data(left);
641 nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
642
643 ln->header.flags = pn->header.flags;
644 ln->header.nr_entries = cpu_to_le32(nr_left);
645 ln->header.max_entries = pn->header.max_entries;
646 ln->header.value_size = pn->header.value_size;
647 memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
648 memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
649
650 /* create & init the right block */
635 r = new_block(s->info, &right); 651 r = new_block(s->info, &right);
636 if (r < 0) { 652 if (r < 0) {
637 unlock_block(s->info, left); 653 unlock_block(s->info, left);
638 return r; 654 return r;
639 } 655 }
640 656
641 pn = dm_block_data(new_parent);
642 ln = dm_block_data(left);
643 rn = dm_block_data(right); 657 rn = dm_block_data(right);
644
645 nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
646 nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left; 658 nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
647 659
648 ln->header.flags = pn->header.flags;
649 ln->header.nr_entries = cpu_to_le32(nr_left);
650 ln->header.max_entries = pn->header.max_entries;
651 ln->header.value_size = pn->header.value_size;
652
653 rn->header.flags = pn->header.flags; 660 rn->header.flags = pn->header.flags;
654 rn->header.nr_entries = cpu_to_le32(nr_right); 661 rn->header.nr_entries = cpu_to_le32(nr_right);
655 rn->header.max_entries = pn->header.max_entries; 662 rn->header.max_entries = pn->header.max_entries;
656 rn->header.value_size = pn->header.value_size; 663 rn->header.value_size = pn->header.value_size;
657
658 memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
659 memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0])); 664 memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
660
661 size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
662 sizeof(__le64) : s->info->value_type.size;
663 memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
664 memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left), 665 memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
665 nr_right * size); 666 nr_right * size);
666 667
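
The dm-btree hunk moves all initialization of the left child ahead of the allocation of the right one. Judging from the diff alone, the point is that new_block() can fail after the left block already exists, and the old ordering would then unwind a block whose header had never been written. A hedged sketch of the ordering rule, with hypothetical names throughout:

struct obj;
struct ctx { struct obj *parent; };

int  alloc_obj(struct ctx *c, struct obj **out);		/* hypothetical */
void init_obj_from_parent(struct obj *o, struct obj *parent);	/* hypothetical */
void release_obj(struct ctx *c, struct obj *o);			/* hypothetical */

static int split_into_two(struct ctx *c)
{
	struct obj *left, *right;
	int r;

	r = alloc_obj(c, &left);
	if (r < 0)
		return r;

	/* Fully initialize the first object before the second
	 * allocation gets a chance to fail... */
	init_obj_from_parent(left, c->parent);

	r = alloc_obj(c, &right);
	if (r < 0) {
		release_obj(c, left);	/* ...so this unwinds a valid block */
		return r;
	}

	init_obj_from_parent(right, c->parent);
	return 0;
}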
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index aec449243966..25328582cc48 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm)
249 } 249 }
250 250
251 if (smm->recursion_count == 1) 251 if (smm->recursion_count == 1)
252 apply_bops(smm); 252 r = apply_bops(smm);
253 253
254 smm->recursion_count--; 254 smm->recursion_count--;
255 255
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index 601cefb5c9d8..050478cabc95 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -729,7 +729,7 @@ static int rk808_remove(struct i2c_client *client)
729 return 0; 729 return 0;
730} 730}
731 731
732static int rk8xx_suspend(struct device *dev) 732static int __maybe_unused rk8xx_suspend(struct device *dev)
733{ 733{
734 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); 734 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
735 int ret = 0; 735 int ret = 0;
@@ -749,7 +749,7 @@ static int rk8xx_suspend(struct device *dev)
749 return ret; 749 return ret;
750} 750}
751 751
752static int rk8xx_resume(struct device *dev) 752static int __maybe_unused rk8xx_resume(struct device *dev)
753{ 753{
754 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); 754 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
755 int ret = 0; 755 int ret = 0;
@@ -768,7 +768,7 @@ static int rk8xx_resume(struct device *dev)
768 768
769 return ret; 769 return ret;
770} 770}
771SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume); 771static SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume);
772 772
773static struct i2c_driver rk808_i2c_driver = { 773static struct i2c_driver rk808_i2c_driver = {
774 .driver = { 774 .driver = {
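
The rk808 hunks apply a standard idiom: SIMPLE_DEV_PM_OPS() only wires up the callbacks when CONFIG_PM_SLEEP is enabled, so the handlers are annotated __maybe_unused to silence -Wunused-function on !PM_SLEEP builds, and the ops table is made static since nothing outside the file references it. In skeleton form, for a hypothetical foo driver:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* quiesce hardware */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* reinitialize hardware */
	return 0;
}

/* Expands to an empty sleep-ops table when CONFIG_PM_SLEEP is off. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
/* then: .driver.pm = &foo_pm_ops in the i2c/platform driver struct */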
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 895510d40ce4..47602af4ee34 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -81,6 +81,7 @@ static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *r
81 default: 81 default:
82 printk(KERN_WARNING "SA1100 flash: unknown base address " 82 printk(KERN_WARNING "SA1100 flash: unknown base address "
83 "0x%08lx, assuming CS0\n", phys); 83 "0x%08lx, assuming CS0\n", phys);
84 /* Fall through */
84 85
85 case SA1100_CS0_PHYS: 86 case SA1100_CS0_PHYS:
86 subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4; 87 subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
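
The sa1100 change, like the ab8500 and libsas hunks further down, adds an explicit fall-through comment. GCC's -Wimplicit-fallthrough, which the kernel builds with, recognizes a /* Fall through */ comment placed immediately before the next case label as a statement that the missing break is intentional. The shape of the idiom, with hypothetical states:

#include <linux/printk.h>

enum { STATE_DEFAULT = 0 };

void apply_default_config(void);	/* hypothetical */

static void handle_state(int state)
{
	switch (state) {
	default:
		pr_warn("unknown state %d, assuming default\n", state);
		/* Fall through */
	case STATE_DEFAULT:
		apply_default_config();
		break;
	}
}

Later kernels replace these comments with the fallthrough pseudo-keyword, but in this tree the comment form is what the warning machinery matches.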
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 4f839348011d..26509fa37a50 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -481,6 +481,7 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
481 unsigned long *supported, 481 unsigned long *supported,
482 struct phylink_link_state *state) 482 struct phylink_link_state *state)
483{ 483{
484 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
484 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 485 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
485 486
486 if (!phy_interface_mode_is_rgmii(state->interface) && 487 if (!phy_interface_mode_is_rgmii(state->interface) &&
@@ -490,8 +491,10 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
490 state->interface != PHY_INTERFACE_MODE_INTERNAL && 491 state->interface != PHY_INTERFACE_MODE_INTERNAL &&
491 state->interface != PHY_INTERFACE_MODE_MOCA) { 492 state->interface != PHY_INTERFACE_MODE_MOCA) {
492 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 493 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
493 dev_err(ds->dev, 494 if (port != core_readl(priv, CORE_IMP0_PRT_ID))
494 "Unsupported interface: %d\n", state->interface); 495 dev_err(ds->dev,
496 "Unsupported interface: %d for port %d\n",
497 state->interface, port);
495 return; 498 return;
496 } 499 }
497 500
@@ -529,6 +532,9 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
529 u32 id_mode_dis = 0, port_mode; 532 u32 id_mode_dis = 0, port_mode;
530 u32 reg, offset; 533 u32 reg, offset;
531 534
535 if (port == core_readl(priv, CORE_IMP0_PRT_ID))
536 return;
537
532 if (priv->type == BCM7445_DEVICE_ID) 538 if (priv->type == BCM7445_DEVICE_ID)
533 offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); 539 offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
534 else 540 else
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 497298752381..aca95f64bde8 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -50,7 +50,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
50 u64_stats_fetch_begin(&priv->tx[ring].statss); 50 u64_stats_fetch_begin(&priv->tx[ring].statss);
51 s->tx_packets += priv->tx[ring].pkt_done; 51 s->tx_packets += priv->tx[ring].pkt_done;
52 s->tx_bytes += priv->tx[ring].bytes_done; 52 s->tx_bytes += priv->tx[ring].bytes_done;
53 } while (u64_stats_fetch_retry(&priv->rx[ring].statss, 53 } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
54 start)); 54 start));
55 } 55 }
56 } 56 }
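
The gve fix is a one-character class of bug worth spelling out: u64_stats_fetch_retry() must be given the same syncp that u64_stats_fetch_begin() sampled (here the tx ring's, not the rx ring's), otherwise the retry validates the wrong sequence counter and the loop can return torn 64-bit values on 32-bit systems. The correct pairing, sketched with a hypothetical ring_stats type:

#include <linux/u64_stats_sync.h>

struct ring_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

static void read_ring_stats(const struct ring_stats *rs,
			    u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&rs->syncp);
		*packets = rs->packets;
		*bytes = rs->bytes;
		/* retry must check the same syncp begin() sampled */
	} while (u64_stats_fetch_retry(&rs->syncp, start));
}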
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 8b93101e1a09..7833ddef0427 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -109,13 +109,15 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
109 109
110static void tx_fill_wi(struct mlx5e_txqsq *sq, 110static void tx_fill_wi(struct mlx5e_txqsq *sq,
111 u16 pi, u8 num_wqebbs, 111 u16 pi, u8 num_wqebbs,
112 skb_frag_t *resync_dump_frag) 112 skb_frag_t *resync_dump_frag,
113 u32 num_bytes)
113{ 114{
114 struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; 115 struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
115 116
116 wi->skb = NULL; 117 wi->skb = NULL;
117 wi->num_wqebbs = num_wqebbs; 118 wi->num_wqebbs = num_wqebbs;
118 wi->resync_dump_frag = resync_dump_frag; 119 wi->resync_dump_frag = resync_dump_frag;
120 wi->num_bytes = num_bytes;
119} 121}
120 122
121void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx) 123void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -143,7 +145,7 @@ post_static_params(struct mlx5e_txqsq *sq,
143 145
144 umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi); 146 umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
145 build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence); 147 build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
146 tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL); 148 tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
147 sq->pc += MLX5E_KTLS_STATIC_WQEBBS; 149 sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
148} 150}
149 151
@@ -157,7 +159,7 @@ post_progress_params(struct mlx5e_txqsq *sq,
157 159
158 wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi); 160 wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
159 build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence); 161 build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
160 tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL); 162 tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
161 sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS; 163 sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
162} 164}
163 165
@@ -248,43 +250,37 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
248 mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true); 250 mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
249} 251}
250 252
253struct mlx5e_dump_wqe {
254 struct mlx5_wqe_ctrl_seg ctrl;
255 struct mlx5_wqe_data_seg data;
256};
257
251static int 258static int
252tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb, 259tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
253 skb_frag_t *frag, u32 tisn, bool first) 260 skb_frag_t *frag, u32 tisn, bool first)
254{ 261{
255 struct mlx5_wqe_ctrl_seg *cseg; 262 struct mlx5_wqe_ctrl_seg *cseg;
256 struct mlx5_wqe_eth_seg *eseg;
257 struct mlx5_wqe_data_seg *dseg; 263 struct mlx5_wqe_data_seg *dseg;
258 struct mlx5e_tx_wqe *wqe; 264 struct mlx5e_dump_wqe *wqe;
259 dma_addr_t dma_addr = 0; 265 dma_addr_t dma_addr = 0;
260 u16 ds_cnt, ds_cnt_inl;
261 u8 num_wqebbs; 266 u8 num_wqebbs;
262 u16 pi, ihs; 267 u16 ds_cnt;
263 int fsz; 268 int fsz;
264 269 u16 pi;
265 ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
266 ihs = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
267 ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
268 ds_cnt += ds_cnt_inl;
269 ds_cnt += 1; /* one frag */
270 270
271 wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi); 271 wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
272 272
273 ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
273 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); 274 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
274 275
275 cseg = &wqe->ctrl; 276 cseg = &wqe->ctrl;
276 eseg = &wqe->eth; 277 dseg = &wqe->data;
277 dseg = wqe->data;
278 278
279 cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP); 279 cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
280 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); 280 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
281 cseg->tisn = cpu_to_be32(tisn << 8); 281 cseg->tisn = cpu_to_be32(tisn << 8);
282 cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; 282 cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
283 283
284 eseg->inline_hdr.sz = cpu_to_be16(ihs);
285 memcpy(eseg->inline_hdr.start, skb->data, ihs);
286 dseg += ds_cnt_inl;
287
288 fsz = skb_frag_size(frag); 284 fsz = skb_frag_size(frag);
289 dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, 285 dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
290 DMA_TO_DEVICE); 286 DMA_TO_DEVICE);
@@ -296,7 +292,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
296 dseg->byte_count = cpu_to_be32(fsz); 292 dseg->byte_count = cpu_to_be32(fsz);
297 mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); 293 mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
298 294
299 tx_fill_wi(sq, pi, num_wqebbs, frag); 295 tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
300 sq->pc += num_wqebbs; 296 sq->pc += num_wqebbs;
301 297
302 WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS, 298 WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
@@ -323,7 +319,7 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
323 struct mlx5_wq_cyc *wq = &sq->wq; 319 struct mlx5_wq_cyc *wq = &sq->wq;
324 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); 320 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
325 321
326 tx_fill_wi(sq, pi, 1, NULL); 322 tx_fill_wi(sq, pi, 1, NULL, 0);
327 323
328 mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc); 324 mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
329} 325}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 9314777d99e3..d685122d9ff7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -590,7 +590,8 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
590 data_size = crdump_size - offset; 590 data_size = crdump_size - offset;
591 else 591 else
592 data_size = MLX5_CR_DUMP_CHUNK_SIZE; 592 data_size = MLX5_CR_DUMP_CHUNK_SIZE;
593 err = devlink_fmsg_binary_put(fmsg, cr_data, data_size); 593 err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset,
594 data_size);
594 if (err) 595 if (err)
595 goto free_data; 596 goto free_data;
596 } 597 }
@@ -700,6 +701,16 @@ static void poll_health(struct timer_list *t)
700 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) 701 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
701 goto out; 702 goto out;
702 703
704 fatal_error = check_fatal_sensors(dev);
705
706 if (fatal_error && !health->fatal_error) {
707 mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
708 dev->priv.health.fatal_error = fatal_error;
709 print_health_info(dev);
710 mlx5_trigger_health_work(dev);
711 goto out;
712 }
713
703 count = ioread32be(health->health_counter); 714 count = ioread32be(health->health_counter);
704 if (count == health->prev) 715 if (count == health->prev)
705 ++health->miss_counter; 716 ++health->miss_counter;
@@ -718,15 +729,6 @@ static void poll_health(struct timer_list *t)
718 if (health->synd && health->synd != prev_synd) 729 if (health->synd && health->synd != prev_synd)
719 queue_work(health->wq, &health->report_work); 730 queue_work(health->wq, &health->report_work);
720 731
721 fatal_error = check_fatal_sensors(dev);
722
723 if (fatal_error && !health->fatal_error) {
724 mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
725 dev->priv.health.fatal_error = fatal_error;
726 print_health_info(dev);
727 mlx5_trigger_health_work(dev);
728 }
729
730out: 732out:
731 mod_timer(&health->timer, get_next_poll_jiffies()); 733 mod_timer(&health->timer, get_next_poll_jiffies());
732} 734}
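
Two fixes share the health.c hunk: the poll reordering runs the fatal-sensor check before touching the health counter so a fatal state short-circuits the poll, and the crdump change fixes a classic chunking bug, where each iteration must put cr_data + offset rather than the start of the buffer, or every chunk repeats the first MLX5_CR_DUMP_CHUNK_SIZE bytes. A generic sketch of the corrected loop, with a hypothetical sink_put():

#include <linux/kernel.h>
#include <linux/types.h>

int sink_put(void *sink, const void *buf, u32 len);	/* hypothetical */

static int put_in_chunks(void *sink, const char *buf, u32 len, u32 chunk)
{
	u32 offset;
	int err;

	for (offset = 0; offset < len; offset += chunk) {
		u32 n = min(chunk, len - offset);

		err = sink_put(sink, buf + offset, n);	/* advance into buf */
		if (err)
			return err;
	}
	return 0;
}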
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c
index 39aca1ab4687..86fc6e6b46dd 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.c
+++ b/drivers/net/ethernet/mscc/ocelot_ace.c
@@ -317,7 +317,7 @@ static void is2_action_set(struct vcap_data *data,
317 break; 317 break;
318 case OCELOT_ACL_ACTION_TRAP: 318 case OCELOT_ACL_ACTION_TRAP:
319 VCAP_ACT_SET(PORT_MASK, 0x0); 319 VCAP_ACT_SET(PORT_MASK, 0x0);
320 VCAP_ACT_SET(MASK_MODE, 0x0); 320 VCAP_ACT_SET(MASK_MODE, 0x1);
321 VCAP_ACT_SET(POLICE_ENA, 0x0); 321 VCAP_ACT_SET(POLICE_ENA, 0x0);
322 VCAP_ACT_SET(POLICE_IDX, 0x0); 322 VCAP_ACT_SET(POLICE_IDX, 0x0);
323 VCAP_ACT_SET(CPU_QU_NUM, 0x0); 323 VCAP_ACT_SET(CPU_QU_NUM, 0x0);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 3a4f4f042ae7..b0708460e342 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1586,6 +1586,13 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
1586 1586
1587 switch (f->command) { 1587 switch (f->command) {
1588 case FLOW_BLOCK_BIND: 1588 case FLOW_BLOCK_BIND:
1589 cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
1590 if (cb_priv &&
1591 flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
1592 cb_priv,
1593 &nfp_block_cb_list))
1594 return -EBUSY;
1595
1589 cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL); 1596 cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
1590 if (!cb_priv) 1597 if (!cb_priv)
1591 return -ENOMEM; 1598 return -ENOMEM;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 0a7645937412..7891f8c5a1bc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1327,7 +1327,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
1327 &drv_version); 1327 &drv_version);
1328 if (rc) { 1328 if (rc) {
1329 DP_NOTICE(cdev, "Failed sending drv version command\n"); 1329 DP_NOTICE(cdev, "Failed sending drv version command\n");
1330 return rc; 1330 goto err4;
1331 } 1331 }
1332 } 1332 }
1333 1333
@@ -1335,6 +1335,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
1335 1335
1336 return 0; 1336 return 0;
1337 1337
1338err4:
1339 qed_ll2_dealloc_if(cdev);
1338err3: 1340err3:
1339 qed_hw_stop(cdev); 1341 qed_hw_stop(cdev);
1340err2: 1342err2:
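
The qed fix, and the mac802154_hwsim one below it, are both about goto-style unwinding: on a late failure, jump to a label that frees exactly what has been set up so far, and keep the labels in reverse order of setup so each one falls through into the next. A skeleton of the pattern, with hypothetical steps:

static int start_all(struct dev_ctx *d)	/* dev_ctx is hypothetical */
{
	int rc;

	rc = step_a(d);
	if (rc)
		return rc;		/* nothing to undo yet */
	rc = step_b(d);
	if (rc)
		goto err_a;
	rc = step_c(d);			/* late failure, like the drv
					 * version command above */
	if (rc)
		goto err_b;
	return 0;

err_b:
	undo_b(d);			/* falls through: reverse order */
err_a:
	undo_a(d);
	return rc;
}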
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 54010957da5c..f298d714efd6 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2775,6 +2775,7 @@ static int cpsw_probe(struct platform_device *pdev)
2775 if (!cpsw) 2775 if (!cpsw)
2776 return -ENOMEM; 2776 return -ENOMEM;
2777 2777
2778 platform_set_drvdata(pdev, cpsw);
2778 cpsw->dev = dev; 2779 cpsw->dev = dev;
2779 2780
2780 mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW); 2781 mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
@@ -2878,7 +2879,6 @@ static int cpsw_probe(struct platform_device *pdev)
2878 goto clean_cpts; 2879 goto clean_cpts;
2879 } 2880 }
2880 2881
2881 platform_set_drvdata(pdev, cpsw);
2882 priv = netdev_priv(ndev); 2882 priv = netdev_priv(ndev);
2883 priv->cpsw = cpsw; 2883 priv->cpsw = cpsw;
2884 priv->ndev = ndev; 2884 priv->ndev = ndev;
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index b41696e16bdc..c20e7ef18bc9 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -802,7 +802,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
802 err = hwsim_subscribe_all_others(phy); 802 err = hwsim_subscribe_all_others(phy);
803 if (err < 0) { 803 if (err < 0) {
804 mutex_unlock(&hwsim_phys_lock); 804 mutex_unlock(&hwsim_phys_lock);
805 goto err_reg; 805 goto err_subscribe;
806 } 806 }
807 } 807 }
808 list_add_tail(&phy->list, &hwsim_phys); 808 list_add_tail(&phy->list, &hwsim_phys);
@@ -812,6 +812,8 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
812 812
813 return idx; 813 return idx;
814 814
815err_subscribe:
816 ieee802154_unregister_hw(phy->hw);
815err_reg: 817err_reg:
816 kfree(pib); 818 kfree(pib);
817err_pib: 819err_pib:
@@ -901,9 +903,9 @@ static __init int hwsim_init_module(void)
901 return 0; 903 return 0;
902 904
903platform_drv: 905platform_drv:
904 genl_unregister_family(&hwsim_genl_family);
905platform_dev:
906 platform_device_unregister(mac802154hwsim_dev); 906 platform_device_unregister(mac802154hwsim_dev);
907platform_dev:
908 genl_unregister_family(&hwsim_genl_family);
907 return rc; 909 return rc;
908} 910}
909 911
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 17f0e9e98697..c6fa0c17c13d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -812,8 +812,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
812 ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0), 812 ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
813 RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, 813 RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
814 value, index, tmp, size, 500); 814 value, index, tmp, size, 500);
815 if (ret < 0)
816 memset(data, 0xff, size);
817 else
818 memcpy(data, tmp, size);
815 819
816 memcpy(data, tmp, size);
817 kfree(tmp); 820 kfree(tmp);
818 821
819 return ret; 822 return ret;
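
The r8152 change stops get_registers() from copying a never-written bounce buffer back to the caller when the control transfer fails; poisoning the output with 0xff makes the failure obvious instead of leaking heap contents. Roughly the same shape, with a hypothetical vendor request and helper name:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

static int read_regs(struct usb_device *udev, u8 request, void *data, u16 size)
{
	void *tmp;
	int ret;

	tmp = kmalloc(size, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, 0, tmp, size, 500);
	if (ret < 0)
		memset(data, 0xff, size);	/* tmp was never written */
	else
		memcpy(data, tmp, size);

	kfree(tmp);
	return ret;
}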
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index cb22d447fcb8..fe776e35b9d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -554,7 +554,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
554 cpu_to_le32(vif->bss_conf.use_short_slot ? 554 cpu_to_le32(vif->bss_conf.use_short_slot ?
555 MAC_FLG_SHORT_SLOT : 0); 555 MAC_FLG_SHORT_SLOT : 0);
556 556
557 cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP); 557 cmd->filter_flags = 0;
558 558
559 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 559 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
560 u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i); 560 u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
@@ -623,6 +623,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
623 /* We need the dtim_period to set the MAC as associated */ 623 /* We need the dtim_period to set the MAC as associated */
624 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && 624 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
625 !force_assoc_off) { 625 !force_assoc_off) {
626 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
627 u8 ap_sta_id = mvmvif->ap_sta_id;
626 u32 dtim_offs; 628 u32 dtim_offs;
627 629
628 /* 630 /*
@@ -658,6 +660,29 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
658 dtim_offs); 660 dtim_offs);
659 661
660 ctxt_sta->is_assoc = cpu_to_le32(1); 662 ctxt_sta->is_assoc = cpu_to_le32(1);
663
664 /*
665 * allow multicast data frames only as long as the station is
666 * authorized, i.e., GTK keys are already installed (if needed)
667 */
668 if (ap_sta_id < IWL_MVM_STATION_COUNT) {
669 struct ieee80211_sta *sta;
670
671 rcu_read_lock();
672
673 sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
674 if (!IS_ERR_OR_NULL(sta)) {
675 struct iwl_mvm_sta *mvmsta =
676 iwl_mvm_sta_from_mac80211(sta);
677
678 if (mvmsta->sta_state ==
679 IEEE80211_STA_AUTHORIZED)
680 cmd.filter_flags |=
681 cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
682 }
683
684 rcu_read_unlock();
685 }
661 } else { 686 } else {
662 ctxt_sta->is_assoc = cpu_to_le32(0); 687 ctxt_sta->is_assoc = cpu_to_le32(0);
663 688
@@ -703,7 +728,8 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
703 MAC_FILTER_IN_CONTROL_AND_MGMT | 728 MAC_FILTER_IN_CONTROL_AND_MGMT |
704 MAC_FILTER_IN_BEACON | 729 MAC_FILTER_IN_BEACON |
705 MAC_FILTER_IN_PROBE_REQUEST | 730 MAC_FILTER_IN_PROBE_REQUEST |
706 MAC_FILTER_IN_CRC32); 731 MAC_FILTER_IN_CRC32 |
732 MAC_FILTER_ACCEPT_GRP);
707 ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); 733 ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
708 734
709 /* Allocate sniffer station */ 735 /* Allocate sniffer station */
@@ -727,7 +753,8 @@ static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
727 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); 753 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
728 754
729 cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON | 755 cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
730 MAC_FILTER_IN_PROBE_REQUEST); 756 MAC_FILTER_IN_PROBE_REQUEST |
757 MAC_FILTER_ACCEPT_GRP);
731 758
732	 /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */ 759	 /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
733 cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int); 760 cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index b74bd58f3f45..d6499763f0dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -3327,10 +3327,20 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
3327 /* enable beacon filtering */ 3327 /* enable beacon filtering */
3328 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 3328 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
3329 3329
3330 /*
3331 * Now that the station is authorized, i.e., keys were already
3332 * installed, need to indicate to the FW that
3333 * multicast data frames can be forwarded to the driver
3334 */
3335 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3336
3330 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3337 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
3331 true); 3338 true);
3332 } else if (old_state == IEEE80211_STA_AUTHORIZED && 3339 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
3333 new_state == IEEE80211_STA_ASSOC) { 3340 new_state == IEEE80211_STA_ASSOC) {
3341 /* Multicast data frames are no longer allowed */
3342 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3343
3334 /* disable beacon filtering */ 3344 /* disable beacon filtering */
3335 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 3345 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3336 WARN_ON(ret && 3346 WARN_ON(ret &&
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index de711c1160d3..7c5aaeaf7fe5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1063,6 +1063,23 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1063 else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) 1063 else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
1064 iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0; 1064 iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
1065 } 1065 }
1066
1067 /* same thing for QuZ... */
1068 if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
1069 if (cfg == &iwl_ax101_cfg_qu_hr)
1070 cfg = &iwl_ax101_cfg_quz_hr;
1071 else if (cfg == &iwl_ax201_cfg_qu_hr)
1072 cfg = &iwl_ax201_cfg_quz_hr;
1073 else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
1074 cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
1075 else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
1076 cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
1077 else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
1078 cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
1079 else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
1080 cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
1081 }
1082
1066#endif 1083#endif
1067 1084
1068 pci_set_drvdata(pdev, iwl_trans); 1085 pci_set_drvdata(pdev, iwl_trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f5df5b370d78..935e35dafce5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3603,6 +3603,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3603 } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == 3603 } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3604 CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && 3604 CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
3605 ((trans->cfg != &iwl_ax200_cfg_cc && 3605 ((trans->cfg != &iwl_ax200_cfg_cc &&
3606 trans->cfg != &iwl_ax201_cfg_qu_hr &&
3606 trans->cfg != &killer1650x_2ax_cfg && 3607 trans->cfg != &killer1650x_2ax_cfg &&
3607 trans->cfg != &killer1650w_2ax_cfg && 3608 trans->cfg != &killer1650w_2ax_cfg &&
3608 trans->cfg != &iwl_ax201_cfg_quz_hr) || 3609 trans->cfg != &iwl_ax201_cfg_quz_hr) ||
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 38d110338987..9ef6b8fe03c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -99,10 +99,7 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
99 u16 len = byte_cnt; 99 u16 len = byte_cnt;
100 __le16 bc_ent; 100 __le16 bc_ent;
101 101
102 if (trans_pcie->bc_table_dword) 102 if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
103 len = DIV_ROUND_UP(len, 4);
104
105 if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
106 return; 103 return;
107 104
108 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + 105 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
@@ -117,11 +114,20 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
117 */ 114 */
118 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; 115 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
119 116
120 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); 117 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
121 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) 118 /* Starting from 22560, the HW expects bytes */
119 WARN_ON(trans_pcie->bc_table_dword);
120 WARN_ON(len > 0x3FFF);
121 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
122 scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent; 122 scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
123 else 123 } else {
124 /* Until 22560, the HW expects DW */
125 WARN_ON(!trans_pcie->bc_table_dword);
126 len = DIV_ROUND_UP(len, 4);
127 WARN_ON(len > 0xFFF);
128 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
124 scd_bc_tbl->tfd_offset[idx] = bc_ent; 129 scd_bc_tbl->tfd_offset[idx] = bc_ent;
130 }
125} 131}
126 132
127/* 133/*
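
The tx-gen2.c rewrite makes the two byte-count-table formats explicit instead of sharing one shift: from device family 22560 on, the hardware takes the length in bytes in a 14-bit field, while older devices take dword units in a 12-bit field, with the fetch-chunk count above it in both cases. The encoding restated as a helper; the constants come from the hunk, the helper itself is illustrative:

#include <linux/kernel.h>
#include <linux/types.h>

static u16 encode_bc_ent(u16 len_bytes, u8 num_fetch_chunks,
			 bool family_ge_22560)
{
	if (family_ge_22560)
		/* 22560+: bytes, 14-bit length field */
		return len_bytes | (num_fetch_chunks << 14);

	/* older: dwords, 12-bit length field */
	return DIV_ROUND_UP(len_bytes, 4) | (num_fetch_chunks << 12);
}

The WARN_ONs in the patch bound len to 0x3FFF and 0xFFF respectively, matching the field widths.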
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 627ed1fc7b15..645f4d15fb61 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -136,11 +136,11 @@ static const struct ieee80211_ops mt76x0u_ops = {
136 .release_buffered_frames = mt76_release_buffered_frames, 136 .release_buffered_frames = mt76_release_buffered_frames,
137}; 137};
138 138
139static int mt76x0u_init_hardware(struct mt76x02_dev *dev) 139static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
140{ 140{
141 int err; 141 int err;
142 142
143 mt76x0_chip_onoff(dev, true, true); 143 mt76x0_chip_onoff(dev, true, reset);
144 144
145 if (!mt76x02_wait_for_mac(&dev->mt76)) 145 if (!mt76x02_wait_for_mac(&dev->mt76))
146 return -ETIMEDOUT; 146 return -ETIMEDOUT;
@@ -173,7 +173,7 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
173 if (err < 0) 173 if (err < 0)
174 goto out_err; 174 goto out_err;
175 175
176 err = mt76x0u_init_hardware(dev); 176 err = mt76x0u_init_hardware(dev, true);
177 if (err < 0) 177 if (err < 0)
178 goto out_err; 178 goto out_err;
179 179
@@ -309,7 +309,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
309 if (ret < 0) 309 if (ret < 0)
310 goto err; 310 goto err;
311 311
312 ret = mt76x0u_init_hardware(dev); 312 ret = mt76x0u_init_hardware(dev, false);
313 if (ret) 313 if (ret)
314 goto err; 314 goto err;
315 315
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index c9b957ac5733..ecbe78b8027b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -6095,6 +6095,15 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
6095 } 6095 }
6096 6096
6097 /* 6097 /*
6098 * Clear encryption initialization vectors on start, but keep them
6099 * for watchdog reset. Otherwise we will have wrong IVs and not be
6100 * able to keep connections after reset.
6101 */
6102 if (!test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags))
6103 for (i = 0; i < 256; i++)
6104 rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
6105
6106 /*
6098 * Clear all beacons 6107 * Clear all beacons
6099 */ 6108 */
6100 for (i = 0; i < 8; i++) 6109 for (i = 0; i < 8; i++)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 7e43690a861c..2b216edd0c7d 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -658,6 +658,7 @@ enum rt2x00_state_flags {
658 DEVICE_STATE_ENABLED_RADIO, 658 DEVICE_STATE_ENABLED_RADIO,
659 DEVICE_STATE_SCANNING, 659 DEVICE_STATE_SCANNING,
660 DEVICE_STATE_FLUSHING, 660 DEVICE_STATE_FLUSHING,
661 DEVICE_STATE_RESET,
661 662
662 /* 663 /*
663 * Driver configuration 664 * Driver configuration
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 35414f97a978..9d158237ac67 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1256,13 +1256,14 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
1256 1256
1257int rt2x00lib_start(struct rt2x00_dev *rt2x00dev) 1257int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
1258{ 1258{
1259 int retval; 1259 int retval = 0;
1260 1260
1261 if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) { 1261 if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
1262 /* 1262 /*
1263 * This is special case for ieee80211_restart_hw(), otherwise 1263 * This is special case for ieee80211_restart_hw(), otherwise
1264 * mac80211 never call start() two times in row without stop(); 1264 * mac80211 never call start() two times in row without stop();
1265 */ 1265 */
1266 set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
1266 rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev); 1267 rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
1267 rt2x00lib_stop(rt2x00dev); 1268 rt2x00lib_stop(rt2x00dev);
1268 } 1269 }
@@ -1273,14 +1274,14 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
1273 */ 1274 */
1274 retval = rt2x00lib_load_firmware(rt2x00dev); 1275 retval = rt2x00lib_load_firmware(rt2x00dev);
1275 if (retval) 1276 if (retval)
1276 return retval; 1277 goto out;
1277 1278
1278 /* 1279 /*
1279 * Initialize the device. 1280 * Initialize the device.
1280 */ 1281 */
1281 retval = rt2x00lib_initialize(rt2x00dev); 1282 retval = rt2x00lib_initialize(rt2x00dev);
1282 if (retval) 1283 if (retval)
1283 return retval; 1284 goto out;
1284 1285
1285 rt2x00dev->intf_ap_count = 0; 1286 rt2x00dev->intf_ap_count = 0;
1286 rt2x00dev->intf_sta_count = 0; 1287 rt2x00dev->intf_sta_count = 0;
@@ -1289,11 +1290,13 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
1289 /* Enable the radio */ 1290 /* Enable the radio */
1290 retval = rt2x00lib_enable_radio(rt2x00dev); 1291 retval = rt2x00lib_enable_radio(rt2x00dev);
1291 if (retval) 1292 if (retval)
1292 return retval; 1293 goto out;
1293 1294
1294 set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags); 1295 set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
1295 1296
1296 return 0; 1297out:
1298 clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
1299 return retval;
1297} 1300}
1298 1301
1299void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev) 1302void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
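
The rt2x00 hunks thread a DEVICE_STATE_RESET flag through the restart path: set before pre_reset_hw()/stop, cleared on every exit from start, so init code such as rt2800_init_registers() can tell a watchdog-driven warm restart (keep the IV entries) from a cold start (clear them). Stripped to its shape, with hypothetical names:

#include <linux/bitops.h>

enum { STATE_RESET = 0 };

struct dev_state { unsigned long flags; };

void stop_device(struct dev_state *ds);		/* hypothetical */
int  start_device(struct dev_state *ds);	/* hypothetical */

static int restart_device(struct dev_state *ds)
{
	int ret;

	set_bit(STATE_RESET, &ds->flags);	/* init path can test this */
	stop_device(ds);
	ret = start_device(ds);
	clear_bit(STATE_RESET, &ds->flags);	/* clear on success and error */

	return ret;
}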
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c258a1ce4b28..d3d6b7bd6903 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2257,6 +2257,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
2257 .vid = 0x1179, 2257 .vid = 0x1179,
2258 .mn = "THNSF5256GPUK TOSHIBA", 2258 .mn = "THNSF5256GPUK TOSHIBA",
2259 .quirks = NVME_QUIRK_NO_APST, 2259 .quirks = NVME_QUIRK_NO_APST,
2260 },
2261 {
2262 /*
2263 * This LiteON CL1-3D*-Q11 firmware version has a race
2264 * condition associated with actions related to suspend to idle
2265 * LiteON has resolved the problem in future firmware
2266 */
2267 .vid = 0x14a4,
2268 .fr = "22301111",
2269 .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2260 } 2270 }
2261}; 2271};
2262 2272
@@ -2597,6 +2607,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
2597 goto out_free; 2607 goto out_free;
2598 } 2608 }
2599 2609
2610 if (!(ctrl->ops->flags & NVME_F_FABRICS))
2611 ctrl->cntlid = le16_to_cpu(id->cntlid);
2612
2600 if (!ctrl->identified) { 2613 if (!ctrl->identified) {
2601 int i; 2614 int i;
2602 2615
@@ -2697,7 +2710,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
2697 goto out_free; 2710 goto out_free;
2698 } 2711 }
2699 } else { 2712 } else {
2700 ctrl->cntlid = le16_to_cpu(id->cntlid);
2701 ctrl->hmpre = le32_to_cpu(id->hmpre); 2713 ctrl->hmpre = le32_to_cpu(id->hmpre);
2702 ctrl->hmmin = le32_to_cpu(id->hmmin); 2714 ctrl->hmmin = le32_to_cpu(id->hmmin);
2703 ctrl->hmminds = le32_to_cpu(id->hmminds); 2715 ctrl->hmminds = le32_to_cpu(id->hmminds);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 888d4543894e..af831d3d15d0 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -428,6 +428,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
428 srcu_read_unlock(&head->srcu, srcu_idx); 428 srcu_read_unlock(&head->srcu, srcu_idx);
429 } 429 }
430 430
431 synchronize_srcu(&ns->head->srcu);
431 kblockd_schedule_work(&ns->head->requeue_work); 432 kblockd_schedule_work(&ns->head->requeue_work);
432} 433}
433 434
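
The one-line multipath fix inserts synchronize_srcu() between updating the path state and kicking the requeue work, so every srcu_read_lock() reader that might still see the old current path has drained first. The general publish-then-synchronize shape, with hypothetical head/path types rather than the nvme structures:

#include <linux/srcu.h>

struct path;

struct head {
	struct srcu_struct srcu;
	struct path __rcu *current_path;
};

void kick_requeue(struct head *h);	/* hypothetical */

static void set_live_path(struct head *h, struct path *p)
{
	rcu_assign_pointer(h->current_path, p);
	synchronize_srcu(&h->srcu);	/* all old-path readers are done */
	kick_requeue(h);		/* safe to requeue on the new path */
}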
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 778b3a0b6adb..2d678fb968c7 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -92,6 +92,11 @@ enum nvme_quirks {
92 * Broken Write Zeroes. 92 * Broken Write Zeroes.
93 */ 93 */
94 NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9), 94 NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9),
95
96 /*
97 * Force simple suspend/resume path.
98 */
99 NVME_QUIRK_SIMPLE_SUSPEND = (1 << 10),
95}; 100};
96 101
97/* 102/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6bd9b1033965..732d5b63ec05 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2876,7 +2876,8 @@ static int nvme_suspend(struct device *dev)
2876 * state (which may not be possible if the link is up). 2876 * state (which may not be possible if the link is up).
2877 */ 2877 */
2878 if (pm_suspend_via_firmware() || !ctrl->npss || 2878 if (pm_suspend_via_firmware() || !ctrl->npss ||
2879 !pcie_aspm_enabled(pdev)) { 2879 !pcie_aspm_enabled(pdev) ||
2880 (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) {
2880 nvme_dev_disable(ndev, true); 2881 nvme_dev_disable(ndev, true);
2881 return 0; 2882 return 0;
2882 } 2883 }
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 208aacf39329..44c4ae1abd00 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5256,7 +5256,7 @@ static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
5256 */ 5256 */
5257 if (ioread32(map + 0x2240c) & 0x2) { 5257 if (ioread32(map + 0x2240c) & 0x2) {
5258 pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n"); 5258 pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
5259 ret = pci_reset_function(pdev); 5259 ret = pci_reset_bus(pdev);
5260 if (ret < 0) 5260 if (ret < 0)
5261 pci_err(pdev, "Failed to reset GPU: %d\n", ret); 5261 pci_err(pdev, "Failed to reset GPU: %d\n", ret);
5262 } 5262 }
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index e504d255d5ce..430731cdf827 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -707,7 +707,7 @@ static int cros_ec_ishtp_reset(struct ishtp_cl_device *cl_device)
707 */ 707 */
708static int __maybe_unused cros_ec_ishtp_suspend(struct device *device) 708static int __maybe_unused cros_ec_ishtp_suspend(struct device *device)
709{ 709{
710 struct ishtp_cl_device *cl_device = dev_get_drvdata(device); 710 struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
711 struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device); 711 struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
712 struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl); 712 struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
713 713
@@ -722,7 +722,7 @@ static int __maybe_unused cros_ec_ishtp_suspend(struct device *device)
722 */ 722 */
723static int __maybe_unused cros_ec_ishtp_resume(struct device *device) 723static int __maybe_unused cros_ec_ishtp_resume(struct device *device)
724{ 724{
725 struct ishtp_cl_device *cl_device = dev_get_drvdata(device); 725 struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
726 struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device); 726 struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
727 struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl); 727 struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
728 728
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 30de448de802..86d88aec94a1 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -742,6 +742,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
742 USB_CH_IP_CUR_LVL_1P5; 742 USB_CH_IP_CUR_LVL_1P5;
743 break; 743 break;
744 } 744 }
745 /* Else, fall through */
745 case USB_STAT_HM_IDGND: 746 case USB_STAT_HM_IDGND:
746 dev_err(di->dev, "USB Type - Charging not allowed\n"); 747 dev_err(di->dev, "USB Type - Charging not allowed\n");
747 di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05; 748 di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 8b4ea5f2832b..a7868c8133ee 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4410,6 +4410,10 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4410 get_user(req_len, &ureq->hdr.req_len)) 4410 get_user(req_len, &ureq->hdr.req_len))
4411 return -EFAULT; 4411 return -EFAULT;
4412 4412
4413 /* Sanitize user input, to avoid overflows in iob size calculation: */
4414 if (req_len > QETH_BUFSIZE)
4415 return -EINVAL;
4416
4413 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len); 4417 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4414 if (!iob) 4418 if (!iob)
4415 return -ENOMEM; 4419 return -ENOMEM;
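
The qeth change bounds a user-supplied length before it feeds buffer-size arithmetic; the exact overflow lives in the IPA command buffer sizing, but the shape is general. A sketch of the bound-before-allocate rule, with hypothetical limits and helper names:

#include <linux/slab.h>
#include <linux/uaccess.h>

#define MAX_REQ_LEN	4096	/* hypothetical bound; QETH_BUFSIZE above */
#define HDR_LEN		64	/* hypothetical header size */

static int handle_user_req(u16 __user *ulen)
{
	u16 req_len;
	void *iob;

	if (get_user(req_len, ulen))
		return -EFAULT;

	/* Bound user input before it enters size arithmetic. */
	if (req_len > MAX_REQ_LEN)
		return -EINVAL;

	iob = kzalloc(HDR_LEN + req_len, GFP_KERNEL);
	if (!iob)
		return -ENOMEM;

	/* ... copy in the request and issue the command ... */
	kfree(iob);
	return 0;
}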
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index abcad097ff2f..f47b4b281b14 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -459,6 +459,7 @@ static void sas_discover_domain(struct work_struct *work)
459 pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n"); 459 pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
460 /* Fall through */ 460 /* Fall through */
461#endif 461#endif
462 /* Fall through - only for the #else condition above. */
462 default: 463 default:
463 error = -ENXIO; 464 error = -ENXIO;
464 pr_err("unhandled device %d\n", dev->dev_type); 465 pr_err("unhandled device %d\n", dev->dev_type);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 2c3bb8a966e5..bade2e025ecf 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -824,6 +824,7 @@ struct lpfc_hba {
824 uint32_t cfg_cq_poll_threshold; 824 uint32_t cfg_cq_poll_threshold;
825 uint32_t cfg_cq_max_proc_limit; 825 uint32_t cfg_cq_max_proc_limit;
826 uint32_t cfg_fcp_cpu_map; 826 uint32_t cfg_fcp_cpu_map;
827 uint32_t cfg_fcp_mq_threshold;
827 uint32_t cfg_hdw_queue; 828 uint32_t cfg_hdw_queue;
828 uint32_t cfg_irq_chann; 829 uint32_t cfg_irq_chann;
829 uint32_t cfg_suppress_rsp; 830 uint32_t cfg_suppress_rsp;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index ea62322ffe2b..8d8c495b5b60 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5709,6 +5709,19 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
5709 "Embed NVME Command in WQE"); 5709 "Embed NVME Command in WQE");
5710 5710
5711/* 5711/*
5712 * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
5713 * the driver will advertise it supports to the SCSI layer.
5714 *
5715 * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
5716	 * 1,128 = Manually specify the maximum nr_hw_queue value to be set.
5717 *
5718 * Value range is [0,128]. Default value is 8.
5719 */
5720LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
5721 LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
5722 "Set the number of SCSI Queues advertised");
5723
5724/*
5712 * lpfc_hdw_queue: Set the number of Hardware Queues the driver 5725 * lpfc_hdw_queue: Set the number of Hardware Queues the driver
5713 * will advertise it supports to the NVME and SCSI layers. This also 5726 * will advertise it supports to the NVME and SCSI layers. This also
5714 * will map to the number of CQ/WQ pairs the driver will create. 5727 * will map to the number of CQ/WQ pairs the driver will create.
@@ -6030,6 +6043,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
6030 &dev_attr_lpfc_cq_poll_threshold, 6043 &dev_attr_lpfc_cq_poll_threshold,
6031 &dev_attr_lpfc_cq_max_proc_limit, 6044 &dev_attr_lpfc_cq_max_proc_limit,
6032 &dev_attr_lpfc_fcp_cpu_map, 6045 &dev_attr_lpfc_fcp_cpu_map,
6046 &dev_attr_lpfc_fcp_mq_threshold,
6033 &dev_attr_lpfc_hdw_queue, 6047 &dev_attr_lpfc_hdw_queue,
6034 &dev_attr_lpfc_irq_chann, 6048 &dev_attr_lpfc_irq_chann,
6035 &dev_attr_lpfc_suppress_rsp, 6049 &dev_attr_lpfc_suppress_rsp,
@@ -7112,6 +7126,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
7112 /* Initialize first burst. Target vs Initiator are different. */ 7126 /* Initialize first burst. Target vs Initiator are different. */
7113 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); 7127 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
7114 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); 7128 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
7129 lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
7115 lpfc_hdw_queue_init(phba, lpfc_hdw_queue); 7130 lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
7116 lpfc_irq_chann_init(phba, lpfc_irq_chann); 7131 lpfc_irq_chann_init(phba, lpfc_irq_chann);
7117 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr); 7132 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a7549ae32542..1ac98becb5ba 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4309,10 +4309,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
         shost->max_cmd_len = 16;

         if (phba->sli_rev == LPFC_SLI_REV4) {
-                if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
-                        shost->nr_hw_queues = phba->cfg_hdw_queue;
-                else
-                        shost->nr_hw_queues = phba->sli4_hba.num_present_cpu;
+                if (!phba->cfg_fcp_mq_threshold ||
+                    phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
+                        phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
+
+                shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
+                                            phba->cfg_fcp_mq_threshold);

                 shost->dma_boundary =
                         phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 3aeca387b22a..329f7aa7e169 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -44,6 +44,11 @@
 #define LPFC_HBA_HDWQ_MAX        128
 #define LPFC_HBA_HDWQ_DEF        0

+/* FCP MQ queue count limiting */
+#define LPFC_FCP_MQ_THRESHOLD_MIN        0
+#define LPFC_FCP_MQ_THRESHOLD_MAX        128
+#define LPFC_FCP_MQ_THRESHOLD_DEF        8
+
 /* Common buffer size to accomidate SCSI and NVME IO buffers */
 #define LPFC_COMMON_IO_BUF_SZ        768

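Note: the two lpfc hunks above first normalize cfg_fcp_mq_threshold against cfg_hdw_queue and then cap nr_hw_queues at twice the NUMA node count. A minimal standalone C sketch of that clamp, with plain ints standing in for the driver fields and for num_possible_nodes() (the names below are illustrative, not the lpfc API):

#include <stdio.h>

static int min_int(int a, int b)
{
        return a < b ? a : b;
}

static int advertised_queues(int fcp_mq_threshold, int hdw_queue, int nodes)
{
        /* 0 or an over-large threshold falls back to the HW queue count */
        if (!fcp_mq_threshold || fcp_mq_threshold > hdw_queue)
                fcp_mq_threshold = hdw_queue;

        /* advertise at most two queues per NUMA node */
        return min_int(2 * nodes, fcp_mq_threshold);
}

int main(void)
{
        printf("%d\n", advertised_queues(8, 32, 2));        /* prints 4 */
        printf("%d\n", advertised_queues(0, 32, 4));        /* prints 8 */
        return 0;
}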
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 8d560c562e9c..6b7b390b2e52 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2956,6 +2956,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
         dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
             vha->gnl.ldma);

+        vha->gnl.l = NULL;
+
         vfree(vha->scan.l);

         if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2e58cff9d200..98e60a34afd9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3440,6 +3440,12 @@ skip_dpc:
         return 0;

 probe_failed:
+        if (base_vha->gnl.l) {
+                dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
+                                base_vha->gnl.l, base_vha->gnl.ldma);
+                base_vha->gnl.l = NULL;
+        }
+
         if (base_vha->timer_active)
                 qla2x00_stop_timer(base_vha);
         base_vha->flags.online = 0;
@@ -3673,7 +3679,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
         if (!atomic_read(&pdev->enable_cnt)) {
                 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
                     base_vha->gnl.l, base_vha->gnl.ldma);
-
+                base_vha->gnl.l = NULL;
                 scsi_host_put(base_vha->host);
                 kfree(ha);
                 pci_set_drvdata(pdev, NULL);
@@ -3713,6 +3719,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
         dma_free_coherent(&ha->pdev->dev,
                 base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);

+        base_vha->gnl.l = NULL;
+
         vfree(base_vha->scan.l);

         if (IS_QLAFX00(ha))
@@ -4816,6 +4824,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
                     "Alloc failed for scan database.\n");
                 dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
                     vha->gnl.l, vha->gnl.ldma);
+                vha->gnl.l = NULL;
                 scsi_remove_host(vha->host);
                 return NULL;
         }
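Note: all four qla2xxx hunks apply the same hygiene rule: clear gnl.l right after dma_free_coherent() so a later teardown or error path can test the pointer instead of freeing the same buffer twice. A small userspace sketch of the pattern, with free() standing in for the DMA API:

#include <stdio.h>
#include <stdlib.h>

struct ctx {
        void *buf;
};

static void ctx_release(struct ctx *c)
{
        if (c->buf) {                /* safe to call from any teardown path */
                free(c->buf);
                c->buf = NULL;        /* a repeat call is now a no-op */
        }
}

int main(void)
{
        struct ctx c = { malloc(64) };

        ctx_release(&c);        /* normal teardown */
        ctx_release(&c);        /* duplicate call: no double free */
        printf("buf=%p\n", c.buf);
        return 0;
}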
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index e274053109d0..029da74bb2f5 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -7062,6 +7062,9 @@ static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
                                          struct ufs_vreg *vreg)
 {
+        if (!vreg)
+                return 0;
+
         return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
 }

diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 04eda111920e..661bb9358364 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1132,14 +1132,16 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
         struct se_cmd *se_cmd = cmd->se_cmd;
         struct tcmu_dev *udev = cmd->tcmu_dev;
         bool read_len_valid = false;
-        uint32_t read_len = se_cmd->data_length;
+        uint32_t read_len;

         /*
          * cmd has been completed already from timeout, just reclaim
          * data area space and free cmd
          */
-        if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
+        if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+                WARN_ON_ONCE(se_cmd);
                 goto out;
+        }

         list_del_init(&cmd->queue_entry);

@@ -1152,6 +1154,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
                 goto done;
         }

+        read_len = se_cmd->data_length;
         if (se_cmd->data_direction == DMA_FROM_DEVICE &&
             (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
                 read_len_valid = true;
@@ -1307,6 +1310,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
                  */
                 scsi_status = SAM_STAT_CHECK_CONDITION;
                 list_del_init(&cmd->queue_entry);
+                cmd->se_cmd = NULL;
         } else {
                 list_del_init(&cmd->queue_entry);
                 idr_remove(&udev->commands, id);
@@ -2022,6 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)

                 idr_remove(&udev->commands, i);
                 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+                        WARN_ON(!cmd->se_cmd);
                         list_del_init(&cmd->queue_entry);
                         if (err_level == 1) {
                                 /*
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index 92f23e3bc27a..7cacae5a8797 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -858,6 +858,7 @@ static void acornfb_parse_dram(char *opt)
         case 'M':
         case 'm':
                 size *= 1024;
+                /* Fall through */
         case 'K':
         case 'k':
                 size *= 1024;
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index 4eacfb1ce1ac..eb729d704836 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -168,7 +168,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
                 soft_margin = new_margin;
                 reload = soft_margin * (mem_fclk_21285 / 256);
                 watchdog_ping();
-                /* Fall */
+                /* Fall through */
         case WDIOC_GETTIMEOUT:
                 ret = put_user(soft_margin, int_arg);
                 break;
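Note: both switch fixes above document a deliberately missing break. With -Wimplicit-fallthrough, gcc accepts a comment of this form (or the fallthrough attribute) as the annotation that silences the warning. A compilable sketch modeled on acornfb_parse_dram():

#include <stdio.h>
#include <stdlib.h>

static unsigned long parse_size(const char *opt)
{
        char *end;
        unsigned long size = strtoul(opt, &end, 0);

        switch (*end) {
        case 'M':
        case 'm':
                size *= 1024;
                /* Fall through */
        case 'K':
        case 'k':
                size *= 1024;
                break;
        }
        return size;
}

int main(void)
{
        printf("%lu\n", parse_size("2M"));        /* 2097152: falls into 'K' */
        printf("%lu\n", parse_size("512k"));      /* 524288 */
        return 0;
}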
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index a2a87117d262..fd5133e26a38 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -74,6 +74,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
                 cell = rcu_dereference_raw(net->ws_cell);
                 if (cell) {
                         afs_get_cell(cell);
+                        ret = 0;
                         break;
                 }
                 ret = -EDESTADDRREQ;
@@ -108,6 +109,9 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,

         done_seqretry(&net->cells_lock, seq);

+        if (ret != 0 && cell)
+                afs_put_cell(net, cell);
+
         return ret == 0 ? cell : ERR_PTR(ret);
 }

113 117
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 81207dc3c997..139b4e3cc946 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -959,7 +959,8 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
                            inode ? AFS_FS_I(inode) : NULL);
         } else {
                 trace_afs_lookup(dvnode, &dentry->d_name,
-                                 inode ? AFS_FS_I(inode) : NULL);
+                                 IS_ERR_OR_NULL(inode) ? NULL
+                                 : AFS_FS_I(inode));
         }
         return d;
 }
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 2575503170fc..ca2452806ebf 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -2171,7 +2171,7 @@ int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl
                key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);

         size = round_up(acl->size, 4);
-        call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus,
+        call = afs_alloc_flat_call(net, &yfs_RXYFSStoreOpaqueACL2,
                                    sizeof(__be32) * 2 +
                                    sizeof(struct yfs_xdr_YFSFid) +
                                    sizeof(__be32) + size,
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index e078cc55b989..b3c8b886bf64 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -913,8 +913,9 @@ get_more_pages:
                         if (page_offset(page) >= ceph_wbc.i_size) {
                                 dout("%p page eof %llu\n",
                                      page, ceph_wbc.i_size);
-                                if (ceph_wbc.size_stable ||
-                                    page_offset(page) >= i_size_read(inode))
+                                if ((ceph_wbc.size_stable ||
+                                    page_offset(page) >= i_size_read(inode)) &&
+                                    clear_page_dirty_for_io(page))
                                         mapping->a_ops->invalidatepage(page,
                                                                 0, PAGE_SIZE);
                                 unlock_page(page);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index d98dcd976c80..ce0f5658720a 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1301,6 +1301,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 {
         struct ceph_inode_info *ci = cap->ci;
         struct inode *inode = &ci->vfs_inode;
+        struct ceph_buffer *old_blob = NULL;
         struct cap_msg_args arg;
         int held, revoking;
         int wake = 0;
@@ -1365,7 +1366,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
         ci->i_requested_max_size = arg.max_size;

         if (flushing & CEPH_CAP_XATTR_EXCL) {
-                __ceph_build_xattrs_blob(ci);
+                old_blob = __ceph_build_xattrs_blob(ci);
                 arg.xattr_version = ci->i_xattrs.version;
                 arg.xattr_buf = ci->i_xattrs.blob;
         } else {
@@ -1409,6 +1410,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,

         spin_unlock(&ci->i_ceph_lock);

+        ceph_buffer_put(old_blob);
+
         ret = send_cap_msg(&arg);
         if (ret < 0) {
                 dout("error sending cap msg, must requeue %p\n", inode);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 791f84a13bb8..18500edefc56 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -736,6 +736,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
         int issued, new_issued, info_caps;
         struct timespec64 mtime, atime, ctime;
         struct ceph_buffer *xattr_blob = NULL;
+        struct ceph_buffer *old_blob = NULL;
         struct ceph_string *pool_ns = NULL;
         struct ceph_cap *new_cap = NULL;
         int err = 0;
@@ -881,7 +882,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
         if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
             le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
                 if (ci->i_xattrs.blob)
-                        ceph_buffer_put(ci->i_xattrs.blob);
+                        old_blob = ci->i_xattrs.blob;
                 ci->i_xattrs.blob = xattr_blob;
                 if (xattr_blob)
                         memcpy(ci->i_xattrs.blob->vec.iov_base,
@@ -1022,8 +1023,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 out:
         if (new_cap)
                 ceph_put_cap(mdsc, new_cap);
-        if (xattr_blob)
-                ceph_buffer_put(xattr_blob);
+        ceph_buffer_put(old_blob);
+        ceph_buffer_put(xattr_blob);
         ceph_put_string(pool_ns);
         return err;
 }
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index ac9b53b89365..5083e238ad15 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -111,8 +111,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
         req->r_wait_for_completion = ceph_lock_wait_for_completion;

         err = ceph_mdsc_do_request(mdsc, inode, req);
-
-        if (operation == CEPH_MDS_OP_GETFILELOCK) {
+        if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
                 fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
                 if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
                         fl->fl_type = F_RDLCK;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 4c6494eb02b5..ccfcc66aaf44 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -465,6 +465,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
         struct inode *inode = &ci->vfs_inode;
         struct ceph_cap_snap *capsnap;
         struct ceph_snap_context *old_snapc, *new_snapc;
+        struct ceph_buffer *old_blob = NULL;
         int used, dirty;

         capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
@@ -541,7 +542,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
         capsnap->gid = inode->i_gid;

         if (dirty & CEPH_CAP_XATTR_EXCL) {
-                __ceph_build_xattrs_blob(ci);
+                old_blob = __ceph_build_xattrs_blob(ci);
                 capsnap->xattr_blob =
                         ceph_buffer_get(ci->i_xattrs.blob);
                 capsnap->xattr_version = ci->i_xattrs.version;
@@ -584,6 +585,7 @@ update_snapc:
         }
         spin_unlock(&ci->i_ceph_lock);

+        ceph_buffer_put(old_blob);
         kfree(capsnap);
         ceph_put_snap_context(old_snapc);
 }
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index d2352fd95dbc..6b9f1ee7de85 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -926,7 +926,7 @@ extern int ceph_getattr(const struct path *path, struct kstat *stat,
 int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
 ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
 extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
-extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci);
+extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
 extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
 extern const struct xattr_handler *ceph_xattr_handlers[];

diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 37b458a9af3a..939eab7aa219 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -754,12 +754,15 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,

 /*
  * If there are dirty xattrs, reencode xattrs into the prealloc_blob
- * and swap into place.
+ * and swap into place. It returns the old i_xattrs.blob (or NULL) so
+ * that it can be freed by the caller as the i_ceph_lock is likely to be
+ * held.
  */
-void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
+struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
 {
         struct rb_node *p;
         struct ceph_inode_xattr *xattr = NULL;
+        struct ceph_buffer *old_blob = NULL;
         void *dest;

         dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
@@ -790,12 +793,14 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
                         dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

                 if (ci->i_xattrs.blob)
-                        ceph_buffer_put(ci->i_xattrs.blob);
+                        old_blob = ci->i_xattrs.blob;
                 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
                 ci->i_xattrs.prealloc_blob = NULL;
                 ci->i_xattrs.dirty = false;
                 ci->i_xattrs.version++;
         }
+
+        return old_blob;
 }

 static inline int __get_request_mask(struct inode *in) {
@@ -1036,6 +1041,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
         struct ceph_inode_info *ci = ceph_inode(inode);
         struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
         struct ceph_cap_flush *prealloc_cf = NULL;
+        struct ceph_buffer *old_blob = NULL;
         int issued;
         int err;
         int dirty = 0;
@@ -1109,13 +1115,15 @@ retry:
                 struct ceph_buffer *blob;

                 spin_unlock(&ci->i_ceph_lock);
-                dout(" preaallocating new blob size=%d\n", required_blob_size);
+                ceph_buffer_put(old_blob); /* Shouldn't be required */
+                dout(" pre-allocating new blob size=%d\n", required_blob_size);
                 blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
                 if (!blob)
                         goto do_sync_unlocked;
                 spin_lock(&ci->i_ceph_lock);
+                /* prealloc_blob can't be released while holding i_ceph_lock */
                 if (ci->i_xattrs.prealloc_blob)
-                        ceph_buffer_put(ci->i_xattrs.prealloc_blob);
+                        old_blob = ci->i_xattrs.prealloc_blob;
                 ci->i_xattrs.prealloc_blob = blob;
                 goto retry;
         }
@@ -1131,6 +1139,7 @@ retry:
         }

         spin_unlock(&ci->i_ceph_lock);
+        ceph_buffer_put(old_blob);
         if (lock_snap_rwsem)
                 up_read(&mdsc->snap_rwsem);
         if (dirty)
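Note: every ceph hunk in this series follows one pattern: while i_ceph_lock is held, the buffer being replaced is only detached into old_blob; the reference is dropped after unlocking, since the release path may do work that must not run under a spinlock. A userspace sketch of the same shape, with a pthread mutex standing in for the spinlock (names are illustrative, not the ceph_buffer API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct state {
        pthread_mutex_t lock;
        char *blob;
};

static void swap_blob(struct state *s, char *new_blob)
{
        char *old_blob = NULL;

        pthread_mutex_lock(&s->lock);
        old_blob = s->blob;        /* detach only; never free under the lock */
        s->blob = new_blob;
        pthread_mutex_unlock(&s->lock);

        free(old_blob);            /* the potentially slow release runs unlocked */
}

int main(void)
{
        struct state s = { PTHREAD_MUTEX_INITIALIZER, strdup("v1") };

        swap_blob(&s, strdup("v2"));
        printf("%s\n", s.blob);
        free(s.blob);
        return 0;
}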
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a15a6e738eb5..1795e80cbdf7 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1113,7 +1113,7 @@ cifs_demultiplex_thread(void *p)
         mempool_resize(cifs_req_poolp, length + cifs_min_rcv);

         set_freezable();
-        allow_signal(SIGKILL);
+        allow_kernel_signal(SIGKILL);
         while (server->tcpStatus != CifsExiting) {
                 if (try_to_freeze())
                         continue;
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 24bbe3cb7ad4..cfb48bd088e1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -679,6 +679,13 @@ static void io_put_req(struct io_kiocb *req)
                 io_free_req(req);
 }

+static unsigned io_cqring_events(struct io_cq_ring *ring)
+{
+        /* See comment at the top of this file */
+        smp_rmb();
+        return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
+}
+
 /*
  * Find and free completed poll iocbs
  */
@@ -771,7 +778,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
                                 long min)
 {
-        while (!list_empty(&ctx->poll_list)) {
+        while (!list_empty(&ctx->poll_list) && !need_resched()) {
                 int ret;

                 ret = io_do_iopoll(ctx, nr_events, min);
@@ -798,6 +805,12 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
                 unsigned int nr_events = 0;

                 io_iopoll_getevents(ctx, &nr_events, 1);
+
+                /*
+                 * Ensure we allow local-to-the-cpu processing to take place,
+                 * in this case we need to ensure that we reap all events.
+                 */
+                cond_resched();
         }
         mutex_unlock(&ctx->uring_lock);
 }
@@ -805,11 +818,42 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
 static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
                            long min)
 {
-        int ret = 0;
+        int iters, ret = 0;

+        /*
+         * We disallow the app entering submit/complete with polling, but we
+         * still need to lock the ring to prevent racing with polled issue
+         * that got punted to a workqueue.
+         */
+        mutex_lock(&ctx->uring_lock);
+
+        iters = 0;
         do {
                 int tmin = 0;

+                /*
+                 * Don't enter poll loop if we already have events pending.
+                 * If we do, we can potentially be spinning for commands that
+                 * already triggered a CQE (eg in error).
+                 */
+                if (io_cqring_events(ctx->cq_ring))
+                        break;
+
+                /*
+                 * If a submit got punted to a workqueue, we can have the
+                 * application entering polling for a command before it gets
+                 * issued. That app will hold the uring_lock for the duration
+                 * of the poll right here, so we need to take a breather every
+                 * now and then to ensure that the issue has a chance to add
+                 * the poll to the issued list. Otherwise we can spin here
+                 * forever, while the workqueue is stuck trying to acquire the
+                 * very same mutex.
+                 */
+                if (!(++iters & 7)) {
+                        mutex_unlock(&ctx->uring_lock);
+                        mutex_lock(&ctx->uring_lock);
+                }
+
                 if (*nr_events < min)
                         tmin = min - *nr_events;

@@ -819,6 +863,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
                 ret = 0;
         } while (min && !*nr_events && !need_resched());

+        mutex_unlock(&ctx->uring_lock);
         return ret;
 }

@@ -2280,15 +2325,7 @@ static int io_sq_thread(void *data)
                 unsigned nr_events = 0;

                 if (ctx->flags & IORING_SETUP_IOPOLL) {
-                        /*
-                         * We disallow the app entering submit/complete
-                         * with polling, but we still need to lock the
-                         * ring to prevent racing with polled issue
-                         * that got punted to a workqueue.
-                         */
-                        mutex_lock(&ctx->uring_lock);
                         io_iopoll_check(ctx, &nr_events, 0);
-                        mutex_unlock(&ctx->uring_lock);
                 } else {
                         /*
                          * Normal IO, just pretend everything completed.
@@ -2433,13 +2470,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
         return submit;
 }

-static unsigned io_cqring_events(struct io_cq_ring *ring)
-{
-        /* See comment at the top of this file */
-        smp_rmb();
-        return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
-}
-
 /*
  * Wait until events become available, if we don't already have some. The
  * application must reap them itself, as they reside on the shared cq ring.
@@ -3190,9 +3220,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
         min_complete = min(min_complete, ctx->cq_entries);

         if (ctx->flags & IORING_SETUP_IOPOLL) {
-                mutex_lock(&ctx->uring_lock);
                 ret = io_iopoll_check(ctx, &nr_events, min_complete);
-                mutex_unlock(&ctx->uring_lock);
         } else {
                 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 8d501093660f..0adfd8840110 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1487,7 +1487,7 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
         if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
                 nfs_file_set_open_context(file, ctx);
         else
-                err = -ESTALE;
+                err = -EOPENSTALE;
 out:
         return err;
 }
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 0cb442406168..222d7115db71 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -401,15 +401,21 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
         unsigned long bytes = 0;
         struct nfs_direct_req *dreq = hdr->dreq;

-        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
-                goto out_put;
-
         spin_lock(&dreq->lock);
-        if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
+        if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
                 dreq->error = hdr->error;
-        else
+
+        if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+                spin_unlock(&dreq->lock);
+                goto out_put;
+        }
+
+        if (hdr->good_bytes != 0)
                 nfs_direct_good_bytes(dreq, hdr);

+        if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
+                dreq->error = 0;
+
         spin_unlock(&dreq->lock);

         while (!list_empty(&hdr->pages)) {
@@ -782,16 +788,19 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
         bool request_commit = false;
         struct nfs_page *req = nfs_list_entry(hdr->pages.next);

-        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
-                goto out_put;
-
         nfs_init_cinfo_from_dreq(&cinfo, dreq);

         spin_lock(&dreq->lock);

         if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
                 dreq->error = hdr->error;
-        if (dreq->error == 0) {
+
+        if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+                spin_unlock(&dreq->lock);
+                goto out_put;
+        }
+
+        if (hdr->good_bytes != 0) {
                 nfs_direct_good_bytes(dreq, hdr);
                 if (nfs_write_need_commit(hdr)) {
                         if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index b04e20d28162..5657b7f2611f 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -8,6 +8,7 @@
  */

 #include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
 #include <linux/nfs_page.h>
 #include <linux/module.h>
 #include <linux/sched/mm.h>
@@ -928,7 +929,9 @@ retry:
         pgm = &pgio->pg_mirrors[0];
         pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

-        pgio->pg_maxretrans = io_maxretrans;
+        if (NFS_SERVER(pgio->pg_inode)->flags &
+                        (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
+                pgio->pg_maxretrans = io_maxretrans;
         return;
 out_nolseg:
         if (pgio->pg_error < 0)
@@ -940,6 +943,7 @@ out_mds:
                         pgio->pg_lseg);
         pnfs_put_lseg(pgio->pg_lseg);
         pgio->pg_lseg = NULL;
+        pgio->pg_maxretrans = 0;
         nfs_pageio_reset_read_mds(pgio);
 }

@@ -1000,7 +1004,9 @@ retry:
                 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
         }

-        pgio->pg_maxretrans = io_maxretrans;
+        if (NFS_SERVER(pgio->pg_inode)->flags &
+                        (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
+                pgio->pg_maxretrans = io_maxretrans;
         return;

 out_mds:
@@ -1010,6 +1016,7 @@ out_mds:
                         pgio->pg_lseg);
         pnfs_put_lseg(pgio->pg_lseg);
         pgio->pg_lseg = NULL;
+        pgio->pg_maxretrans = 0;
         nfs_pageio_reset_write_mds(pgio);
 }

@@ -1148,8 +1155,6 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
                 break;
         case -NFS4ERR_RETRY_UNCACHED_REP:
                 break;
-        case -EAGAIN:
-                return -NFS4ERR_RESET_TO_PNFS;
         /* Invalidate Layout errors */
         case -NFS4ERR_PNFS_NO_LAYOUT:
         case -ESTALE:           /* mapped NFS4ERR_STALE */
@@ -1210,7 +1215,6 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
         case -EBADHANDLE:
         case -ELOOP:
         case -ENOSPC:
-        case -EAGAIN:
                 break;
         case -EJUKEBOX:
                 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
@@ -1445,16 +1449,6 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
         ff_layout_read_prepare_common(task, hdr);
 }

-static void
-ff_layout_io_prepare_transmit(struct rpc_task *task,
-                void *data)
-{
-        struct nfs_pgio_header *hdr = data;
-
-        if (!pnfs_is_valid_lseg(hdr->lseg))
-                rpc_exit(task, -EAGAIN);
-}
-
 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
 {
         struct nfs_pgio_header *hdr = data;
@@ -1740,7 +1734,6 @@ static void ff_layout_commit_release(void *data)

 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
         .rpc_call_prepare = ff_layout_read_prepare_v3,
-        .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
         .rpc_call_done = ff_layout_read_call_done,
         .rpc_count_stats = ff_layout_read_count_stats,
         .rpc_release = ff_layout_read_release,
@@ -1748,7 +1741,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {

 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
         .rpc_call_prepare = ff_layout_read_prepare_v4,
-        .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
         .rpc_call_done = ff_layout_read_call_done,
         .rpc_count_stats = ff_layout_read_count_stats,
         .rpc_release = ff_layout_read_release,
@@ -1756,7 +1748,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {

 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
         .rpc_call_prepare = ff_layout_write_prepare_v3,
-        .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
         .rpc_call_done = ff_layout_write_call_done,
         .rpc_count_stats = ff_layout_write_count_stats,
         .rpc_release = ff_layout_write_release,
@@ -1764,7 +1755,6 @@ static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {

 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
         .rpc_call_prepare = ff_layout_write_prepare_v4,
-        .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
         .rpc_call_done = ff_layout_write_call_done,
         .rpc_count_stats = ff_layout_write_count_stats,
         .rpc_release = ff_layout_write_release,
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 8a1758200b57..c764cfe456e5 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1403,12 +1403,21 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
         if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
                 return 0;

+        /* No fileid? Just exit */
+        if (!(fattr->valid & NFS_ATTR_FATTR_FILEID))
+                return 0;
         /* Has the inode gone and changed behind our back? */
-        if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
+        if (nfsi->fileid != fattr->fileid) {
+                /* Is this perhaps the mounted-on fileid? */
+                if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
+                    nfsi->fileid == fattr->mounted_on_fileid)
+                        return 0;
                 return -ESTALE;
+        }
         if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
                 return -ESTALE;

+
         if (!nfs_file_has_buffered_writers(nfsi)) {
                 /* Verify a few of the more important attributes */
                 if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr))
@@ -1768,18 +1777,6 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa
 EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc);


-static inline bool nfs_fileid_valid(struct nfs_inode *nfsi,
-                                    struct nfs_fattr *fattr)
-{
-        bool ret1 = true, ret2 = true;
-
-        if (fattr->valid & NFS_ATTR_FATTR_FILEID)
-                ret1 = (nfsi->fileid == fattr->fileid);
-        if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
-                ret2 = (nfsi->fileid == fattr->mounted_on_fileid);
-        return ret1 || ret2;
-}
-
 /*
  * Many nfs protocol calls return the new file attributes after
  * an operation. Here we update the inode to reflect the state
@@ -1810,7 +1807,15 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                         nfs_display_fhandle_hash(NFS_FH(inode)),
                         atomic_read(&inode->i_count), fattr->valid);

-        if (!nfs_fileid_valid(nfsi, fattr)) {
+        /* No fileid? Just exit */
+        if (!(fattr->valid & NFS_ATTR_FATTR_FILEID))
+                return 0;
+        /* Has the inode gone and changed behind our back? */
+        if (nfsi->fileid != fattr->fileid) {
+                /* Is this perhaps the mounted-on fileid? */
+                if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
+                    nfsi->fileid == fattr->mounted_on_fileid)
+                        return 0;
                 printk(KERN_ERR "NFS: server %s error: fileid changed\n"
                         "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
                         NFS_SERVER(inode)->nfs_client->cl_hostname,
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index a2346a2f8361..e64f810223be 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -775,3 +775,13 @@ static inline bool nfs_error_is_fatal(int err)
         }
 }

+static inline bool nfs_error_is_fatal_on_server(int err)
+{
+        switch (err) {
+        case 0:
+        case -ERESTARTSYS:
+        case -EINTR:
+                return false;
+        }
+        return nfs_error_is_fatal(err);
+}
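Note: nfs_error_is_fatal_on_server(), now shared via internal.h, is a plain errno classifier: success and locally-generated interruptions are never treated as server-fatal, and everything else defers to nfs_error_is_fatal(). A compile-and-run sketch of the same shape, where is_fatal() is an assumed stand-in for the real helper:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define ERESTARTSYS 512        /* kernel-internal value, defined here for the sketch */

/* assumption: a stand-in for nfs_error_is_fatal(), not the real predicate */
static bool is_fatal(int err)
{
        switch (err) {
        case -EIO:
        case -ESTALE:
                return true;
        }
        return false;
}

/* same shape as nfs_error_is_fatal_on_server() above */
static bool is_fatal_on_server(int err)
{
        switch (err) {
        case 0:                        /* success is never fatal */
        case -ERESTARTSYS:        /* interrupted locally, not by the server */
        case -EINTR:
                return false;
        }
        return is_fatal(err);
}

int main(void)
{
        printf("%d %d %d\n", is_fatal_on_server(0),
               is_fatal_on_server(-EINTR), is_fatal_on_server(-EIO));
        return 0;        /* prints: 0 0 1 */
}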
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 96db471ca2e5..339663d04bf8 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
         if (IS_ERR(inode)) {
                 err = PTR_ERR(inode);
                 switch (err) {
-                case -EPERM:
-                case -EACCES:
-                case -EDQUOT:
-                case -ENOSPC:
-                case -EROFS:
-                        goto out_put_ctx;
                 default:
+                        goto out_put_ctx;
+                case -ENOENT:
+                case -ESTALE:
+                case -EISDIR:
+                case -ENOTDIR:
+                case -ELOOP:
                         goto out_drop;
                 }
         }
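Note: the nfs4_file_open() hunk relies on the fact that C allows the default label anywhere in a switch: putting default first keeps the explicit list down to just the errors that should drop the dentry. A tiny demo:

#include <errno.h>
#include <stdio.h>

static const char *classify(int err)
{
        switch (err) {
        default:
                return "put_ctx";        /* any unlisted error */
        case -ENOENT:
        case -ESTALE:
        case -EISDIR:
        case -ENOTDIR:
        case -ELOOP:
                return "drop";           /* lookup-style failures */
        }
}

int main(void)
{
        printf("%s %s\n", classify(-EPERM), classify(-ENOENT));
        return 0;        /* prints: put_ctx drop */
}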
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ed4e1b07447b..20b3717cd7ca 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -590,7 +590,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
         }

         hdr->res.fattr = &hdr->fattr;
-        hdr->res.count = count;
+        hdr->res.count = 0;
         hdr->res.eof = 0;
         hdr->res.verf = &hdr->verf;
         nfs_fattr_init(&hdr->fattr);
@@ -1251,20 +1251,23 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
 int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
                       struct nfs_pgio_header *hdr)
 {
-        LIST_HEAD(failed);
+        LIST_HEAD(pages);

         desc->pg_io_completion = hdr->io_completion;
         desc->pg_dreq = hdr->dreq;
-        while (!list_empty(&hdr->pages)) {
-                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+        list_splice_init(&hdr->pages, &pages);
+        while (!list_empty(&pages)) {
+                struct nfs_page *req = nfs_list_entry(pages.next);

                 if (!nfs_pageio_add_request(desc, req))
-                        nfs_list_move_request(req, &failed);
+                        break;
         }
         nfs_pageio_complete(desc);
-        if (!list_empty(&failed)) {
-                list_move(&failed, &hdr->pages);
-                return desc->pg_error < 0 ? desc->pg_error : -EIO;
+        if (!list_empty(&pages)) {
+                int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
+                hdr->completion_ops->error_cleanup(&pages, err);
+                nfs_set_pgio_error(hdr, err, hdr->io_start);
+                return err;
         }
         return 0;
 }
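Note: nfs_pageio_resend() now splices the requests onto a local list, stops at the first add failure, and hands everything still queued to a single error-cleanup pass. The same control flow with a toy intrusive list (all names here are assumed for illustration):

#include <stdbool.h>
#include <stdio.h>

struct req {
        int id;
        struct req *next;
};

static bool submit(struct req *r)
{
        return r->id != 3;                /* pretend request 3 fails to queue */
}

static int resend(struct req **queue)
{
        struct req *pages = *queue;        /* splice the whole list locally */
        *queue = NULL;

        while (pages && submit(pages))
                pages = pages->next;        /* stop at the first failure */

        if (pages) {                        /* one cleanup pass for the rest */
                for (struct req *r = pages; r; r = r->next)
                        printf("cleanup req %d\n", r->id);
                return -5;                /* an -EIO-style error */
        }
        return 0;
}

int main(void)
{
        struct req r4 = { 4, NULL }, r3 = { 3, &r4 };
        struct req r2 = { 2, &r3 }, r1 = { 1, &r2 };
        struct req *queue = &r1;

        printf("resend -> %d\n", resend(&queue));
        return 0;
}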
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index c0046c348910..82af4809b869 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -627,11 +627,16 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
                         /* Add this address as an alias */
                         rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
                                         rpc_clnt_test_and_add_xprt, NULL);
-                } else
-                        clp = get_v3_ds_connect(mds_srv,
-                                        (struct sockaddr *)&da->da_addr,
-                                        da->da_addrlen, IPPROTO_TCP,
-                                        timeo, retrans);
+                        continue;
+                }
+                clp = get_v3_ds_connect(mds_srv,
+                                (struct sockaddr *)&da->da_addr,
+                                da->da_addrlen, IPPROTO_TCP,
+                                timeo, retrans);
+                if (IS_ERR(clp))
+                        continue;
+                clp->cl_rpcclient->cl_softerr = 0;
+                clp->cl_rpcclient->cl_softrtry = 0;
         }

         if (IS_ERR(clp)) {
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 5552fa8b6e12..0f7288b94633 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -594,7 +594,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
                 /* Emulate the eof flag, which isn't normally needed in NFSv2
                  * as it is guaranteed to always return the file attributes
                  */
-                if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
+                if ((hdr->res.count == 0 && hdr->args.count > 0) ||
+                    hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
                         hdr->res.eof = 1;
         }
         return 0;
@@ -615,8 +616,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,

 static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
 {
-        if (task->tk_status >= 0)
+        if (task->tk_status >= 0) {
+                hdr->res.count = hdr->args.count;
                 nfs_writeback_update_inode(hdr);
+        }
         return 0;
 }

622 625
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index c19841c82b6a..cfe0b586eadd 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -91,19 +91,25 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
 }
 EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

-static void nfs_readpage_release(struct nfs_page *req)
+static void nfs_readpage_release(struct nfs_page *req, int error)
 {
         struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
+        struct page *page = req->wb_page;

         dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
                 (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
                 (long long)req_offset(req));

+        if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
+                SetPageError(page);
         if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
-                if (PageUptodate(req->wb_page))
-                        nfs_readpage_to_fscache(inode, req->wb_page, 0);
+                struct address_space *mapping = page_file_mapping(page);

-                unlock_page(req->wb_page);
+                if (PageUptodate(page))
+                        nfs_readpage_to_fscache(inode, page, 0);
+                else if (!PageError(page) && !PagePrivate(page))
+                        generic_error_remove_page(mapping, page);
+                unlock_page(page);
         }
         nfs_release_request(req);
 }
@@ -131,7 +137,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
                              &nfs_async_read_completion_ops);
         if (!nfs_pageio_add_request(&pgio, new)) {
                 nfs_list_remove_request(new);
-                nfs_readpage_release(new);
+                nfs_readpage_release(new, pgio.pg_error);
         }
         nfs_pageio_complete(&pgio);

@@ -153,6 +159,7 @@ static void nfs_page_group_set_uptodate(struct nfs_page *req)
 static void nfs_read_completion(struct nfs_pgio_header *hdr)
 {
         unsigned long bytes = 0;
+        int error;

         if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                 goto out;
@@ -179,14 +186,19 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
                                 zero_user_segment(page, start, end);
                         }
                 }
+                error = 0;
                 bytes += req->wb_bytes;
                 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
                         if (bytes <= hdr->good_bytes)
                                 nfs_page_group_set_uptodate(req);
+                        else {
+                                error = hdr->error;
+                                xchg(&nfs_req_openctx(req)->error, error);
+                        }
                 } else
                         nfs_page_group_set_uptodate(req);
                 nfs_list_remove_request(req);
-                nfs_readpage_release(req);
+                nfs_readpage_release(req, error);
         }
 out:
         hdr->release(hdr);
@@ -213,7 +225,7 @@ nfs_async_read_error(struct list_head *head, int error)
         while (!list_empty(head)) {
                 req = nfs_list_entry(head->next);
                 nfs_list_remove_request(req);
-                nfs_readpage_release(req);
+                nfs_readpage_release(req, error);
         }
 }

@@ -337,8 +349,13 @@ int nfs_readpage(struct file *file, struct page *page)
                 goto out;
         }

+        xchg(&ctx->error, 0);
         error = nfs_readpage_async(ctx, inode, page);
-
+        if (!error) {
+                error = wait_on_page_locked_killable(page);
+                if (!PageUptodate(page) && !error)
+                        error = xchg(&ctx->error, 0);
+        }
 out:
         put_nfs_open_context(ctx);
         return error;
@@ -372,8 +389,8 @@ readpage_async_filler(void *data, struct page *page)
                 zero_user_segment(page, len, PAGE_SIZE);
         if (!nfs_pageio_add_request(desc->pgio, new)) {
                 nfs_list_remove_request(new);
-                nfs_readpage_release(new);
                 error = desc->pgio->pg_error;
+                nfs_readpage_release(new, error);
                 goto out;
         }
         return 0;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 92d9cadc6102..85ca49549b39 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -57,6 +57,7 @@ static const struct rpc_call_ops nfs_commit_ops;
 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
 static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
 static const struct nfs_rw_ops nfs_rw_write_ops;
+static void nfs_inode_remove_request(struct nfs_page *req);
 static void nfs_clear_request_commit(struct nfs_page *req);
 static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
                                       struct inode *inode);
@@ -591,23 +592,13 @@ release_request:

 static void nfs_write_error(struct nfs_page *req, int error)
 {
+        nfs_set_pageerror(page_file_mapping(req->wb_page));
         nfs_mapping_set_error(req->wb_page, error);
+        nfs_inode_remove_request(req);
         nfs_end_page_writeback(req);
         nfs_release_request(req);
 }

-static bool
-nfs_error_is_fatal_on_server(int err)
-{
-        switch (err) {
-        case 0:
-        case -ERESTARTSYS:
-        case -EINTR:
-                return false;
-        }
-        return nfs_error_is_fatal(err);
-}
-
 /*
  * Find an associated nfs write request, and prepare to flush it out
  * May return an error if the user signalled nfs_wait_on_request().
@@ -615,7 +606,6 @@ nfs_error_is_fatal_on_server(int err)
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
                                 struct page *page)
 {
-        struct address_space *mapping;
         struct nfs_page *req;
         int ret = 0;

@@ -630,12 +620,11 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
         WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

         /* If there is a fatal error that covers this write, just exit */
-        ret = 0;
-        mapping = page_file_mapping(page);
-        if (test_bit(AS_ENOSPC, &mapping->flags) ||
-            test_bit(AS_EIO, &mapping->flags))
+        ret = pgio->pg_error;
+        if (nfs_error_is_fatal_on_server(ret))
                 goto out_launder;

+        ret = 0;
         if (!nfs_pageio_add_request(pgio, req)) {
                 ret = pgio->pg_error;
                 /*
@@ -647,6 +636,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
                 } else
                         ret = -EAGAIN;
                 nfs_redirty_request(req);
+                pgio->pg_error = 0;
         } else
                 nfs_add_stats(page_file_mapping(page)->host,
                                 NFSIOS_WRITEPAGES, 1);
@@ -666,7 +656,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
         ret = nfs_page_async_flush(pgio, page);
         if (ret == -EAGAIN) {
                 redirty_page_for_writepage(wbc, page);
-                ret = 0;
+                ret = AOP_WRITEPAGE_ACTIVATE;
         }
         return ret;
 }
@@ -685,10 +675,11 @@ static int nfs_writepage_locked(struct page *page,
         nfs_pageio_init_write(&pgio, inode, 0,
                                 false, &nfs_async_write_completion_ops);
         err = nfs_do_writepage(page, wbc, &pgio);
+        pgio.pg_error = 0;
         nfs_pageio_complete(&pgio);
         if (err < 0)
                 return err;
-        if (pgio.pg_error < 0)
+        if (nfs_error_is_fatal(pgio.pg_error))
                 return pgio.pg_error;
         return 0;
 }
@@ -698,7 +689,8 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
         int ret;

         ret = nfs_writepage_locked(page, wbc);
-        unlock_page(page);
+        if (ret != AOP_WRITEPAGE_ACTIVATE)
+                unlock_page(page);
         return ret;
 }

@@ -707,7 +699,8 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
         int ret;

         ret = nfs_do_writepage(page, wbc, data);
-        unlock_page(page);
+        if (ret != AOP_WRITEPAGE_ACTIVATE)
+                unlock_page(page);
         return ret;
 }

@@ -733,13 +726,14 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
                 &nfs_async_write_completion_ops);
         pgio.pg_io_completion = ioc;
         err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
+        pgio.pg_error = 0;
         nfs_pageio_complete(&pgio);
         nfs_io_completion_put(ioc);

         if (err < 0)
                 goto out_err;
         err = pgio.pg_error;
-        if (err < 0)
+        if (nfs_error_is_fatal(err))
                 goto out_err;
         return 0;
 out_err:
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 26ad75ae2be0..96352ab7bd81 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -571,7 +571,7 @@ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
  */
 static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
 {
-	struct nfsd_net *nn = v;
+	struct nfsd_net *nn = m->private;
 
 	seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
 	seq_printf(m, "num entries: %u\n",
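
In the nfsd_reply_cache_stats_show() fix above, the per-netns data was attached to the seq_file at open time, so it must be read from m->private, not from the iterator cookie v. A small userspace analog of the two-context-pointer mixup, with invented names:

#include <stdio.h>

/* A tiny analog of seq_file: 'private' is state attached at open time,
 * while the second callback argument is the current iteration record. */
struct seq {
	void *private;
};

struct stats { unsigned max_entries, num_entries; };

static int stats_show(struct seq *m, void *v)
{
	/* Wrong would be: struct stats *st = v; (v is the iterator cookie).
	 * Right: the stats live in m->private, set up when the file opened. */
	struct stats *st = m->private;

	printf("max entries: %u\n", st->max_entries);
	printf("num entries: %u\n", st->num_entries);
	return 0;
}

int main(void)
{
	struct stats st = { .max_entries = 1024, .num_entries = 10 };
	struct seq m = { .private = &st };

	return stats_show(&m, NULL);
}
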
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 13c548733860..3cf4f6aa48d6 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1171,13 +1171,17 @@ static struct inode *nfsd_get_inode(struct super_block *sb, umode_t mode)
 	return inode;
 }
 
-static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode, struct nfsdfs_client *ncl)
 {
 	struct inode *inode;
 
 	inode = nfsd_get_inode(dir->i_sb, mode);
 	if (!inode)
 		return -ENOMEM;
+	if (ncl) {
+		inode->i_private = ncl;
+		kref_get(&ncl->cl_ref);
+	}
 	d_add(dentry, inode);
 	inc_nlink(dir);
 	fsnotify_mkdir(dir, dentry);
@@ -1194,17 +1198,14 @@ static struct dentry *nfsd_mkdir(struct dentry *parent, struct nfsdfs_client *nc
 	dentry = d_alloc_name(parent, name);
 	if (!dentry)
 		goto out_err;
-	ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600);
+	ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600, ncl);
 	if (ret)
 		goto out_err;
-	if (ncl) {
-		d_inode(dentry)->i_private = ncl;
-		kref_get(&ncl->cl_ref);
-	}
 out:
 	inode_unlock(dir);
 	return dentry;
 out_err:
+	dput(dentry);
 	dentry = ERR_PTR(ret);
 	goto out;
 }
@@ -1214,11 +1215,9 @@ static void clear_ncl(struct inode *inode)
 	struct nfsdfs_client *ncl = inode->i_private;
 
 	inode->i_private = NULL;
-	synchronize_rcu();
 	kref_put(&ncl->cl_ref, ncl->cl_release);
 }
 
-
 static struct nfsdfs_client *__get_nfsdfs_client(struct inode *inode)
 {
 	struct nfsdfs_client *nc = inode->i_private;
@@ -1232,9 +1231,9 @@ struct nfsdfs_client *get_nfsdfs_client(struct inode *inode)
 {
 	struct nfsdfs_client *nc;
 
-	rcu_read_lock();
+	inode_lock_shared(inode);
 	nc = __get_nfsdfs_client(inode);
-	rcu_read_unlock();
+	inode_unlock_shared(inode);
 	return nc;
 }
 /* from __rpc_unlink */
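
The __nfsd_mkdir() change above moves the i_private assignment and the kref_get() to before d_add(), so the inode never becomes visible while only half initialized. A rough userspace sketch of initialize-then-publish with an atomic refcount; the types and names are invented:

#include <stdatomic.h>
#include <stdio.h>

struct client {
	atomic_int refs;
	int id;
};

static _Atomic(struct client *) published;	/* the lookup path reads this */

/* Take the reference and finish initialization *before* the object
 * becomes reachable, so a concurrent lookup can never see a half-made
 * object or one it cannot safely pin. */
static void publish(struct client *c, int id)
{
	c->id = id;
	atomic_fetch_add(&c->refs, 1);	/* like kref_get() before d_add() */
	atomic_store(&published, c);	/* like d_add(): now visible */
}

int main(void)
{
	struct client c = { .refs = 1 };

	publish(&c, 42);
	printf("id=%d refs=%d\n", atomic_load(&published)->id,
	       atomic_load(&c.refs));
	return 0;
}
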
diff --git a/fs/read_write.c b/fs/read_write.c
index 1f5088dec566..5bbf587f5bc1 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1811,10 +1811,7 @@ static int generic_remap_check_len(struct inode *inode_in,
 	return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
 }
 
-/*
- * Read a page's worth of file data into the page cache. Return the page
- * locked.
- */
+/* Read a page's worth of file data into the page cache. */
 static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
 {
 	struct page *page;
@@ -1826,11 +1823,33 @@ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
 		put_page(page);
 		return ERR_PTR(-EIO);
 	}
-	lock_page(page);
 	return page;
 }
 
 /*
+ * Lock two pages, ensuring that we lock in offset order if the pages are from
+ * the same file.
+ */
+static void vfs_lock_two_pages(struct page *page1, struct page *page2)
+{
+	/* Always lock in order of increasing index. */
+	if (page1->index > page2->index)
+		swap(page1, page2);
+
+	lock_page(page1);
+	if (page1 != page2)
+		lock_page(page2);
+}
+
+/* Unlock two pages, being careful not to unlock the same page twice. */
+static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
+{
+	unlock_page(page1);
+	if (page1 != page2)
+		unlock_page(page2);
+}
+
+/*
 * Compare extents of two files to see if they are the same.
 * Caller must have locked both inodes to prevent write races.
 */
@@ -1867,10 +1886,24 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
 		dest_page = vfs_dedupe_get_page(dest, destoff);
 		if (IS_ERR(dest_page)) {
 			error = PTR_ERR(dest_page);
-			unlock_page(src_page);
 			put_page(src_page);
 			goto out_error;
 		}
+
+		vfs_lock_two_pages(src_page, dest_page);
+
+		/*
+		 * Now that we've locked both pages, make sure they're still
+		 * mapped to the file data we're interested in. If not,
+		 * someone is invalidating pages on us and we lose.
+		 */
+		if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
+		    src_page->mapping != src->i_mapping ||
+		    dest_page->mapping != dest->i_mapping) {
+			same = false;
+			goto unlock;
+		}
+
 		src_addr = kmap_atomic(src_page);
 		dest_addr = kmap_atomic(dest_page);
 
@@ -1882,8 +1915,8 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
 
 		kunmap_atomic(dest_addr);
 		kunmap_atomic(src_addr);
-		unlock_page(dest_page);
-		unlock_page(src_page);
+unlock:
+		vfs_unlock_two_pages(src_page, dest_page);
 		put_page(dest_page);
 		put_page(src_page);
 
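
vfs_lock_two_pages() above is the classic cure for ABBA deadlocks: always take the two locks in one canonical order (here, page index), and take only one when both operands are the same object. The same shape in plain pthreads; this is a sketch of the technique, not the kernel code:

#include <pthread.h>
#include <stdio.h>

/* Lock two mutexes in a canonical (address) order so that two threads
 * locking the same pair can never deadlock; take a single lock when the
 * two objects turn out to be the same one. */
static void lock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a > b) {			/* like swap() on page->index */
		pthread_mutex_t *t = a;
		a = b;
		b = t;
	}
	pthread_mutex_lock(a);
	if (a != b)
		pthread_mutex_lock(b);
}

static void unlock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	if (a != b)
		pthread_mutex_unlock(b);
}

int main(void)
{
	pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

	lock_two(&m1, &m2);
	puts("both locked, in a stable global order");
	unlock_two(&m1, &m2);
	return 0;
}
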
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 80d7301ab76d..c0b84e960b20 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -51,7 +51,7 @@
 static void shrink_liability(struct ubifs_info *c, int nr_to_write)
 {
 	down_read(&c->vfs_sb->s_umount);
-	writeback_inodes_sb(c->vfs_sb, WB_REASON_FS_FREE_SPACE);
+	writeback_inodes_sb_nr(c->vfs_sb, nr_to_write, WB_REASON_FS_FREE_SPACE);
 	up_read(&c->vfs_sb->s_umount);
 }
 
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index b52624e28fa1..3b4b4114f208 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -129,7 +129,6 @@ static void __orphan_drop(struct ubifs_info *c, struct ubifs_orphan *o)
 static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
 {
 	if (orph->del) {
-		spin_unlock(&c->orphan_lock);
 		dbg_gen("deleted twice ino %lu", orph->inum);
 		return;
 	}
@@ -138,7 +137,6 @@ static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
 		orph->del = 1;
 		orph->dnext = c->orph_dnext;
 		c->orph_dnext = orph;
-		spin_unlock(&c->orphan_lock);
 		dbg_gen("delete later ino %lu", orph->inum);
 		return;
 	}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 2c0803b0ac3a..8c1d571334bc 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -609,6 +609,10 @@ static int init_constants_early(struct ubifs_info *c)
 	c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
 	if (c->max_bu_buf_len > c->leb_size)
 		c->max_bu_buf_len = c->leb_size;
+
+	/* Log is ready, preserve one LEB for commits. */
+	c->min_log_bytes = c->leb_size;
+
 	return 0;
 }
 
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ccbdbd62f0d8..fe6d804a38dc 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -880,6 +880,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 	/* len == 0 means wake all */
 	struct userfaultfd_wake_range range = { .len = 0, };
 	unsigned long new_flags;
+	bool still_valid;
 
 	WRITE_ONCE(ctx->released, true);
 
@@ -895,8 +896,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 	 * taking the mmap_sem for writing.
 	 */
 	down_write(&mm->mmap_sem);
-	if (!mmget_still_valid(mm))
-		goto skip_mm;
+	still_valid = mmget_still_valid(mm);
 	prev = NULL;
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		cond_resched();
@@ -907,19 +907,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 			continue;
 		}
 		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
-		prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
-				 new_flags, vma->anon_vma,
-				 vma->vm_file, vma->vm_pgoff,
-				 vma_policy(vma),
-				 NULL_VM_UFFD_CTX);
-		if (prev)
-			vma = prev;
-		else
-			prev = vma;
+		if (still_valid) {
+			prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
+					 new_flags, vma->anon_vma,
+					 vma->vm_file, vma->vm_pgoff,
+					 vma_policy(vma),
+					 NULL_VM_UFFD_CTX);
+			if (prev)
+				vma = prev;
+			else
+				prev = vma;
+		}
 		vma->vm_flags = new_flags;
 		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
 	}
-skip_mm:
 	up_write(&mm->mmap_sem);
 	mmput(mm);
 wakeup:
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 7fcf7569743f..7bd7534f5051 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -547,63 +547,12 @@ xfs_file_compat_ioctl(
 	struct inode		*inode = file_inode(filp);
 	struct xfs_inode	*ip = XFS_I(inode);
 	struct xfs_mount	*mp = ip->i_mount;
-	void			__user *arg = (void __user *)p;
+	void			__user *arg = compat_ptr(p);
 	int			error;
 
 	trace_xfs_file_compat_ioctl(ip);
 
 	switch (cmd) {
-	/* No size or alignment issues on any arch */
-	case XFS_IOC_DIOINFO:
-	case XFS_IOC_FSGEOMETRY_V4:
-	case XFS_IOC_FSGEOMETRY:
-	case XFS_IOC_AG_GEOMETRY:
-	case XFS_IOC_FSGETXATTR:
-	case XFS_IOC_FSSETXATTR:
-	case XFS_IOC_FSGETXATTRA:
-	case XFS_IOC_FSSETDM:
-	case XFS_IOC_GETBMAP:
-	case XFS_IOC_GETBMAPA:
-	case XFS_IOC_GETBMAPX:
-	case XFS_IOC_FSCOUNTS:
-	case XFS_IOC_SET_RESBLKS:
-	case XFS_IOC_GET_RESBLKS:
-	case XFS_IOC_FSGROWFSLOG:
-	case XFS_IOC_GOINGDOWN:
-	case XFS_IOC_ERROR_INJECTION:
-	case XFS_IOC_ERROR_CLEARALL:
-	case FS_IOC_GETFSMAP:
-	case XFS_IOC_SCRUB_METADATA:
-	case XFS_IOC_BULKSTAT:
-	case XFS_IOC_INUMBERS:
-		return xfs_file_ioctl(filp, cmd, p);
-#if !defined(BROKEN_X86_ALIGNMENT) || defined(CONFIG_X86_X32)
-	/*
-	 * These are handled fine if no alignment issues. To support x32
-	 * which uses native 64-bit alignment we must emit these cases in
-	 * addition to the ia-32 compat set below.
-	 */
-	case XFS_IOC_ALLOCSP:
-	case XFS_IOC_FREESP:
-	case XFS_IOC_RESVSP:
-	case XFS_IOC_UNRESVSP:
-	case XFS_IOC_ALLOCSP64:
-	case XFS_IOC_FREESP64:
-	case XFS_IOC_RESVSP64:
-	case XFS_IOC_UNRESVSP64:
-	case XFS_IOC_FSGEOMETRY_V1:
-	case XFS_IOC_FSGROWFSDATA:
-	case XFS_IOC_FSGROWFSRT:
-	case XFS_IOC_ZERO_RANGE:
-#ifdef CONFIG_X86_X32
-	/*
-	 * x32 special: this gets a different cmd number from the ia-32 compat
-	 * case below; the associated data will match native 64-bit alignment.
-	 */
-	case XFS_IOC_SWAPEXT:
-#endif
-		return xfs_file_ioctl(filp, cmd, p);
-#endif
 #if defined(BROKEN_X86_ALIGNMENT)
 	case XFS_IOC_ALLOCSP_32:
 	case XFS_IOC_FREESP_32:
@@ -705,6 +654,7 @@ xfs_file_compat_ioctl(
 	case XFS_IOC_FSSETDM_BY_HANDLE_32:
 		return xfs_compat_fssetdm_by_handle(filp, arg);
 	default:
-		return -ENOIOCTLCMD;
+		/* try the native version */
+		return xfs_file_ioctl(filp, cmd, (unsigned long)arg);
 	}
 }
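
The one functional change to arg above is routing the user pointer through compat_ptr(), which widens the 32-bit value a compat task passed in; on most architectures that amounts to zero-extension (s390 additionally masks the top bit), which a plain cast of a possibly sign-extended register value does not guarantee. A userspace model of the widening, with an invented helper name:

#include <stdint.h>
#include <stdio.h>

/* A 32-bit caller hands the kernel a 32-bit token; the 64-bit side must
 * widen it explicitly and unsigned, never by sign extension. */
static void *compat_ptr_demo(uint32_t uptr)
{
	return (void *)(uintptr_t)uptr;	/* zero-extend the 32-bit token */
}

int main(void)
{
	uint32_t token = 0xdeadbeef;	/* high bit set: sign extension would corrupt it */

	printf("widened: %p\n", compat_ptr_demo(token));
	return 0;
}
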
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index ff3c1fae5357..fe285d123d69 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -793,6 +793,7 @@ xfs_setattr_nonsize(
 
 out_cancel:
 	xfs_trans_cancel(tp);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out_dqrele:
 	xfs_qm_dqrele(udqp);
 	xfs_qm_dqrele(gdqp);
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 0c954cad7449..a339bd5fa260 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -32,7 +32,7 @@ xfs_break_leased_layouts(
 	struct xfs_inode	*ip = XFS_I(inode);
 	int			error;
 
-	while ((error = break_layout(inode, false) == -EWOULDBLOCK)) {
+	while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
 		xfs_iunlock(ip, *iolock);
 		*did_unlock = true;
 		error = break_layout(inode, true);
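
The xfs_pnfs.c fix above is purely about operator precedence: == binds tighter than =, so the unparenthesized form assigned the *comparison result* (0 or 1) to error instead of the function's return value. A compilable demonstration of both forms:

#include <errno.h>
#include <stdio.h>

static int break_layout(int blocking)
{
	(void)blocking;
	return -EWOULDBLOCK;	/* always "would block" for the demo */
}

int main(void)
{
	int error;

	/* Buggy: parsed as error = (break_layout(0) == -EWOULDBLOCK) */
	error = break_layout(0) == -EWOULDBLOCK;
	printf("buggy: error = %d\n", error);		/* prints 1 */

	/* Fixed: parenthesize the assignment, then compare. */
	if ((error = break_layout(0)) == -EWOULDBLOCK)
		printf("fixed: error = %d\n", error);	/* prints -EWOULDBLOCK */
	return 0;
}
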
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index c4ec7afd1170..edbe37b7f636 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -1190,11 +1190,11 @@ xfs_reflink_remap_blocks(
 }
 
 /*
- * Grab the exclusive iolock for a data copy from src to dest, making
- * sure to abide vfs locking order (lowest pointer value goes first) and
- * breaking the pnfs layout leases on dest before proceeding. The loop
- * is needed because we cannot call the blocking break_layout() with the
- * src iolock held, and therefore have to back out both locks.
+ * Grab the exclusive iolock for a data copy from src to dest, making sure to
+ * abide vfs locking order (lowest pointer value goes first) and breaking the
+ * layout leases before proceeding. The loop is needed because we cannot call
+ * the blocking break_layout() with the iolocks held, and therefore have to
+ * back out both locks.
 */
 static int
 xfs_iolock_two_inodes_and_break_layout(
@@ -1203,33 +1203,44 @@ xfs_iolock_two_inodes_and_break_layout(
 {
 	int			error;
 
-retry:
-	if (src < dest) {
-		inode_lock_shared(src);
-		inode_lock_nested(dest, I_MUTEX_NONDIR2);
-	} else {
-		/* src >= dest */
-		inode_lock(dest);
-	}
+	if (src > dest)
+		swap(src, dest);
 
-	error = break_layout(dest, false);
-	if (error == -EWOULDBLOCK) {
-		inode_unlock(dest);
-		if (src < dest)
-			inode_unlock_shared(src);
+retry:
+	/* Wait to break both inodes' layouts before we start locking. */
+	error = break_layout(src, true);
+	if (error)
+		return error;
+	if (src != dest) {
 		error = break_layout(dest, true);
 		if (error)
 			return error;
-		goto retry;
 	}
+
+	/* Lock one inode and make sure nobody got in and leased it. */
+	inode_lock(src);
+	error = break_layout(src, false);
 	if (error) {
+		inode_unlock(src);
+		if (error == -EWOULDBLOCK)
+			goto retry;
+		return error;
+	}
+
+	if (src == dest)
+		return 0;
+
+	/* Lock the other inode and make sure nobody got in and leased it. */
+	inode_lock_nested(dest, I_MUTEX_NONDIR2);
+	error = break_layout(dest, false);
+	if (error) {
+		inode_unlock(src);
 		inode_unlock(dest);
-		if (src < dest)
-			inode_unlock_shared(src);
+		if (error == -EWOULDBLOCK)
+			goto retry;
 		return error;
 	}
-	if (src > dest)
-		inode_lock_shared_nested(src, I_MUTEX_NONDIR2);
+
 	return 0;
 }
 
@@ -1247,10 +1258,10 @@ xfs_reflink_remap_unlock(
 
 	xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
 	if (!same_inode)
-		xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
+		xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
 	inode_unlock(inode_out);
 	if (!same_inode)
-		inode_unlock_shared(inode_in);
+		inode_unlock(inode_in);
 }
 
 /*
@@ -1325,7 +1336,7 @@ xfs_reflink_remap_prep(
 	if (same_inode)
 		xfs_ilock(src, XFS_MMAPLOCK_EXCL);
 	else
-		xfs_lock_two_inodes(src, XFS_MMAPLOCK_SHARED, dest,
+		xfs_lock_two_inodes(src, XFS_MMAPLOCK_EXCL, dest,
 				    XFS_MMAPLOCK_EXCL);
 
 	/* Check file eligibility and prepare for block sharing. */
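
The rewritten xfs_iolock_two_inodes_and_break_layout() follows a common concurrency shape: wait for the blocking condition with no locks held, take the locks in a fixed order, re-check the condition non-blockingly, and on contention back out completely before waiting and retrying. A small pthread sketch of that shape, with a plain flag standing in for the layout lease; everything here is illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;
static int lease_held;			/* stand-in for an outstanding lease */

static void wait_for_lease(void)	/* blocking wait: must run lockless */
{
	lease_held = 0;
}

static void lock_both(void)
{
retry:
	if (lease_held)
		wait_for_lease();	/* wait before taking any lock */

	pthread_mutex_lock(&a);
	pthread_mutex_lock(&b);
	if (lease_held) {		/* someone got in before we locked */
		pthread_mutex_unlock(&b);
		pthread_mutex_unlock(&a);
		goto retry;		/* back out fully, wait, try again */
	}
}

int main(void)
{
	lease_held = 1;
	lock_both();
	puts("locked with no lease outstanding");
	pthread_mutex_unlock(&b);
	pthread_mutex_unlock(&a);
	return 0;
}
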
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
index 5e58bb29b1a3..11cdc7c60480 100644
--- a/include/linux/ceph/buffer.h
+++ b/include/linux/ceph/buffer.h
@@ -30,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
 
 static inline void ceph_buffer_put(struct ceph_buffer *b)
 {
-	kref_put(&b->kref, ceph_buffer_release);
+	if (b)
+		kref_put(&b->kref, ceph_buffer_release);
 }
 
 extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
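
ceph_buffer_put() now tolerates NULL, in the same spirit as free(NULL), so every error path can drop its reference unconditionally. A self-contained sketch of a NULL-safe put over a plain refcount; the struct and names are invented:

#include <stdio.h>
#include <stdlib.h>

struct buffer {
	int refs;
	char *data;
};

/* Like free(NULL), a put on a NULL object is a no-op, which lets
 * callers write one unconditional cleanup path. */
static void buffer_put(struct buffer *b)
{
	if (!b)
		return;
	if (--b->refs == 0) {
		free(b->data);
		free(b);
	}
}

int main(void)
{
	struct buffer *b = calloc(1, sizeof(*b));

	b->refs = 1;
	buffer_put(NULL);	/* fine: no-op */
	buffer_put(b);		/* drops the last reference */
	puts("done");
	return 0;
}
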
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index c05d4e661489..03f8e98e3bcc 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -160,10 +160,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
 		gfp_t gfp)
 {
-	int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
-	size_t align = get_order(PAGE_ALIGN(size));
-
-	return alloc_pages_node(node, gfp, align);
+	return NULL;
 }
 
 static inline void dma_free_contiguous(struct device *dev, struct page *page,
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 40915b461f18..f757a58191a6 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -241,30 +241,6 @@ static inline int irq_to_gpio(unsigned irq)
 	return -EINVAL;
 }
 
-static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
-		       unsigned int gpio_offset, unsigned int pin_offset,
-		       unsigned int npins)
-{
-	WARN_ON(1);
-	return -EINVAL;
-}
-
-static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
-			    struct pinctrl_dev *pctldev,
-			    unsigned int gpio_offset, const char *pin_group)
-{
-	WARN_ON(1);
-	return -EINVAL;
-}
-
-static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
-{
-	WARN_ON(1);
-}
-
 static inline int devm_gpio_request(struct device *dev, unsigned gpio,
 				    const char *label)
 {
diff --git a/include/linux/netfilter/nf_conntrack_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h
index 7a6871ac8784..74c6f9241944 100644
--- a/include/linux/netfilter/nf_conntrack_h323_types.h
+++ b/include/linux/netfilter/nf_conntrack_h323_types.h
@@ -4,6 +4,9 @@
  * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
  */
 
+#ifndef _NF_CONNTRACK_H323_TYPES_H
+#define _NF_CONNTRACK_H323_TYPES_H
+
 typedef struct TransportAddress_ipAddress { /* SEQUENCE */
 	int options;		/* No use */
 	unsigned int ip;
@@ -931,3 +934,5 @@ typedef struct RasMessage { /* CHOICE */
 		InfoRequestResponse infoRequestResponse;
 	};
 } RasMessage;
+
+#endif /* _NF_CONNTRACK_H323_TYPES_H */
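
The hunk above adds the standard include guard, so including this header twice in one translation unit no longer redefines every typedef. The same pattern on an invented header, for reference:

/* example_types.h: without the guard, a second #include of this file
 * in the same translation unit redefines the type and fails to build. */
#ifndef EXAMPLE_TYPES_H
#define EXAMPLE_TYPES_H

typedef struct transport_address {
	int options;
	unsigned int ip;
} transport_address;

#endif /* EXAMPLE_TYPES_H */
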
diff --git a/include/linux/signal.h b/include/linux/signal.h
index b5d99482d3fe..1a5f88316b08 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -282,6 +282,9 @@ extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping);
 extern void exit_signals(struct task_struct *tsk);
 extern void kernel_sigaction(int, __sighandler_t);
 
+#define SIG_KTHREAD ((__force __sighandler_t)2)
+#define SIG_KTHREAD_KERNEL ((__force __sighandler_t)3)
+
 static inline void allow_signal(int sig)
 {
 	/*
@@ -289,7 +292,17 @@ static inline void allow_signal(int sig)
 	 * know it'll be handled, so that they don't get converted to
 	 * SIGKILL or just silently dropped.
 	 */
-	kernel_sigaction(sig, (__force __sighandler_t)2);
+	kernel_sigaction(sig, SIG_KTHREAD);
+}
+
+static inline void allow_kernel_signal(int sig)
+{
+	/*
+	 * Kernel threads handle their own signals. Let the signal code
+	 * know signals sent by the kernel will be handled, so that they
+	 * don't get silently dropped.
+	 */
+	kernel_sigaction(sig, SIG_KTHREAD_KERNEL);
 }
 
 static inline void disallow_signal(int sig)
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index baa3ecdb882f..27536b961552 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -98,7 +98,6 @@ typedef void (*rpc_action)(struct rpc_task *);
 
 struct rpc_call_ops {
 	void (*rpc_call_prepare)(struct rpc_task *, void *);
-	void (*rpc_call_prepare_transmit)(struct rpc_task *, void *);
 	void (*rpc_call_done)(struct rpc_task *, void *);
 	void (*rpc_count_stats)(struct rpc_task *, void *);
 	void (*rpc_release)(void *);
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 7acb953298a7..84ff2844df2a 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -57,6 +57,7 @@ struct tk_read_base {
 * @cs_was_changed_seq:	The sequence number of clocksource change events
 * @next_leap_ktime:	CLOCK_MONOTONIC time value of a pending leap-second
 * @raw_sec:		CLOCK_MONOTONIC_RAW time in seconds
+ * @monotonic_to_boot:	CLOCK_MONOTONIC to CLOCK_BOOTTIME offset
 * @cycle_interval:	Number of clock cycles in one NTP interval
 * @xtime_interval:	Number of clock shifted nano seconds in one NTP
 *			interval.
@@ -84,6 +85,9 @@ struct tk_read_base {
 *
 * wall_to_monotonic is no longer the boot time, getboottime must be
 * used instead.
+ *
+ * @monotonic_to_boottime is a timespec64 representation of @offs_boot to
+ * accelerate the VDSO update for CLOCK_BOOTTIME.
 */
 struct timekeeper {
 	struct tk_read_base	tkr_mono;
@@ -99,6 +103,7 @@ struct timekeeper {
 	u8			cs_was_changed_seq;
 	ktime_t			next_leap_ktime;
 	u64			raw_sec;
+	struct timespec64	monotonic_to_boot;
 
 	/* The following members are for timekeeping internal use */
 	u64			cycle_interval;
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index becdad576859..3f62b347b04a 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -206,7 +206,7 @@ static inline int ipv6_mc_may_pull(struct sk_buff *skb,
 				   unsigned int len)
 {
 	if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len)
-		return -EINVAL;
+		return 0;
 
 	return pskb_may_pull(skb, len);
 }
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index cb668bc2692d..ab40d7afdc54 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -52,7 +52,7 @@ struct bpf_prog;
 #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
 
 struct net {
-	refcount_t		passive;	/* To decided when the network
+	refcount_t		passive;	/* To decide when the network
 						 * namespace should be freed.
 						 */
 	refcount_t		count;		/* To decided when the network
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 25f1f9a8419b..95f766c31c90 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -141,12 +141,6 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh)
 
 		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
 		rc = nh_grp->num_nh;
-	} else {
-		const struct nh_info *nhi;
-
-		nhi = rcu_dereference_rtnl(nh->nh_info);
-		if (nhi->reject_nh)
-			rc = 0;
 	}
 
 	return rc;
diff --git a/include/net/route.h b/include/net/route.h
index 630a0493f1f3..dfce19c9fa96 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -233,7 +233,7 @@ void rt_del_uncached_list(struct rtable *rt);
 
 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
 		       u32 table_id, struct fib_info *fi,
-		       int *fa_index, int fa_start);
+		       int *fa_index, int fa_start, unsigned int flags);
 
 static inline void ip_rt_put(struct rtable *rt)
 {
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index b0fc6b26bdf5..83df1ec6664e 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -105,8 +105,7 @@ struct rdma_restrack_entry {
 };
 
 int rdma_restrack_count(struct ib_device *dev,
-			enum rdma_restrack_type type,
-			struct pid_namespace *ns);
+			enum rdma_restrack_type type);
 
 void rdma_restrack_kadd(struct rdma_restrack_entry *res);
 void rdma_restrack_uadd(struct rdma_restrack_entry *res);
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h
index 50f49e043668..d1a93c73f006 100644
--- a/include/soc/arc/mcip.h
+++ b/include/soc/arc/mcip.h
@@ -46,7 +46,9 @@ struct mcip_cmd {
 #define CMD_IDU_ENABLE			0x71
 #define CMD_IDU_DISABLE			0x72
 #define CMD_IDU_SET_MODE		0x74
+#define CMD_IDU_READ_MODE		0x75
 #define CMD_IDU_SET_DEST		0x76
+#define CMD_IDU_ACK_CIRQ		0x79
 #define CMD_IDU_SET_MASK		0x7C
 
 #define IDU_M_TRIG_LEVEL		0x0
@@ -119,4 +121,13 @@ static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param,
 	__mcip_cmd(cmd, param);
 }
 
+/*
+ * Read MCIP register
+ */
+static inline unsigned int __mcip_cmd_read(unsigned int cmd, unsigned int param)
+{
+	__mcip_cmd(cmd, param);
+	return read_aux_reg(ARC_REG_MCIP_READBACK);
+}
+
 #endif
diff --git a/include/uapi/linux/jffs2.h b/include/uapi/linux/jffs2.h
index a18b719f49d4..784ba0b9690a 100644
--- a/include/uapi/linux/jffs2.h
+++ b/include/uapi/linux/jffs2.h
@@ -77,11 +77,6 @@
 
 #define JFFS2_ACL_VERSION		0x0001
 
-// Maybe later...
-//#define JFFS2_NODETYPE_CHECKPOINT (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3)
-//#define JFFS2_NODETYPE_OPTIONS (JFFS2_FEATURE_RWCOMPAT_COPY | JFFS2_NODE_ACCURATE | 4)
-
-
 #define JFFS2_INO_FLAG_PREREAD	  1	/* Do read_inode() for this one at
 					   mount time, don't wait for it to
 					   happen later */
diff --git a/include/uapi/linux/netfilter/xt_nfacct.h b/include/uapi/linux/netfilter/xt_nfacct.h
index 5c8a4d760ee3..b5123ab8d54a 100644
--- a/include/uapi/linux/netfilter/xt_nfacct.h
+++ b/include/uapi/linux/netfilter/xt_nfacct.h
@@ -11,4 +11,9 @@ struct xt_nfacct_match_info {
 	struct nf_acct	*nfacct;
 };
 
+struct xt_nfacct_match_info_v1 {
+	char		name[NFACCT_NAME_MAX];
+	struct nf_acct	*nfacct __attribute__((aligned(8)));
+};
+
 #endif /* _XT_NFACCT_MATCH_H */
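
The new xt_nfacct_match_info_v1 pins the kernel-pointer member to 8-byte alignment so the structure's layout is identical whether userspace is 32-bit or 64-bit. This compiles anywhere and prints the effect (on a 32-bit build v0 and v1 differ in size, on 64-bit they match); the struct names are invented:

#include <stddef.h>
#include <stdio.h>

struct match_v0 {
	char name[32];
	void *cookie;					/* 4 or 8 bytes wide */
};

struct match_v1 {
	char name[32];
	void *cookie __attribute__((aligned(8)));	/* fixed 8-byte slot */
};

int main(void)
{
	printf("v0: size=%zu offset=%zu\n", sizeof(struct match_v0),
	       offsetof(struct match_v0, cookie));
	printf("v1: size=%zu offset=%zu\n", sizeof(struct match_v1),
	       offsetof(struct match_v1, cookie));
	return 0;
}
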
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index fd6b5f66e2c5..cba368e55863 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -250,6 +250,7 @@ struct rds_info_rdma_connection {
 	__u32		rdma_mr_max;
 	__u32		rdma_mr_size;
 	__u8		tos;
+	__u8		sl;
 	__u32		cache_allocs;
 };
 
@@ -265,6 +266,7 @@ struct rds6_info_rdma_connection {
 	__u32		rdma_mr_max;
 	__u32		rdma_mr_size;
 	__u8		tos;
+	__u8		sl;
 	__u32		cache_allocs;
 };
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5d141f16f6fa..272071e9112f 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1707,20 +1707,26 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
 	if (err)
 		goto free_used_maps;
 
-	err = bpf_prog_new_fd(prog);
-	if (err < 0) {
-		/* failed to allocate fd.
-		 * bpf_prog_put() is needed because the above
-		 * bpf_prog_alloc_id() has published the prog
-		 * to the userspace and the userspace may
-		 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
-		 */
-		bpf_prog_put(prog);
-		return err;
-	}
-
+	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
+	 * effectively publicly exposed. However, retrieving via
+	 * bpf_prog_get_fd_by_id() will take another reference,
+	 * therefore it cannot be gone underneath us.
+	 *
+	 * Only for the time /after/ successful bpf_prog_new_fd()
+	 * and before returning to userspace, we might just hold
+	 * one reference and any parallel close on that fd could
+	 * rip everything out. Hence, below notifications must
+	 * happen before bpf_prog_new_fd().
+	 *
+	 * Also, any failure handling from this point onwards must
+	 * be using bpf_prog_put() given the program is exposed.
+	 */
 	bpf_prog_kallsyms_add(prog);
 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
+
+	err = bpf_prog_new_fd(prog);
+	if (err < 0)
+		bpf_prog_put(prog);
 	return err;
 
 free_used_maps:
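
The reordering above encodes a general rule: once a handle to an object is handed out, the other side may drop it at any moment, so all remaining setup must happen before the handle is created, and any later failure must unwind with a reference drop rather than a direct free. A sketch of that ordering with invented names; the fd number is fake:

#include <stdio.h>

struct prog { int refs; };

static void prog_put(struct prog *p)
{
	if (--p->refs == 0)
		puts("freed");
}

static int handle_create(struct prog *p, int fail)
{
	if (fail)
		return -1;	/* caller must prog_put(): object may already be shared */
	p->refs++;		/* the new handle owns one reference */
	return 3;		/* pretend fd number */
}

int main(void)
{
	struct prog p = { .refs = 1 };

	/* ...all notifications and side effects go here, before the handle... */
	int fd = handle_create(&p, 0);
	if (fd < 0)
		prog_put(&p);	/* unwind by dropping a reference, never free() */
	printf("fd=%d refs=%d\n", fd, p.refs);
	prog_put(&p);		/* drop our setup reference */
	prog_put(&p);		/* stands in for close(fd) dropping the handle's ref */
	return 0;
}
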
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 10c0ff93f52b..16d66bd7af09 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -985,9 +985,6 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
 	reg->smax_value = S64_MAX;
 	reg->umin_value = 0;
 	reg->umax_value = U64_MAX;
-
-	/* constant backtracking is enabled for root only for now */
-	reg->precise = capable(CAP_SYS_ADMIN) ? false : true;
 }
 
 /* Mark a register as having a completely unknown (scalar) value. */
@@ -1014,7 +1011,11 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
 		__mark_reg_not_init(regs + regno);
 		return;
 	}
-	__mark_reg_unknown(regs + regno);
+	regs += regno;
+	__mark_reg_unknown(regs);
+	/* constant backtracking is enabled for root without bpf2bpf calls */
+	regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
+			true : false;
 }
 
 static void __mark_reg_not_init(struct bpf_reg_state *reg)
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 2bd410f934b3..69cfb4345388 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -230,9 +230,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 */
 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
 {
-	int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
-	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	size_t align = get_order(PAGE_ALIGN(size));
+	size_t count = size >> PAGE_SHIFT;
 	struct page *page = NULL;
 	struct cma *cma = NULL;
 
@@ -243,14 +241,12 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
 
 	/* CMA can be used only in the context which permits sleeping */
 	if (cma && gfpflags_allow_blocking(gfp)) {
+		size_t align = get_order(size);
 		size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
 
 		page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
 	}
 
-	/* Fallback allocation of normal pages */
-	if (!page)
-		page = alloc_pages_node(node, gfp, align);
 	return page;
 }
 
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 795c9b095d75..706113c6bebc 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -85,6 +85,8 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
+	size_t alloc_size = PAGE_ALIGN(size);
+	int node = dev_to_node(dev);
 	struct page *page = NULL;
 	u64 phys_mask;
 
@@ -95,8 +97,14 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 	gfp &= ~__GFP_ZERO;
 	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 			&phys_mask);
+	page = dma_alloc_contiguous(dev, alloc_size, gfp);
+	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+		dma_free_contiguous(dev, page, alloc_size);
+		page = NULL;
+	}
 again:
-	page = dma_alloc_contiguous(dev, size, gfp);
+	if (!page)
+		page = alloc_pages_node(node, gfp, get_order(alloc_size));
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 		dma_free_contiguous(dev, page, size);
 		page = NULL;
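
Taken together, the two DMA hunks move the "fall back to plain pages" step out of dma_alloc_contiguous() and into its one real caller, which now tries the contiguous allocator first, validates the result, and only then falls back. The shape in miniature, with malloc() standing in for both allocators; names are invented:

#include <stdio.h>
#include <stdlib.h>

static void *cma_alloc_demo(size_t size, int cma_available)
{
	return cma_available ? malloc(size) : NULL;	/* stand-in for CMA */
}

/* Try the specialized allocator first; keep the fallback in exactly one
 * place, the caller, instead of duplicating it in every helper. */
static void *alloc_with_fallback(size_t size, int cma_available)
{
	void *p = cma_alloc_demo(size, cma_available);

	if (!p)			/* fallback allocation of normal memory */
		p = malloc(size);
	return p;
}

int main(void)
{
	void *p = alloc_with_fallback(4096, 0);

	printf("got %p via fallback\n", p);
	free(p);
	return 0;
}
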
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 9484e88dabc2..9be995fc3c5a 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -295,6 +295,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
 	}
 }
 
+static void irq_sysfs_del(struct irq_desc *desc)
+{
+	/*
+	 * If irq_sysfs_init() has not yet been invoked (early boot), then
+	 * irq_kobj_base is NULL and the descriptor was never added.
+	 * kobject_del() complains about a object with no parent, so make
+	 * it conditional.
+	 */
+	if (irq_kobj_base)
+		kobject_del(&desc->kobj);
+}
+
 static int __init irq_sysfs_init(void)
 {
 	struct irq_desc *desc;
@@ -325,6 +337,7 @@ static struct kobj_type irq_kobj_type = {
 };
 
 static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
+static void irq_sysfs_del(struct irq_desc *desc) {}
 
 #endif /* CONFIG_SYSFS */
 
@@ -438,7 +451,7 @@ static void free_desc(unsigned int irq)
 	 * The sysfs entry must be serialized against a concurrent
 	 * irq_sysfs_init() as well.
 	 */
-	kobject_del(&desc->kobj);
+	irq_sysfs_del(desc);
 	delete_irq_desc(irq);
 
 	/*
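
The irq_sysfs_del() wrapper makes teardown mirror conditional setup: if registration was skipped because the sysfs base was not ready yet, unregistration must be skipped by the same test. A trivial illustration of the symmetry, with invented names:

#include <stdio.h>

static int kobj_base_ready;	/* set once the registry exists */

static void obj_add(void)
{
	if (kobj_base_ready)
		puts("added");
}

static void obj_del(void)
{
	if (kobj_base_ready)	/* same guard as the add path */
		puts("deleted");
}

int main(void)
{
	obj_add();		/* base not ready: nothing registered */
	obj_del();		/* so nothing to delete either */
	kobj_base_ready = 1;
	obj_add();
	obj_del();
	return 0;
}
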
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9873fc627d61..d9770a5393c8 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -470,6 +470,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
 */
 static void do_optimize_kprobes(void)
 {
+	lockdep_assert_held(&text_mutex);
 	/*
 	 * The optimization/unoptimization refers online_cpus via
 	 * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -487,9 +488,7 @@ static void do_optimize_kprobes(void)
 	    list_empty(&optimizing_list))
 		return;
 
-	mutex_lock(&text_mutex);
 	arch_optimize_kprobes(&optimizing_list);
-	mutex_unlock(&text_mutex);
 }
 
 /*
@@ -500,6 +499,7 @@ static void do_unoptimize_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
+	lockdep_assert_held(&text_mutex);
 	/* See comment in do_optimize_kprobes() */
 	lockdep_assert_cpus_held();
 
@@ -507,7 +507,6 @@ static void do_unoptimize_kprobes(void)
 	if (list_empty(&unoptimizing_list))
 		return;
 
-	mutex_lock(&text_mutex);
 	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
 	/* Loop free_list for disarming */
 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
@@ -524,7 +523,6 @@ static void do_unoptimize_kprobes(void)
 		} else
 			list_del_init(&op->list);
 	}
-	mutex_unlock(&text_mutex);
 }
 
 /* Reclaim all kprobes on the free_list */
@@ -556,6 +554,7 @@ static void kprobe_optimizer(struct work_struct *work)
 {
 	mutex_lock(&kprobe_mutex);
 	cpus_read_lock();
+	mutex_lock(&text_mutex);
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
 
@@ -583,6 +582,7 @@ static void kprobe_optimizer(struct work_struct *work)
 	do_free_cleaned_kprobes();
 
 	mutex_unlock(&module_mutex);
+	mutex_unlock(&text_mutex);
 	cpus_read_unlock();
 	mutex_unlock(&kprobe_mutex);
 
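
The kprobes hunks hoist text_mutex out of the helpers into kprobe_optimizer() and replace the helpers' own lock/unlock pairs with lockdep_assert_held(): the caller owns the lock for the whole batch, the helpers merely document the precondition. A userspace approximation using an explicit owner field, since pthreads has no lockdep; names are invented:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t text_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t owner;		/* who currently holds text_lock */

static void helper_step(const char *what)
{
	/* ~ lockdep_assert_held(&text_mutex): assert, don't re-lock */
	assert(pthread_equal(owner, pthread_self()));
	printf("step: %s\n", what);
}

int main(void)
{
	pthread_mutex_lock(&text_lock);
	owner = pthread_self();

	helper_step("optimize");	/* whole batch under one acquisition */
	helper_step("unoptimize");

	pthread_mutex_unlock(&text_lock);
	return 0;
}
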
diff --git a/kernel/module.c b/kernel/module.c
index 5933395af9a0..9ee93421269c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -65,9 +65,9 @@
 /*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
- * only when CONFIG_STRICT_MODULE_RWX=y
+ * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
 */
-#ifdef CONFIG_STRICT_MODULE_RWX
+#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
 # define debug_align(X) ALIGN(X, PAGE_SIZE)
 #else
 # define debug_align(X) (X)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2b037f195473..010d578118d6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3904,7 +3904,7 @@ void __noreturn do_task_dead(void)
 
 static inline void sched_submit_work(struct task_struct *tsk)
 {
-	if (!tsk->state || tsk_is_pi_blocked(tsk))
+	if (!tsk->state)
 		return;
 
 	/*
@@ -3920,6 +3920,9 @@ static inline void sched_submit_work(struct task_struct *tsk)
 		preempt_enable_no_resched();
 	}
 
+	if (tsk_is_pi_blocked(tsk))
+		return;
+
 	/*
 	 * If we are going to sleep and we have plugged IO queued,
 	 * make sure to submit it to avoid deadlocks.
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 23fbbcc414d5..6e52b67b420e 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -1131,7 +1131,15 @@ static void psi_trigger_destroy(struct kref *ref)
 	 * deadlock while waiting for psi_poll_work to acquire trigger_lock
 	 */
 	if (kworker_to_destroy) {
+		/*
+		 * After the RCU grace period has expired, the worker
+		 * can no longer be found through group->poll_kworker.
+		 * But it might have been already scheduled before
+		 * that - deschedule it cleanly before destroying it.
+		 */
 		kthread_cancel_delayed_work_sync(&group->poll_work);
+		atomic_set(&group->poll_scheduled, 0);
+
 		kthread_destroy_worker(kworker_to_destroy);
 	}
 	kfree(t);
diff --git a/kernel/signal.c b/kernel/signal.c
index e667be6907d7..534fec266a33 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -90,6 +90,11 @@ static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
 	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
 		return true;
 
+	/* Only allow kernel generated signals to this kthread */
+	if (unlikely((t->flags & PF_KTHREAD) &&
+		     (handler == SIG_KTHREAD_KERNEL) && !force))
+		return true;
+
 	return sig_handler_ignored(handler, sig);
 }
 
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d911c8470149..ca69290bee2a 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -146,6 +146,11 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
 static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
 {
 	tk->offs_boot = ktime_add(tk->offs_boot, delta);
+	/*
+	 * Timespec representation for VDSO update to avoid 64bit division
+	 * on every update.
+	 */
+	tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
 }
 
 /*
diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c
index 8cf3596a4ce6..4bc37ac3bb05 100644
--- a/kernel/time/vsyscall.c
+++ b/kernel/time/vsyscall.c
@@ -17,7 +17,7 @@ static inline void update_vdso_data(struct vdso_data *vdata,
 				    struct timekeeper *tk)
 {
 	struct vdso_timestamp *vdso_ts;
-	u64 nsec;
+	u64 nsec, sec;
 
 	vdata[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last;
 	vdata[CS_HRES_COARSE].mask = tk->tkr_mono.mask;
@@ -45,23 +45,27 @@ static inline void update_vdso_data(struct vdso_data *vdata,
 	}
 	vdso_ts->nsec = nsec;
 
-	/* CLOCK_MONOTONIC_RAW */
-	vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
-	vdso_ts->sec = tk->raw_sec;
-	vdso_ts->nsec = tk->tkr_raw.xtime_nsec;
+	/* Copy MONOTONIC time for BOOTTIME */
+	sec = vdso_ts->sec;
+	/* Add the boot offset */
+	sec += tk->monotonic_to_boot.tv_sec;
+	nsec += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift;
 
 	/* CLOCK_BOOTTIME */
 	vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
-	vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
-	nsec = tk->tkr_mono.xtime_nsec;
-	nsec += ((u64)(tk->wall_to_monotonic.tv_nsec +
-		       ktime_to_ns(tk->offs_boot)) << tk->tkr_mono.shift);
+	vdso_ts->sec = sec;
+
 	while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
 		nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
 		vdso_ts->sec++;
 	}
 	vdso_ts->nsec = nsec;
 
+	/* CLOCK_MONOTONIC_RAW */
+	vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
+	vdso_ts->sec = tk->raw_sec;
+	vdso_ts->nsec = tk->tkr_raw.xtime_nsec;
+
 	/* CLOCK_TAI */
 	vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
 	vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset;
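
The carry loop in the hunk above works on nanoseconds that are kept left-shifted by the clocksource shift, so one "second" is NSEC_PER_SEC << shift and any overflow is folded into the seconds field before the value is published. The same normalization in isolation:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned int shift = 8;		/* example clocksource shift */
	uint64_t sec = 100;
	/* 2.25 seconds worth of shifted nanoseconds needing normalization */
	uint64_t nsec = (2 * NSEC_PER_SEC + 250000000ULL) << shift;

	while (nsec >= (NSEC_PER_SEC << shift)) {
		nsec -= NSEC_PER_SEC << shift;
		sec++;			/* move whole seconds out of nsec */
	}
	printf("sec=%llu nsec=%llu\n",
	       (unsigned long long)sec,
	       (unsigned long long)(nsec >> shift));	/* 102, 250000000 */
	return 0;
}
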
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 738065f765ab..de1f15969e27 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -32,6 +32,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/oom.h>
 #include <linux/numa.h>
+#include <linux/page_owner.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -2516,6 +2517,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	}
 
 	ClearPageCompound(head);
+
+	split_page_owner(head, HPAGE_PMD_ORDER);
+
 	/* See comment in __split_huge_page_tail() */
 	if (PageAnon(head)) {
 		/* Additional pin to swap cache */
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 2277b82902d8..95d16a42db6b 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -407,8 +407,14 @@ static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
 		return shadow_byte < 0 ||
 			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
-	else
-		return tag != (u8)shadow_byte;
+
+	/* else CONFIG_KASAN_SW_TAGS: */
+	if ((u8)shadow_byte == KASAN_TAG_INVALID)
+		return true;
+	if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
+		return true;
+
+	return false;
 }
 
 static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6f5c0c517c49..26e2999af608 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3260,6 +3260,60 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 	}
 }
 
+static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
+{
+	unsigned long stat[MEMCG_NR_STAT];
+	struct mem_cgroup *mi;
+	int node, cpu, i;
+
+	for (i = 0; i < MEMCG_NR_STAT; i++)
+		stat[i] = 0;
+
+	for_each_online_cpu(cpu)
+		for (i = 0; i < MEMCG_NR_STAT; i++)
+			stat[i] += raw_cpu_read(memcg->vmstats_percpu->stat[i]);
+
+	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
+		for (i = 0; i < MEMCG_NR_STAT; i++)
+			atomic_long_add(stat[i], &mi->vmstats[i]);
+
+	for_each_node(node) {
+		struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
+		struct mem_cgroup_per_node *pi;
+
+		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+			stat[i] = 0;
+
+		for_each_online_cpu(cpu)
+			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+				stat[i] += raw_cpu_read(
+					pn->lruvec_stat_cpu->count[i]);
+
+		for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
+			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+				atomic_long_add(stat[i], &pi->lruvec_stat[i]);
+	}
+}
+
+static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
+{
+	unsigned long events[NR_VM_EVENT_ITEMS];
+	struct mem_cgroup *mi;
+	int cpu, i;
+
+	for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
+		events[i] = 0;
+
+	for_each_online_cpu(cpu)
+		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
+			events[i] += raw_cpu_read(
+				memcg->vmstats_percpu->events[i]);
+
+	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
+		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
+			atomic_long_add(events[i], &mi->vmevents[i]);
+}
+
 #ifdef CONFIG_MEMCG_KMEM
 static int memcg_online_kmem(struct mem_cgroup *memcg)
 {
@@ -4682,6 +4736,12 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 {
 	int node;
 
+	/*
+	 * Flush percpu vmstats and vmevents to guarantee the value correctness
+	 * on parent's and all ancestor levels.
+	 */
+	memcg_flush_percpu_vmstats(memcg);
+	memcg_flush_percpu_vmevents(memcg);
 	for_each_node(node)
 		free_mem_cgroup_per_node_info(memcg, node);
 	free_percpu(memcg->vmstats_percpu);
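
The memcg hunks implement a common per-CPU counter discipline: updates go to cheap per-CPU slots, and before the owning object (or its per-CPU area) is freed, the accumulated deltas must be folded into the durable counters up the hierarchy, or their contribution is silently lost. The pattern in miniature, single-threaded for clarity:

#include <stdio.h>

#define NCPUS 4
#define NSTATS 2

static long percpu_stat[NCPUS][NSTATS];	/* cheap, unsynchronized updates */
static long global_stat[NSTATS];	/* durable totals */

/* Fold all per-CPU deltas into the global counters before teardown. */
static void flush_percpu(void)
{
	for (int cpu = 0; cpu < NCPUS; cpu++)
		for (int i = 0; i < NSTATS; i++) {
			global_stat[i] += percpu_stat[cpu][i];
			percpu_stat[cpu][i] = 0;
		}
}

int main(void)
{
	percpu_stat[0][0] = 5;
	percpu_stat[3][0] = 7;
	percpu_stat[1][1] = 2;
	flush_percpu();		/* called before freeing the per-CPU area */
	printf("stat0=%ld stat1=%ld\n", global_stat[0], global_stat[1]);
	return 0;
}
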
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 272c6de1bf4e..9c9194959271 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2238,27 +2238,12 @@ static int move_freepages(struct zone *zone,
 	unsigned int order;
 	int pages_moved = 0;
 
-#ifndef CONFIG_HOLES_IN_ZONE
-	/*
-	 * page_zone is not safe to call in this context when
-	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
-	 * anyway as we check zone boundaries in move_freepages_block().
-	 * Remove at a later date when no bug reports exist related to
-	 * grouping pages by mobility
-	 */
-	VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) &&
-		  pfn_valid(page_to_pfn(end_page)) &&
-		  page_zone(start_page) != page_zone(end_page));
-#endif
 	for (page = start_page; page <= end_page;) {
 		if (!pfn_valid_within(page_to_pfn(page))) {
 			page++;
 			continue;
 		}
 
-		/* Make sure we are not inadvertently changing nodes */
-		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
-
 		if (!PageBuddy(page)) {
 			/*
 			 * We assume that pages that could be isolated for
@@ -2273,6 +2258,10 @@ static int move_freepages(struct zone *zone,
 			continue;
 		}
 
+		/* Make sure we are not inadvertently changing nodes */
+		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
+		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
+
 		order = page_order(page);
 		move_to_free_area(page, &zone->free_area[order], migratetype);
 		page += 1 << order;
diff --git a/mm/z3fold.c b/mm/z3fold.c
index ed19d98c9dcd..e31cd9bd4ed5 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -41,6 +41,7 @@
41#include <linux/workqueue.h> 41#include <linux/workqueue.h>
42#include <linux/slab.h> 42#include <linux/slab.h>
43#include <linux/spinlock.h> 43#include <linux/spinlock.h>
44#include <linux/wait.h>
44#include <linux/zpool.h> 45#include <linux/zpool.h>
45#include <linux/magic.h> 46#include <linux/magic.h>
46 47
@@ -145,6 +146,8 @@ struct z3fold_header {
145 * @release_wq: workqueue for safe page release 146 * @release_wq: workqueue for safe page release
146 * @work: work_struct for safe page release 147 * @work: work_struct for safe page release
147 * @inode: inode for z3fold pseudo filesystem 148 * @inode: inode for z3fold pseudo filesystem
149 * @destroying: bool to stop migration once we start destruction
150 * @isolated: int to count the number of pages currently in isolation
148 * 151 *
149 * This structure is allocated at pool creation time and maintains metadata 152 * This structure is allocated at pool creation time and maintains metadata
150 * pertaining to a particular z3fold pool. 153 * pertaining to a particular z3fold pool.
@@ -163,8 +166,11 @@ struct z3fold_pool {
163 const struct zpool_ops *zpool_ops; 166 const struct zpool_ops *zpool_ops;
164 struct workqueue_struct *compact_wq; 167 struct workqueue_struct *compact_wq;
165 struct workqueue_struct *release_wq; 168 struct workqueue_struct *release_wq;
169 struct wait_queue_head isolate_wait;
166 struct work_struct work; 170 struct work_struct work;
167 struct inode *inode; 171 struct inode *inode;
172 bool destroying;
173 int isolated;
168}; 174};
169 175
170/* 176/*
@@ -769,6 +775,7 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
769 goto out_c; 775 goto out_c;
770 spin_lock_init(&pool->lock); 776 spin_lock_init(&pool->lock);
771 spin_lock_init(&pool->stale_lock); 777 spin_lock_init(&pool->stale_lock);
778 init_waitqueue_head(&pool->isolate_wait);
772 pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2); 779 pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
773 if (!pool->unbuddied) 780 if (!pool->unbuddied)
774 goto out_pool; 781 goto out_pool;
@@ -808,6 +815,15 @@ out:
808 return NULL; 815 return NULL;
809} 816}
810 817
818static bool pool_isolated_are_drained(struct z3fold_pool *pool)
819{
820 bool ret;
821
822 spin_lock(&pool->lock);
823 ret = pool->isolated == 0;
824 spin_unlock(&pool->lock);
825 return ret;
826}
811/** 827/**
812 * z3fold_destroy_pool() - destroys an existing z3fold pool 828 * z3fold_destroy_pool() - destroys an existing z3fold pool
813 * @pool: the z3fold pool to be destroyed 829 * @pool: the z3fold pool to be destroyed
@@ -817,6 +833,22 @@ out:
817static void z3fold_destroy_pool(struct z3fold_pool *pool) 833static void z3fold_destroy_pool(struct z3fold_pool *pool)
818{ 834{
819 kmem_cache_destroy(pool->c_handle); 835 kmem_cache_destroy(pool->c_handle);
836 /*
837 * We set pool->destroying under lock to ensure that
838 * z3fold_page_isolate() sees any changes to destroying. This way we
839 * avoid the need for any memory barriers.
840 */
841
842 spin_lock(&pool->lock);
843 pool->destroying = true;
844 spin_unlock(&pool->lock);
845
846 /*
847 * We need to ensure that no pages are being migrated while we destroy
848 * these workqueues, as migration can queue work on either of the
849 * workqueues.
850 */
851 wait_event(pool->isolate_wait, pool_isolated_are_drained(pool));
820 852
821 /* 853 /*
822 * We need to destroy pool->compact_wq before pool->release_wq, 854 * We need to destroy pool->compact_wq before pool->release_wq,
@@ -1307,6 +1339,28 @@ static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
1307 return atomic64_read(&pool->pages_nr); 1339 return atomic64_read(&pool->pages_nr);
1308} 1340}
1309 1341
1342/*
1343 * z3fold_dec_isolated() expects to be called while pool->lock is held.
1344 */
1345static void z3fold_dec_isolated(struct z3fold_pool *pool)
1346{
1347 assert_spin_locked(&pool->lock);
1348 VM_BUG_ON(pool->isolated <= 0);
1349 pool->isolated--;
1350
1351 /*
1352 * If we have no more isolated pages, we have to see if
1353 * z3fold_destroy_pool() is waiting for a signal.
1354 */
1355 if (pool->isolated == 0 && waitqueue_active(&pool->isolate_wait))
1356 wake_up_all(&pool->isolate_wait);
1357}
1358
1359static void z3fold_inc_isolated(struct z3fold_pool *pool)
1360{
1361 pool->isolated++;
1362}
1363
1310static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) 1364static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1311{ 1365{
1312 struct z3fold_header *zhdr; 1366 struct z3fold_header *zhdr;
@@ -1333,6 +1387,33 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1333 spin_lock(&pool->lock); 1387 spin_lock(&pool->lock);
1334 if (!list_empty(&page->lru)) 1388 if (!list_empty(&page->lru))
1335 list_del(&page->lru); 1389 list_del(&page->lru);
1390 /*
1391 * We need to check for destruction while holding pool->lock, as
1392 * otherwise destruction could see 0 isolated pages, and
1393 * proceed.
1394 */
1395 if (unlikely(pool->destroying)) {
1396 spin_unlock(&pool->lock);
1397 /*
1398 * If this page isn't stale, somebody else holds a
1399 * reference to it. Let's drop our refcount so that they
1400 * can call the release logic.
1401 */
1402 if (unlikely(kref_put(&zhdr->refcount,
1403 release_z3fold_page_locked))) {
1404 /*
1405 * If we get here we have kref problems, so we
1406 * should freak out.
1407 */
1408 WARN(1, "Z3fold is experiencing kref problems\n");
1409 return false;
1410 }
1411 z3fold_page_unlock(zhdr);
1412 return false;
1413 }
1414
1415
1416 z3fold_inc_isolated(pool);
1336 spin_unlock(&pool->lock); 1417 spin_unlock(&pool->lock);
1337 z3fold_page_unlock(zhdr); 1418 z3fold_page_unlock(zhdr);
1338 return true; 1419 return true;
@@ -1401,6 +1482,10 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
1401 1482
1402 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); 1483 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
1403 1484
1485 spin_lock(&pool->lock);
1486 z3fold_dec_isolated(pool);
1487 spin_unlock(&pool->lock);
1488
1404 page_mapcount_reset(page); 1489 page_mapcount_reset(page);
1405 put_page(page); 1490 put_page(page);
1406 return 0; 1491 return 0;
@@ -1420,10 +1505,14 @@ static void z3fold_page_putback(struct page *page)
1420 INIT_LIST_HEAD(&page->lru); 1505 INIT_LIST_HEAD(&page->lru);
1421 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) { 1506 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
1422 atomic64_dec(&pool->pages_nr); 1507 atomic64_dec(&pool->pages_nr);
1508 spin_lock(&pool->lock);
1509 z3fold_dec_isolated(pool);
1510 spin_unlock(&pool->lock);
1423 return; 1511 return;
1424 } 1512 }
1425 spin_lock(&pool->lock); 1513 spin_lock(&pool->lock);
1426 list_add(&page->lru, &pool->lru); 1514 list_add(&page->lru, &pool->lru);
1515 z3fold_dec_isolated(pool);
1427 spin_unlock(&pool->lock); 1516 spin_unlock(&pool->lock);
1428 z3fold_page_unlock(zhdr); 1517 z3fold_page_unlock(zhdr);
1429} 1518}
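
The z3fold hunks above add a small drain protocol: migration bumps an isolation count under pool->lock, teardown sets a destroying flag and sleeps until the count hits zero, and the last decrement wakes the sleeper. For readers unfamiliar with wait_event()/wake_up_all(), the following userspace pthread sketch shows the same shape; it is an analogue, not the kernel code, and all names in it (pool_t, isolate_page, and so on) are invented for illustration, with the condvar standing in for the kernel wait queue.

#include <pthread.h>
#include <stdbool.h>

typedef struct {
	pthread_mutex_t lock;
	pthread_cond_t  isolate_wait;   /* stands in for the kernel wait queue */
	int             isolated;       /* pages currently isolated */
	bool            destroying;     /* set once teardown starts */
} pool_t;

/* Migration side: refuse new isolations once teardown has begun. */
static bool isolate_page(pool_t *p)
{
	bool ok;

	pthread_mutex_lock(&p->lock);
	ok = !p->destroying;            /* same lock the destroyer takes below */
	if (ok)
		p->isolated++;
	pthread_mutex_unlock(&p->lock);
	return ok;
}

/* Putback / migration-done side: the final decrement wakes the destroyer. */
static void put_isolated_page(pool_t *p)
{
	pthread_mutex_lock(&p->lock);
	if (--p->isolated == 0)
		pthread_cond_broadcast(&p->isolate_wait);
	pthread_mutex_unlock(&p->lock);
}

static void destroy_pool(pool_t *p)
{
	pthread_mutex_lock(&p->lock);
	p->destroying = true;           /* published under the lock, so no barrier is needed */
	while (p->isolated > 0)         /* drain in-flight isolations */
		pthread_cond_wait(&p->isolate_wait, &p->lock);
	pthread_mutex_unlock(&p->lock);
	/* only now is it safe to destroy the workqueues and free the pool */
}
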
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 57fbb7ced69f..08def3a0d200 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -54,6 +54,7 @@
54#include <linux/mount.h> 54#include <linux/mount.h>
55#include <linux/pseudo_fs.h> 55#include <linux/pseudo_fs.h>
56#include <linux/migrate.h> 56#include <linux/migrate.h>
57#include <linux/wait.h>
57#include <linux/pagemap.h> 58#include <linux/pagemap.h>
58#include <linux/fs.h> 59#include <linux/fs.h>
59 60
@@ -268,6 +269,10 @@ struct zs_pool {
268#ifdef CONFIG_COMPACTION 269#ifdef CONFIG_COMPACTION
269 struct inode *inode; 270 struct inode *inode;
270 struct work_struct free_work; 271 struct work_struct free_work;
272 /* A wait queue for when migration races with async_free_zspage() */
273 struct wait_queue_head migration_wait;
274 atomic_long_t isolated_pages;
275 bool destroying;
271#endif 276#endif
272}; 277};
273 278
@@ -1862,6 +1867,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
1862 zspage->isolated--; 1867 zspage->isolated--;
1863} 1868}
1864 1869
1870static void putback_zspage_deferred(struct zs_pool *pool,
1871 struct size_class *class,
1872 struct zspage *zspage)
1873{
1874 enum fullness_group fg;
1875
1876 fg = putback_zspage(class, zspage);
1877 if (fg == ZS_EMPTY)
1878 schedule_work(&pool->free_work);
1879
1880}
1881
1882static inline void zs_pool_dec_isolated(struct zs_pool *pool)
1883{
1884 VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
1885 atomic_long_dec(&pool->isolated_pages);
1886 /*
1887 * There's no possibility of racing, since wait_for_isolated_drain()
1888 * checks the isolated count under &class->lock after enqueuing
1889 * on migration_wait.
1890 */
1891 if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
1892 wake_up_all(&pool->migration_wait);
1893}
1894
1865static void replace_sub_page(struct size_class *class, struct zspage *zspage, 1895static void replace_sub_page(struct size_class *class, struct zspage *zspage,
1866 struct page *newpage, struct page *oldpage) 1896 struct page *newpage, struct page *oldpage)
1867{ 1897{
@@ -1931,6 +1961,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
1931 */ 1961 */
1932 if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) { 1962 if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
1933 get_zspage_mapping(zspage, &class_idx, &fullness); 1963 get_zspage_mapping(zspage, &class_idx, &fullness);
1964 atomic_long_inc(&pool->isolated_pages);
1934 remove_zspage(class, zspage, fullness); 1965 remove_zspage(class, zspage, fullness);
1935 } 1966 }
1936 1967
@@ -2030,8 +2061,16 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
2030 * Page migration is done so let's putback isolated zspage to 2061 * Page migration is done so let's putback isolated zspage to
2031 * the list if @page is final isolated subpage in the zspage. 2062 * the list if @page is final isolated subpage in the zspage.
2032 */ 2063 */
2033 if (!is_zspage_isolated(zspage)) 2064 if (!is_zspage_isolated(zspage)) {
2034 putback_zspage(class, zspage); 2065 /*
2066 * We cannot race with zs_destroy_pool() here because we wait
2067 * for isolation to hit zero before we start destroying.
2068 * Also, we ensure that everyone can see pool->destroying before
2069 * we start waiting.
2070 */
2071 putback_zspage_deferred(pool, class, zspage);
2072 zs_pool_dec_isolated(pool);
2073 }
2035 2074
2036 reset_page(page); 2075 reset_page(page);
2037 put_page(page); 2076 put_page(page);
@@ -2077,13 +2116,12 @@ static void zs_page_putback(struct page *page)
2077 spin_lock(&class->lock); 2116 spin_lock(&class->lock);
2078 dec_zspage_isolation(zspage); 2117 dec_zspage_isolation(zspage);
2079 if (!is_zspage_isolated(zspage)) { 2118 if (!is_zspage_isolated(zspage)) {
2080 fg = putback_zspage(class, zspage);
2081 /* 2119 /*
2082 * Due to page_lock, we cannot free zspage immediately 2120 * Due to page_lock, we cannot free zspage immediately
2083 * so let's defer. 2121 * so let's defer.
2084 */ 2122 */
2085 if (fg == ZS_EMPTY) 2123 putback_zspage_deferred(pool, class, zspage);
2086 schedule_work(&pool->free_work); 2124 zs_pool_dec_isolated(pool);
2087 } 2125 }
2088 spin_unlock(&class->lock); 2126 spin_unlock(&class->lock);
2089} 2127}
@@ -2107,8 +2145,36 @@ static int zs_register_migration(struct zs_pool *pool)
2107 return 0; 2145 return 0;
2108} 2146}
2109 2147
2148static bool pool_isolated_are_drained(struct zs_pool *pool)
2149{
2150 return atomic_long_read(&pool->isolated_pages) == 0;
2151}
2152
2153/* Function for resolving migration */
2154static void wait_for_isolated_drain(struct zs_pool *pool)
2155{
2156
2157 /*
2158 * We're in the process of destroying the pool, so there are no
2159 * active allocations. zs_page_isolate() fails for completely free
2160 * zspages, so we need only wait for the zs_pool's isolated
2161 * count to hit zero.
2162 */
2163 wait_event(pool->migration_wait,
2164 pool_isolated_are_drained(pool));
2165}
2166
2110static void zs_unregister_migration(struct zs_pool *pool) 2167static void zs_unregister_migration(struct zs_pool *pool)
2111{ 2168{
2169 pool->destroying = true;
2170 /*
2171 * We need a memory barrier here to ensure global visibility of
2172 * pool->destroying. Thus pool->isolated pages will either be 0 in which
2173 * case we don't care, or it will be > 0 and pool->destroying will
2174 * ensure that we wake up once isolation hits 0.
2175 */
2176 smp_mb();
2177 wait_for_isolated_drain(pool); /* This can block */
2112 flush_work(&pool->free_work); 2178 flush_work(&pool->free_work);
2113 iput(pool->inode); 2179 iput(pool->inode);
2114} 2180}
@@ -2346,6 +2412,8 @@ struct zs_pool *zs_create_pool(const char *name)
2346 if (!pool->name) 2412 if (!pool->name)
2347 goto err; 2413 goto err;
2348 2414
2415 init_waitqueue_head(&pool->migration_wait);
2416
2349 if (create_cache(pool)) 2417 if (create_cache(pool))
2350 goto err; 2418 goto err;
2351 2419
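
The zsmalloc variant of the same protocol avoids taking a lock around the counter and instead pairs an atomic counter with an explicit smp_mb() on the teardown side. A rough C11 analogue of that pairing is sketched below; the names are invented for illustration, the busy-wait stands in for wait_event(), and seq_cst atomics approximate the kernel's barrier pairing.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_long isolated_pages;
static atomic_bool destroying;

/* migration-done side */
static void dec_isolated(void)
{
	/* the seq_cst subtraction also orders the destroying load below
	 * after it, playing the role of the barrier paired with smp_mb()
	 */
	if (atomic_fetch_sub(&isolated_pages, 1) == 1 &&
	    atomic_load(&destroying))
		/* kernel: wake_up_all(&pool->migration_wait) */ ;
}

/* teardown side */
static void unregister_migration(void)
{
	atomic_store(&destroying, true);   /* kernel follows this with smp_mb() */
	while (atomic_load(&isolated_pages) != 0)
		/* kernel sleeps in wait_event() instead of spinning */ ;
}
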
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 6f08fd122a8d..7e052d6f759b 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -164,7 +164,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
164{ 164{
165 struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype); 165 struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
166 166
167 return attr ? nla_get_u32(attr) : 0; 167 return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
168} 168}
169 169
170/** 170/**
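
The batman-adv one-liner above guards against a malformed attribute: an attribute can carry fewer than four bytes of payload, so reading it as a u32 without checking nla_len() would read past the attribute. A generic C illustration of the pattern, with invented names:

#include <stdint.h>
#include <string.h>

struct attr { uint16_t len; const uint8_t *data; };

static uint32_t get_u32_safe(const struct attr *a)
{
	uint32_t v = 0;

	/* only trust the payload if it is exactly u32-sized */
	if (a && a->len == sizeof(uint32_t))
		memcpy(&v, a->data, sizeof(v));
	return v;	/* 0 doubles as the "missing/short attribute" value */
}
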
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index c8177a89f52c..4096d8a74a2b 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -221,7 +221,7 @@ unsigned int ebt_do_table(struct sk_buff *skb,
221 return NF_DROP; 221 return NF_DROP;
222 } 222 }
223 223
224 ADD_COUNTER(*(counter_base + i), 1, skb->len); 224 ADD_COUNTER(*(counter_base + i), skb->len, 1);
225 225
226 /* these should only watch: not modify, nor tell us 226 /* these should only watch: not modify, nor tell us
227 * what to do with the packet 227 * what to do with the packet
@@ -959,8 +959,8 @@ static void get_counters(const struct ebt_counter *oldcounters,
959 continue; 959 continue;
960 counter_base = COUNTER_BASE(oldcounters, nentries, cpu); 960 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
961 for (i = 0; i < nentries; i++) 961 for (i = 0; i < nentries; i++)
962 ADD_COUNTER(counters[i], counter_base[i].pcnt, 962 ADD_COUNTER(counters[i], counter_base[i].bcnt,
963 counter_base[i].bcnt); 963 counter_base[i].pcnt);
964 } 964 }
965} 965}
966 966
@@ -1280,7 +1280,7 @@ static int do_update_counters(struct net *net, const char *name,
1280 1280
1281 /* we add to the counters of the first cpu */ 1281 /* we add to the counters of the first cpu */
1282 for (i = 0; i < num_counters; i++) 1282 for (i = 0; i < num_counters; i++)
1283 ADD_COUNTER(t->private->counters[i], tmp[i].pcnt, tmp[i].bcnt); 1283 ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt);
1284 1284
1285 write_unlock_bh(&t->lock); 1285 write_unlock_bh(&t->lock);
1286 ret = 0; 1286 ret = 0;
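
The three ebtables hunks above are pure argument-order repairs: the counter macro takes bytes before packets, so callers that passed (packets, bytes) silently swapped the two statistics. A minimal sketch of the pitfall, with an invented stand-in for the real macro in the x_tables headers:

struct ebt_counter {
	unsigned long long pcnt;	/* packets */
	unsigned long long bcnt;	/* bytes   */
};

/* invented stand-in mirroring the (counter, bytes, packets) signature */
#define ADD_COUNTER(c, bytes, packets) \
	do { (c).bcnt += (bytes); (c).pcnt += (packets); } while (0)

static void account(struct ebt_counter *c, unsigned int skb_len)
{
	ADD_COUNTER(*c, skb_len, 1);	/* bytes first, then packets */
}
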
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 0b2df09b2554..78ae6e8c953d 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1496,7 +1496,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1496 struct ceph_osds up, acting; 1496 struct ceph_osds up, acting;
1497 bool force_resend = false; 1497 bool force_resend = false;
1498 bool unpaused = false; 1498 bool unpaused = false;
1499 bool legacy_change; 1499 bool legacy_change = false;
1500 bool split = false; 1500 bool split = false;
1501 bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE); 1501 bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1502 bool recovery_deletes = ceph_osdmap_flag(osdc, 1502 bool recovery_deletes = ceph_osdmap_flag(osdc,
@@ -1584,15 +1584,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1584 t->osd = acting.primary; 1584 t->osd = acting.primary;
1585 } 1585 }
1586 1586
1587 if (unpaused || legacy_change || force_resend || 1587 if (unpaused || legacy_change || force_resend || split)
1588 (split && con && CEPH_HAVE_FEATURE(con->peer_features,
1589 RESEND_ON_SPLIT)))
1590 ct_res = CALC_TARGET_NEED_RESEND; 1588 ct_res = CALC_TARGET_NEED_RESEND;
1591 else 1589 else
1592 ct_res = CALC_TARGET_NO_ACTION; 1590 ct_res = CALC_TARGET_NO_ACTION;
1593 1591
1594out: 1592out:
1595 dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd); 1593 dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
1594 legacy_change, force_resend, split, ct_res, t->osd);
1596 return ct_res; 1595 return ct_res;
1597} 1596}
1598 1597
diff --git a/net/core/filter.c b/net/core/filter.c
index 0c1059cdad3d..b91988f8b94e 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8841,13 +8841,13 @@ sk_reuseport_is_valid_access(int off, int size,
8841 return size == size_default; 8841 return size == size_default;
8842 8842
8843 /* Fields that allow narrowing */ 8843 /* Fields that allow narrowing */
8844 case offsetof(struct sk_reuseport_md, eth_protocol): 8844 case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
8845 if (size < FIELD_SIZEOF(struct sk_buff, protocol)) 8845 if (size < FIELD_SIZEOF(struct sk_buff, protocol))
8846 return false; 8846 return false;
8847 /* fall through */ 8847 /* fall through */
8848 case offsetof(struct sk_reuseport_md, ip_protocol): 8848 case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
8849 case offsetof(struct sk_reuseport_md, bind_inany): 8849 case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
8850 case offsetof(struct sk_reuseport_md, len): 8850 case bpf_ctx_range(struct sk_reuseport_md, len):
8851 bpf_ctx_record_field_size(info, size_default); 8851 bpf_ctx_record_field_size(info, size_default);
8852 return bpf_ctx_narrow_access_ok(off, size, size_default); 8852 return bpf_ctx_narrow_access_ok(off, size, size_default);
8853 8853
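
The sk_reuseport hunk switches from offsetof() to bpf_ctx_range() because offsetof() matches only a load at the field's first byte, while a narrowed load may target any byte inside the field. A simplified reimplementation of the idea follows; it assumes the real macros in linux/filter.h expand to a case range (a GNU C extension), and the struct and function names are invented.

#include <stddef.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* match every byte offset inside MEMBER, not just its first byte */
#define ctx_range(TYPE, MEMBER) \
	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1

struct md { unsigned int len; unsigned int ip_protocol; };

static int is_valid_access(int off)
{
	switch (off) {
	case ctx_range(struct md, ip_protocol):	/* any byte of the field */
		return 1;
	default:
		return 0;
	}
}
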
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 9741b593ea53..7c09d87d3269 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -142,8 +142,8 @@ int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
142 mutex_unlock(&flow_dissector_mutex); 142 mutex_unlock(&flow_dissector_mutex);
143 return -ENOENT; 143 return -ENOENT;
144 } 144 }
145 bpf_prog_put(attached);
146 RCU_INIT_POINTER(net->flow_dissector_prog, NULL); 145 RCU_INIT_POINTER(net->flow_dissector_prog, NULL);
146 bpf_prog_put(attached);
147 mutex_unlock(&flow_dissector_mutex); 147 mutex_unlock(&flow_dissector_mutex);
148 return 0; 148 return 0;
149} 149}
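
The flow_dissector hunk only swaps two lines, but the order is the whole point: dropping the last reference may free the program, so the globally visible pointer must be unpublished first. A reduced non-RCU sketch of the ordering, with invented names and the grace-period machinery elided:

struct prog { int refcount; };

static void prog_put(struct prog *p)
{
	/* may free p when the last reference is dropped */
	(void)p;
}

static void detach(struct prog * volatile *slot, struct prog *attached)
{
	*slot = NULL;		/* 1. unpublish: kernel uses RCU_INIT_POINTER() */
	prog_put(attached);	/* 2. only then drop the reference; new lookups
				 *    through *slot can no longer find the object */
}
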
diff --git a/net/core/sock.c b/net/core/sock.c
index 6d08553f885c..545fac19a711 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3287,16 +3287,17 @@ static __init int net_inuse_init(void)
3287 3287
3288core_initcall(net_inuse_init); 3288core_initcall(net_inuse_init);
3289 3289
3290static void assign_proto_idx(struct proto *prot) 3290static int assign_proto_idx(struct proto *prot)
3291{ 3291{
3292 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); 3292 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
3293 3293
3294 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) { 3294 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
3295 pr_err("PROTO_INUSE_NR exhausted\n"); 3295 pr_err("PROTO_INUSE_NR exhausted\n");
3296 return; 3296 return -ENOSPC;
3297 } 3297 }
3298 3298
3299 set_bit(prot->inuse_idx, proto_inuse_idx); 3299 set_bit(prot->inuse_idx, proto_inuse_idx);
3300 return 0;
3300} 3301}
3301 3302
3302static void release_proto_idx(struct proto *prot) 3303static void release_proto_idx(struct proto *prot)
@@ -3305,8 +3306,9 @@ static void release_proto_idx(struct proto *prot)
3305 clear_bit(prot->inuse_idx, proto_inuse_idx); 3306 clear_bit(prot->inuse_idx, proto_inuse_idx);
3306} 3307}
3307#else 3308#else
3308static inline void assign_proto_idx(struct proto *prot) 3309static inline int assign_proto_idx(struct proto *prot)
3309{ 3310{
3311 return 0;
3310} 3312}
3311 3313
3312static inline void release_proto_idx(struct proto *prot) 3314static inline void release_proto_idx(struct proto *prot)
@@ -3355,6 +3357,8 @@ static int req_prot_init(const struct proto *prot)
3355 3357
3356int proto_register(struct proto *prot, int alloc_slab) 3358int proto_register(struct proto *prot, int alloc_slab)
3357{ 3359{
3360 int ret = -ENOBUFS;
3361
3358 if (alloc_slab) { 3362 if (alloc_slab) {
3359 prot->slab = kmem_cache_create_usercopy(prot->name, 3363 prot->slab = kmem_cache_create_usercopy(prot->name,
3360 prot->obj_size, 0, 3364 prot->obj_size, 0,
@@ -3391,20 +3395,27 @@ int proto_register(struct proto *prot, int alloc_slab)
3391 } 3395 }
3392 3396
3393 mutex_lock(&proto_list_mutex); 3397 mutex_lock(&proto_list_mutex);
3398 ret = assign_proto_idx(prot);
3399 if (ret) {
3400 mutex_unlock(&proto_list_mutex);
3401 goto out_free_timewait_sock_slab_name;
3402 }
3394 list_add(&prot->node, &proto_list); 3403 list_add(&prot->node, &proto_list);
3395 assign_proto_idx(prot);
3396 mutex_unlock(&proto_list_mutex); 3404 mutex_unlock(&proto_list_mutex);
3397 return 0; 3405 return ret;
3398 3406
3399out_free_timewait_sock_slab_name: 3407out_free_timewait_sock_slab_name:
3400 kfree(prot->twsk_prot->twsk_slab_name); 3408 if (alloc_slab && prot->twsk_prot)
3409 kfree(prot->twsk_prot->twsk_slab_name);
3401out_free_request_sock_slab: 3410out_free_request_sock_slab:
3402 req_prot_cleanup(prot->rsk_prot); 3411 if (alloc_slab) {
3412 req_prot_cleanup(prot->rsk_prot);
3403 3413
3404 kmem_cache_destroy(prot->slab); 3414 kmem_cache_destroy(prot->slab);
3405 prot->slab = NULL; 3415 prot->slab = NULL;
3416 }
3406out: 3417out:
3407 return -ENOBUFS; 3418 return ret;
3408} 3419}
3409EXPORT_SYMBOL(proto_register); 3420EXPORT_SYMBOL(proto_register);
3410 3421
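
The proto_register() rework above illustrates the goto-unwind idiom: the index reservation can now fail, its failure funnels through the same cleanup labels as the slab failures, and the labels guard on alloc_slab so the no-slab path never frees caches it did not create. A compilable miniature of that control flow, with invented helper names:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

static void *create_slab(void) { return NULL; }	/* pretend this can fail */
static void destroy_slab(void *s) { (void)s; }
static int assign_idx(void) { return 0; }	/* or -ENOSPC when full */

static int register_proto(bool alloc_slab)
{
	void *slab = NULL;
	int ret = -ENOBUFS;

	if (alloc_slab) {
		slab = create_slab();
		if (!slab)
			goto out;	/* report -ENOBUFS */
	}

	ret = assign_idx();
	if (ret)
		goto out_free_slab;	/* unwind only what we built */

	return 0;

out_free_slab:
	if (alloc_slab)
		destroy_slab(slab);	/* guarded, as in the fixed labels */
out:
	return ret;
}
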
diff --git a/net/core/stream.c b/net/core/stream.c
index e94bb02a5629..4f1d4aa5fb38 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -120,7 +120,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
120 int err = 0; 120 int err = 0;
121 long vm_wait = 0; 121 long vm_wait = 0;
122 long current_timeo = *timeo_p; 122 long current_timeo = *timeo_p;
123 bool noblock = (*timeo_p ? false : true);
124 DEFINE_WAIT_FUNC(wait, woken_wake_function); 123 DEFINE_WAIT_FUNC(wait, woken_wake_function);
125 124
126 if (sk_stream_memory_free(sk)) 125 if (sk_stream_memory_free(sk))
@@ -133,11 +132,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
133 132
134 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 133 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
135 goto do_error; 134 goto do_error;
136 if (!*timeo_p) { 135 if (!*timeo_p)
137 if (noblock) 136 goto do_eagain;
138 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
139 goto do_nonblock;
140 }
141 if (signal_pending(current)) 137 if (signal_pending(current))
142 goto do_interrupted; 138 goto do_interrupted;
143 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 139 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -169,7 +165,13 @@ out:
169do_error: 165do_error:
170 err = -EPIPE; 166 err = -EPIPE;
171 goto out; 167 goto out;
172do_nonblock: 168do_eagain:
169 /* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
170 * be generated later.
171 * When TCP receives ACK packets that make room, tcp_check_space()
172 * only calls tcp_new_space() if SOCK_NOSPACE is set.
173 */
174 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
173 err = -EAGAIN; 175 err = -EAGAIN;
174 goto out; 176 goto out;
175do_interrupted: 177do_interrupted:
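
The contract the stream.c change restores: whenever the wait helper bails out with -EAGAIN, it must first mark the socket as out of space, because the ACK-processing path only emits EPOLLOUT wakeups when that flag was set while the queue was full. A reduced sketch with invented names:

#include <errno.h>

#define SOCK_NOSPACE_BIT 0x1

static int wait_for_memory(long timeo, unsigned long *sock_flags)
{
	if (!timeo) {
		/* arm the edge: tcp_check_space() -> tcp_new_space() fires
		 * EPOLLOUT only if SOCK_NOSPACE was set when room appeared
		 */
		*sock_flags |= SOCK_NOSPACE_BIT;
		return -EAGAIN;
	}
	/* ... otherwise sleep and retry ... */
	return 0;
}
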
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index dacbd58e1799..badc5cfe4dc6 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -1092,7 +1092,7 @@ static struct packet_type ieee802154_packet_type = {
1092 1092
1093static int __init af_ieee802154_init(void) 1093static int __init af_ieee802154_init(void)
1094{ 1094{
1095 int rc = -EINVAL; 1095 int rc;
1096 1096
1097 rc = proto_register(&ieee802154_raw_prot, 1); 1097 rc = proto_register(&ieee802154_raw_prot, 1);
1098 if (rc) 1098 if (rc)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 2b2b3d291ab0..1ab2fb6bb37d 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2145,7 +2145,7 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
2145 2145
2146 if (filter->dump_exceptions) { 2146 if (filter->dump_exceptions) {
2147 err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi, 2147 err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi,
2148 &i_fa, s_fa); 2148 &i_fa, s_fa, flags);
2149 if (err < 0) 2149 if (err < 0)
2150 goto stop; 2150 goto stop;
2151 } 2151 }
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 1510e951f451..4298aae74e0e 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -582,7 +582,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
582 582
583 if (!rt) 583 if (!rt)
584 goto out; 584 goto out;
585 net = dev_net(rt->dst.dev); 585
586 if (rt->dst.dev)
587 net = dev_net(rt->dst.dev);
588 else if (skb_in->dev)
589 net = dev_net(skb_in->dev);
590 else
591 goto out;
586 592
587 /* 593 /*
588 * Find the original header. It is expected to be valid, of course. 594 * Find the original header. It is expected to be valid, of course.
@@ -902,7 +908,7 @@ static bool icmp_redirect(struct sk_buff *skb)
902 return false; 908 return false;
903 } 909 }
904 910
905 icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway); 911 icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway));
906 return true; 912 return true;
907} 913}
908 914
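
The icmp_redirect() one-liner is a byte-order fix: the gateway field arrives in network byte order (__be32) and must be converted exactly once before being used as a host-order address. A runnable userspace illustration:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wire_gw = htonl(0xc0a80001);	/* 192.168.0.1 as carried on the wire */

	/* convert at the boundary; using wire_gw directly would be wrong
	 * on little-endian hosts
	 */
	uint32_t host_gw = ntohl(wire_gw);

	printf("gateway: %u.%u.%u.%u\n", host_gw >> 24, (host_gw >> 16) & 0xff,
	       (host_gw >> 8) & 0xff, host_gw & 0xff);
	return 0;
}
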
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 180f6896b98b..480d0b22db1a 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1475,7 +1475,7 @@ EXPORT_SYMBOL(__ip_mc_inc_group);
1475 1475
1476void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) 1476void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1477{ 1477{
1478 __ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE); 1478 __ip_mc_inc_group(in_dev, addr, GFP_KERNEL);
1479} 1479}
1480EXPORT_SYMBOL(ip_mc_inc_group); 1480EXPORT_SYMBOL(ip_mc_inc_group);
1481 1481
@@ -2197,7 +2197,7 @@ static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
2197 iml->sflist = NULL; 2197 iml->sflist = NULL;
2198 iml->sfmode = mode; 2198 iml->sfmode = mode;
2199 rcu_assign_pointer(inet->mc_list, iml); 2199 rcu_assign_pointer(inet->mc_list, iml);
2200 __ip_mc_inc_group(in_dev, addr, mode); 2200 ____ip_mc_inc_group(in_dev, addr, mode, GFP_KERNEL);
2201 err = 0; 2201 err = 0;
2202done: 2202done:
2203 return err; 2203 return err;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 517300d587a7..b6a6f18c3dd1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2728,7 +2728,8 @@ EXPORT_SYMBOL_GPL(ip_route_output_flow);
2728/* called with rcu_read_lock held */ 2728/* called with rcu_read_lock held */
2729static int rt_fill_info(struct net *net, __be32 dst, __be32 src, 2729static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2730 struct rtable *rt, u32 table_id, struct flowi4 *fl4, 2730 struct rtable *rt, u32 table_id, struct flowi4 *fl4,
2731 struct sk_buff *skb, u32 portid, u32 seq) 2731 struct sk_buff *skb, u32 portid, u32 seq,
2732 unsigned int flags)
2732{ 2733{
2733 struct rtmsg *r; 2734 struct rtmsg *r;
2734 struct nlmsghdr *nlh; 2735 struct nlmsghdr *nlh;
@@ -2736,7 +2737,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2736 u32 error; 2737 u32 error;
2737 u32 metrics[RTAX_MAX]; 2738 u32 metrics[RTAX_MAX];
2738 2739
2739 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0); 2740 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
2740 if (!nlh) 2741 if (!nlh)
2741 return -EMSGSIZE; 2742 return -EMSGSIZE;
2742 2743
@@ -2860,7 +2861,7 @@ nla_put_failure:
2860static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb, 2861static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
2861 struct netlink_callback *cb, u32 table_id, 2862 struct netlink_callback *cb, u32 table_id,
2862 struct fnhe_hash_bucket *bucket, int genid, 2863 struct fnhe_hash_bucket *bucket, int genid,
2863 int *fa_index, int fa_start) 2864 int *fa_index, int fa_start, unsigned int flags)
2864{ 2865{
2865 int i; 2866 int i;
2866 2867
@@ -2891,7 +2892,7 @@ static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
2891 err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt, 2892 err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
2892 table_id, NULL, skb, 2893 table_id, NULL, skb,
2893 NETLINK_CB(cb->skb).portid, 2894 NETLINK_CB(cb->skb).portid,
2894 cb->nlh->nlmsg_seq); 2895 cb->nlh->nlmsg_seq, flags);
2895 if (err) 2896 if (err)
2896 return err; 2897 return err;
2897next: 2898next:
@@ -2904,7 +2905,7 @@ next:
2904 2905
2905int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb, 2906int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
2906 u32 table_id, struct fib_info *fi, 2907 u32 table_id, struct fib_info *fi,
2907 int *fa_index, int fa_start) 2908 int *fa_index, int fa_start, unsigned int flags)
2908{ 2909{
2909 struct net *net = sock_net(cb->skb->sk); 2910 struct net *net = sock_net(cb->skb->sk);
2910 int nhsel, genid = fnhe_genid(net); 2911 int nhsel, genid = fnhe_genid(net);
@@ -2922,7 +2923,8 @@ int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
2922 err = 0; 2923 err = 0;
2923 if (bucket) 2924 if (bucket)
2924 err = fnhe_dump_bucket(net, skb, cb, table_id, bucket, 2925 err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
2925 genid, fa_index, fa_start); 2926 genid, fa_index, fa_start,
2927 flags);
2926 rcu_read_unlock(); 2928 rcu_read_unlock();
2927 if (err) 2929 if (err)
2928 return err; 2930 return err;
@@ -3183,7 +3185,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3183 fl4.flowi4_tos, res.fi, 0); 3185 fl4.flowi4_tos, res.fi, 0);
3184 } else { 3186 } else {
3185 err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb, 3187 err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
3186 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq); 3188 NETLINK_CB(in_skb).portid,
3189 nlh->nlmsg_seq, 0);
3187 } 3190 }
3188 if (err < 0) 3191 if (err < 0)
3189 goto errout_rcu; 3192 goto errout_rcu;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index dc73888c7859..6a576ff92c39 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -478,7 +478,7 @@ static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
478 if (!idev) { 478 if (!idev) {
479 idev = ipv6_add_dev(dev); 479 idev = ipv6_add_dev(dev);
480 if (IS_ERR(idev)) 480 if (IS_ERR(idev))
481 return NULL; 481 return idev;
482 } 482 }
483 483
484 if (dev->flags&IFF_UP) 484 if (dev->flags&IFF_UP)
@@ -1045,7 +1045,8 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
1045 int err = 0; 1045 int err = 0;
1046 1046
1047 if (addr_type == IPV6_ADDR_ANY || 1047 if (addr_type == IPV6_ADDR_ANY ||
1048 addr_type & IPV6_ADDR_MULTICAST || 1048 (addr_type & IPV6_ADDR_MULTICAST &&
1049 !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
1049 (!(idev->dev->flags & IFF_LOOPBACK) && 1050 (!(idev->dev->flags & IFF_LOOPBACK) &&
1050 !netif_is_l3_master(idev->dev) && 1051 !netif_is_l3_master(idev->dev) &&
1051 addr_type & IPV6_ADDR_LOOPBACK)) 1052 addr_type & IPV6_ADDR_LOOPBACK))
@@ -2465,8 +2466,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2465 ASSERT_RTNL(); 2466 ASSERT_RTNL();
2466 2467
2467 idev = ipv6_find_idev(dev); 2468 idev = ipv6_find_idev(dev);
2468 if (!idev) 2469 if (IS_ERR(idev))
2469 return ERR_PTR(-ENOBUFS); 2470 return idev;
2470 2471
2471 if (idev->cnf.disable_ipv6) 2472 if (idev->cnf.disable_ipv6)
2472 return ERR_PTR(-EACCES); 2473 return ERR_PTR(-EACCES);
@@ -3158,7 +3159,7 @@ static void init_loopback(struct net_device *dev)
3158 ASSERT_RTNL(); 3159 ASSERT_RTNL();
3159 3160
3160 idev = ipv6_find_idev(dev); 3161 idev = ipv6_find_idev(dev);
3161 if (!idev) { 3162 if (IS_ERR(idev)) {
3162 pr_debug("%s: add_dev failed\n", __func__); 3163 pr_debug("%s: add_dev failed\n", __func__);
3163 return; 3164 return;
3164 } 3165 }
@@ -3373,7 +3374,7 @@ static void addrconf_sit_config(struct net_device *dev)
3373 */ 3374 */
3374 3375
3375 idev = ipv6_find_idev(dev); 3376 idev = ipv6_find_idev(dev);
3376 if (!idev) { 3377 if (IS_ERR(idev)) {
3377 pr_debug("%s: add_dev failed\n", __func__); 3378 pr_debug("%s: add_dev failed\n", __func__);
3378 return; 3379 return;
3379 } 3380 }
@@ -3398,7 +3399,7 @@ static void addrconf_gre_config(struct net_device *dev)
3398 ASSERT_RTNL(); 3399 ASSERT_RTNL();
3399 3400
3400 idev = ipv6_find_idev(dev); 3401 idev = ipv6_find_idev(dev);
3401 if (!idev) { 3402 if (IS_ERR(idev)) {
3402 pr_debug("%s: add_dev failed\n", __func__); 3403 pr_debug("%s: add_dev failed\n", __func__);
3403 return; 3404 return;
3404 } 3405 }
@@ -4772,8 +4773,8 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
4772 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC; 4773 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
4773 4774
4774 idev = ipv6_find_idev(dev); 4775 idev = ipv6_find_idev(dev);
4775 if (!idev) 4776 if (IS_ERR(idev))
4776 return -ENOBUFS; 4777 return PTR_ERR(idev);
4777 4778
4778 if (!ipv6_allow_optimistic_dad(net, idev)) 4779 if (!ipv6_allow_optimistic_dad(net, idev))
4779 cfg.ifa_flags &= ~IFA_F_OPTIMISTIC; 4780 cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
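
The addrconf hunks converge on the error-pointer convention: a lookup that can fail for more than one reason returns an encoded errno rather than NULL, and callers test IS_ERR()/PTR_ERR() so the real cause propagates instead of a blanket -ENOBUFS. A minimal reimplementation of the idiom, with invented names standing in for ipv6_find_idev() and its callers:

#include <errno.h>

#define MAX_ERRNO 4095UL
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* the top MAX_ERRNO addresses are reserved for encoded errors */
	return (unsigned long)p >= -MAX_ERRNO;
}

struct idev;

static struct idev *find_idev(void)
{
	return ERR_PTR(-ENOBUFS);	/* lookup failed; encode why */
}

static int add_addr(void)
{
	struct idev *idev = find_idev();

	if (IS_ERR(idev))
		return PTR_ERR(idev);	/* forward the real errno */
	/* ... use idev ... */
	return 0;
}
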
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index ed56b0c6fe19..7c6edb7c5f10 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1549,6 +1549,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1549 if (!is_valid_ether_addr(mac)) 1549 if (!is_valid_ether_addr(mac))
1550 return -EINVAL; 1550 return -EINVAL;
1551 1551
1552 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
1553 sdata->vif.type == NL80211_IFTYPE_STATION &&
1554 !sdata->u.mgd.associated)
1555 return -EINVAL;
1556
1552 sta = sta_info_alloc(sdata, mac, GFP_KERNEL); 1557 sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
1553 if (!sta) 1558 if (!sta)
1554 return -ENOMEM; 1559 return -ENOMEM;
@@ -1556,10 +1561,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1556 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) 1561 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
1557 sta->sta.tdls = true; 1562 sta->sta.tdls = true;
1558 1563
1559 if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
1560 !sdata->u.mgd.associated)
1561 return -EINVAL;
1562
1563 err = sta_apply_parameters(local, sta, params); 1564 err = sta_apply_parameters(local, sta, params);
1564 if (err) { 1565 if (err) {
1565 sta_info_free(local, sta); 1566 sta_info_free(local, sta);
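
The mac80211 change moves a parameter check ahead of sta_info_alloc(); before the move, the -EINVAL return leaked the freshly allocated station. The general rule, sketched below with invented names: validate everything that does not need the allocation first, so early error paths have nothing to free.

#include <errno.h>
#include <stdlib.h>

struct sta { int flags; };

static int add_station(int params_ok)
{
	struct sta *sta;

	/* checks that need no allocation come first ... */
	if (!params_ok)
		return -EINVAL;		/* nothing to free on this path */

	sta = malloc(sizeof(*sta));	/* kernel: sta_info_alloc() */
	if (!sta)
		return -ENOMEM;

	/* ... failures from here on must free sta explicitly */
	free(sta);
	return 0;
}
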
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index d25e91d7bdc1..44b675016393 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -133,12 +133,12 @@ static int mpls_xmit(struct sk_buff *skb)
133 mpls_stats_inc_outucastpkts(out_dev, skb); 133 mpls_stats_inc_outucastpkts(out_dev, skb);
134 134
135 if (rt) { 135 if (rt) {
136 if (rt->rt_gw_family == AF_INET) 136 if (rt->rt_gw_family == AF_INET6)
137 err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
138 skb);
139 else if (rt->rt_gw_family == AF_INET6)
140 err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6, 137 err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6,
141 skb); 138 skb);
139 else
140 err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
141 skb);
142 } else if (rt6) { 142 } else if (rt6) {
143 if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) { 143 if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) {
144 /* 6PE (RFC 4798) */ 144 /* 6PE (RFC 4798) */
diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
index 5c3fad8cba57..0187e65176c0 100644
--- a/net/ncsi/ncsi-cmd.c
+++ b/net/ncsi/ncsi-cmd.c
@@ -54,7 +54,7 @@ static void ncsi_cmd_build_header(struct ncsi_pkt_hdr *h,
54 checksum = ncsi_calculate_checksum((unsigned char *)h, 54 checksum = ncsi_calculate_checksum((unsigned char *)h,
55 sizeof(*h) + nca->payload); 55 sizeof(*h) + nca->payload);
56 pchecksum = (__be32 *)((void *)h + sizeof(struct ncsi_pkt_hdr) + 56 pchecksum = (__be32 *)((void *)h + sizeof(struct ncsi_pkt_hdr) +
57 nca->payload); 57 ALIGN(nca->payload, 4));
58 *pchecksum = htonl(checksum); 58 *pchecksum = htonl(checksum);
59} 59}
60 60
@@ -309,14 +309,21 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
309 309
310int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca) 310int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca)
311{ 311{
312 struct ncsi_cmd_handler *nch = NULL;
312 struct ncsi_request *nr; 313 struct ncsi_request *nr;
314 unsigned char type;
313 struct ethhdr *eh; 315 struct ethhdr *eh;
314 struct ncsi_cmd_handler *nch = NULL;
315 int i, ret; 316 int i, ret;
316 317
318 /* Use OEM generic handler for Netlink request */
319 if (nca->req_flags == NCSI_REQ_FLAG_NETLINK_DRIVEN)
320 type = NCSI_PKT_CMD_OEM;
321 else
322 type = nca->type;
323
317 /* Search for the handler */ 324 /* Search for the handler */
318 for (i = 0; i < ARRAY_SIZE(ncsi_cmd_handlers); i++) { 325 for (i = 0; i < ARRAY_SIZE(ncsi_cmd_handlers); i++) {
319 if (ncsi_cmd_handlers[i].type == nca->type) { 326 if (ncsi_cmd_handlers[i].type == type) {
320 if (ncsi_cmd_handlers[i].handler) 327 if (ncsi_cmd_handlers[i].handler)
321 nch = &ncsi_cmd_handlers[i]; 328 nch = &ncsi_cmd_handlers[i];
322 else 329 else
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 5254004f2b42..dacabff9c467 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -47,7 +47,8 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
47 if (ntohs(h->code) != NCSI_PKT_RSP_C_COMPLETED || 47 if (ntohs(h->code) != NCSI_PKT_RSP_C_COMPLETED ||
48 ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR) { 48 ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR) {
49 netdev_dbg(nr->ndp->ndev.dev, 49 netdev_dbg(nr->ndp->ndev.dev,
50 "NCSI: non zero response/reason code\n"); 50 "NCSI: non zero response/reason code %04xh, %04xh\n",
51 ntohs(h->code), ntohs(h->reason));
51 return -EPERM; 52 return -EPERM;
52 } 53 }
53 54
@@ -55,7 +56,7 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
55 * sender doesn't support checksum according to NCSI 56 * sender doesn't support checksum according to NCSI
56 * specification. 57 * specification.
57 */ 58 */
58 pchecksum = (__be32 *)((void *)(h + 1) + payload - 4); 59 pchecksum = (__be32 *)((void *)(h + 1) + ALIGN(payload, 4) - 4);
59 if (ntohl(*pchecksum) == 0) 60 if (ntohl(*pchecksum) == 0)
60 return 0; 61 return 0;
61 62
@@ -63,7 +64,9 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
63 sizeof(*h) + payload - 4); 64 sizeof(*h) + payload - 4);
64 65
65 if (*pchecksum != htonl(checksum)) { 66 if (*pchecksum != htonl(checksum)) {
66 netdev_dbg(nr->ndp->ndev.dev, "NCSI: checksum mismatched\n"); 67 netdev_dbg(nr->ndp->ndev.dev,
68 "NCSI: checksum mismatched; recd: %08x calc: %08x\n",
69 *pchecksum, htonl(checksum));
67 return -EINVAL; 70 return -EINVAL;
68 } 71 }
69 72
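
Both NC-SI hunks fix the same asymmetry: the payload is padded to a 4-byte boundary, so the checksum lives at ALIGN(payload, 4), not at the raw payload length, and writer and reader must agree. A tiny runnable illustration of the offset arithmetic (the power-of-two ALIGN below matches the kernel macro's behavior):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int payload = 7;

	/* checksum offset within the packet body, after padding */
	printf("payload %u -> checksum at %u\n", payload, ALIGN(payload, 4)); /* 8 */
	return 0;
}
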
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 060a4ed46d5e..01705ad74a9a 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -149,6 +149,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
149 return nft_chain_validate_hooks(ctx->chain, hook_mask); 149 return nft_chain_validate_hooks(ctx->chain, hook_mask);
150} 150}
151 151
152static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
153 [NFTA_FLOW_TABLE_NAME] = { .type = NLA_STRING,
154 .len = NFT_NAME_MAXLEN - 1 },
155};
156
152static int nft_flow_offload_init(const struct nft_ctx *ctx, 157static int nft_flow_offload_init(const struct nft_ctx *ctx,
153 const struct nft_expr *expr, 158 const struct nft_expr *expr,
154 const struct nlattr * const tb[]) 159 const struct nlattr * const tb[])
@@ -207,6 +212,7 @@ static const struct nft_expr_ops nft_flow_offload_ops = {
207static struct nft_expr_type nft_flow_offload_type __read_mostly = { 212static struct nft_expr_type nft_flow_offload_type __read_mostly = {
208 .name = "flow_offload", 213 .name = "flow_offload",
209 .ops = &nft_flow_offload_ops, 214 .ops = &nft_flow_offload_ops,
215 .policy = nft_flow_offload_policy,
210 .maxattr = NFTA_FLOW_MAX, 216 .maxattr = NFTA_FLOW_MAX,
211 .owner = THIS_MODULE, 217 .owner = THIS_MODULE,
212}; 218};
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index d0ab1adf5bff..5aab6df74e0f 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -54,25 +54,39 @@ nfacct_mt_destroy(const struct xt_mtdtor_param *par)
54 nfnl_acct_put(info->nfacct); 54 nfnl_acct_put(info->nfacct);
55} 55}
56 56
57static struct xt_match nfacct_mt_reg __read_mostly = { 57static struct xt_match nfacct_mt_reg[] __read_mostly = {
58 .name = "nfacct", 58 {
59 .family = NFPROTO_UNSPEC, 59 .name = "nfacct",
60 .checkentry = nfacct_mt_checkentry, 60 .revision = 0,
61 .match = nfacct_mt, 61 .family = NFPROTO_UNSPEC,
62 .destroy = nfacct_mt_destroy, 62 .checkentry = nfacct_mt_checkentry,
63 .matchsize = sizeof(struct xt_nfacct_match_info), 63 .match = nfacct_mt,
64 .usersize = offsetof(struct xt_nfacct_match_info, nfacct), 64 .destroy = nfacct_mt_destroy,
65 .me = THIS_MODULE, 65 .matchsize = sizeof(struct xt_nfacct_match_info),
66 .usersize = offsetof(struct xt_nfacct_match_info, nfacct),
67 .me = THIS_MODULE,
68 },
69 {
70 .name = "nfacct",
71 .revision = 1,
72 .family = NFPROTO_UNSPEC,
73 .checkentry = nfacct_mt_checkentry,
74 .match = nfacct_mt,
75 .destroy = nfacct_mt_destroy,
76 .matchsize = sizeof(struct xt_nfacct_match_info_v1),
77 .usersize = offsetof(struct xt_nfacct_match_info_v1, nfacct),
78 .me = THIS_MODULE,
79 },
66}; 80};
67 81
68static int __init nfacct_mt_init(void) 82static int __init nfacct_mt_init(void)
69{ 83{
70 return xt_register_match(&nfacct_mt_reg); 84 return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
71} 85}
72 86
73static void __exit nfacct_mt_exit(void) 87static void __exit nfacct_mt_exit(void)
74{ 88{
75 xt_unregister_match(&nfacct_mt_reg); 89 xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
76} 90}
77 91
78module_init(nfacct_mt_init); 92module_init(nfacct_mt_init);
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 848c6eb55064..d8da6477d6be 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -67,6 +67,7 @@ struct ovs_conntrack_info {
67 struct md_mark mark; 67 struct md_mark mark;
68 struct md_labels labels; 68 struct md_labels labels;
69 char timeout[CTNL_TIMEOUT_NAME_MAX]; 69 char timeout[CTNL_TIMEOUT_NAME_MAX];
70 struct nf_ct_timeout *nf_ct_timeout;
70#if IS_ENABLED(CONFIG_NF_NAT) 71#if IS_ENABLED(CONFIG_NF_NAT)
71 struct nf_nat_range2 range; /* Only present for SRC NAT and DST NAT. */ 72 struct nf_nat_range2 range; /* Only present for SRC NAT and DST NAT. */
72#endif 73#endif
@@ -697,6 +698,14 @@ static bool skb_nfct_cached(struct net *net,
697 if (help && rcu_access_pointer(help->helper) != info->helper) 698 if (help && rcu_access_pointer(help->helper) != info->helper)
698 return false; 699 return false;
699 } 700 }
701 if (info->nf_ct_timeout) {
702 struct nf_conn_timeout *timeout_ext;
703
704 timeout_ext = nf_ct_timeout_find(ct);
705 if (!timeout_ext || info->nf_ct_timeout !=
706 rcu_dereference(timeout_ext->timeout))
707 return false;
708 }
700 /* Force conntrack entry direction to the current packet? */ 709 /* Force conntrack entry direction to the current packet? */
701 if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) { 710 if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
702 /* Delete the conntrack entry if confirmed, else just release 711 /* Delete the conntrack entry if confirmed, else just release
@@ -1565,7 +1574,7 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
1565 case OVS_CT_ATTR_TIMEOUT: 1574 case OVS_CT_ATTR_TIMEOUT:
1566 memcpy(info->timeout, nla_data(a), nla_len(a)); 1575 memcpy(info->timeout, nla_data(a), nla_len(a));
1567 if (!memchr(info->timeout, '\0', nla_len(a))) { 1576 if (!memchr(info->timeout, '\0', nla_len(a))) {
1568 OVS_NLERR(log, "Invalid conntrack helper"); 1577 OVS_NLERR(log, "Invalid conntrack timeout");
1569 return -EINVAL; 1578 return -EINVAL;
1570 } 1579 }
1571 break; 1580 break;
@@ -1657,6 +1666,10 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
1657 ct_info.timeout)) 1666 ct_info.timeout))
1658 pr_info_ratelimited("Failed to associated timeout " 1667 pr_info_ratelimited("Failed to associated timeout "
1659 "policy `%s'\n", ct_info.timeout); 1668 "policy `%s'\n", ct_info.timeout);
1669 else
1670 ct_info.nf_ct_timeout = rcu_dereference(
1671 nf_ct_timeout_find(ct_info.ct)->timeout);
1672
1660 } 1673 }
1661 1674
1662 if (helper) { 1675 if (helper) {
diff --git a/net/rds/ib.c b/net/rds/ib.c
index ec05d91aa9a2..45acab2de0cf 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -291,7 +291,7 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
291 void *buffer) 291 void *buffer)
292{ 292{
293 struct rds_info_rdma_connection *iinfo = buffer; 293 struct rds_info_rdma_connection *iinfo = buffer;
294 struct rds_ib_connection *ic; 294 struct rds_ib_connection *ic = conn->c_transport_data;
295 295
296 /* We will only ever look at IB transports */ 296 /* We will only ever look at IB transports */
297 if (conn->c_trans != &rds_ib_transport) 297 if (conn->c_trans != &rds_ib_transport)
@@ -301,15 +301,16 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
301 301
302 iinfo->src_addr = conn->c_laddr.s6_addr32[3]; 302 iinfo->src_addr = conn->c_laddr.s6_addr32[3];
303 iinfo->dst_addr = conn->c_faddr.s6_addr32[3]; 303 iinfo->dst_addr = conn->c_faddr.s6_addr32[3];
304 iinfo->tos = conn->c_tos; 304 if (ic) {
305 iinfo->tos = conn->c_tos;
306 iinfo->sl = ic->i_sl;
307 }
305 308
306 memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid)); 309 memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
307 memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid)); 310 memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
308 if (rds_conn_state(conn) == RDS_CONN_UP) { 311 if (rds_conn_state(conn) == RDS_CONN_UP) {
309 struct rds_ib_device *rds_ibdev; 312 struct rds_ib_device *rds_ibdev;
310 313
311 ic = conn->c_transport_data;
312
313 rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid, 314 rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid,
314 (union ib_gid *)&iinfo->dst_gid); 315 (union ib_gid *)&iinfo->dst_gid);
315 316
@@ -329,7 +330,7 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
329 void *buffer) 330 void *buffer)
330{ 331{
331 struct rds6_info_rdma_connection *iinfo6 = buffer; 332 struct rds6_info_rdma_connection *iinfo6 = buffer;
332 struct rds_ib_connection *ic; 333 struct rds_ib_connection *ic = conn->c_transport_data;
333 334
334 /* We will only ever look at IB transports */ 335 /* We will only ever look at IB transports */
335 if (conn->c_trans != &rds_ib_transport) 336 if (conn->c_trans != &rds_ib_transport)
@@ -337,6 +338,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
337 338
338 iinfo6->src_addr = conn->c_laddr; 339 iinfo6->src_addr = conn->c_laddr;
339 iinfo6->dst_addr = conn->c_faddr; 340 iinfo6->dst_addr = conn->c_faddr;
341 if (ic) {
342 iinfo6->tos = conn->c_tos;
343 iinfo6->sl = ic->i_sl;
344 }
340 345
341 memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid)); 346 memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid));
342 memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid)); 347 memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid));
@@ -344,7 +349,6 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
344 if (rds_conn_state(conn) == RDS_CONN_UP) { 349 if (rds_conn_state(conn) == RDS_CONN_UP) {
345 struct rds_ib_device *rds_ibdev; 350 struct rds_ib_device *rds_ibdev;
346 351
347 ic = conn->c_transport_data;
348 rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid, 352 rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
349 (union ib_gid *)&iinfo6->dst_gid); 353 (union ib_gid *)&iinfo6->dst_gid);
350 rds_ibdev = ic->rds_ibdev; 354 rds_ibdev = ic->rds_ibdev;
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 303c6ee8bdb7..f2b558e8b5ea 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -220,6 +220,7 @@ struct rds_ib_connection {
220 /* Send/Recv vectors */ 220 /* Send/Recv vectors */
221 int i_scq_vector; 221 int i_scq_vector;
222 int i_rcq_vector; 222 int i_rcq_vector;
223 u8 i_sl;
223}; 224};
224 225
225/* This assumes that atomic_t is at least 32 bits */ 226/* This assumes that atomic_t is at least 32 bits */
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index fddaa09f7b0d..233f1368162b 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -152,6 +152,9 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
152 RDS_PROTOCOL_MINOR(conn->c_version), 152 RDS_PROTOCOL_MINOR(conn->c_version),
153 ic->i_flowctl ? ", flow control" : ""); 153 ic->i_flowctl ? ", flow control" : "");
154 154
155 /* receive sl from the peer */
156 ic->i_sl = ic->i_cm_id->route.path_rec->sl;
157
155 atomic_set(&ic->i_cq_quiesce, 0); 158 atomic_set(&ic->i_cq_quiesce, 0);
156 159
157 /* Init rings and fill recv. this needs to wait until protocol 160 /* Init rings and fill recv. this needs to wait until protocol
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 9986d6065c4d..5f741e51b4ba 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -43,6 +43,9 @@ static struct rdma_cm_id *rds_rdma_listen_id;
43static struct rdma_cm_id *rds6_rdma_listen_id; 43static struct rdma_cm_id *rds6_rdma_listen_id;
44#endif 44#endif
45 45
46/* Per IB specification 7.7.3, service level is a 4-bit field. */
47#define TOS_TO_SL(tos) ((tos) & 0xF)
48
46static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id, 49static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
47 struct rdma_cm_event *event, 50 struct rdma_cm_event *event,
48 bool isv6) 51 bool isv6)
@@ -97,10 +100,13 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
97 struct rds_ib_connection *ibic; 100 struct rds_ib_connection *ibic;
98 101
99 ibic = conn->c_transport_data; 102 ibic = conn->c_transport_data;
100 if (ibic && ibic->i_cm_id == cm_id) 103 if (ibic && ibic->i_cm_id == cm_id) {
104 cm_id->route.path_rec[0].sl =
105 TOS_TO_SL(conn->c_tos);
101 ret = trans->cm_initiate_connect(cm_id, isv6); 106 ret = trans->cm_initiate_connect(cm_id, isv6);
102 else 107 } else {
103 rds_conn_drop(conn); 108 rds_conn_drop(conn);
109 }
104 } 110 }
105 break; 111 break;
106 112
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index f0de323d15d6..6c8f09c1ce51 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -76,13 +76,11 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
76 DEFINE_WAIT_FUNC(wait, woken_wake_function); 76 DEFINE_WAIT_FUNC(wait, woken_wake_function);
77 struct smc_connection *conn = &smc->conn; 77 struct smc_connection *conn = &smc->conn;
78 struct sock *sk = &smc->sk; 78 struct sock *sk = &smc->sk;
79 bool noblock;
80 long timeo; 79 long timeo;
81 int rc = 0; 80 int rc = 0;
82 81
83 /* similar to sk_stream_wait_memory */ 82 /* similar to sk_stream_wait_memory */
84 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 83 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
85 noblock = timeo ? false : true;
86 add_wait_queue(sk_sleep(sk), &wait); 84 add_wait_queue(sk_sleep(sk), &wait);
87 while (1) { 85 while (1) {
88 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 86 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -97,8 +95,8 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
97 break; 95 break;
98 } 96 }
99 if (!timeo) { 97 if (!timeo) {
100 if (noblock) 98 /* ensure EPOLLOUT is subsequently generated */
101 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 99 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
102 rc = -EAGAIN; 100 rc = -EAGAIN;
103 break; 101 break;
104 } 102 }
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d8679b6027e9..a07b516e503a 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1970,6 +1970,7 @@ call_bind(struct rpc_task *task)
1970static void 1970static void
1971call_bind_status(struct rpc_task *task) 1971call_bind_status(struct rpc_task *task)
1972{ 1972{
1973 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1973 int status = -EIO; 1974 int status = -EIO;
1974 1975
1975 if (rpc_task_transmitted(task)) { 1976 if (rpc_task_transmitted(task)) {
@@ -1977,14 +1978,15 @@ call_bind_status(struct rpc_task *task)
1977 return; 1978 return;
1978 } 1979 }
1979 1980
1980 if (task->tk_status >= 0) { 1981 dprint_status(task);
1981 dprint_status(task); 1982 trace_rpc_bind_status(task);
1983 if (task->tk_status >= 0)
1984 goto out_next;
1985 if (xprt_bound(xprt)) {
1982 task->tk_status = 0; 1986 task->tk_status = 0;
1983 task->tk_action = call_connect; 1987 goto out_next;
1984 return;
1985 } 1988 }
1986 1989
1987 trace_rpc_bind_status(task);
1988 switch (task->tk_status) { 1990 switch (task->tk_status) {
1989 case -ENOMEM: 1991 case -ENOMEM:
1990 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid); 1992 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
@@ -2003,6 +2005,9 @@ call_bind_status(struct rpc_task *task)
2003 task->tk_rebind_retry--; 2005 task->tk_rebind_retry--;
2004 rpc_delay(task, 3*HZ); 2006 rpc_delay(task, 3*HZ);
2005 goto retry_timeout; 2007 goto retry_timeout;
2008 case -ENOBUFS:
2009 rpc_delay(task, HZ >> 2);
2010 goto retry_timeout;
2006 case -EAGAIN: 2011 case -EAGAIN:
2007 goto retry_timeout; 2012 goto retry_timeout;
2008 case -ETIMEDOUT: 2013 case -ETIMEDOUT:
@@ -2026,7 +2031,6 @@ call_bind_status(struct rpc_task *task)
2026 case -ENETDOWN: 2031 case -ENETDOWN:
2027 case -EHOSTUNREACH: 2032 case -EHOSTUNREACH:
2028 case -ENETUNREACH: 2033 case -ENETUNREACH:
2029 case -ENOBUFS:
2030 case -EPIPE: 2034 case -EPIPE:
2031 dprintk("RPC: %5u remote rpcbind unreachable: %d\n", 2035 dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
2032 task->tk_pid, task->tk_status); 2036 task->tk_pid, task->tk_status);
@@ -2043,7 +2047,9 @@ call_bind_status(struct rpc_task *task)
2043 2047
2044 rpc_call_rpcerror(task, status); 2048 rpc_call_rpcerror(task, status);
2045 return; 2049 return;
2046 2050out_next:
2051 task->tk_action = call_connect;
2052 return;
2047retry_timeout: 2053retry_timeout:
2048 task->tk_status = 0; 2054 task->tk_status = 0;
2049 task->tk_action = call_bind; 2055 task->tk_action = call_bind;
@@ -2090,6 +2096,7 @@ call_connect(struct rpc_task *task)
2090static void 2096static void
2091call_connect_status(struct rpc_task *task) 2097call_connect_status(struct rpc_task *task)
2092{ 2098{
2099 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
2093 struct rpc_clnt *clnt = task->tk_client; 2100 struct rpc_clnt *clnt = task->tk_client;
2094 int status = task->tk_status; 2101 int status = task->tk_status;
2095 2102
@@ -2099,8 +2106,17 @@ call_connect_status(struct rpc_task *task)
2099 } 2106 }
2100 2107
2101 dprint_status(task); 2108 dprint_status(task);
2102
2103 trace_rpc_connect_status(task); 2109 trace_rpc_connect_status(task);
2110
2111 if (task->tk_status == 0) {
2112 clnt->cl_stats->netreconn++;
2113 goto out_next;
2114 }
2115 if (xprt_connected(xprt)) {
2116 task->tk_status = 0;
2117 goto out_next;
2118 }
2119
2104 task->tk_status = 0; 2120 task->tk_status = 0;
2105 switch (status) { 2121 switch (status) {
2106 case -ECONNREFUSED: 2122 case -ECONNREFUSED:
@@ -2117,8 +2133,6 @@ call_connect_status(struct rpc_task *task)
2117 case -ENETDOWN: 2133 case -ENETDOWN:
2118 case -ENETUNREACH: 2134 case -ENETUNREACH:
2119 case -EHOSTUNREACH: 2135 case -EHOSTUNREACH:
2120 case -EADDRINUSE:
2121 case -ENOBUFS:
2122 case -EPIPE: 2136 case -EPIPE:
2123 xprt_conditional_disconnect(task->tk_rqstp->rq_xprt, 2137 xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
2124 task->tk_rqstp->rq_connect_cookie); 2138 task->tk_rqstp->rq_connect_cookie);
@@ -2127,17 +2141,20 @@ call_connect_status(struct rpc_task *task)
2127 /* retry with existing socket, after a delay */ 2141 /* retry with existing socket, after a delay */
2128 rpc_delay(task, 3*HZ); 2142 rpc_delay(task, 3*HZ);
2129 /* fall through */ 2143 /* fall through */
2144 case -EADDRINUSE:
2130 case -ENOTCONN: 2145 case -ENOTCONN:
2131 case -EAGAIN: 2146 case -EAGAIN:
2132 case -ETIMEDOUT: 2147 case -ETIMEDOUT:
2133 goto out_retry; 2148 goto out_retry;
2134 case 0: 2149 case -ENOBUFS:
2135 clnt->cl_stats->netreconn++; 2150 rpc_delay(task, HZ >> 2);
2136 task->tk_action = call_transmit; 2151 goto out_retry;
2137 return;
2138 } 2152 }
2139 rpc_call_rpcerror(task, status); 2153 rpc_call_rpcerror(task, status);
2140 return; 2154 return;
2155out_next:
2156 task->tk_action = call_transmit;
2157 return;
2141out_retry: 2158out_retry:
2142 /* Check for timeouts before looping back to call_bind */ 2159 /* Check for timeouts before looping back to call_bind */
2143 task->tk_action = call_bind; 2160 task->tk_action = call_bind;
@@ -2365,7 +2382,7 @@ call_status(struct rpc_task *task)
2365 case -ECONNABORTED: 2382 case -ECONNABORTED:
2366 case -ENOTCONN: 2383 case -ENOTCONN:
2367 rpc_force_rebind(clnt); 2384 rpc_force_rebind(clnt);
2368 /* fall through */ 2385 break;
2369 case -EADDRINUSE: 2386 case -EADDRINUSE:
2370 rpc_delay(task, 3*HZ); 2387 rpc_delay(task, 3*HZ);
2371 /* fall through */ 2388 /* fall through */
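
A pattern worth pulling out of the sunrpc hunks: -ENOBUFS is reclassified from a fatal reachability error to a transient one, retried after rpc_delay(task, HZ >> 2), a quarter second at the usual meaning of HZ. A sketch of that errno triage with invented names:

#include <errno.h>

enum action { RETRY_NOW, RETRY_DELAYED, FAIL };

static enum action classify(int status)
{
	switch (status) {
	case -EAGAIN:
	case -ETIMEDOUT:
		return RETRY_NOW;	/* loop straight back to bind/connect */
	case -ENOBUFS:
		return RETRY_DELAYED;	/* kernel: rpc_delay(task, HZ >> 2) */
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	default:
		return FAIL;		/* surface the error to the caller */
	}
}
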
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 783748dc5e6f..2e71f5455c6c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1408,13 +1408,6 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
 		status = -EBADMSG;
 		goto out_dequeue;
 	}
-	if (task->tk_ops->rpc_call_prepare_transmit) {
-		task->tk_ops->rpc_call_prepare_transmit(task,
-				task->tk_calldata);
-		status = task->tk_status;
-		if (status < 0)
-			goto out_dequeue;
-	}
 	if (RPC_SIGNALLED(task)) {
 		status = -ERESTARTSYS;
 		goto out_dequeue;
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 646107af9f41..5311d0ae2454 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2788,7 +2788,7 @@ static void reg_process_pending_hints(void)
 
 	/* When last_request->processed becomes true this will be rescheduled */
 	if (lr && !lr->processed) {
-		reg_process_hint(lr);
+		pr_debug("Pending regulatory request, waiting for it to be processed...\n");
 		return;
 	}
 
diff --git a/net/wireless/util.c b/net/wireless/util.c
index c99939067bb0..92cb2cbb179b 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -242,25 +242,30 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
 
 	switch (params->cipher) {
 	case WLAN_CIPHER_SUITE_TKIP:
+		/* Extended Key ID can only be used with CCMP/GCMP ciphers */
+		if ((pairwise && key_idx) ||
+		    params->mode != NL80211_KEY_RX_TX)
+			return -EINVAL;
+		break;
 	case WLAN_CIPHER_SUITE_CCMP:
 	case WLAN_CIPHER_SUITE_CCMP_256:
 	case WLAN_CIPHER_SUITE_GCMP:
 	case WLAN_CIPHER_SUITE_GCMP_256:
-		/* IEEE802.11-2016 allows only 0 and - when using Extended Key
-		 * ID - 1 as index for pairwise keys.
+		/* IEEE802.11-2016 allows only 0 and - when supporting
+		 * Extended Key ID - 1 as index for pairwise keys.
 		 * @NL80211_KEY_NO_TX is only allowed for pairwise keys when
 		 * the driver supports Extended Key ID.
 		 * @NL80211_KEY_SET_TX can't be set when installing and
 		 * validating a key.
 		 */
-		if (params->mode == NL80211_KEY_NO_TX) {
-			if (!wiphy_ext_feature_isset(&rdev->wiphy,
-						     NL80211_EXT_FEATURE_EXT_KEY_ID))
-				return -EINVAL;
-			else if (!pairwise || key_idx < 0 || key_idx > 1)
-				return -EINVAL;
-		} else if ((pairwise && key_idx) ||
-			   params->mode == NL80211_KEY_SET_TX) {
+		if ((params->mode == NL80211_KEY_NO_TX && !pairwise) ||
+		    params->mode == NL80211_KEY_SET_TX)
+			return -EINVAL;
+		if (wiphy_ext_feature_isset(&rdev->wiphy,
+					    NL80211_EXT_FEATURE_EXT_KEY_ID)) {
+			if (pairwise && (key_idx < 0 || key_idx > 1))
+				return -EINVAL;
+		} else if (pairwise && key_idx) {
 			return -EINVAL;
 		}
 		break;
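
The key-validation rework above reduces to one rule for pairwise key indices: with Extended Key ID support the index may be 0 or 1, without it only 0. A standalone sketch of that predicate; pairwise_key_idx_ok() is an illustrative name, not a cfg80211 function:

#include <stdbool.h>
#include <stdio.h>

static bool pairwise_key_idx_ok(int key_idx, bool ext_key_id)
{
	if (ext_key_id)
		return key_idx == 0 || key_idx == 1;	/* keyid 0 or 1 allowed */
	return key_idx == 0;				/* only keyid 0 otherwise */
}

int main(void)
{
	printf("%d\n", pairwise_key_idx_ok(1, true));	/* 1: allowed */
	printf("%d\n", pairwise_key_idx_ok(1, false));	/* 0: rejected */
	return 0;
}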
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index a0607969f8c0..0e0062127124 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -375,7 +375,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
 	if (!umem->pages) {
 		err = -ENOMEM;
-		goto out_account;
+		goto out_pin;
 	}
 
 	for (i = 0; i < umem->npgs; i++)
@@ -383,6 +383,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 
 	return 0;
 
+out_pin:
+	xdp_umem_unpin_pages(umem);
 out_account:
 	xdp_umem_unaccount_pages(umem);
 	return err;
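
The xdp_umem fix restores the usual goto-unwind invariant: each failure jumps to the label that releases exactly what was acquired before the failing step, and the labels cascade in reverse acquisition order. A minimal sketch of the pattern, with malloc/free standing in for pinning and accounting:

#include <stdlib.h>

static int setup(void)
{
	void *pinned, *pages;
	int err;

	pinned = malloc(64);		/* step 1: stands in for pinning pages */
	if (!pinned)
		return -1;
	pages = malloc(64);		/* step 2: stands in for the pages array */
	if (!pages) {
		err = -1;
		goto out_pin;		/* undo step 1; skipping it would leak */
	}
	free(pages);
	free(pinned);
	return 0;

out_pin:
	free(pinned);
	return err;
}

int main(void)
{
	return setup();
}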
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8ca637a72697..ec94f5795ea4 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3269,7 +3269,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
 	struct flowi4 *fl4 = &fl->u.ip4;
 	int oif = 0;
 
-	if (skb_dst(skb))
+	if (skb_dst(skb) && skb_dst(skb)->dev)
 		oif = skb_dst(skb)->dev->ifindex;
 
 	memset(fl4, 0, sizeof(struct flowi4));
@@ -3387,7 +3387,7 @@ decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
 
 	nexthdr = nh[nhoff];
 
-	if (skb_dst(skb))
+	if (skb_dst(skb) && skb_dst(skb)->dev)
 		oif = skb_dst(skb)->dev->ifindex;
 
 	memset(fl6, 0, sizeof(struct flowi6));
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 7737b2670064..6d9592f0ae1d 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1835,8 +1835,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
 	if (cptr->type == USER_CLIENT) {
 		info->input_pool = cptr->data.user.fifo_pool_size;
 		info->input_free = info->input_pool;
-		if (cptr->data.user.fifo)
-			info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
+		info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
 	} else {
 		info->input_pool = 0;
 		info->input_free = 0;
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index ea69261f269a..eaaa8b5830bb 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -263,3 +263,20 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
 
 	return 0;
 }
+
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
+{
+	unsigned long flags;
+	int cells;
+
+	if (!f)
+		return 0;
+
+	snd_use_lock_use(&f->use_lock);
+	spin_lock_irqsave(&f->lock, flags);
+	cells = snd_seq_unused_cells(f->pool);
+	spin_unlock_irqrestore(&f->lock, flags);
+	snd_use_lock_free(&f->use_lock);
+	return cells;
+}
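
snd_seq_fifo_unused_cells() above wraps the pool query in the fifo's use-lock and spinlock so no caller can dereference the pool unlocked, and a NULL fifo simply reads as zero free cells. A user-space sketch of the same locked-accessor pattern, with a pthread mutex standing in for the kernel spinlock (build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct fifo {
	pthread_mutex_t lock;
	int unused_cells;
};

static int fifo_unused_cells(struct fifo *f)
{
	int cells;

	if (!f)
		return 0;	/* NULL fifo reads as "no free cells" */
	pthread_mutex_lock(&f->lock);
	cells = f->unused_cells;
	pthread_mutex_unlock(&f->lock);
	return cells;
}

int main(void)
{
	struct fifo f = { PTHREAD_MUTEX_INITIALIZER, 42 };

	printf("%d\n", fifo_unused_cells(&f));		/* 42 */
	printf("%d\n", fifo_unused_cells(NULL));	/* 0 */
	return 0;
}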
diff --git a/sound/core/seq/seq_fifo.h b/sound/core/seq/seq_fifo.h
index edc68743943d..b56a7b897c9c 100644
--- a/sound/core/seq/seq_fifo.h
+++ b/sound/core/seq/seq_fifo.h
@@ -53,5 +53,7 @@ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table
 /* resize pool in fifo */
 int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize);
 
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f);
 
 #endif
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index 9ea39348cdf5..7c6d1c277d4d 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -248,7 +248,7 @@ static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
 	unsigned int channels = params_channels(hw_params);
 
 	mutex_lock(&oxfw->mutex);
-	err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->tx_stream,
+	err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->rx_stream,
 					     rate, channels);
 	if (err >= 0)
 		++oxfw->substreams_count;
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 0d51823d7270..6d1fb7c11f17 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1175,6 +1175,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
 	SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
 	SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
 	SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
+	SND_PCI_QUIRK(0x1102, 0x0027, "Sound Blaster Z", QUIRK_SBZ),
 	SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ),
 	SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
 	SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 14298ef45b21..968d3caab6ac 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -611,18 +611,20 @@ static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec,
 
 /* update LED status via GPIO */
 static void cxt_update_gpio_led(struct hda_codec *codec, unsigned int mask,
-				bool enabled)
+				bool led_on)
 {
 	struct conexant_spec *spec = codec->spec;
 	unsigned int oldval = spec->gpio_led;
 
 	if (spec->mute_led_polarity)
-		enabled = !enabled;
+		led_on = !led_on;
 
-	if (enabled)
-		spec->gpio_led &= ~mask;
-	else
+	if (led_on)
 		spec->gpio_led |= mask;
+	else
+		spec->gpio_led &= ~mask;
+	codec_dbg(codec, "mask:%d enabled:%d gpio_led:%d\n",
+			mask, led_on, spec->gpio_led);
 	if (spec->gpio_led != oldval)
 		snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
 				    spec->gpio_led);
@@ -633,8 +635,8 @@ static void cxt_fixup_gpio_mute_hook(void *private_data, int enabled)
 {
 	struct hda_codec *codec = private_data;
 	struct conexant_spec *spec = codec->spec;
-
-	cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled);
+	/* muted -> LED on */
+	cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, !enabled);
 }
 
640/* turn on/off mic-mute LED via GPIO per capture hook */ 642/* turn on/off mic-mute LED via GPIO per capture hook */
@@ -656,7 +658,6 @@ static void cxt_fixup_mute_led_gpio(struct hda_codec *codec,
 		{ 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03 },
 		{}
 	};
-	codec_info(codec, "action: %d gpio_led: %d\n", action, spec->gpio_led);
 
 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
 		spec->gen.vmaster_mute.hook = cxt_fixup_gpio_mute_hook;
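
The Conexant change inverts the GPIO sense: "LED on" now sets the mask bits and "off" clears them, with polarity applied before the test, and the mute hook passes !enabled because muted means LED on. A small sketch of the corrected mask handling; update_led() is illustrative, not the HDA helper:

#include <stdbool.h>
#include <stdio.h>

static unsigned int update_led(unsigned int gpio, unsigned int mask,
			       bool led_on, bool invert_polarity)
{
	if (invert_polarity)
		led_on = !led_on;
	if (led_on)
		gpio |= mask;	/* LED on: set the GPIO bits */
	else
		gpio &= ~mask;	/* LED off: clear them */
	return gpio;
}

int main(void)
{
	printf("0x%x\n", update_led(0x0, 0x1, true, false));	/* 0x1 */
	printf("0x%x\n", update_led(0x3, 0x1, false, false));	/* 0x2 */
	return 0;
}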
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index 2c03e0f6bf72..f70211e6b174 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -550,6 +550,15 @@ int line6_init_pcm(struct usb_line6 *line6,
 	line6pcm->volume_monitor = 255;
 	line6pcm->line6 = line6;
 
+	spin_lock_init(&line6pcm->out.lock);
+	spin_lock_init(&line6pcm->in.lock);
+	line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
+
+	line6->line6pcm = line6pcm;
+
+	pcm->private_data = line6pcm;
+	pcm->private_free = line6_cleanup_pcm;
+
 	line6pcm->max_packet_size_in =
 		usb_maxpacket(line6->usbdev,
 			usb_rcvisocpipe(line6->usbdev, ep_read), 0);
@@ -562,15 +571,6 @@ int line6_init_pcm(struct usb_line6 *line6,
 		return -EINVAL;
 	}
 
-	spin_lock_init(&line6pcm->out.lock);
-	spin_lock_init(&line6pcm->in.lock);
-	line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
-
-	line6->line6pcm = line6pcm;
-
-	pcm->private_data = line6pcm;
-	pcm->private_free = line6_cleanup_pcm;
-
 	err = line6_create_audio_out_urbs(line6pcm);
 	if (err < 0)
 		return err;
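
The line6 reordering follows one rule: once pcm->private_free can run, every field the destructor touches must already be initialized, so the spinlocks and back-pointers are set up before the first error return. A compact sketch of that ordering rule under hypothetical names (dev_init/dev_cleanup are not Line 6 driver functions):

#include <stdio.h>

struct dev {
	int lock_ready;
	void (*cleanup)(struct dev *);
};

static void dev_cleanup(struct dev *d)
{
	if (d->lock_ready)	/* safe: always set before any return */
		puts("releasing lock state");
}

static int dev_init(struct dev *d, int probe_ok)
{
	d->lock_ready = 1;	/* init first ... */
	d->cleanup = dev_cleanup;
	if (!probe_ok)
		return -1;	/* ... so an early failure is still safe */
	return 0;
}

int main(void)
{
	struct dev d = { 0 };

	if (dev_init(&d, 0) < 0)
		d.cleanup(&d);	/* cleanup sees fully initialized state */
	return 0;
}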
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index b5927c3d5bc0..eceab19766db 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -739,7 +739,6 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
 				       struct uac_mixer_unit_descriptor *desc)
 {
 	int mu_channels;
-	void *c;
 
 	if (desc->bLength < sizeof(*desc))
 		return -EINVAL;
@@ -762,13 +761,6 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
 		break;
 	}
 
-	if (!mu_channels)
-		return 0;
-
-	c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
-	if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
-		return 0; /* no bmControls -> skip */
-
 	return mu_channels;
 }
 
@@ -2009,6 +2001,31 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
  * Mixer Unit
  */
 
+/* check whether the given in/out overflows bmMixerControls matrix */
+static bool mixer_bitmap_overflow(struct uac_mixer_unit_descriptor *desc,
+				  int protocol, int num_ins, int num_outs)
+{
+	u8 *hdr = (u8 *)desc;
+	u8 *c = uac_mixer_unit_bmControls(desc, protocol);
+	size_t rest; /* remaining bytes after bmMixerControls */
+
+	switch (protocol) {
+	case UAC_VERSION_1:
+	default:
+		rest = 1; /* iMixer */
+		break;
+	case UAC_VERSION_2:
+		rest = 2; /* bmControls + iMixer */
+		break;
+	case UAC_VERSION_3:
+		rest = 6; /* bmControls + wMixerDescrStr */
+		break;
+	}
+
+	/* overflow? */
+	return c + (num_ins * num_outs + 7) / 8 + rest > hdr + hdr[0];
+}
+
 /*
  * build a mixer unit control
  *
@@ -2137,6 +2154,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
 		if (err < 0)
 			return err;
 		num_ins += iterm.channels;
+		if (mixer_bitmap_overflow(desc, state->mixer->protocol,
+					  num_ins, num_outs))
+			break;
 		for (; ich < num_ins; ich++) {
 			int och, ich_has_controls = 0;
 
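
mixer_bitmap_overflow() above checks that the num_ins x num_outs control-bit matrix plus the trailing descriptor fields still fit inside the descriptor's stated length before the parser walks it. A simplified, self-contained sketch of the same bounds test; the offsets and lengths here are made up, not the UAC layout:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* desc[0] gives the descriptor's total length, as in USB bLength. */
static bool bitmap_overflows(const uint8_t *desc, size_t bitmap_off,
			     int num_ins, int num_outs, size_t trailing)
{
	size_t bitmap_bytes = ((size_t)num_ins * num_outs + 7) / 8;

	return bitmap_off + bitmap_bytes + trailing > desc[0];
}

int main(void)
{
	uint8_t desc[16] = { 12 };	/* claimed length: 12 bytes */

	printf("%d\n", bitmap_overflows(desc, 10, 8, 2, 1));	/* 1: 13 > 12 */
	printf("%d\n", bitmap_overflows(desc, 10, 8, 1, 1));	/* 0: 12 <= 12 */
	return 0;
}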
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 199fa157a411..27dcb3743690 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1155,17 +1155,17 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
 {
 	struct usb_mixer_interface *mixer;
 	struct usb_mixer_elem_info *cval;
-	int unitid = 12; /* SamleRate ExtensionUnit ID */
+	int unitid = 12; /* SampleRate ExtensionUnit ID */
 
 	list_for_each_entry(mixer, &chip->mixer_list, list) {
-		cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
-		if (cval) {
+		if (mixer->id_elems[unitid]) {
+			cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
 			snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
 						    cval->control << 8,
 						    samplerate_id);
 			snd_usb_mixer_notify_id(mixer, unitid);
+			break;
 		}
-		break;
 	}
 }
 
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 75b96929f76c..e4bbf79de956 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -339,6 +339,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
 		ep = 0x81;
 		ifnum = 2;
 		goto add_sync_ep_from_ifnum;
+	case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */
 	case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */
 		ep = 0x81;
 		ifnum = 1;
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 66f04a4846a5..43fdbbfe41bb 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -363,7 +363,9 @@ static int do_show(int argc, char **argv)
 		if (fd < 0)
 			return -1;
 
-		return show_prog(fd);
+		err = show_prog(fd);
+		close(fd);
+		return err;
 	}
 
 	if (argc)
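
The bpftool change is the standard fix for a descriptor leak on an early-return path: store the result in a local, close the fd unconditionally, then return. A minimal sketch (show_file() is a stand-in for the real work):

#include <fcntl.h>
#include <unistd.h>

static int show_file(int fd)
{
	return fd >= 0 ? 0 : -1;	/* stand-in for the real work */
}

static int do_show(const char *path)
{
	int fd, err;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	err = show_file(fd);	/* was: return show_file(fd); which leaks fd */
	close(fd);
	return err;
}

int main(void)
{
	return do_show("/dev/null");
}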
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index f5597503c771..e9ef4ca6a655 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -809,7 +809,7 @@ kvp_get_ip_info(int family, char *if_name, int op,
 	int sn_offset = 0;
 	int error = 0;
 	char *buffer;
-	struct hv_kvp_ipaddr_value *ip_buffer;
+	struct hv_kvp_ipaddr_value *ip_buffer = NULL;
 	char cidr_mask[5]; /* /xyz */
 	int weight;
 	int i;
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 29001f944db7..d69c541e2039 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -34,6 +34,9 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
 BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
 TEST_GEN_FILES = $(BPF_OBJ_FILES)
 
+BTF_C_FILES = $(wildcard progs/btf_dump_test_case_*.c)
+TEST_FILES = $(BTF_C_FILES)
+
 # Also test sub-register code-gen if LLVM has eBPF v3 processor support which
 # contains both ALU32 and JMP32 instructions.
 SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \
@@ -68,7 +71,8 @@ TEST_PROGS := test_kmod.sh \
 TEST_PROGS_EXTENDED := with_addr.sh \
 	with_tunnels.sh \
 	tcp_client.py \
-	tcp_server.py
+	tcp_server.py \
+	test_xdp_vlan.sh
 
 # Compile but not part of 'make run_tests'
 TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index f7a0744db31e..5dc109f4c097 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -34,3 +34,4 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_IPV6_SIT=m
+CONFIG_BPF_JIT=y
diff --git a/tools/testing/selftests/bpf/test_btf_dump.c b/tools/testing/selftests/bpf/test_btf_dump.c
index 8f850823d35f..6e75dd3cb14f 100644
--- a/tools/testing/selftests/bpf/test_btf_dump.c
+++ b/tools/testing/selftests/bpf/test_btf_dump.c
@@ -97,6 +97,13 @@ int test_btf_dump_case(int n, struct btf_dump_test_case *test_case)
 	}
 
 	snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name);
+	if (access(test_file, R_OK) == -1)
+		/*
+		 * When the test is run with O=, kselftest copies TEST_FILES
+		 * without preserving the directory structure.
+		 */
+		snprintf(test_file, sizeof(test_file), "%s.c",
+			test_case->name);
 	/*
 	 * Diff test output and expected test output, contained between
 	 * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case.
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
index 2fc4625c1a15..655729004391 100644
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -20,9 +20,9 @@ int main(int argc, char **argv)
 		BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 			     BPF_FUNC_get_local_storage),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+		BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
 		BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
-		BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
+		BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
 
 		BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
 		BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
@@ -30,7 +30,7 @@ int main(int argc, char **argv)
 			     BPF_FUNC_get_local_storage),
 		BPF_MOV64_IMM(BPF_REG_1, 1),
 		BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
-		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
 		BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
 		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
 		BPF_EXIT_INSN(),
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index fb679ac3d4b0..0e6652733462 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -13,6 +13,7 @@
 #include <bpf/bpf.h>
 
 #include "cgroup_helpers.h"
+#include "bpf_endian.h"
 #include "bpf_rlimit.h"
 #include "bpf_util.h"
 
@@ -232,7 +233,8 @@ static struct sock_test tests[] = {
 		/* if (ip == expected && port == expected) */
 		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock, src_ip6[3])),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x01000000, 4),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+			    __bpf_constant_ntohl(0x00000001), 4),
 		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock, src_port)),
 		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
@@ -261,7 +263,8 @@ static struct sock_test tests[] = {
 		/* if (ip == expected && port == expected) */
 		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock, src_ip4)),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x0100007F, 4),
+		BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+			    __bpf_constant_ntohl(0x7F000001), 4),
 		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
 			    offsetof(struct bpf_sock, src_port)),
 		BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
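
The test_sock fix replaces hand-swapped constants like 0x0100007F with __bpf_constant_ntohl(0x7F000001), so the comparison against the network-byte-order field is correct on big-endian hosts too. A tiny demonstration of why the hard-coded form only matches on little-endian machines:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wire = htonl(0x7F000001);	/* 127.0.0.1 as stored in a packet */

	/* Portable: matches on any host. */
	printf("portable:   %s\n", wire == htonl(0x7F000001) ? "match" : "miss");
	/* Hard-coded swap: only matches on little-endian hosts. */
	printf("hard-coded: %s\n", wire == 0x0100007F ? "match" : "miss");
	return 0;
}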
diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/evmcs.h
index 4059014d93ea..4912d23844bc 100644
--- a/tools/testing/selftests/kvm/include/evmcs.h
+++ b/tools/testing/selftests/kvm/include/evmcs.h
@@ -220,6 +220,8 @@ struct hv_enlightened_vmcs {
 struct hv_enlightened_vmcs *current_evmcs;
 struct hv_vp_assist_page *current_vp_assist;
 
+int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id);
+
 static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
 {
 	u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 6cb34a0fa200..0a5e487dbc50 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1060,9 +1060,11 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
 		    r);
 
-	r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
-	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
-		    r);
+	if (kvm_check_cap(KVM_CAP_XCRS)) {
+		r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
+		TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
+			    r);
+	}
 
 	r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
@@ -1103,9 +1105,11 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
 		    r);
 
-	r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
-	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
-		    r);
+	if (kvm_check_cap(KVM_CAP_XCRS)) {
+		r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
+		TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
+			    r);
+	}
 
 	r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
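
Both hunks wrap the XCRS ioctls in kvm_check_cap(KVM_CAP_XCRS), so the selftest skips the optional state instead of failing on hosts (for example AMD) that do not expose it. A sketch of the probe-then-use pattern with stand-in stubs; has_cap() and save_xcrs() are illustrative, not KVM APIs:

#include <stdbool.h>
#include <stdio.h>

static bool has_cap(int cap)
{
	return cap == 1;	/* pretend only capability 1 is supported */
}

static void save_xcrs(void)
{
	puts("saving XCRs");
}

int main(void)
{
	if (has_cap(1))
		save_xcrs();	/* runs */
	if (has_cap(2))
		save_xcrs();	/* skipped: no failing ioctl on this host */
	return 0;
}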
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 204f847bd065..9cef0455b819 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -12,6 +12,26 @@
 
 bool enable_evmcs;
 
+int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
+{
+	uint16_t evmcs_ver;
+
+	struct kvm_enable_cap enable_evmcs_cap = {
+		.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
+		 .args[0] = (unsigned long)&evmcs_ver
+	};
+
+	vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap);
+
+	/* KVM should return supported EVMCS version range */
+	TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
+		    (evmcs_ver & 0xff) > 0,
+		    "Incorrect EVMCS version range: %x:%x\n",
+		    evmcs_ver & 0xff, evmcs_ver >> 8);
+
+	return evmcs_ver;
+}
+
 /* Allocate memory regions for nested VMX tests.
  *
  * Input Args:
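
The new vcpu_enable_evmcs() helper validates the u16 KVM hands back: the low byte is the lowest supported eVMCS version, the high byte the highest, so a sane range needs low > 0 and high >= low. A standalone sketch of unpacking and checking such a packed range:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t ver = (2 << 8) | 1;	/* packed range: versions 1..2 */
	uint8_t lo = ver & 0xff;	/* lowest supported version */
	uint8_t hi = ver >> 8;		/* highest supported version */

	assert(lo > 0 && hi >= lo);	/* same shape as the TEST_ASSERT above */
	printf("supported eVMCS versions: %u..%u\n", lo, hi);
	return 0;
}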
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index f95c08343b48..92915e6408e7 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -79,11 +79,6 @@ int main(int argc, char *argv[])
 	struct kvm_x86_state *state;
 	struct ucall uc;
 	int stage;
-	uint16_t evmcs_ver;
-	struct kvm_enable_cap enable_evmcs_cap = {
-		.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
-		 .args[0] = (unsigned long)&evmcs_ver
-	};
 
 	/* Create VM */
 	vm = vm_create_default(VCPU_ID, 0, guest_code);
@@ -96,13 +91,7 @@ int main(int argc, char *argv[])
 		exit(KSFT_SKIP);
 	}
 
-	vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
-
-	/* KVM should return supported EVMCS version range */
-	TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
-		    (evmcs_ver & 0xff) > 0,
-		    "Incorrect EVMCS version range: %x:%x\n",
-		    evmcs_ver & 0xff, evmcs_ver >> 8);
+	vcpu_enable_evmcs(vm, VCPU_ID);
 
 	run = vcpu_state(vm, VCPU_ID);
 
@@ -146,7 +135,7 @@ int main(int argc, char *argv[])
 		kvm_vm_restart(vm, O_RDWR);
 		vm_vcpu_add(vm, VCPU_ID);
 		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-		vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
+		vcpu_enable_evmcs(vm, VCPU_ID);
 		vcpu_load_state(vm, VCPU_ID, state);
 		run = vcpu_state(vm, VCPU_ID);
 		free(state);
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index f72b3043db0e..ee59831fbc98 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -18,6 +18,7 @@
 #include "test_util.h"
 #include "kvm_util.h"
 #include "processor.h"
+#include "vmx.h"
 
 #define VCPU_ID 0
 
@@ -106,12 +107,7 @@ int main(int argc, char *argv[])
 {
 	struct kvm_vm *vm;
 	int rv;
-	uint16_t evmcs_ver;
 	struct kvm_cpuid2 *hv_cpuid_entries;
-	struct kvm_enable_cap enable_evmcs_cap = {
-		.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
-		 .args[0] = (unsigned long)&evmcs_ver
-	};
 
 	/* Tell stdout not to buffer its content */
 	setbuf(stdout, NULL);
@@ -136,14 +132,14 @@ int main(int argc, char *argv[])
 
 	free(hv_cpuid_entries);
 
-	rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
-
-	if (rv) {
+	if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
 		fprintf(stderr,
 			"Enlightened VMCS is unsupported, skip related test\n");
 		goto vm_free;
 	}
 
+	vcpu_enable_evmcs(vm, VCPU_ID);
+
 	hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
 	if (!hv_cpuid_entries)
 		return 1;
diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
index 40050e44ec0a..f9334bd3cce9 100644
--- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c
+++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
@@ -99,8 +99,8 @@ int main(int argc, char *argv[])
 	msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
 	vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
 		     msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
-	test_msr_platform_info_disabled(vm);
 	test_msr_platform_info_enabled(vm);
+	test_msr_platform_info_disabled(vm);
 	vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
 
 	kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
index ed7218d166da..853e370e8a39 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
@@ -25,24 +25,17 @@
 #define VMCS12_REVISION 0x11e57ed0
 #define VCPU_ID 5
 
+bool have_evmcs;
+
 void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
 {
-	volatile struct kvm_run *run;
-
 	vcpu_nested_state_set(vm, VCPU_ID, state, false);
-	run = vcpu_state(vm, VCPU_ID);
-	vcpu_run(vm, VCPU_ID);
-	TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
-		    "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
-		    run->exit_reason,
-		    exit_reason_str(run->exit_reason));
 }
 
 void test_nested_state_expect_errno(struct kvm_vm *vm,
 				    struct kvm_nested_state *state,
 				    int expected_errno)
 {
-	volatile struct kvm_run *run;
 	int rv;
 
 	rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
@@ -50,12 +43,6 @@ void test_nested_state_expect_errno(struct kvm_vm *vm,
 		    "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
 		    strerror(expected_errno), expected_errno, rv, strerror(errno),
 		    errno);
-	run = vcpu_state(vm, VCPU_ID);
-	vcpu_run(vm, VCPU_ID);
-	TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
-		    "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
-		    run->exit_reason,
-		    exit_reason_str(run->exit_reason));
 }
 
 void test_nested_state_expect_einval(struct kvm_vm *vm,
@@ -90,8 +77,9 @@ void set_default_vmx_state(struct kvm_nested_state *state, int size)
 {
 	memset(state, 0, size);
 	state->flags = KVM_STATE_NESTED_GUEST_MODE |
-		       KVM_STATE_NESTED_RUN_PENDING |
-		       KVM_STATE_NESTED_EVMCS;
+		       KVM_STATE_NESTED_RUN_PENDING;
+	if (have_evmcs)
+		state->flags |= KVM_STATE_NESTED_EVMCS;
 	state->format = 0;
 	state->size = size;
 	state->hdr.vmx.vmxon_pa = 0x1000;
@@ -141,13 +129,19 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 	/*
 	 * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
 	 * setting the nested state but flags other than eVMCS must be clear.
+	 * The eVMCS flag can be set if the enlightened VMCS capability has
+	 * been enabled.
 	 */
 	set_default_vmx_state(state, state_sz);
 	state->hdr.vmx.vmxon_pa = -1ull;
 	state->hdr.vmx.vmcs12_pa = -1ull;
 	test_nested_state_expect_einval(vm, state);
 
-	state->flags = KVM_STATE_NESTED_EVMCS;
+	state->flags &= KVM_STATE_NESTED_EVMCS;
+	if (have_evmcs) {
+		test_nested_state_expect_einval(vm, state);
+		vcpu_enable_evmcs(vm, VCPU_ID);
+	}
 	test_nested_state(vm, state);
 
 	/* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
@@ -232,6 +226,8 @@ int main(int argc, char *argv[])
 	struct kvm_nested_state state;
 	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
+	have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
+
 	if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
 		printf("KVM_CAP_NESTED_STATE not available, skipping test\n");
 		exit(KSFT_SKIP);
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index a8a6a0c883f1..6af5c91337f2 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -86,6 +86,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	unsigned int len;
 	int mask;
 
+	/* Detect an already handled MMIO return */
+	if (unlikely(!vcpu->mmio_needed))
+		return 0;
+
+	vcpu->mmio_needed = 0;
+
 	if (!run->mmio.is_write) {
 		len = run->mmio.len;
 		if (len > sizeof(unsigned long))
@@ -188,6 +194,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	run->mmio.is_write = is_write;
 	run->mmio.phys_addr = fault_ipa;
 	run->mmio.len = len;
+	vcpu->mmio_needed = 1;
 
 	if (!ret) {
 		/* We handled the access successfully in the kernel. */
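
The mmio.c change pairs a producer-side flag (mmio_needed = 1 when a request is published) with a consumer that clears it first and treats a second completion as a no-op, which is what makes a spurious extra MMIO return harmless. A user-space sketch of that idempotent-completion idea:

#include <stdbool.h>
#include <stdio.h>

struct req {
	bool needed;
	int data;
};

static int complete(struct req *r)
{
	if (!r->needed)
		return 0;	/* already handled: ignore */
	r->needed = false;	/* disarm before doing the work */
	printf("completing request, data=%d\n", r->data);
	return 1;
}

int main(void)
{
	struct req r = { .needed = true, .data = 7 };

	complete(&r);	/* handles the request */
	complete(&r);	/* duplicate return: harmless no-op */
	return 0;
}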
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index bdbc297d06fb..e621b5d45b27 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -8,6 +8,7 @@
 #include <linux/cpu.h>
 #include <linux/kvm_host.h>
 #include <kvm/arm_vgic.h>
+#include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include "vgic.h"
 
@@ -164,12 +165,18 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu0;
 		kref_init(&irq->refcount);
-		if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		switch (dist->vgic_model) {
+		case KVM_DEV_TYPE_ARM_VGIC_V2:
 			irq->targets = 0;
 			irq->group = 0;
-		} else {
+			break;
+		case KVM_DEV_TYPE_ARM_VGIC_V3:
 			irq->mpidr = 0;
 			irq->group = 1;
+			break;
+		default:
+			kfree(dist->spis);
+			return -EINVAL;
 		}
 	}
 	return 0;
@@ -209,7 +216,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 		irq->intid = i;
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu;
-		irq->targets = 1U << vcpu->vcpu_id;
 		kref_init(&irq->refcount);
 		if (vgic_irq_is_sgi(i)) {
 			/* SGIs */
@@ -219,11 +225,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 			/* PPIs */
 			irq->config = VGIC_CONFIG_LEVEL;
 		}
-
-		if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
-			irq->group = 1;
-		else
-			irq->group = 0;
 	}
 
 	if (!irqchip_in_kernel(vcpu->kvm))
@@ -286,10 +287,19 @@ int vgic_init(struct kvm *kvm)
 
 		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
 			struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
-			if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+			switch (dist->vgic_model) {
+			case KVM_DEV_TYPE_ARM_VGIC_V3:
 				irq->group = 1;
-			else
+				irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+				break;
+			case KVM_DEV_TYPE_ARM_VGIC_V2:
 				irq->group = 0;
+				irq->targets = 1U << idx;
+				break;
+			default:
+				ret = -EINVAL;
+				goto out;
+			}
 		}
 	}
 
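
The vgic conversions from if/else to switch statements share one motive: an explicit default arm turns an impossible third GIC model into a reported -EINVAL instead of silently configuring it like a V3. A minimal sketch of the same defensive pattern:

#include <errno.h>
#include <stdio.h>

enum model { MODEL_V2, MODEL_V3 };

static int config_irq(int model, int *group)
{
	switch (model) {
	case MODEL_V2:
		*group = 0;
		return 0;
	case MODEL_V3:
		*group = 1;
		return 0;
	default:
		return -EINVAL;	/* was: silently treated as the "else" model */
	}
}

int main(void)
{
	int group;

	printf("%d\n", config_irq(MODEL_V2, &group));	/* 0 */
	printf("%d\n", config_irq(42, &group));		/* -EINVAL */
	return 0;
}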