author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-08-19 01:15:42 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-08-19 01:15:42 -0400
commit		7ffc95e90e305c6803991ec2a2f4e442236efc77 (patch)
tree		5018f8310d14bb4d8dd25813ae69827c557060a9
parent		1e296b5be40d309a1585c14bc55da6ff6a29ecf0 (diff)
parent		d1abaeb3be7b5fa6d7a1fbbd2e14e3310005c4c1 (diff)

Merge 5.3-rc5 into usb-next

We need the usb fixes in here as well for other patches to build on.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r-- Documentation/devicetree/bindings/Makefile | 4
-rw-r--r-- Documentation/devicetree/bindings/net/fsl-fec.txt | 30
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml | 3
-rw-r--r-- MAINTAINERS | 18
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/arm/mm/dma-mapping.c | 4
-rw-r--r-- arch/arm64/kernel/cpufeature.c | 14
-rw-r--r-- arch/arm64/kernel/ftrace.c | 22
-rw-r--r-- arch/arm64/mm/dma-mapping.c | 4
-rw-r--r-- arch/powerpc/Kconfig | 1
-rw-r--r-- arch/powerpc/kernel/Makefile | 3
-rw-r--r-- arch/powerpc/kernel/dma-common.c | 17
-rw-r--r-- arch/riscv/configs/defconfig | 2
-rw-r--r-- arch/riscv/configs/rv32_defconfig | 3
-rw-r--r-- arch/riscv/include/asm/switch_to.h | 8
-rw-r--r-- arch/riscv/include/asm/tlbflush.h | 11
-rw-r--r-- arch/riscv/kernel/process.c | 11
-rw-r--r-- arch/sh/kernel/disassemble.c | 5
-rw-r--r-- arch/sh/kernel/hw_breakpoint.c | 1
-rw-r--r-- arch/x86/include/asm/bootparam_utils.h | 63
-rw-r--r-- arch/x86/kernel/apic/probe_32.c | 3
-rw-r--r-- arch/x86/kernel/cpu/umwait.c | 39
-rw-r--r-- arch/x86/math-emu/errors.c | 5
-rw-r--r-- arch/x86/math-emu/fpu_trig.c | 2
-rw-r--r-- arch/xtensa/kernel/setup.c | 1
-rw-r--r-- block/blk-mq.c | 10
-rw-r--r-- block/blk-sysfs.c | 3
-rw-r--r-- drivers/auxdisplay/Kconfig | 5
-rw-r--r-- drivers/auxdisplay/charlcd.c | 2
-rw-r--r-- drivers/auxdisplay/charlcd.h (renamed from include/misc/charlcd.h) | 5
-rw-r--r-- drivers/auxdisplay/hd44780.c | 3
-rw-r--r-- drivers/auxdisplay/panel.c | 4
-rw-r--r-- drivers/base/regmap/Kconfig | 2
-rw-r--r-- drivers/block/xen-blkback/xenbus.c | 6
-rw-r--r-- drivers/cpufreq/cpufreq.c | 2
-rw-r--r-- drivers/dma/dw-edma/dw-edma-core.h | 2
-rw-r--r-- drivers/dma/dw-edma/dw-edma-pcie.c | 18
-rw-r--r-- drivers/dma/dw-edma/dw-edma-v0-core.c | 34
-rw-r--r-- drivers/dma/dw-edma/dw-edma-v0-debugfs.c | 29
-rw-r--r-- drivers/dma/ste_dma40.c | 4
-rw-r--r-- drivers/dma/stm32-mdma.c | 2
-rw-r--r-- drivers/dma/tegra210-adma.c | 4
-rw-r--r-- drivers/dma/ti/omap-dma.c | 4
-rw-r--r-- drivers/firmware/efi/libstub/efi-stub-helper.c | 38
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 11
-rw-r--r-- drivers/gpu/drm/ast/ast_main.c | 5
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_post.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/disp.c | 22
-rw-r--r-- drivers/gpu/drm/scheduler/sched_entity.c | 4
-rw-r--r-- drivers/hv/hv_trace.h | 2
-rw-r--r-- drivers/hwtracing/intel_th/msu.h | 2
-rw-r--r-- drivers/hwtracing/intel_th/pti.h | 2
-rw-r--r-- drivers/i2c/busses/i2c-emev2.c | 16
-rw-r--r-- drivers/i2c/busses/i2c-imx.c | 18
-rw-r--r-- drivers/i2c/busses/i2c-rcar.c | 11
-rw-r--r-- drivers/i2c/busses/i2c-stm32.h | 2
-rw-r--r-- drivers/iio/adc/max9611.c | 2
-rw-r--r-- drivers/iio/frequency/adf4371.c | 8
-rw-r--r-- drivers/infiniband/core/counters.c | 6
-rw-r--r-- drivers/infiniband/core/nldev.c | 8
-rw-r--r-- drivers/infiniband/core/umem_odp.c | 4
-rw-r--r-- drivers/infiniband/hw/mlx5/devx.c | 11
-rw-r--r-- drivers/infiniband/hw/mlx5/odp.c | 24
-rw-r--r-- drivers/infiniband/sw/siw/Kconfig | 2
-rw-r--r-- drivers/infiniband/sw/siw/siw.h | 2
-rw-r--r-- drivers/infiniband/sw/siw/siw_main.c | 4
-rw-r--r-- drivers/infiniband/sw/siw/siw_qp.c | 14
-rw-r--r-- drivers/infiniband/sw/siw/siw_verbs.c | 16
-rw-r--r-- drivers/iommu/arm-smmu-v3.c | 4
-rw-r--r-- drivers/iommu/dma-iommu.c | 25
-rw-r--r-- drivers/iommu/intel-iommu-debugfs.c | 2
-rw-r--r-- drivers/iommu/intel-iommu.c | 11
-rw-r--r-- drivers/media/platform/omap/omap_vout_vrfb.c | 3
-rw-r--r-- drivers/misc/Kconfig | 1
-rw-r--r-- drivers/misc/habanalabs/device.c | 5
-rw-r--r-- drivers/misc/habanalabs/goya/goya.c | 72
-rw-r--r-- drivers/misc/habanalabs/goya/goyaP.h | 2
-rw-r--r-- drivers/misc/habanalabs/habanalabs.h | 9
-rw-r--r-- drivers/misc/habanalabs/hw_queue.c | 14
-rw-r--r-- drivers/misc/habanalabs/include/goya/goya_packets.h | 13
-rw-r--r-- drivers/misc/habanalabs/irq.c | 27
-rw-r--r-- drivers/misc/habanalabs/memory.c | 2
-rw-r--r-- drivers/mtd/spi-nor/spi-nor.c | 5
-rw-r--r-- drivers/nvme/host/core.c | 15
-rw-r--r-- drivers/nvme/host/multipath.c | 76
-rw-r--r-- drivers/nvme/host/nvme.h | 21
-rw-r--r-- drivers/nvme/host/pci.c | 16
-rw-r--r-- drivers/nvme/host/rdma.c | 16
-rw-r--r-- drivers/nvme/target/configfs.c | 1
-rw-r--r-- drivers/nvme/target/core.c | 15
-rw-r--r-- drivers/nvme/target/loop.c | 8
-rw-r--r-- drivers/nvme/target/nvmet.h | 3
-rw-r--r-- drivers/of/irq.c | 2
-rw-r--r-- drivers/of/resolver.c | 12
-rw-r--r-- drivers/pci/pcie/aspm.c | 20
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 23
-rw-r--r-- drivers/soundwire/Kconfig | 7
-rw-r--r-- drivers/soundwire/Makefile | 2
-rw-r--r-- drivers/soundwire/cadence_master.c | 8
-rw-r--r-- drivers/staging/comedi/drivers/dt3000.c | 8
-rw-r--r-- drivers/usb/chipidea/ci_hdrc_imx.c | 19
-rw-r--r-- drivers/usb/class/cdc-acm.c | 12
-rw-r--r-- drivers/usb/core/buffer.c | 10
-rw-r--r-- drivers/usb/core/file.c | 10
-rw-r--r-- drivers/usb/core/hcd.c | 4
-rw-r--r-- drivers/usb/core/message.c | 4
-rw-r--r-- drivers/usb/dwc2/hcd.c | 2
-rw-r--r-- drivers/usb/gadget/composite.c | 1
-rw-r--r-- drivers/usb/gadget/function/f_mass_storage.c | 28
-rw-r--r-- drivers/usb/gadget/udc/renesas_usb3.c | 5
-rw-r--r-- drivers/usb/host/fotg210-hcd.c | 4
-rw-r--r-- drivers/usb/serial/option.c | 10
-rw-r--r-- fs/afs/cmservice.c | 10
-rw-r--r-- fs/afs/dir.c | 89
-rw-r--r-- fs/afs/file.c | 12
-rw-r--r-- fs/afs/vlclient.c | 11
-rw-r--r-- fs/block_dev.c | 49
-rw-r--r-- fs/btrfs/ctree.h | 4
-rw-r--r-- fs/btrfs/disk-io.c | 2
-rw-r--r-- fs/btrfs/extent-tree.c | 71
-rw-r--r-- fs/btrfs/volumes.c | 13
-rw-r--r-- fs/io_uring.c | 20
-rw-r--r-- fs/seq_file.c | 2
-rw-r--r-- fs/xfs/libxfs/xfs_bmap.c | 29
-rw-r--r-- fs/xfs/libxfs/xfs_da_btree.c | 19
-rw-r--r-- fs/xfs/libxfs/xfs_dir2_node.c | 3
-rw-r--r-- fs/xfs/xfs_log.c | 5
-rw-r--r-- include/asm-generic/5level-fixup.h | 21
-rw-r--r-- include/linux/blk_types.h | 5
-rw-r--r-- include/linux/dma-noncoherent.h | 13
-rw-r--r-- include/linux/gfp.h | 12
-rw-r--r-- include/linux/memcontrol.h | 19
-rw-r--r-- include/linux/mempolicy.h | 2
-rw-r--r-- include/linux/mm_types.h | 11
-rw-r--r-- include/linux/pci.h | 2
-rw-r--r-- include/linux/usb.h | 2
-rw-r--r-- include/linux/usb/hcd.h | 3
-rw-r--r-- include/uapi/rdma/siw-abi.h | 3
-rw-r--r-- kernel/configs.c | 16
-rw-r--r-- kernel/dma/direct.c | 10
-rw-r--r-- kernel/dma/mapping.c | 19
-rw-r--r-- kernel/dma/remap.c | 2
-rw-r--r-- kernel/sched/cpufreq_schedutil.c | 14
-rw-r--r-- mm/huge_memory.c | 51
-rw-r--r-- mm/hugetlb.c | 19
-rw-r--r-- mm/kmemleak.c | 2
-rw-r--r-- mm/memcontrol.c | 59
-rw-r--r-- mm/mempolicy.c | 134
-rw-r--r-- mm/memremap.c | 24
-rw-r--r-- mm/rmap.c | 8
-rw-r--r-- mm/shmem.c | 2
-rw-r--r-- mm/usercopy.c | 2
-rw-r--r-- mm/vmalloc.c | 12
-rw-r--r-- mm/vmscan.c | 13
-rw-r--r-- mm/workingset.c | 10
-rw-r--r-- mm/z3fold.c | 14
-rw-r--r-- samples/auxdisplay/cfag12864b-example.c | 2
-rw-r--r-- scripts/coccinelle/api/atomic_as_refcounter.cocci | 1
-rw-r--r-- security/keys/trusted.c | 13
-rw-r--r-- sound/pci/hda/hda_generic.c | 21
-rw-r--r-- sound/pci/hda/hda_generic.h | 1
-rw-r--r-- sound/pci/hda/hda_intel.c | 3
-rw-r--r-- sound/pci/hda/patch_conexant.c | 15
-rw-r--r-- sound/pci/hda/patch_realtek.c | 12
-rw-r--r-- sound/usb/mixer.c | 37
-rwxr-xr-x tools/hv/hv_get_dhcp_info.sh | 2
-rw-r--r-- tools/hv/hv_kvp_daemon.c | 8
-rwxr-xr-x tools/hv/hv_set_ifconfig.sh | 2
-rw-r--r-- tools/hv/hv_vss_daemon.c | 4
-rw-r--r-- tools/hv/lsvmbus | 75
173 files changed, 1441 insertions(+), 814 deletions(-)
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
index 6b0dfd5c17ba..5138a2f6232a 100644
--- a/Documentation/devicetree/bindings/Makefile
+++ b/Documentation/devicetree/bindings/Makefile
@@ -19,7 +19,9 @@ quiet_cmd_mk_schema = SCHEMA $@
 
 DT_DOCS = $(shell \
 	cd $(srctree)/$(src) && \
-	find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \
+	find * \( -name '*.yaml' ! \
+		-name $(DT_TMP_SCHEMA) ! \
+		-name '*.example.dt.yaml' \) \
 	)
 
 DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index 2d41fb96ce0a..5b88fae0307d 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -7,18 +7,6 @@ Required properties:
 - phy-mode : See ethernet.txt file in the same directory
 
 Optional properties:
-- phy-reset-gpios : Should specify the gpio for phy reset
-- phy-reset-duration : Reset duration in milliseconds.  Should present
-  only if property "phy-reset-gpios" is available.  Missing the property
-  will have the duration be 1 millisecond.  Numbers greater than 1000 are
-  invalid and 1 millisecond will be used instead.
-- phy-reset-active-high : If present then the reset sequence using the GPIO
-  specified in the "phy-reset-gpios" property is reversed (H=reset state,
-  L=operation state).
-- phy-reset-post-delay : Post reset delay in milliseconds. If present then
-  a delay of phy-reset-post-delay milliseconds will be observed after the
-  phy-reset-gpios has been toggled. Can be omitted thus no delay is
-  observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
 - phy-supply : regulator that powers the Ethernet PHY.
 - phy-handle : phandle to the PHY device connected to this device.
 - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.
@@ -47,11 +35,27 @@ Optional properties:
   For imx6sx, "int0" handles all 3 queues and ENET_MII. "pps" is for the pulse
   per second interrupt associated with 1588 precision time protocol(PTP).
 
-
 Optional subnodes:
 - mdio : specifies the mdio bus in the FEC, used as a container for phy nodes
   according to phy.txt in the same directory
 
+Deprecated optional properties:
+	To avoid these, create a phy node according to phy.txt in the same
+	directory, and point the fec's "phy-handle" property to it. Then use
+	the phy's reset binding, again described by phy.txt.
+- phy-reset-gpios : Should specify the gpio for phy reset
+- phy-reset-duration : Reset duration in milliseconds.  Should present
+  only if property "phy-reset-gpios" is available.  Missing the property
+  will have the duration be 1 millisecond.  Numbers greater than 1000 are
+  invalid and 1 millisecond will be used instead.
+- phy-reset-active-high : If present then the reset sequence using the GPIO
+  specified in the "phy-reset-gpios" property is reversed (H=reset state,
+  L=operation state).
+- phy-reset-post-delay : Post reset delay in milliseconds. If present then
+  a delay of phy-reset-post-delay milliseconds will be observed after the
+  phy-reset-gpios has been toggled. Can be omitted thus no delay is
+  observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
+
 Example:
 
 ethernet@83fec000 {
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
index 91d3e78b3395..400df2da018a 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
@@ -37,7 +37,8 @@ properties:
   hwlocks: true
 
   st,syscfg:
-    $ref: "/schemas/types.yaml#/definitions/phandle-array"
+    allOf:
+      - $ref: "/schemas/types.yaml#/definitions/phandle-array"
     description: Should be phandle/offset/mask
     items:
       - description: Phandle to the syscon node which includes IRQ mux selection.
diff --git a/MAINTAINERS b/MAINTAINERS
index 01e101d28a43..8c343c2f4ce1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6436,6 +6436,14 @@ S: Maintained
 F:	drivers/perf/fsl_imx8_ddr_perf.c
 F:	Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt
 
+FREESCALE IMX I2C DRIVER
+M:	Oleksij Rempel <o.rempel@pengutronix.de>
+R:	Pengutronix Kernel Team <kernel@pengutronix.de>
+L:	linux-i2c@vger.kernel.org
+S:	Maintained
+F:	drivers/i2c/busses/i2c-imx.c
+F:	Documentation/devicetree/bindings/i2c/i2c-imx.txt
+
 FREESCALE IMX LPI2C DRIVER
 M:	Dong Aisheng <aisheng.dong@nxp.com>
 L:	linux-i2c@vger.kernel.org
@@ -7447,7 +7455,7 @@ F: drivers/net/hyperv/
 F:	drivers/scsi/storvsc_drv.c
 F:	drivers/uio/uio_hv_generic.c
 F:	drivers/video/fbdev/hyperv_fb.c
-F:	drivers/iommu/hyperv_iommu.c
+F:	drivers/iommu/hyperv-iommu.c
 F:	net/vmw_vsock/hyperv_transport.c
 F:	include/clocksource/hyperv_timer.h
 F:	include/linux/hyperv.h
@@ -8059,6 +8067,13 @@ T: git git://git.code.sf.net/p/intel-sas/isci
 S:	Supported
 F:	drivers/scsi/isci/
 
+INTEL CPU family model numbers
+M:	Tony Luck <tony.luck@intel.com>
+M:	x86@kernel.org
+L:	linux-kernel@vger.kernel.org
+S:	Supported
+F:	arch/x86/include/asm/intel-family.h
+
 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
 M:	Jani Nikula <jani.nikula@linux.intel.com>
 M:	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
@@ -8410,7 +8425,6 @@ L: linux-xfs@vger.kernel.org
 L:	linux-fsdevel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
 S:	Supported
-F:	fs/iomap.c
 F:	fs/iomap/
 F:	include/linux/iomap.h
 
diff --git a/Makefile b/Makefile
index 1b23f95db176..9fa18613566f 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Bobtail Squid
 
 # *DOCUMENTATION*
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6774b03aa405..d42557ee69c2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2405,9 +2405,7 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 		unsigned long attrs)
 {
-	if (!dev_is_dma_coherent(dev))
-		return __get_dma_pgprot(attrs, prot);
-	return prot;
+	return __get_dma_pgprot(attrs, prot);
 }
 
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d19d14ba9ae4..b1fdc486aed8 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -184,9 +184,17 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+	/*
+	 * We already refuse to boot CPUs that don't support our configured
+	 * page size, so we can only detect mismatches for a page size other
+	 * than the one we're currently using. Unfortunately, SoCs like this
+	 * exist in the wild so, even though we don't like it, we'll have to go
+	 * along with it and treat them as non-strict.
+	 */
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
 	/* Linux shouldn't care about secure memory */
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 1285c7b2947f..171773257974 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -73,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
 	if (offset < -SZ_128M || offset >= SZ_128M) {
 #ifdef CONFIG_ARM64_MODULE_PLTS
-		struct plt_entry trampoline;
+		struct plt_entry trampoline, *dst;
 		struct module *mod;
 
 		/*
@@ -106,23 +106,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		 * to check if the actual opcodes are in fact identical,
 		 * regardless of the offset in memory so use memcmp() instead.
 		 */
-		trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
-		if (memcmp(mod->arch.ftrace_trampoline, &trampoline,
-			   sizeof(trampoline))) {
-			if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
+		dst = mod->arch.ftrace_trampoline;
+		trampoline = get_plt_entry(addr, dst);
+		if (memcmp(dst, &trampoline, sizeof(trampoline))) {
+			if (plt_entry_is_initialized(dst)) {
 				pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
 				return -EINVAL;
 			}
 
 			/* point the trampoline to our ftrace entry point */
 			module_disable_ro(mod);
-			*mod->arch.ftrace_trampoline = trampoline;
+			*dst = trampoline;
 			module_enable_ro(mod, true);
 
-			/* update trampoline before patching in the branch */
-			smp_wmb();
+			/*
+			 * Ensure updated trampoline is visible to instruction
+			 * fetch before we patch in the branch.
+			 */
+			__flush_icache_range((unsigned long)&dst[0],
+					     (unsigned long)&dst[1]);
 		}
-		addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
+		addr = (unsigned long)dst;
 #else /* CONFIG_ARM64_MODULE_PLTS */
 		return -EINVAL;
 #endif /* CONFIG_ARM64_MODULE_PLTS */
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 1d3f0b5a9940..bd2b039f43a6 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -14,9 +14,7 @@
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 		unsigned long attrs)
 {
-	if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
-		return pgprot_writecombine(prot);
-	return prot;
+	return pgprot_writecombine(prot);
 }
 
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 77f6ebf97113..d8dcd8820369 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -121,7 +121,6 @@ config PPC
121 select ARCH_32BIT_OFF_T if PPC32 121 select ARCH_32BIT_OFF_T if PPC32
122 select ARCH_HAS_DEBUG_VIRTUAL 122 select ARCH_HAS_DEBUG_VIRTUAL
123 select ARCH_HAS_DEVMEM_IS_ALLOWED 123 select ARCH_HAS_DEVMEM_IS_ALLOWED
124 select ARCH_HAS_DMA_MMAP_PGPROT
125 select ARCH_HAS_ELF_RANDOMIZE 124 select ARCH_HAS_ELF_RANDOMIZE
126 select ARCH_HAS_FORTIFY_SOURCE 125 select ARCH_HAS_FORTIFY_SOURCE
127 select ARCH_HAS_GCOV_PROFILE_ALL 126 select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ea0c69236789..56dfa7a2a6f2 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -49,8 +49,7 @@ obj-y := cputable.o ptrace.o syscalls.o \
49 signal.o sysfs.o cacheinfo.o time.o \ 49 signal.o sysfs.o cacheinfo.o time.o \
50 prom.o traps.o setup-common.o \ 50 prom.o traps.o setup-common.o \
51 udbg.o misc.o io.o misc_$(BITS).o \ 51 udbg.o misc.o io.o misc_$(BITS).o \
52 of_platform.o prom_parse.o \ 52 of_platform.o prom_parse.o
53 dma-common.o
54obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ 53obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
55 signal_64.o ptrace32.o \ 54 signal_64.o ptrace32.o \
56 paca.o nvram_64.o firmware.o 55 paca.o nvram_64.o firmware.o
diff --git a/arch/powerpc/kernel/dma-common.c b/arch/powerpc/kernel/dma-common.c
deleted file mode 100644
index dc7ef6b17b69..000000000000
--- a/arch/powerpc/kernel/dma-common.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Contains common dma routines for all powerpc platforms.
- *
- * Copyright (C) 2019 Shawn Anastasio.
- */
-
-#include <linux/mm.h>
-#include <linux/dma-noncoherent.h>
-
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
-		unsigned long attrs)
-{
-	if (!dev_is_dma_coherent(dev))
-		return pgprot_noncached(prot);
-	return prot;
-}
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 93205c0bf71d..3efff552a261 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
 CONFIG_HVC_RISCV_SBI=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
 CONFIG_SPI=y
 CONFIG_SPI_SIFIVE=y
 # CONFIG_PTP_1588_CLOCK is not set
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index d5449ef805a3..7da93e494445 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -34,6 +34,7 @@ CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCIE_XILINX=y
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_SD=y
@@ -53,6 +54,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
 CONFIG_HVC_RISCV_SBI=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
 # CONFIG_PTP_1588_CLOCK is not set
 CONFIG_DRM=y
 CONFIG_DRM_RADEON=y
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index 853b65ef656d..f0227bdce0f0 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -16,7 +16,13 @@ extern void __fstate_restore(struct task_struct *restore_from);
 
 static inline void __fstate_clean(struct pt_regs *regs)
 {
-	regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
+	regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
+}
+
+static inline void fstate_off(struct task_struct *task,
+			      struct pt_regs *regs)
+{
+	regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_OFF;
 }
 
 static inline void fstate_save(struct task_struct *task,
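
The heart of the __fstate_clean() fix above is the change from |= to =: OR-ing the recomputed value back into sstatus can set bits but can never clear the bits already present in the SR_FS field. A minimal standalone sketch (with illustrative bit values, not the real RISC-V encodings) shows the difference:

    #include <stdint.h>
    #include <stdio.h>

    #define SR_FS       0x6000u  /* illustrative two-bit status field */
    #define SR_FS_CLEAN 0x4000u  /* illustrative "clean" encoding */

    int main(void)
    {
        uint32_t sstatus = 0x6000u;  /* field currently "dirty" */

        /* old form: |= ORs the stale field bits back in, so they never clear */
        uint32_t buggy = sstatus;
        buggy |= (buggy & ~SR_FS) | SR_FS_CLEAN;

        /* new form: = replaces the whole field with the CLEAN encoding */
        uint32_t fixed = (sstatus & ~SR_FS) | SR_FS_CLEAN;

        printf("buggy = %#x (still dirty)\n", buggy);  /* prints 0x6000 */
        printf("fixed = %#x (clean)\n", fixed);        /* prints 0x4000 */
        return 0;
    }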
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 687dd19735a7..4d9bbe8438bf 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -53,10 +53,17 @@ static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
 }
 
 #define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
-#define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0)
+
 #define flush_tlb_range(vma, start, end) \
 	remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
-#define flush_tlb_mm(mm) \
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long addr)
+{
+	flush_tlb_range(vma, addr, addr + PAGE_SIZE);
+}
+
+#define flush_tlb_mm(mm) \
 	remote_sfence_vma(mm_cpumask(mm), 0, -1)
 
 #endif /* CONFIG_SMP */
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index f23794bd1e90..fb3a082362eb 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -64,8 +64,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
 		unsigned long sp)
 {
 	regs->sstatus = SR_SPIE;
-	if (has_fpu)
+	if (has_fpu) {
 		regs->sstatus |= SR_FS_INITIAL;
+		/*
+		 * Restore the initial value to the FP register
+		 * before starting the user program.
+		 */
+		fstate_restore(current, regs);
+	}
 	regs->sepc = pc;
 	regs->sp = sp;
 	set_fs(USER_DS);
@@ -75,10 +81,11 @@ void flush_thread(void)
 {
 #ifdef CONFIG_FPU
 	/*
-	 * Reset FPU context
+	 * Reset FPU state and context
 	 * frm: round to nearest, ties to even (IEEE default)
 	 * fflags: accrued exceptions cleared
 	 */
+	fstate_off(current, task_pt_regs(current));
 	memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
 #endif
 }
diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c
index defebf1a9c8a..845543780cc5 100644
--- a/arch/sh/kernel/disassemble.c
+++ b/arch/sh/kernel/disassemble.c
@@ -475,8 +475,6 @@ static void print_sh_insn(u32 memaddr, u16 insn)
 		printk("dbr");
 		break;
 	case FD_REG_N:
-		if (0)
-			goto d_reg_n;
 	case F_REG_N:
 		printk("fr%d", rn);
 		break;
@@ -488,7 +486,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
 			printk("xd%d", rn & ~1);
 			break;
 		}
-	d_reg_n:
+		/* else, fall through */
 	case D_REG_N:
 		printk("dr%d", rn);
 		break;
@@ -497,6 +495,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
 			printk("xd%d", rm & ~1);
 			break;
 		}
+		/* else, fall through */
 	case D_REG_M:
 		printk("dr%d", rm);
 		break;
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index 3bd010b4c55f..f10d64311127 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -157,6 +157,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
 	switch (sh_type) {
 	case SH_BREAKPOINT_READ:
 		*gen_type = HW_BREAKPOINT_R;
+		break;
 	case SH_BREAKPOINT_WRITE:
 		*gen_type = HW_BREAKPOINT_W;
 		break;
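
The one-line hw_breakpoint.c fix adds the break that stops a read breakpoint from falling through and being reported as a write. A standalone sketch of the same bug class (hypothetical enum and flag values, for illustration only):

    #include <stdio.h>

    enum bp_type { BP_READ, BP_WRITE };

    /* Without the break after the READ case, a read request would fall
     * through and come back decoded as the write flag. */
    static int decode(enum bp_type t)
    {
        int gen = 0;

        switch (t) {
        case BP_READ:
            gen = 1;   /* stands in for HW_BREAKPOINT_R */
            break;     /* the statement the patch adds */
        case BP_WRITE:
            gen = 2;   /* stands in for HW_BREAKPOINT_W */
            break;
        }
        return gen;
    }

    int main(void)
    {
        printf("READ -> %d, WRITE -> %d\n", decode(BP_READ), decode(BP_WRITE));
        return 0;
    }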
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 101eb944f13c..f5e90a849bca 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -18,6 +18,20 @@
  * Note: efi_info is commonly left uninitialized, but that field has a
  * private magic, so it is better to leave it unchanged.
  */
+
+#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
+
+#define BOOT_PARAM_PRESERVE(struct_member)				\
+	{								\
+		.start = offsetof(struct boot_params, struct_member),	\
+		.len   = sizeof_mbr(struct boot_params, struct_member),	\
+	}
+
+struct boot_params_to_save {
+	unsigned int start;
+	unsigned int len;
+};
+
 static void sanitize_boot_params(struct boot_params *boot_params)
 {
 	/*
@@ -35,21 +49,40 @@ static void sanitize_boot_params(struct boot_params *boot_params)
 	 * problems again.
 	 */
 	if (boot_params->sentinel) {
-		/* fields in boot_params are left uninitialized, clear them */
-		boot_params->acpi_rsdp_addr = 0;
-		memset(&boot_params->ext_ramdisk_image, 0,
-		       (char *)&boot_params->efi_info -
-			(char *)&boot_params->ext_ramdisk_image);
-		memset(&boot_params->kbd_status, 0,
-		       (char *)&boot_params->hdr -
-		       (char *)&boot_params->kbd_status);
-		memset(&boot_params->_pad7[0], 0,
-		       (char *)&boot_params->edd_mbr_sig_buffer[0] -
-			(char *)&boot_params->_pad7[0]);
-		memset(&boot_params->_pad8[0], 0,
-		       (char *)&boot_params->eddbuf[0] -
-			(char *)&boot_params->_pad8[0]);
-		memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
+		static struct boot_params scratch;
+		char *bp_base = (char *)boot_params;
+		char *save_base = (char *)&scratch;
+		int i;
+
+		const struct boot_params_to_save to_save[] = {
+			BOOT_PARAM_PRESERVE(screen_info),
+			BOOT_PARAM_PRESERVE(apm_bios_info),
+			BOOT_PARAM_PRESERVE(tboot_addr),
+			BOOT_PARAM_PRESERVE(ist_info),
+			BOOT_PARAM_PRESERVE(acpi_rsdp_addr),
+			BOOT_PARAM_PRESERVE(hd0_info),
+			BOOT_PARAM_PRESERVE(hd1_info),
+			BOOT_PARAM_PRESERVE(sys_desc_table),
+			BOOT_PARAM_PRESERVE(olpc_ofw_header),
+			BOOT_PARAM_PRESERVE(efi_info),
+			BOOT_PARAM_PRESERVE(alt_mem_k),
+			BOOT_PARAM_PRESERVE(scratch),
+			BOOT_PARAM_PRESERVE(e820_entries),
+			BOOT_PARAM_PRESERVE(eddbuf_entries),
+			BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
+			BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
+			BOOT_PARAM_PRESERVE(e820_table),
+			BOOT_PARAM_PRESERVE(eddbuf),
+		};
+
+		memset(&scratch, 0, sizeof(scratch));
+
+		for (i = 0; i < ARRAY_SIZE(to_save); i++) {
+			memcpy(save_base + to_save[i].start,
+			       bp_base + to_save[i].start, to_save[i].len);
+		}
+
+		memcpy(boot_params, save_base, sizeof(*boot_params));
 	}
 }
 
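
The sanitize_boot_params() rewrite inverts the old logic: instead of enumerating the ranges to clear, it whitelists the fields to keep, copies them into a zeroed scratch struct, and copies the whole struct back, so any field not on the list is implicitly cleared. A self-contained userspace sketch of the same pattern (the struct and member names here are illustrative, not the real boot_params layout):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct params {
        int keep_a;
        int junk;    /* not whitelisted: must come back zeroed */
        int keep_b;
    };

    struct span { size_t start, len; };

    #define PRESERVE(m) \
        { offsetof(struct params, m), sizeof(((struct params *)0)->m) }

    int main(void)
    {
        struct params p = { .keep_a = 1, .junk = 0xdead, .keep_b = 2 };
        static struct params scratch;
        const struct span to_save[] = { PRESERVE(keep_a), PRESERVE(keep_b) };
        size_t i;

        /* copy only whitelisted members into the zeroed scratch struct */
        memset(&scratch, 0, sizeof(scratch));
        for (i = 0; i < sizeof(to_save) / sizeof(to_save[0]); i++)
            memcpy((char *)&scratch + to_save[i].start,
                   (char *)&p + to_save[i].start, to_save[i].len);

        /* copy back: everything off the list is now cleared */
        memcpy(&p, &scratch, sizeof(p));

        printf("keep_a=%d junk=%d keep_b=%d\n", p.keep_a, p.junk, p.keep_b);
        return 0;  /* prints: keep_a=1 junk=0 keep_b=2 */
    }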
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 1492799b8f43..ee2d91e382f1 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -184,7 +184,8 @@ void __init default_setup_apic_routing(void)
 			def_to_bigsmp = 0;
 			break;
 		}
-		/* If P4 and above fall through */
+		/* P4 and above */
+		/* fall through */
 	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		def_to_bigsmp = 1;
diff --git a/arch/x86/kernel/cpu/umwait.c b/arch/x86/kernel/cpu/umwait.c
index 6a204e7336c1..32b4dc9030aa 100644
--- a/arch/x86/kernel/cpu/umwait.c
+++ b/arch/x86/kernel/cpu/umwait.c
@@ -18,6 +18,12 @@
 static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);
 
 /*
+ * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
+ * hardware or BIOS before kernel boot.
+ */
+static u32 orig_umwait_control_cached __ro_after_init;
+
+/*
  * Serialize access to umwait_control_cached and IA32_UMWAIT_CONTROL MSR in
  * the sysfs write functions.
  */
@@ -53,6 +59,23 @@ static int umwait_cpu_online(unsigned int cpu)
 }
 
 /*
+ * The CPU hotplug callback sets the control MSR to the original control
+ * value.
+ */
+static int umwait_cpu_offline(unsigned int cpu)
+{
+	/*
+	 * This code is protected by the CPU hotplug already and
+	 * orig_umwait_control_cached is never changed after it caches
+	 * the original control MSR value in umwait_init(). So there
+	 * is no race condition here.
+	 */
+	wrmsr(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached, 0);
+
+	return 0;
+}
+
+/*
  * On resume, restore IA32_UMWAIT_CONTROL MSR on the boot processor which
  * is the only active CPU at this time. The MSR is set up on the APs via the
  * CPU hotplug callback.
@@ -185,8 +208,22 @@ static int __init umwait_init(void)
 	if (!boot_cpu_has(X86_FEATURE_WAITPKG))
 		return -ENODEV;
 
+	/*
+	 * Cache the original control MSR value before the control MSR is
+	 * changed. This is the only place where orig_umwait_control_cached
+	 * is modified.
+	 */
+	rdmsrl(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached);
+
 	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "umwait:online",
-				umwait_cpu_online, NULL);
+				umwait_cpu_online, umwait_cpu_offline);
+	if (ret < 0) {
+		/*
+		 * On failure, the control MSR on all CPUs has the
+		 * original control value.
+		 */
+		return ret;
+	}
 
 	register_syscore_ops(&umwait_syscore_ops);
 
diff --git a/arch/x86/math-emu/errors.c b/arch/x86/math-emu/errors.c
index 6b468517ab71..73dc66d887f3 100644
--- a/arch/x86/math-emu/errors.c
+++ b/arch/x86/math-emu/errors.c
@@ -178,13 +178,15 @@ void FPU_printall(void)
 	for (i = 0; i < 8; i++) {
 		FPU_REG *r = &st(i);
 		u_char tagi = FPU_gettagi(i);
+
 		switch (tagi) {
 		case TAG_Empty:
 			continue;
-			break;
 		case TAG_Zero:
 		case TAG_Special:
+			/* Update tagi for the printk below */
 			tagi = FPU_Special(r);
+			/* fall through */
 		case TAG_Valid:
 			printk("st(%d)  %c .%04lx %04lx %04lx %04lx e%+-6d ", i,
 			       getsign(r) ? '-' : '+',
@@ -198,7 +200,6 @@ void FPU_printall(void)
 			printk("Whoops! Error in errors.c: tag%d is %d ", i,
 			       tagi);
 			continue;
-			break;
 		}
 		printk("%s\n", tag_desc[(int)(unsigned)tagi]);
 	}
diff --git a/arch/x86/math-emu/fpu_trig.c b/arch/x86/math-emu/fpu_trig.c
index 783c509f957a..127ea54122d7 100644
--- a/arch/x86/math-emu/fpu_trig.c
+++ b/arch/x86/math-emu/fpu_trig.c
@@ -1352,7 +1352,7 @@ static void fyl2xp1(FPU_REG *st0_ptr, u_char st0_tag)
 	case TW_Denormal:
 		if (denormal_operand() < 0)
 			return;
-
+		/* fall through */
 	case TAG_Zero:
 	case TAG_Valid:
 		setsign(st0_ptr, getsign(st0_ptr) ^ getsign(st1_ptr));
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 5cb8a62e091c..7c3106093c75 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -511,6 +511,7 @@ void cpu_reset(void)
 			"add	%2, %2, %7\n\t"
 			"addi	%0, %0, -1\n\t"
 			"bnez	%0, 1b\n\t"
+			"isync\n\t"
 			/* Jump to identity mapping */
 			"jx	%3\n"
 			"2:\n\t"
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f78d3287dd82..0835f4d8d42e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1958,13 +1958,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
 		rq_qos_cleanup(q, bio);
-
-		cookie = BLK_QC_T_NONE;
-		if (bio->bi_opf & REQ_NOWAIT_INLINE)
-			cookie = BLK_QC_T_EAGAIN;
-		else if (bio->bi_opf & REQ_NOWAIT)
+		if (bio->bi_opf & REQ_NOWAIT)
 			bio_wouldblock_error(bio);
-		return cookie;
+		return BLK_QC_T_NONE;
 	}
 
 	trace_block_getrq(q, bio, bio->bi_opf);
@@ -2666,8 +2662,6 @@ void blk_mq_release(struct request_queue *q)
 	struct blk_mq_hw_ctx *hctx, *next;
 	int i;
 
-	cancel_delayed_work_sync(&q->requeue_work);
-
 	queue_for_each_hw_ctx(q, hctx, i)
 		WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 977c659dcd18..9bfa3ea4ed63 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -892,6 +892,9 @@ static void __blk_release_queue(struct work_struct *work)
 
 	blk_free_queue_stats(q->stats);
 
+	if (queue_is_mq(q))
+		cancel_delayed_work_sync(&q->requeue_work);
+
 	blk_exit_queue(q);
 
 	blk_queue_free_zone_bitmaps(q);
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index dd61fdd400f0..68489d1f00bb 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -448,6 +448,11 @@ config PANEL_BOOT_MESSAGE
 choice
 	prompt "Backlight initial state"
 	default CHARLCD_BL_FLASH
+	---help---
+	  Select the initial backlight state on boot or module load.
+
+	  Previously, there was no option for this: the backlight flashed
+	  briefly on init. Now you can also turn it off/on.
 
 	config CHARLCD_BL_OFF
 		bool "Off"
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 92745efefb54..bef6b85778b6 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -20,7 +20,7 @@
 
 #include <generated/utsrelease.h>
 
-#include <misc/charlcd.h>
+#include "charlcd.h"
 
 #define LCD_MINOR		156
 
diff --git a/include/misc/charlcd.h b/drivers/auxdisplay/charlcd.h
index 8cf6c18b0adb..00911ad0f3de 100644
--- a/include/misc/charlcd.h
+++ b/drivers/auxdisplay/charlcd.h
@@ -6,6 +6,9 @@
  * Copyright (C) 2016-2017 Glider bvba
  */
 
+#ifndef _CHARLCD_H
+#define _CHARLCD_H
+
 struct charlcd {
 	const struct charlcd_ops *ops;
 	const unsigned char *char_conv;	/* Optional */
@@ -37,3 +40,5 @@ int charlcd_register(struct charlcd *lcd);
 int charlcd_unregister(struct charlcd *lcd);
 
 void charlcd_poke(struct charlcd *lcd);
+
+#endif /* CHARLCD_H */
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index ab15b64707ad..bcbe13092327 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -14,8 +14,7 @@
 #include <linux/property.h>
 #include <linux/slab.h>
 
-#include <misc/charlcd.h>
-
+#include "charlcd.h"
 
 enum hd44780_pin {
 	/* Order does matter due to writing to GPIO array subsets! */
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index e06de63497cf..85965953683e 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -55,7 +55,7 @@
 #include <linux/io.h>
 #include <linux/uaccess.h>
 
-#include <misc/charlcd.h>
+#include "charlcd.h"
 
 #define KEYPAD_MINOR		185
 
@@ -1617,6 +1617,8 @@ static void panel_attach(struct parport *port)
 		return;
 
 err_lcd_unreg:
+	if (scan_timer.function)
+		del_timer_sync(&scan_timer);
 	if (lcd.enabled)
 		charlcd_unregister(lcd.charlcd);
 err_unreg_device:
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index a4984136c19d..0fd6f97ee523 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -44,7 +44,7 @@ config REGMAP_IRQ
 
 config REGMAP_SOUNDWIRE
 	tristate
-	depends on SOUNDWIRE_BUS
+	depends on SOUNDWIRE
 
 config REGMAP_SCCB
 	tristate
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3ac6a5d18071..b90dbcd99c03 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -965,6 +965,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 		}
 	}
 
+	err = -ENOMEM;
 	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
 		req = kzalloc(sizeof(*req), GFP_KERNEL);
 		if (!req)
@@ -987,7 +988,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 	err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
 	if (err) {
 		xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
-		return err;
+		goto fail;
 	}
 
 	return 0;
@@ -1007,8 +1008,7 @@ fail:
 		}
 		kfree(req);
 	}
-	return -ENOMEM;
-
+	return err;
 }
 
 static int connect_ring(struct backend_info *be)
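
Taken together, the three xenbus.c hunks seed err with -ENOMEM before the allocation loop and reroute the xen_blkif_map() failure through the common fail: label, so the function both frees what it already allocated and returns the error that actually occurred. A userspace sketch of that error-path shape (function name and error values are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    static int setup_ring(int fail_alloc, int fail_map)
    {
        int err = -12;  /* seed as -ENOMEM for the allocation step */
        void *req = fail_alloc ? NULL : malloc(32);

        if (!req)
            goto fail;

        if (fail_map) {
            err = -5;   /* the map step's own error code */
            goto fail;
        }

        free(req);
        return 0;

    fail:
        free(req);      /* free(NULL) is a no-op, so cleanup is shared */
        return err;     /* whichever error actually occurred */
    }

    int main(void)
    {
        printf("%d %d %d\n", setup_ring(0, 0), setup_ring(1, 0),
               setup_ring(0, 1));  /* prints: 0 -12 -5 */
        return 0;
    }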
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8dda62367816..c28ebf2810f1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2528,7 +2528,7 @@ static int cpufreq_boost_set_sw(int state)
 		}
 
 		ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
-		if (ret)
+		if (ret < 0)
 			break;
 	}
 
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index b6cc90cbc9dc..4e5f9f6e901b 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -50,7 +50,7 @@ struct dw_edma_burst {
50 50
51struct dw_edma_region { 51struct dw_edma_region {
52 phys_addr_t paddr; 52 phys_addr_t paddr;
53 dma_addr_t vaddr; 53 void __iomem *vaddr;
54 size_t sz; 54 size_t sz;
55}; 55};
56 56
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 4c96e1c948f2..dc85f55e1bb8 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -130,19 +130,19 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
 	chip->id = pdev->devfn;
 	chip->irq = pdev->irq;
 
-	dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar];
+	dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar];
 	dw->rg_region.vaddr += pdata->rg_off;
 	dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
 	dw->rg_region.paddr += pdata->rg_off;
 	dw->rg_region.sz = pdata->rg_sz;
 
-	dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar];
+	dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar];
 	dw->ll_region.vaddr += pdata->ll_off;
 	dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
 	dw->ll_region.paddr += pdata->ll_off;
 	dw->ll_region.sz = pdata->ll_sz;
 
-	dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar];
+	dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar];
 	dw->dt_region.vaddr += pdata->dt_off;
 	dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
 	dw->dt_region.paddr += pdata->dt_off;
@@ -158,17 +158,17 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
 	pci_dbg(pdev, "Mode:\t%s\n",
 		dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");
 
-	pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+	pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
 		pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
-		&dw->rg_region.vaddr, &dw->rg_region.paddr);
+		dw->rg_region.vaddr, &dw->rg_region.paddr);
 
-	pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+	pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
 		pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
-		&dw->ll_region.vaddr, &dw->ll_region.paddr);
+		dw->ll_region.vaddr, &dw->ll_region.paddr);
 
-	pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+	pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
 		pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
-		&dw->dt_region.vaddr, &dw->dt_region.paddr);
+		dw->dt_region.vaddr, &dw->dt_region.paddr);
 
 	pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
 
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 8a3180ed49a6..692de47b1670 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -25,7 +25,7 @@ enum dw_edma_control {
 
 static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
 {
-	return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr;
+	return dw->rg_region.vaddr;
 }
 
 #define SET(dw, name, value)				\
@@ -192,13 +192,12 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
 static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 {
 	struct dw_edma_burst *child;
-	struct dw_edma_v0_lli *lli;
-	struct dw_edma_v0_llp *llp;
+	struct dw_edma_v0_lli __iomem *lli;
+	struct dw_edma_v0_llp __iomem *llp;
 	u32 control = 0, i = 0;
-	u64 sar, dar, addr;
 	int j;
 
-	lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr;
+	lli = chunk->ll_region.vaddr;
 
 	if (chunk->cb)
 		control = DW_EDMA_V0_CB;
@@ -214,17 +213,15 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 		/* Transfer size */
 		SET_LL(&lli[i].transfer_size, child->sz);
 		/* SAR - low, high */
-		sar = cpu_to_le64(child->sar);
-		SET_LL(&lli[i].sar_low, lower_32_bits(sar));
-		SET_LL(&lli[i].sar_high, upper_32_bits(sar));
+		SET_LL(&lli[i].sar_low, lower_32_bits(child->sar));
+		SET_LL(&lli[i].sar_high, upper_32_bits(child->sar));
 		/* DAR - low, high */
-		dar = cpu_to_le64(child->dar);
-		SET_LL(&lli[i].dar_low, lower_32_bits(dar));
-		SET_LL(&lli[i].dar_high, upper_32_bits(dar));
+		SET_LL(&lli[i].dar_low, lower_32_bits(child->dar));
+		SET_LL(&lli[i].dar_high, upper_32_bits(child->dar));
 		i++;
 	}
 
-	llp = (struct dw_edma_v0_llp *)&lli[i];
+	llp = (void __iomem *)&lli[i];
 	control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
 	if (!chunk->cb)
 		control |= DW_EDMA_V0_CB;
@@ -232,9 +229,8 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 	/* Channel control */
 	SET_LL(&llp->control, control);
 	/* Linked list - low, high */
-	addr = cpu_to_le64(chunk->ll_region.paddr);
-	SET_LL(&llp->llp_low, lower_32_bits(addr));
-	SET_LL(&llp->llp_high, upper_32_bits(addr));
+	SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr));
+	SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr));
 }
 
 void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -242,7 +238,6 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 	struct dw_edma_chan *chan = chunk->chan;
 	struct dw_edma *dw = chan->chip->dw;
 	u32 tmp;
-	u64 llp;
 
 	dw_edma_v0_core_write_chunk(chunk);
 
@@ -262,9 +257,10 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 		SET_CH(dw, chan->dir, chan->id, ch_control1,
 		       (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
 		/* Linked list - low, high */
-		llp = cpu_to_le64(chunk->ll_region.paddr);
-		SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp));
-		SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp));
+		SET_CH(dw, chan->dir, chan->id, llp_low,
+		       lower_32_bits(chunk->ll_region.paddr));
+		SET_CH(dw, chan->dir, chan->id, llp_high,
+		       upper_32_bits(chunk->ll_region.paddr));
 	}
 	/* Doorbell */
 	SET_RW(dw, chan->dir, doorbell,
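
Besides the __iomem annotations, the dw-edma-v0-core.c hunks drop the spurious cpu_to_le64() round-trips: the 64-bit linked-list address is simply split into two native u32 halves for the llp_low/llp_high registers. A small standalone sketch of that split, mirroring the kernel's lower_32_bits()/upper_32_bits() helpers:

    #include <stdint.h>
    #include <stdio.h>

    /* Split a 64-bit DMA address into the two native 32-bit halves
     * written to the low/high register pair; no byte swapping needed. */
    #define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffULL))
    #define upper_32_bits(n) ((uint32_t)((n) >> 32))

    int main(void)
    {
        uint64_t paddr = 0x000000123456789aULL;  /* example list address */

        printf("llp_low  = 0x%08x\n", lower_32_bits(paddr));
        printf("llp_high = 0x%08x\n", upper_32_bits(paddr));
        return 0;
    }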
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 3226f528cc11..42739508c0d8 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -14,7 +14,7 @@
 #include "dw-edma-core.h"
 
 #define REGS_ADDR(name) \
-	((dma_addr_t *)&regs->name)
+	((void __force *)&regs->name)
 #define REGISTER(name) \
 	{ #name, REGS_ADDR(name) }
 
@@ -40,36 +40,37 @@
 
 static struct dentry				*base_dir;
 static struct dw_edma				*dw;
-static struct dw_edma_v0_regs			*regs;
+static struct dw_edma_v0_regs	__iomem		*regs;
 
 static struct {
-	void					*start;
-	void					*end;
+	void	__iomem				*start;
+	void	__iomem				*end;
 } lim[2][EDMA_V0_MAX_NR_CH];
 
 struct debugfs_entries {
-	char					name[24];
+	const char				*name;
 	dma_addr_t				*reg;
 };
 
 static int dw_edma_debugfs_u32_get(void *data, u64 *val)
 {
+	void __iomem *reg = (void __force __iomem *)data;
 	if (dw->mode == EDMA_MODE_LEGACY &&
-	    data >= (void *)&regs->type.legacy.ch) {
-		void *ptr = (void *)&regs->type.legacy.ch;
+	    reg >= (void __iomem *)&regs->type.legacy.ch) {
+		void __iomem *ptr = &regs->type.legacy.ch;
 		u32 viewport_sel = 0;
 		unsigned long flags;
 		u16 ch;
 
 		for (ch = 0; ch < dw->wr_ch_cnt; ch++)
-			if (lim[0][ch].start >= data && data < lim[0][ch].end) {
-				ptr += (data - lim[0][ch].start);
+			if (lim[0][ch].start >= reg && reg < lim[0][ch].end) {
+				ptr += (reg - lim[0][ch].start);
 				goto legacy_sel_wr;
 			}
 
 		for (ch = 0; ch < dw->rd_ch_cnt; ch++)
-			if (lim[1][ch].start >= data && data < lim[1][ch].end) {
-				ptr += (data - lim[1][ch].start);
+			if (lim[1][ch].start >= reg && reg < lim[1][ch].end) {
+				ptr += (reg - lim[1][ch].start);
 				goto legacy_sel_rd;
 			}
 
@@ -86,7 +87,7 @@ legacy_sel_wr:
 
 		raw_spin_unlock_irqrestore(&dw->lock, flags);
 	} else {
-		*val = readl(data);
+		*val = readl(reg);
 	}
 
 	return 0;
@@ -105,7 +106,7 @@ static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
 	}
 }
 
-static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs,
+static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
 				    struct dentry *dir)
 {
 	int nr_entries;
@@ -288,7 +289,7 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
 	if (!dw)
 		return;
 
-	regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr;
+	regs = dw->rg_region.vaddr;
 	if (!regs)
 		return;
 
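
The debugfs conversion above is mostly about sparse correctness: the register pointers gain __iomem annotations so that accidental plain dereferences of MMIO are flagged at build time, and every access goes through readl(). A minimal kernel-style sketch of the idiom (struct and names here are invented, not the driver's):

    #include <linux/io.h>

    struct my_regs {
            u32 status;
            u32 control;
    };

    static struct my_regs __iomem *regs;    /* set from ioremap() */

    static u32 my_read_status(void)
    {
            /* "return regs->status;" would earn a sparse warning;
             * MMIO must be read through the accessor. */
            return readl(&regs->status);
    }
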
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 89d710899010..de8bfd9a76e9 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -142,7 +142,7 @@ enum d40_events {
  * when the DMA hw is powered off.
  * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
  */
-static u32 d40_backup_regs[] = {
+static __maybe_unused u32 d40_backup_regs[] = {
 	D40_DREG_LCPA,
 	D40_DREG_LCLA,
 	D40_DREG_PRMSE,
@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
 
 #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
 
-static u32 d40_backup_regs_chan[] = {
+static __maybe_unused u32 d40_backup_regs_chan[] = {
 	D40_CHAN_REG_SSCFG,
 	D40_CHAN_REG_SSELT,
 	D40_CHAN_REG_SSPTR,
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index d6e919d3936a..1311de74bfdd 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1366,7 +1366,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
 
 	chan = &dmadev->chan[id];
 	if (!chan) {
-		dev_err(chan2dev(chan), "MDMA channel not initialized\n");
+		dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
 		goto exit;
 	}
 
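
The stm32-mdma hunk fixes an error message that dereferenced the very pointer it had just found invalid: chan2dev(chan) with chan == NULL is itself a NULL dereference, so the message is now emitted through the still-valid dmadev (and demoted to dev_dbg). The shape of the bug, as a hedged sketch:

    chan = &dmadev->chan[id];
    if (!chan) {
            /* BAD: chan2dev(chan) walks the NULL pointer */
            /* dev_err(chan2dev(chan), "channel not initialized\n"); */

            /* GOOD: log via a device we still hold */
            dev_dbg(mdma2dev(dmadev), "channel not initialized\n");
            goto exit;
    }
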
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 2805853e963f..b33cf6e8ab8e 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -712,7 +712,7 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
 	return chan;
 }
 
-static int tegra_adma_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
 {
 	struct tegra_adma *tdma = dev_get_drvdata(dev);
 	struct tegra_adma_chan_regs *ch_reg;
@@ -744,7 +744,7 @@ clk_disable:
 	return 0;
 }
 
-static int tegra_adma_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
 {
 	struct tegra_adma *tdma = dev_get_drvdata(dev);
 	struct tegra_adma_chan_regs *ch_reg;
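
Both the ste_dma40 and tegra210-adma hunks use __maybe_unused instead of #ifdef CONFIG_PM guards: when the PM framework macros compile out, the callbacks and register tables would otherwise trigger "defined but not used" warnings. A minimal sketch of the pattern (driver names invented):

    #include <linux/pm.h>

    static int __maybe_unused my_runtime_suspend(struct device *dev)
    {
            return 0;       /* stop clocks, save context, ... */
    }

    static int __maybe_unused my_runtime_resume(struct device *dev)
    {
            return 0;       /* restore context, start clocks, ... */
    }

    /* With CONFIG_PM=n, SET_RUNTIME_PM_OPS() expands to nothing and the
     * two functions above are referenced from nowhere; __maybe_unused
     * keeps the build quiet without #ifdef clutter. */
    static const struct dev_pm_ops my_pm_ops = {
            SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
    };
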
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index ba2489d4ea24..ba27802efcd0 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1234,7 +1234,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
 	if (src_icg) {
 		d->ccr |= CCR_SRC_AMODE_DBLIDX;
 		d->ei = 1;
-		d->fi = src_icg;
+		d->fi = src_icg + 1;
 	} else if (xt->src_inc) {
 		d->ccr |= CCR_SRC_AMODE_POSTINC;
 		d->fi = 0;
@@ -1249,7 +1249,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
 	if (dst_icg) {
 		d->ccr |= CCR_DST_AMODE_DBLIDX;
 		sg->ei = 1;
-		sg->fi = dst_icg;
+		sg->fi = dst_icg + 1;
 	} else if (xt->dst_inc) {
 		d->ccr |= CCR_DST_AMODE_POSTINC;
 		sg->fi = 0;
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 1db780c0f07b..3caae7f2cf56 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -927,17 +927,33 @@ fail:
 	return status;
 }
 
+#define GET_EFI_CONFIG_TABLE(bits)					\
+static void *get_efi_config_table##bits(efi_system_table_t *_sys_table,\
+					efi_guid_t guid)		\
+{									\
+	efi_system_table_##bits##_t *sys_table;				\
+	efi_config_table_##bits##_t *tables;				\
+	int i;								\
+									\
+	sys_table = (typeof(sys_table))_sys_table;			\
+	tables = (typeof(tables))(unsigned long)sys_table->tables;	\
+									\
+	for (i = 0; i < sys_table->nr_tables; i++) {			\
+		if (efi_guidcmp(tables[i].guid, guid) != 0)		\
+			continue;					\
+									\
+		return (void *)(unsigned long)tables[i].table;		\
+	}								\
+									\
+	return NULL;							\
+}
+GET_EFI_CONFIG_TABLE(32)
+GET_EFI_CONFIG_TABLE(64)
+
 void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid)
 {
-	efi_config_table_t *tables = (efi_config_table_t *)sys_table->tables;
-	int i;
-
-	for (i = 0; i < sys_table->nr_tables; i++) {
-		if (efi_guidcmp(tables[i].guid, guid) != 0)
-			continue;
-
-		return (void *)tables[i].table;
-	}
-
-	return NULL;
+	if (efi_is_64bit())
+		return get_efi_config_table64(sys_table, guid);
+	else
+		return get_efi_config_table32(sys_table, guid);
 }
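
The EFI stub rework exists because the record layout of sys_table->tables differs between 32- and 64-bit firmware; the token-pasting macro stamps out one walker per width and efi_is_64bit() picks between them at run time. The same generate-per-width technique, reduced to a standalone (and entirely hypothetical) example:

    #include <stdio.h>
    #include <stdint.h>

    struct rec32 { uint32_t key; uint32_t val; };
    struct rec64 { uint64_t key; uint64_t val; };

    #define DEFINE_FIND(bits)                                          \
    static uint64_t find##bits(const struct rec##bits *t, int n,       \
                               uint64_t key)                           \
    {                                                                  \
            for (int i = 0; i < n; i++)                                \
                    if (t[i].key == key)                               \
                            return t[i].val;                           \
            return 0;                                                  \
    }
    DEFINE_FIND(32)
    DEFINE_FIND(64)

    int main(void)
    {
            struct rec32 t[] = { { 1, 10 }, { 2, 20 } };

            /* a real caller would branch on the detected width */
            printf("%llu\n", (unsigned long long)find32(t, 2, 2));
            return 0;
    }
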
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 1cf639a51178..04b8ac4432c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4869,7 +4869,7 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
-	WREG32(mmSQ_CMD, value);
+	WREG32_SOC15(GC, 0, mmSQ_CMD, value);
 }
 
 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index fa20201eef3a..cbc480a33376 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -23,6 +23,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/mm.h>
 
 #include "dm_services.h"
 
@@ -1171,8 +1172,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
 
 struct dc_state *dc_create_state(struct dc *dc)
 {
-	struct dc_state *context = kzalloc(sizeof(struct dc_state),
+	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
 					   GFP_KERNEL);
 
 	if (!context)
 		return NULL;
@@ -1192,11 +1193,11 @@ struct dc_state *dc_create_state(struct dc *dc)
 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
 {
 	int i, j;
-	struct dc_state *new_ctx = kmemdup(src_ctx,
-					   sizeof(struct dc_state), GFP_KERNEL);
+	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
 
 	if (!new_ctx)
 		return NULL;
+	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
 
 	for (i = 0; i < MAX_PIPES; i++) {
 		struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
@@ -1230,7 +1231,7 @@ static void dc_state_free(struct kref *kref)
 {
 	struct dc_state *context = container_of(kref, struct dc_state, refcount);
 	dc_resource_state_destruct(context);
-	kfree(context);
+	kvfree(context);
 }
 
 void dc_release_state(struct dc_state *context)
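
The dc.c changes swap kzalloc/kmemdup/kfree for kvzalloc/kvmalloc/kvfree because struct dc_state is large and a physically contiguous kmalloc can fail under fragmentation; kvmalloc falls back to vmalloc, and kvfree releases either kind. Note kmemdup has no kv variant, hence the explicit kvmalloc + memcpy in dc_copy_state. A minimal sketch of the pairing (struct name invented):

    #include <linux/mm.h>

    struct big_state { char payload[256 * 1024]; };

    static struct big_state *big_state_alloc(void)
    {
            /* physically contiguous if possible, vmalloc otherwise */
            return kvzalloc(sizeof(struct big_state), GFP_KERNEL);
    }

    static void big_state_free(struct big_state *s)
    {
            kvfree(s);      /* correct for both backing allocators */
    }
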
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 4c7e31cb45ff..a5d1494a3dc4 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -131,8 +131,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
 
 
 	/* Enable extended register access */
-	ast_enable_mmio(dev);
 	ast_open_key(ast);
+	ast_enable_mmio(dev);
 
 	/* Find out whether P2A works or whether to use device-tree */
 	ast_detect_config_mode(dev, &scu_rev);
@@ -576,6 +576,9 @@ void ast_driver_unload(struct drm_device *dev)
 {
 	struct ast_private *ast = dev->dev_private;
 
+	/* enable standard VGA decode */
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
+
 	ast_release_firmware(dev);
 	kfree(ast->dp501_fw_addr);
 	ast_mode_fini(dev);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index ffccbef962a4..a1cb020e07e5 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -604,7 +604,7 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc,
 		return -EINVAL;
 	ast_open_key(ast);
 
-	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
 
 	ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
 	ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index f7d421359d56..c1d1ac51d1c2 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -46,7 +46,7 @@ void ast_enable_mmio(struct drm_device *dev)
 {
 	struct ast_private *ast = dev->dev_private;
 
-	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
 }
 
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 9f3fd7d96a69..75baff657e43 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1528,9 +1528,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 		if (!intel_gvt_ggtt_validate_range(vgpu,
 					workload->wa_ctx.indirect_ctx.guest_gma,
 					workload->wa_ctx.indirect_ctx.size)) {
-			kmem_cache_free(s->workloads, workload);
 			gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
 				     workload->wa_ctx.indirect_ctx.guest_gma);
+			kmem_cache_free(s->workloads, workload);
 			return ERR_PTR(-EINVAL);
 		}
 	}
@@ -1542,9 +1542,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 		if (!intel_gvt_ggtt_validate_range(vgpu,
 					workload->wa_ctx.per_ctx.guest_gma,
 					CACHELINE_BYTES)) {
-			kmem_cache_free(s->workloads, workload);
 			gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
 				     workload->wa_ctx.per_ctx.guest_gma);
+			kmem_cache_free(s->workloads, workload);
 			return ERR_PTR(-EINVAL);
 		}
 	}
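
Both gvt hunks are pure statement reordering: the gvt_vgpu_err() calls read workload->wa_ctx fields, so they must run before kmem_cache_free() hands the object back to the allocator — the old order was a use-after-free. The rule, sketched with invented names:

    if (!range_is_valid(obj)) {
            /* 1. use the object for diagnostics... */
            pr_err("invalid range at 0x%lx\n", obj->gma);
            /* 2. ...only then free it */
            kmem_cache_free(cache, obj);
            return ERR_PTR(-EINVAL);
    }
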
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 126703816794..5c36c75232e6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -771,16 +771,20 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
 	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
 	int slots;
 
-	/* When restoring duplicated states, we need to make sure that the
-	 * bw remains the same and avoid recalculating it, as the connector's
-	 * bpc may have changed after the state was duplicated
-	 */
-	if (!state->duplicated)
-		asyh->dp.pbn =
-			drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
-					     connector->display_info.bpc * 3);
+	if (crtc_state->mode_changed || crtc_state->connectors_changed) {
+		/*
+		 * When restoring duplicated states, we need to make sure that
+		 * the bw remains the same and avoid recalculating it, as the
+		 * connector's bpc may have changed after the state was
+		 * duplicated
+		 */
+		if (!state->duplicated) {
+			const int bpp = connector->display_info.bpc * 3;
+			const int clock = crtc_state->adjusted_mode.clock;
+
+			asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
+		}
 
-	if (crtc_state->mode_changed) {
 		slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
 						      mstc->port,
 						      asyh->dp.pbn);
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 35ddbec1375a..671c90f34ede 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -95,7 +95,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
 	rmb(); /* for list_empty to work without lock */
 
 	if (list_empty(&entity->list) ||
-	    spsc_queue_peek(&entity->job_queue) == NULL)
+	    spsc_queue_count(&entity->job_queue) == 0)
 		return true;
 
 	return false;
@@ -281,7 +281,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
 	/* Consumption of existing IBs wasn't completed. Forcefully
 	 * remove them here.
 	 */
-	if (spsc_queue_peek(&entity->job_queue)) {
+	if (spsc_queue_count(&entity->job_queue)) {
 		if (sched) {
 			/* Park the kernel for a moment to make sure it isn't processing
 			 * our enity.
diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h
index 999f80a63bff..e70783e33680 100644
--- a/drivers/hv/hv_trace.h
+++ b/drivers/hv/hv_trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM hyperv
diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h
index 574c16004cb2..13d9b141daaa 100644
--- a/drivers/hwtracing/intel_th/msu.h
+++ b/drivers/hwtracing/intel_th/msu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Intel(R) Trace Hub Memory Storage Unit (MSU) data structures
  *
diff --git a/drivers/hwtracing/intel_th/pti.h b/drivers/hwtracing/intel_th/pti.h
index e9381babc84c..7dfc0431333b 100644
--- a/drivers/hwtracing/intel_th/pti.h
+++ b/drivers/hwtracing/intel_th/pti.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Intel(R) Trace Hub PTI output data structures
  *
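
These one-line hunks (here and in i2c-stm32.h below) all fix the same issue: the kernel's license-rules documentation mandates /* */ comment style for SPDX tags in headers (and anything an assembler might include), while C++-style // is reserved for .c sources. Side by side:

    /* example.h -- header files: */
    /* SPDX-License-Identifier: GPL-2.0 */

    // example.c -- C source files:
    // SPDX-License-Identifier: GPL-2.0
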
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index 35b302d983e0..959d4912ec0d 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -69,6 +69,7 @@ struct em_i2c_device {
 	struct completion msg_done;
 	struct clk *sclk;
 	struct i2c_client *slave;
+	int irq;
 };
 
 static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg)
@@ -339,6 +340,12 @@ static int em_i2c_unreg_slave(struct i2c_client *slave)
 
 	writeb(0, priv->base + I2C_OFS_SVA0);
 
+	/*
+	 * Wait for interrupt to finish. New slave irqs cannot happen because we
+	 * cleared the slave address and, thus, only extension codes will be
+	 * detected which do not use the slave ptr.
+	 */
+	synchronize_irq(priv->irq);
 	priv->slave = NULL;
 
 	return 0;
@@ -355,7 +362,7 @@ static int em_i2c_probe(struct platform_device *pdev)
 {
 	struct em_i2c_device *priv;
 	struct resource *r;
-	int irq, ret;
+	int ret;
 
 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
@@ -390,8 +397,8 @@ static int em_i2c_probe(struct platform_device *pdev)
 
 	em_i2c_reset(&priv->adap);
 
-	irq = platform_get_irq(pdev, 0);
-	ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0,
+	priv->irq = platform_get_irq(pdev, 0);
+	ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
 				"em_i2c", priv);
 	if (ret)
 		goto err_clk;
@@ -401,7 +408,8 @@ static int em_i2c_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_clk;
 
-	dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq);
+	dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr,
+		 priv->irq);
 
 	return 0;
 
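
The em_i2c change stores the IRQ number precisely so that em_i2c_unreg_slave() can call synchronize_irq() before clearing priv->slave: a handler already running on another CPU could otherwise dereference the slave pointer mid-teardown. The i2c-rcar hunks below apply the identical pattern. Reduced to its skeleton (register and helper names are illustrative, not a real driver's):

    static int my_unreg_slave(struct i2c_client *slave)
    {
            struct my_priv *priv = i2c_get_clientdata(slave);

            /* stop the hardware generating new slave interrupts */
            writeb(0, priv->base + MY_SLAVE_ADDR_REG);

            /* wait out any handler still executing... */
            synchronize_irq(priv->irq);

            /* ...so nobody can observe the pointer going away */
            priv->slave = NULL;
            return 0;
    }
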
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index b1b8b938d7f4..15f6cde6452f 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -273,8 +273,8 @@ static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx,
 }
 
 /* Functions for DMA support */
-static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
-			       dma_addr_t phy_addr)
+static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
+				dma_addr_t phy_addr)
 {
 	struct imx_i2c_dma *dma;
 	struct dma_slave_config dma_sconfig;
@@ -283,7 +283,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
 
 	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
 	if (!dma)
-		return -ENOMEM;
+		return;
 
 	dma->chan_tx = dma_request_chan(dev, "tx");
 	if (IS_ERR(dma->chan_tx)) {
@@ -328,7 +328,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
 	dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
 		dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
 
-	return 0;
+	return;
 
 fail_rx:
 	dma_release_channel(dma->chan_rx);
@@ -336,8 +336,6 @@ fail_tx:
 	dma_release_channel(dma->chan_tx);
 fail_al:
 	devm_kfree(dev, dma);
-	/* return successfully if there is no dma support */
-	return ret == -ENODEV ? 0 : ret;
 }
 
 static void i2c_imx_dma_callback(void *arg)
@@ -1165,17 +1163,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
 	dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res);
 	dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n",
 		i2c_imx->adapter.name);
+	dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
 
 	/* Init DMA config if supported */
-	ret = i2c_imx_dma_request(i2c_imx, phy_addr);
-	if (ret < 0)
-		goto del_adapter;
+	i2c_imx_dma_request(i2c_imx, phy_addr);
 
-	dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
 	return 0; /* Return OK */
 
-del_adapter:
-	i2c_del_adapter(&i2c_imx->adapter);
 clk_notifier_unregister:
 	clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
 rpm_disable:
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index d39a4606f72d..531c01100b56 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -139,6 +139,7 @@ struct rcar_i2c_priv {
 	enum dma_data_direction dma_direction;
 
 	struct reset_control *rstc;
+	int irq;
 };
 
 #define rcar_i2c_priv_to_dev(p)		((p)->adap.dev.parent)
@@ -861,9 +862,11 @@ static int rcar_unreg_slave(struct i2c_client *slave)
 
 	WARN_ON(!priv->slave);
 
+	/* disable irqs and ensure none is running before clearing ptr */
 	rcar_i2c_write(priv, ICSIER, 0);
 	rcar_i2c_write(priv, ICSCR, 0);
 
+	synchronize_irq(priv->irq);
 	priv->slave = NULL;
 
 	pm_runtime_put(rcar_i2c_priv_to_dev(priv));
@@ -918,7 +921,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
 	struct i2c_adapter *adap;
 	struct device *dev = &pdev->dev;
 	struct i2c_timings i2c_t;
-	int irq, ret;
+	int ret;
 
 	/* Otherwise logic will break because some bytes must always use PIO */
 	BUILD_BUG_ON_MSG(RCAR_MIN_DMA_LEN < 3, "Invalid min DMA length");
@@ -984,10 +987,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
 	pm_runtime_put(dev);
 
 
-	irq = platform_get_irq(pdev, 0);
-	ret = devm_request_irq(dev, irq, rcar_i2c_irq, 0, dev_name(dev), priv);
+	priv->irq = platform_get_irq(pdev, 0);
+	ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv);
 	if (ret < 0) {
-		dev_err(dev, "cannot get irq %d\n", irq);
+		dev_err(dev, "cannot get irq %d\n", priv->irq);
 		goto out_pm_disable;
 	}
 
diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h
index 868755f82f88..2c21893905a3 100644
--- a/drivers/i2c/busses/i2c-stm32.h
+++ b/drivers/i2c/busses/i2c-stm32.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * i2c-stm32.h
  *
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index 0e3c6529fc4c..da073d72f649 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -480,7 +480,7 @@ static int max9611_init(struct max9611_dev *max9611)
 	if (ret)
 		return ret;
 
-	regval = ret & MAX9611_TEMP_MASK;
+	regval &= MAX9611_TEMP_MASK;
 
 	if ((regval > MAX9611_TEMP_MAX_POS &&
 	     regval < MAX9611_TEMP_MIN_NEG) ||
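
The max9611 hunk fixes a masks-the-wrong-variable bug: after a successful read, ret holds the status code 0, so "regval = ret & MAX9611_TEMP_MASK" always yielded 0 and the over-temperature checks below could never fire. Distilled into a runnable toy:

    #include <stdio.h>

    #define TEMP_MASK 0xffc0U

    static int fake_read(unsigned int *out)
    {
            *out = 0x7f87;  /* pretend raw register value */
            return 0;       /* 0 == success */
    }

    int main(void)
    {
            unsigned int regval;
            int ret = fake_read(&regval);

            if (ret)
                    return ret;

            unsigned int wrong = ret & TEMP_MASK;   /* masks the status: 0 */
            regval &= TEMP_MASK;                    /* masks the data */
            printf("wrong=0x%x right=0x%x\n", wrong, regval);
            return 0;
    }
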
diff --git a/drivers/iio/frequency/adf4371.c b/drivers/iio/frequency/adf4371.c
index e48f15cc9ab5..ff82863cbf42 100644
--- a/drivers/iio/frequency/adf4371.c
+++ b/drivers/iio/frequency/adf4371.c
@@ -276,11 +276,11 @@ static int adf4371_set_freq(struct adf4371_state *st, unsigned long long freq,
 	st->buf[0] = st->integer >> 8;
 	st->buf[1] = 0x40; /* REG12 default */
 	st->buf[2] = 0x00;
-	st->buf[3] = st->fract2 & 0xFF;
-	st->buf[4] = st->fract2 >> 7;
-	st->buf[5] = st->fract2 >> 15;
+	st->buf[3] = st->fract1 & 0xFF;
+	st->buf[4] = st->fract1 >> 8;
+	st->buf[5] = st->fract1 >> 16;
 	st->buf[6] = ADF4371_FRAC2WORD_L(st->fract2 & 0x7F) |
-		     ADF4371_FRAC1WORD(st->fract1 >> 23);
+		     ADF4371_FRAC1WORD(st->fract1 >> 24);
 	st->buf[7] = ADF4371_FRAC2WORD_H(st->fract2 >> 7);
 	st->buf[8] = st->mod2 & 0xFF;
 	st->buf[9] = ADF4371_MOD2WORD(st->mod2 >> 8);
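
The adf4371 hunk repacks the fractional words: judging from the corrected code, FRAC1 is a 25-bit quantity whose bits [23:0] land byte-wise in buf[3..5] and whose bit 24 shares a register with the low seven bits of FRAC2, while the old code shifted fract2 into those bytes with the wrong strides. A runnable sketch of the corrected layout (bit positions inside the shared byte are illustrative, not taken from the datasheet):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t fract1 = 0x1abcdef;    /* 25-bit word */
            uint32_t fract2 = 0x1234;
            uint8_t buf[5];

            buf[0] = fract1 & 0xff;                 /* FRAC1[7:0]   */
            buf[1] = (fract1 >> 8) & 0xff;          /* FRAC1[15:8]  */
            buf[2] = (fract1 >> 16) & 0xff;         /* FRAC1[23:16] */
            buf[3] = ((fract2 & 0x7f) << 1) | ((fract1 >> 24) & 1);
            buf[4] = (fract2 >> 7) & 0xff;

            for (int i = 0; i < 5; i++)
                    printf("buf[%d] = 0x%02x\n", i, buf[i]);
            return 0;
    }
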
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 45d5164e9574..b79890739a2c 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -38,6 +38,9 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
 	int ret;
 
 	port_counter = &dev->port_data[port].port_counter;
+	if (!port_counter->hstats)
+		return -EOPNOTSUPP;
+
 	mutex_lock(&port_counter->lock);
 	if (on) {
 		ret = __counter_set_mode(&port_counter->mode,
@@ -509,6 +512,9 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
 	if (!rdma_is_port_valid(dev, port))
 		return -EINVAL;
 
+	if (!dev->port_data[port].port_counter.hstats)
+		return -EOPNOTSUPP;
+
 	qp = rdma_counter_get_qp(dev, qp_num);
 	if (!qp)
 		return -ENOENT;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 783e465e7c41..87d40d1ecdde 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -1952,12 +1952,16 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	if (fill_nldev_handle(msg, device) ||
 	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
-	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode))
+	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
+		ret = -EMSGSIZE;
 		goto err_msg;
+	}
 
 	if ((mode == RDMA_COUNTER_MODE_AUTO) &&
-	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask))
+	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
+		ret = -EMSGSIZE;
 		goto err_msg;
+	}
 
 	nlmsg_end(msg, nlh);
 	ib_device_put(device);
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 2a75c6f8d827..c0e15db34680 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -112,10 +112,6 @@ static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
 	 * prevent any further fault handling on this MR.
 	 */
 	ib_umem_notifier_start_account(umem_odp);
-	umem_odp->dying = 1;
-	/* Make sure that the fact the umem is dying is out before we release
-	 * all pending page faults. */
-	smp_wmb();
 	complete_all(&umem_odp->notifier_completion);
 	umem_odp->umem.context->invalidate_range(
 		umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp));
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index ec4370f99381..af5bbb35c058 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2026,7 +2026,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
 			event_sub->eventfd =
 				eventfd_ctx_fdget(redirect_fd);
 
-			if (IS_ERR(event_sub)) {
+			if (IS_ERR(event_sub->eventfd)) {
 				err = PTR_ERR(event_sub->eventfd);
 				event_sub->eventfd = NULL;
 				goto err;
@@ -2644,12 +2644,13 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
 	struct devx_async_event_file *ev_file = filp->private_data;
 	struct devx_event_subscription *event_sub, *event_sub_tmp;
 	struct devx_async_event_data *entry, *tmp;
+	struct mlx5_ib_dev *dev = ev_file->dev;
 
-	mutex_lock(&ev_file->dev->devx_event_table.event_xa_lock);
+	mutex_lock(&dev->devx_event_table.event_xa_lock);
 	/* delete the subscriptions which are related to this FD */
 	list_for_each_entry_safe(event_sub, event_sub_tmp,
 				 &ev_file->subscribed_events_list, file_list) {
-		devx_cleanup_subscription(ev_file->dev, event_sub);
+		devx_cleanup_subscription(dev, event_sub);
 		if (event_sub->eventfd)
 			eventfd_ctx_put(event_sub->eventfd);
 
@@ -2658,7 +2659,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
 		kfree_rcu(event_sub, rcu);
 	}
 
-	mutex_unlock(&ev_file->dev->devx_event_table.event_xa_lock);
+	mutex_unlock(&dev->devx_event_table.event_xa_lock);
 
 	/* free the pending events allocation */
 	if (!ev_file->omit_data) {
@@ -2670,7 +2671,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
 	}
 
 	uverbs_close_fd(filp);
-	put_device(&ev_file->dev->ib_dev.dev);
+	put_device(&dev->ib_dev.dev);
 	return 0;
 }
 
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 81da82050d05..1d257d1b3b0d 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -579,7 +579,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 			u32 flags)
 {
 	int npages = 0, current_seq, page_shift, ret, np;
-	bool implicit = false;
 	struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
 	bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
 	bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
@@ -594,7 +593,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 		if (IS_ERR(odp))
 			return PTR_ERR(odp);
 		mr = odp->private;
-		implicit = true;
 	} else {
 		odp = odp_mr;
 	}
@@ -682,19 +680,15 @@ next_mr:
 
 out:
 	if (ret == -EAGAIN) {
-		if (implicit || !odp->dying) {
-			unsigned long timeout =
-				msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
-
-			if (!wait_for_completion_timeout(
-					&odp->notifier_completion,
-					timeout)) {
-				mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
-					     current_seq, odp->notifiers_seq, odp->notifiers_count);
-			}
-		} else {
-			/* The MR is being killed, kill the QP as well. */
-			ret = -EFAULT;
+		unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
+
+		if (!wait_for_completion_timeout(&odp->notifier_completion,
+						 timeout)) {
+			mlx5_ib_warn(
+				dev,
+				"timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
+				current_seq, odp->notifiers_seq,
+				odp->notifiers_count);
 		}
 	}
 
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
index dace276aea14..b622fc62f2cd 100644
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -1,6 +1,6 @@
 config RDMA_SIW
 	tristate "Software RDMA over TCP/IP (iWARP) driver"
-	depends on INET && INFINIBAND && LIBCRC32C && 64BIT
+	depends on INET && INFINIBAND && LIBCRC32C
 	select DMA_VIRT_OPS
 	help
 	This driver implements the iWARP RDMA transport over
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 03fd7b2f595f..77b1aabf6ff3 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -214,7 +214,7 @@ struct siw_wqe {
 struct siw_cq {
 	struct ib_cq base_cq;
 	spinlock_t lock;
-	u64 *notify;
+	struct siw_cq_ctrl *notify;
 	struct siw_cqe *queue;
 	u32 cq_put;
 	u32 cq_get;
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index d0f140daf659..05a92f997f60 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -160,10 +160,8 @@ static int siw_init_cpulist(void)
 
 out_err:
 	siw_cpu_info.num_nodes = 0;
-	while (i) {
+	while (--i >= 0)
 		kfree(siw_cpu_info.tx_valid_cpus[i]);
-		siw_cpu_info.tx_valid_cpus[i--] = NULL;
-	}
 	kfree(siw_cpu_info.tx_valid_cpus);
 	siw_cpu_info.tx_valid_cpus = NULL;
 
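
The siw_init_cpulist unwind loop had two defects: starting at the failing index it touched a slot that was never allocated, and stopping at i == 0 it leaked the first one. Pre-decrementing before the access fixes both. The canonical shape, runnable in plain C:

    #include <stdlib.h>

    static int alloc_all(void **slots, int n, size_t sz)
    {
            int i;

            for (i = 0; i < n; i++) {
                    slots[i] = malloc(sz);
                    if (!slots[i])
                            goto unwind;
            }
            return 0;

    unwind:
            /* pre-decrement: slot i failed, so start at i - 1, end at 0 */
            while (--i >= 0)
                    free(slots[i]);
            return -1;
    }

    int main(void)
    {
            void *slots[8];

            if (alloc_all(slots, 8, 64))
                    return 1;
            for (int i = 0; i < 8; i++)
                    free(slots[i]);
            return 0;
    }
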
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index e27bd5b35b96..0990307c5d2c 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -1013,18 +1013,24 @@ out:
  */
 static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags)
 {
-	u64 cq_notify;
+	u32 cq_notify;
 
 	if (!cq->base_cq.comp_handler)
 		return false;
 
-	cq_notify = READ_ONCE(*cq->notify);
+	/* Read application shared notification state */
+	cq_notify = READ_ONCE(cq->notify->flags);
 
 	if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) ||
 	    ((cq_notify & SIW_NOTIFY_SOLICITED) &&
 	     (flags & SIW_WQE_SOLICITED))) {
-		/* dis-arm CQ */
-		smp_store_mb(*cq->notify, SIW_NOTIFY_NOT);
+		/*
+		 * CQ notification is one-shot: Since the
+		 * current CQE causes user notification,
+		 * the CQ gets dis-aremd and must be re-aremd
+		 * by the user for a new notification.
+		 */
+		WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);
 
 		return true;
 	}
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 32dc79d0e898..e7f3a2379d9d 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -1049,7 +1049,7 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
 
 	spin_lock_init(&cq->lock);
 
-	cq->notify = &((struct siw_cq_ctrl *)&cq->queue[size])->notify;
+	cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];
 
 	if (udata) {
 		struct siw_uresp_create_cq uresp = {};
@@ -1141,11 +1141,17 @@ int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
 	siw_dbg_cq(cq, "flags: 0x%02x\n", flags);
 
 	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
-		/* CQ event for next solicited completion */
-		smp_store_mb(*cq->notify, SIW_NOTIFY_SOLICITED);
+		/*
+		 * Enable CQ event for next solicited completion.
+		 * and make it visible to all associated producers.
+		 */
+		smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
 	else
-		/* CQ event for any signalled completion */
-		smp_store_mb(*cq->notify, SIW_NOTIFY_ALL);
+		/*
+		 * Enable CQ event for any signalled completion.
+		 * and make it visible to all associated producers.
+		 */
+		smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);
 
 	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
 		return cq->cq_put - cq->cq_get;
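
The siw hunks replace a bare u64 notification word with struct siw_cq_ctrl and pair READ_ONCE() with WRITE_ONCE()/smp_store_mb(), since the flags word is shared with a user-space producer and the CQ arm is one-shot: the consumer that fires the notification also disarms it. A reduced kernel-style sketch of one-shot arming (names invented):

    struct cq_ctrl { u32 flags; };  /* mapped into user space */

    #define NOTIFY_NOT 0U
    #define NOTIFY_ALL 1U

    static bool consume_arm(struct cq_ctrl *ctrl)
    {
            u32 f = READ_ONCE(ctrl->flags);

            if (f & NOTIFY_ALL) {
                    /* one-shot: disarm; the user must re-arm explicitly */
                    WRITE_ONCE(ctrl->flags, NOTIFY_NOT);
                    return true;
            }
            return false;
    }

    static void rearm(struct cq_ctrl *ctrl)
    {
            /* store + full barrier so producers see the new state */
            smp_store_mb(ctrl->flags, NOTIFY_ALL);
    }
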
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index a9a9fabd3968..c5c93e48b4db 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1186,8 +1186,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
 		ste_live = true;
 		break;
 	case STRTAB_STE_0_CFG_ABORT:
-		if (disable_bypass)
+		BUG_ON(!disable_bypass);
 		break;
 	default:
 		BUG(); /* STE corruption */
 	}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index a7f9c3edbcb2..d991d40f797f 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -459,13 +459,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
-	size_t iova_off = 0;
+	struct iova_domain *iovad = &cookie->iovad;
+	size_t iova_off = iova_offset(iovad, phys);
 	dma_addr_t iova;
 
-	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
-		iova_off = iova_offset(&cookie->iovad, phys);
-		size = iova_align(&cookie->iovad, size + iova_off);
-	}
+	size = iova_align(iovad, size + iova_off);
 
 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
 	if (!iova)
@@ -574,7 +572,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	struct iova_domain *iovad = &cookie->iovad;
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
-	pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 	struct page **pages;
 	struct sg_table sgt;
@@ -764,7 +762,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 		 * - and wouldn't make the resulting output segment too long
 		 */
 		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
-		    (cur_len + s_length <= max_len)) {
+		    (max_len - cur_len >= s_length)) {
 			/* ...then concatenate it with the previous one */
 			cur_len += s_length;
 		} else {
@@ -975,7 +973,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 		return NULL;
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
-		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 
 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
 				VM_USERMAP, prot, __builtin_return_address(0));
@@ -1035,7 +1033,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long pfn, off = vma->vm_pgoff;
 	int ret;
 
-	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
@@ -1147,16 +1145,21 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!msi_page)
 		return NULL;
 
-	iova = __iommu_dma_map(dev, msi_addr, size, prot);
-	if (iova == DMA_MAPPING_ERROR)
+	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+	if (!iova)
 		goto out_free_page;
 
+	if (iommu_map(domain, iova, msi_addr, size, prot))
+		goto out_free_iova;
+
 	INIT_LIST_HEAD(&msi_page->list);
 	msi_page->phys = msi_addr;
 	msi_page->iova = iova;
 	list_add(&msi_page->list, &cookie->msi_page_list);
 	return msi_page;
 
+out_free_iova:
+	iommu_dma_free_iova(cookie, iova, size);
 out_free_page:
 	kfree(msi_page);
 	return NULL;
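
The last dma-iommu hunk splits the MSI-page mapping into an explicit IOVA allocation followed by iommu_map(), which in turn needs a second unwind label so that a mapping failure releases the IOVA while an allocation failure skips it. The goto-ladder shape, sketched with hypothetical helpers:

    	iova = alloc_iova_space(domain, size);          /* step 1 */
    	if (!iova)
    		goto out_free_page;

    	if (iommu_map(domain, iova, phys, size, prot))  /* step 2 */
    		goto out_free_iova;

    	return page;                                    /* success */

    out_free_iova:
    	free_iova_space(domain, iova, size);    /* undo step 1 only */
    out_free_page:
    	kfree(page);                            /* undo step 0 */
    	return NULL;
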
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
index 2b25d9c59336..471f05d452e0 100644
--- a/drivers/iommu/intel-iommu-debugfs.c
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -235,7 +235,7 @@ static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
 	tbl_wlk.ctx_entry = context;
 	m->private = &tbl_wlk;
 
-	if (pasid_supported(iommu) && is_pasid_enabled(context)) {
+	if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) {
 		pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
 		pasid_dir_size = get_pasid_dir_size(context);
 		pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index bdaed2da8a55..12d094d08c0a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3449,6 +3449,7 @@ static bool iommu_need_mapping(struct device *dev)
 			dmar_domain = to_dmar_domain(domain);
 			dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
 		}
+		dmar_remove_one_dev_info(dev);
 		get_private_domain_for_dev(dev);
 	}
 
@@ -4790,7 +4791,8 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 
 	/* free the private domain */
 	if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
-	    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY))
+	    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+	    list_empty(&domain->devices))
 		domain_exit(info->domain);
 
 	free_devinfo_mem(info);
@@ -4803,7 +4805,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
 
 	spin_lock_irqsave(&device_domain_lock, flags);
 	info = dev->archdata.iommu;
-	__dmar_remove_one_dev_info(info);
+	if (info)
+		__dmar_remove_one_dev_info(info);
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
@@ -5281,6 +5284,7 @@ static int intel_iommu_add_device(struct device *dev)
 	if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
 		ret = iommu_request_dm_for_dev(dev);
 		if (ret) {
+			dmar_remove_one_dev_info(dev);
 			dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
 			domain_add_dev_info(si_domain, dev);
 			dev_info(dev,
@@ -5291,6 +5295,7 @@ static int intel_iommu_add_device(struct device *dev)
 	if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
 		ret = iommu_request_dma_domain_for_dev(dev);
 		if (ret) {
+			dmar_remove_one_dev_info(dev);
 			dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
 			if (!get_private_domain_for_dev(dev)) {
 				dev_warn(dev,
@@ -5316,6 +5321,8 @@ static void intel_iommu_remove_device(struct device *dev)
 	if (!iommu)
 		return;
 
+	dmar_remove_one_dev_info(dev);
+
 	iommu_group_remove_device(dev);
 
 	iommu_device_unlink(&iommu->iommu, dev);
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index 29e3f5da59c1..11ec048929e8 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -253,8 +253,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
 	 */
 
 	pixsize = vout->bpp * vout->vrfb_bpp;
-	dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) -
-		   (vout->pix.width * vout->bpp)) + 1;
+	dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp;
 
 	xt->src_start = vout->buf_phy_addr[vb->i];
 	xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
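
This omap_vout hunk is the caller-side half of the ti/omap-dma change earlier in this diff: the engine now adds the +1 that OMAP's double-indexed frame-index encoding requires (d->fi = icg + 1), so callers hand over the raw inter-chunk gap rather than pre-biasing it. The arithmetic, as a runnable toy with made-up geometry:

    #include <stdio.h>

    int main(void)
    {
            int pitch_bytes = 2048 * 2;     /* hypothetical line pitch  */
            int line_bytes  = 720 * 2;      /* hypothetical visible row */
            int icg = pitch_bytes - line_bytes;     /* gap between rows */

            /* the dmaengine driver applies the hardware bias itself now */
            int fi = icg + 1;

            printf("icg = %d, frame index programmed = %d\n", icg, fi);
            return 0;
    }
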
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 6abfc8e92fcc..16900357afc2 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -465,6 +465,7 @@ config PCI_ENDPOINT_TEST
 
 config XILINX_SDFEC
 	tristate "Xilinx SDFEC 16"
+	depends on HAS_IOMEM
 	help
 	  This option enables support for the Xilinx SDFEC (Soft Decision
 	  Forward Error Correction) driver. This enables a char driver
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 0c4894dd9c02..7a8f9d0b71b5 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -970,7 +970,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
 	rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
 	if (rc) {
 		dev_err(hdev->dev, "failed to initialize kernel context\n");
-		goto free_ctx;
+		kfree(hdev->kernel_ctx);
+		goto mmu_fini;
 	}
 
 	rc = hl_cb_pool_init(hdev);
@@ -1053,8 +1054,6 @@ release_ctx:
 	if (hl_ctx_put(hdev->kernel_ctx) != 1)
 		dev_err(hdev->dev,
 			"kernel ctx is still alive on initialization failure\n");
-free_ctx:
-	kfree(hdev->kernel_ctx);
 mmu_fini:
 	hl_mmu_fini(hdev);
 eq_fini:
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index a0e181714891..271c5c8f53b4 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -2729,9 +2729,10 @@ void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
 				GOYA_ASYNC_EVENT_ID_PI_UPDATE);
 }
 
-void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
+void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
 {
-	/* Not needed in Goya */
+	/* The QMANs are on the SRAM so need to copy to IO space */
+	memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd));
 }
 
 static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
@@ -3313,9 +3314,11 @@ static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
 	int rc;
 
 	dev_dbg(hdev->dev, "DMA packet details:\n");
-	dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
-	dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
-	dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
+	dev_dbg(hdev->dev, "source == 0x%llx\n",
+		le64_to_cpu(user_dma_pkt->src_addr));
+	dev_dbg(hdev->dev, "destination == 0x%llx\n",
+		le64_to_cpu(user_dma_pkt->dst_addr));
+	dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
 
 	ctl = le32_to_cpu(user_dma_pkt->ctl);
 	user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
@@ -3344,9 +3347,11 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
 					struct packet_lin_dma *user_dma_pkt)
 {
 	dev_dbg(hdev->dev, "DMA packet details:\n");
-	dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
-	dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
-	dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
+	dev_dbg(hdev->dev, "source == 0x%llx\n",
+		le64_to_cpu(user_dma_pkt->src_addr));
+	dev_dbg(hdev->dev, "destination == 0x%llx\n",
+		le64_to_cpu(user_dma_pkt->dst_addr));
+	dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
 
 	/*
 	 * WA for HW-23.
@@ -3386,7 +3391,8 @@ static int goya_validate_wreg32(struct hl_device *hdev,
 
 	dev_dbg(hdev->dev, "WREG32 packet details:\n");
 	dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
-	dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value);
+	dev_dbg(hdev->dev, "value == 0x%x\n",
+		le32_to_cpu(wreg_pkt->value));
 
 	if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
 		dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
@@ -3428,12 +3434,13 @@ static int goya_validate_cb(struct hl_device *hdev,
 	while (cb_parsed_length < parser->user_cb_size) {
 		enum packet_id pkt_id;
 		u16 pkt_size;
-		void *user_pkt;
+		struct goya_packet *user_pkt;
 
-		user_pkt = (void *) (uintptr_t)
+		user_pkt = (struct goya_packet *) (uintptr_t)
 			(parser->user_cb->kernel_address + cb_parsed_length);
 
-		pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
+		pkt_id = (enum packet_id) (
+				(le64_to_cpu(user_pkt->header) &
 				PACKET_HEADER_PACKET_ID_MASK) >>
 					PACKET_HEADER_PACKET_ID_SHIFT);
 
@@ -3453,7 +3460,8 @@ static int goya_validate_cb(struct hl_device *hdev,
 			 * need to validate here as well because patch_cb() is
 			 * not called in MMU path while this function is called
 			 */
-			rc = goya_validate_wreg32(hdev, parser, user_pkt);
+			rc = goya_validate_wreg32(hdev,
+				parser, (struct packet_wreg32 *) user_pkt);
 			break;
 
 		case PACKET_WREG_BULK:
@@ -3481,10 +3489,10 @@ static int goya_validate_cb(struct hl_device *hdev,
 		case PACKET_LIN_DMA:
 			if (is_mmu)
 				rc = goya_validate_dma_pkt_mmu(hdev, parser,
-						user_pkt);
+					(struct packet_lin_dma *) user_pkt);
 			else
 				rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
-						user_pkt);
+					(struct packet_lin_dma *) user_pkt);
 			break;
 
 		case PACKET_MSG_LONG:
@@ -3657,15 +3665,16 @@ static int goya_patch_cb(struct hl_device *hdev,
 		enum packet_id pkt_id;
 		u16 pkt_size;
 		u32 new_pkt_size = 0;
-		void *user_pkt, *kernel_pkt;
+		struct goya_packet *user_pkt, *kernel_pkt;
 
-		user_pkt = (void *) (uintptr_t)
+		user_pkt = (struct goya_packet *) (uintptr_t)
 			(parser->user_cb->kernel_address + cb_parsed_length);
-		kernel_pkt = (void *) (uintptr_t)
+		kernel_pkt = (struct goya_packet *) (uintptr_t)
 			(parser->patched_cb->kernel_address +
 					cb_patched_cur_length);
 
-		pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
+		pkt_id = (enum packet_id) (
+				(le64_to_cpu(user_pkt->header) &
 				PACKET_HEADER_PACKET_ID_MASK) >>
 					PACKET_HEADER_PACKET_ID_SHIFT);
3671 3680
@@ -3680,15 +3689,18 @@ static int goya_patch_cb(struct hl_device *hdev,
3680 3689
3681 switch (pkt_id) { 3690 switch (pkt_id) {
3682 case PACKET_LIN_DMA: 3691 case PACKET_LIN_DMA:
3683 rc = goya_patch_dma_packet(hdev, parser, user_pkt, 3692 rc = goya_patch_dma_packet(hdev, parser,
3684 kernel_pkt, &new_pkt_size); 3693 (struct packet_lin_dma *) user_pkt,
3694 (struct packet_lin_dma *) kernel_pkt,
3695 &new_pkt_size);
3685 cb_patched_cur_length += new_pkt_size; 3696 cb_patched_cur_length += new_pkt_size;
3686 break; 3697 break;
3687 3698
3688 case PACKET_WREG_32: 3699 case PACKET_WREG_32:
3689 memcpy(kernel_pkt, user_pkt, pkt_size); 3700 memcpy(kernel_pkt, user_pkt, pkt_size);
3690 cb_patched_cur_length += pkt_size; 3701 cb_patched_cur_length += pkt_size;
3691 rc = goya_validate_wreg32(hdev, parser, kernel_pkt); 3702 rc = goya_validate_wreg32(hdev, parser,
3703 (struct packet_wreg32 *) kernel_pkt);
3692 break; 3704 break;
3693 3705
3694 case PACKET_WREG_BULK: 3706 case PACKET_WREG_BULK:
@@ -4352,6 +4364,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4352 size_t total_pkt_size; 4364 size_t total_pkt_size;
4353 long result; 4365 long result;
4354 int rc; 4366 int rc;
4367 int irq_num_entries, irq_arr_index;
4368 __le32 *goya_irq_arr;
4355 4369
4356 total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) + 4370 total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
4357 irq_arr_size; 4371 irq_arr_size;
@@ -4369,8 +4383,16 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4369 if (!pkt) 4383 if (!pkt)
4370 return -ENOMEM; 4384 return -ENOMEM;
4371 4385
4372 pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0])); 4386 irq_num_entries = irq_arr_size / sizeof(irq_arr[0]);
4373 memcpy(&pkt->irqs, irq_arr, irq_arr_size); 4387 pkt->length = cpu_to_le32(irq_num_entries);
4388
 4389	/* We must perform any necessary endianness conversion on the irq
4390 * array being passed to the goya hardware
4391 */
4392 for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs;
4393 irq_arr_index < irq_num_entries ; irq_arr_index++)
4394 goya_irq_arr[irq_arr_index] =
4395 cpu_to_le32(irq_arr[irq_arr_index]);
4374 4396
4375 pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY << 4397 pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
4376 ARMCP_PKT_CTL_OPCODE_SHIFT); 4398 ARMCP_PKT_CTL_OPCODE_SHIFT);
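
The loop added above is an instance of a general pattern: a CPU-endian u32 array must be converted element by element before a little-endian device reads it. A minimal standalone sketch of the pattern, with illustrative names that are not from the driver:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Copy a CPU-order u32 array into the little-endian layout a LE
     * device expects; cpu_to_le32() is a no-op on little-endian hosts,
     * so this costs nothing where the conversion is not needed.
     */
    static void demo_u32_array_to_le32(__le32 *dst, const u32 *src,
                                       unsigned int n)
    {
            unsigned int i;

            for (i = 0; i < n; i++)
                    dst[i] = cpu_to_le32(src[i]);
    }
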
@@ -5042,7 +5064,7 @@ static const struct hl_asic_funcs goya_funcs = {
5042 .resume = goya_resume, 5064 .resume = goya_resume,
5043 .cb_mmap = goya_cb_mmap, 5065 .cb_mmap = goya_cb_mmap,
5044 .ring_doorbell = goya_ring_doorbell, 5066 .ring_doorbell = goya_ring_doorbell,
5045 .flush_pq_write = goya_flush_pq_write, 5067 .pqe_write = goya_pqe_write,
5046 .asic_dma_alloc_coherent = goya_dma_alloc_coherent, 5068 .asic_dma_alloc_coherent = goya_dma_alloc_coherent,
5047 .asic_dma_free_coherent = goya_dma_free_coherent, 5069 .asic_dma_free_coherent = goya_dma_free_coherent,
5048 .get_int_queue_base = goya_get_int_queue_base, 5070 .get_int_queue_base = goya_get_int_queue_base,
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index f8c611883dc1..d7f48c9c41cd 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -177,7 +177,7 @@ int goya_late_init(struct hl_device *hdev);
177void goya_late_fini(struct hl_device *hdev); 177void goya_late_fini(struct hl_device *hdev);
178 178
179void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi); 179void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
180void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val); 180void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd);
181void goya_update_eq_ci(struct hl_device *hdev, u32 val); 181void goya_update_eq_ci(struct hl_device *hdev, u32 val);
182void goya_restore_phase_topology(struct hl_device *hdev); 182void goya_restore_phase_topology(struct hl_device *hdev);
183int goya_context_switch(struct hl_device *hdev, u32 asid); 183int goya_context_switch(struct hl_device *hdev, u32 asid);
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 6a4c64b97f38..ce83adafcf2d 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -441,7 +441,11 @@ enum hl_pll_frequency {
441 * @resume: handles IP specific H/W or SW changes for resume. 441 * @resume: handles IP specific H/W or SW changes for resume.
442 * @cb_mmap: maps a CB. 442 * @cb_mmap: maps a CB.
443 * @ring_doorbell: increment PI on a given QMAN. 443 * @ring_doorbell: increment PI on a given QMAN.
444 * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed. 444 * @pqe_write: Write the PQ entry to the PQ. This is ASIC-specific
445 * function because the PQs are located in different memory areas
446 * per ASIC (SRAM, DRAM, Host memory) and therefore, the method of
447 * writing the PQE must match the destination memory area
448 * properties.
445 * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling 449 * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
446 * dma_alloc_coherent(). This is ASIC function because 450 * dma_alloc_coherent(). This is ASIC function because
447 * its implementation is not trivial when the driver 451 * its implementation is not trivial when the driver
@@ -510,7 +514,8 @@ struct hl_asic_funcs {
510 int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma, 514 int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
511 u64 kaddress, phys_addr_t paddress, u32 size); 515 u64 kaddress, phys_addr_t paddress, u32 size);
512 void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi); 516 void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
513 void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val); 517 void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
518 struct hl_bd *bd);
514 void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size, 519 void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
515 dma_addr_t *dma_handle, gfp_t flag); 520 dma_addr_t *dma_handle, gfp_t flag);
516 void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size, 521 void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
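
The replaced hook only flushed a PQ write; the new pqe_write hook performs the write itself, which lets each ASIC match the access method to where its PQ lives. For a PQ in plain coherent host memory, an implementation can be as small as the two 64-bit stores that int_hw_queue_schedule_job() used to do inline (see the hw_queue.c hunk below). A hypothetical sketch, not goya's actual implementation:

    /* struct hl_bd is two little-endian 64-bit words (ctl + len, then
     * ptr), so a host-memory PQ can take it as two plain stores. An ASIC
     * whose PQ sits in SRAM or DRAM would need its own access routine.
     */
    static void demo_pqe_write(struct hl_device *hdev, __le64 *pqe,
                               struct hl_bd *bd)
    {
            __le64 *pbd = (__le64 *) bd;

            pqe[0] = pbd[0];        /* ctl + len */
            pqe[1] = pbd[1];        /* ptr */
    }
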
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index e3b5517897ea..5f5673b74985 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -290,23 +290,19 @@ static void int_hw_queue_schedule_job(struct hl_cs_job *job)
290 struct hl_device *hdev = job->cs->ctx->hdev; 290 struct hl_device *hdev = job->cs->ctx->hdev;
291 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; 291 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
292 struct hl_bd bd; 292 struct hl_bd bd;
293 u64 *pi, *pbd = (u64 *) &bd; 293 __le64 *pi;
294 294
295 bd.ctl = 0; 295 bd.ctl = 0;
296 bd.len = __cpu_to_le32(job->job_cb_size); 296 bd.len = cpu_to_le32(job->job_cb_size);
297 bd.ptr = __cpu_to_le64((u64) (uintptr_t) job->user_cb); 297 bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);
298 298
299 pi = (u64 *) (uintptr_t) (q->kernel_address + 299 pi = (__le64 *) (uintptr_t) (q->kernel_address +
300 ((q->pi & (q->int_queue_len - 1)) * sizeof(bd))); 300 ((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));
301 301
302 pi[0] = pbd[0];
303 pi[1] = pbd[1];
304
305 q->pi++; 302 q->pi++;
306 q->pi &= ((q->int_queue_len << 1) - 1); 303 q->pi &= ((q->int_queue_len << 1) - 1);
307 304
308 /* Flush PQ entry write. Relevant only for specific ASICs */ 305 hdev->asic_funcs->pqe_write(hdev, pi, &bd);
309 hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]);
310 306
311 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); 307 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
312} 308}
diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/misc/habanalabs/include/goya/goya_packets.h
index a14407b975e4..ef54bad20509 100644
--- a/drivers/misc/habanalabs/include/goya/goya_packets.h
+++ b/drivers/misc/habanalabs/include/goya/goya_packets.h
@@ -52,6 +52,19 @@ enum goya_dma_direction {
52#define GOYA_PKT_CTL_MB_SHIFT 31 52#define GOYA_PKT_CTL_MB_SHIFT 31
53#define GOYA_PKT_CTL_MB_MASK 0x80000000 53#define GOYA_PKT_CTL_MB_MASK 0x80000000
54 54
55/* All packets have, at least, an 8-byte header, which contains
56 * the packet type. The kernel driver uses the packet header for packet
 57 * validation and to perform any required preparation before
58 * sending them off to the hardware.
59 */
60struct goya_packet {
61 __le64 header;
62 /* The rest of the packet data follows. Use the corresponding
63 * packet_XXX struct to deference the data, based on packet type
64 */
65 u8 contents[0];
66};
67
55struct packet_nop { 68struct packet_nop {
56 __le32 reserved; 69 __le32 reserved;
57 __le32 ctl; 70 __le32 ctl;
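
Both goya_validate_cb() and goya_patch_cb() above decode the packet id with the same mask-and-shift on this little-endian header. Factored into a helper it reads as follows (the driver open-codes it; this form is only for illustration):

    static enum packet_id demo_goya_pkt_id(const struct goya_packet *pkt)
    {
            return (enum packet_id) ((le64_to_cpu(pkt->header) &
                            PACKET_HEADER_PACKET_ID_MASK) >>
                            PACKET_HEADER_PACKET_ID_SHIFT);
    }
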
diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/irq.c
index ea9f72ff456c..199791b57caf 100644
--- a/drivers/misc/habanalabs/irq.c
+++ b/drivers/misc/habanalabs/irq.c
@@ -80,8 +80,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
80 struct hl_cs_job *job; 80 struct hl_cs_job *job;
81 bool shadow_index_valid; 81 bool shadow_index_valid;
82 u16 shadow_index; 82 u16 shadow_index;
83 u32 *cq_entry; 83 struct hl_cq_entry *cq_entry, *cq_base;
84 u32 *cq_base;
85 84
86 if (hdev->disabled) { 85 if (hdev->disabled) {
87 dev_dbg(hdev->dev, 86 dev_dbg(hdev->dev,
@@ -90,29 +89,29 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
90 return IRQ_HANDLED; 89 return IRQ_HANDLED;
91 } 90 }
92 91
93 cq_base = (u32 *) (uintptr_t) cq->kernel_address; 92 cq_base = (struct hl_cq_entry *) (uintptr_t) cq->kernel_address;
94 93
95 while (1) { 94 while (1) {
96 bool entry_ready = ((cq_base[cq->ci] & CQ_ENTRY_READY_MASK) 95 bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
96 CQ_ENTRY_READY_MASK)
97 >> CQ_ENTRY_READY_SHIFT); 97 >> CQ_ENTRY_READY_SHIFT);
98 98
99 if (!entry_ready) 99 if (!entry_ready)
100 break; 100 break;
101 101
102 cq_entry = (u32 *) &cq_base[cq->ci]; 102 cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];
103 103
104 /* 104 /* Make sure we read CQ entry contents after we've
105 * Make sure we read CQ entry contents after we've
106 * checked the ownership bit. 105 * checked the ownership bit.
107 */ 106 */
108 dma_rmb(); 107 dma_rmb();
109 108
110 shadow_index_valid = 109 shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
111 ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_VALID_MASK) 110 CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
112 >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT); 111 >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);
113 112
114 shadow_index = (u16) 113 shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
115 ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_MASK) 114 CQ_ENTRY_SHADOW_INDEX_MASK)
116 >> CQ_ENTRY_SHADOW_INDEX_SHIFT); 115 >> CQ_ENTRY_SHADOW_INDEX_SHIFT);
117 116
118 queue = &hdev->kernel_queues[cq->hw_queue_id]; 117 queue = &hdev->kernel_queues[cq->hw_queue_id];
@@ -122,8 +121,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
122 queue_work(hdev->cq_wq, &job->finish_work); 121 queue_work(hdev->cq_wq, &job->finish_work);
123 } 122 }
124 123
125 /* 124 /* Update ci of the context's queue. There is no
126 * Update ci of the context's queue. There is no
127 * need to protect it with spinlock because this update is 125 * need to protect it with spinlock because this update is
128 * done only inside IRQ and there is a different IRQ per 126 * done only inside IRQ and there is a different IRQ per
129 * queue 127 * queue
@@ -131,7 +129,8 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
131 queue->ci = hl_queue_inc_ptr(queue->ci); 129 queue->ci = hl_queue_inc_ptr(queue->ci);
132 130
133 /* Clear CQ entry ready bit */ 131 /* Clear CQ entry ready bit */
134 cq_base[cq->ci] &= ~CQ_ENTRY_READY_MASK; 132 cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
133 ~CQ_ENTRY_READY_MASK);
135 134
136 cq->ci = hl_cq_inc_ptr(cq->ci); 135 cq->ci = hl_cq_inc_ptr(cq->ci);
137 136
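
The three field reads in the handler above (ready bit, shadow-index-valid bit, shadow index) all follow one recipe: le32_to_cpu() the entry, mask, shift. As a helper, purely for illustration:

    static inline u32 demo_cq_entry_field(const struct hl_cq_entry *e,
                                          u32 mask, u32 shift)
    {
            return (le32_to_cpu(e->data) & mask) >> shift;
    }

    /* e.g. demo_cq_entry_field(cq_entry, CQ_ENTRY_READY_MASK,
     *                          CQ_ENTRY_READY_SHIFT)
     */
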
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 42d237cae1dc..365fb0cb8dff 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -1629,6 +1629,8 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
1629 dev_dbg(hdev->dev, 1629 dev_dbg(hdev->dev,
1630 "page list 0x%p of asid %d is still alive\n", 1630 "page list 0x%p of asid %d is still alive\n",
1631 phys_pg_list, ctx->asid); 1631 phys_pg_list, ctx->asid);
1632 atomic64_sub(phys_pg_list->total_size,
1633 &hdev->dram_used_mem);
1632 free_phys_pg_pack(hdev, phys_pg_list); 1634 free_phys_pg_pack(hdev, phys_pg_list);
1633 idr_remove(&vm->phys_pg_pack_handles, i); 1635 idr_remove(&vm->phys_pg_pack_handles, i);
1634 } 1636 }
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 03cc788511d5..654bdc41fc99 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -3780,8 +3780,6 @@ static int spi_nor_init_params(struct spi_nor *nor,
3780 default: 3780 default:
3781 /* Kept only for backward compatibility purpose. */ 3781 /* Kept only for backward compatibility purpose. */
3782 params->quad_enable = spansion_quad_enable; 3782 params->quad_enable = spansion_quad_enable;
3783 if (nor->clear_sr_bp)
3784 nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
3785 break; 3783 break;
3786 } 3784 }
3787 3785
@@ -4035,6 +4033,9 @@ static int spi_nor_init(struct spi_nor *nor)
4035 int err; 4033 int err;
4036 4034
4037 if (nor->clear_sr_bp) { 4035 if (nor->clear_sr_bp) {
4036 if (nor->quad_enable == spansion_quad_enable)
4037 nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
4038
4038 err = nor->clear_sr_bp(nor); 4039 err = nor->clear_sr_bp(nor);
4039 if (err) { 4040 if (err) {
4040 dev_err(nor->dev, 4041 dev_err(nor->dev,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8f3fbe5ca937..c258a1ce4b28 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1286,6 +1286,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1286 */ 1286 */
1287 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { 1287 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1288 mutex_lock(&ctrl->scan_lock); 1288 mutex_lock(&ctrl->scan_lock);
1289 mutex_lock(&ctrl->subsys->lock);
1290 nvme_mpath_start_freeze(ctrl->subsys);
1291 nvme_mpath_wait_freeze(ctrl->subsys);
1289 nvme_start_freeze(ctrl); 1292 nvme_start_freeze(ctrl);
1290 nvme_wait_freeze(ctrl); 1293 nvme_wait_freeze(ctrl);
1291 } 1294 }
@@ -1316,6 +1319,8 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
1316 nvme_update_formats(ctrl); 1319 nvme_update_formats(ctrl);
1317 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { 1320 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1318 nvme_unfreeze(ctrl); 1321 nvme_unfreeze(ctrl);
1322 nvme_mpath_unfreeze(ctrl->subsys);
1323 mutex_unlock(&ctrl->subsys->lock);
1319 mutex_unlock(&ctrl->scan_lock); 1324 mutex_unlock(&ctrl->scan_lock);
1320 } 1325 }
1321 if (effects & NVME_CMD_EFFECTS_CCC) 1326 if (effects & NVME_CMD_EFFECTS_CCC)
@@ -1715,6 +1720,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
1715 if (ns->head->disk) { 1720 if (ns->head->disk) {
1716 nvme_update_disk_info(ns->head->disk, ns, id); 1721 nvme_update_disk_info(ns->head->disk, ns, id);
1717 blk_queue_stack_limits(ns->head->disk->queue, ns->queue); 1722 blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
1723 revalidate_disk(ns->head->disk);
1718 } 1724 }
1719#endif 1725#endif
1720} 1726}
@@ -2487,6 +2493,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
2487 if (ret) { 2493 if (ret) {
2488 dev_err(ctrl->device, 2494 dev_err(ctrl->device,
2489 "failed to register subsystem device.\n"); 2495 "failed to register subsystem device.\n");
2496 put_device(&subsys->dev);
2490 goto out_unlock; 2497 goto out_unlock;
2491 } 2498 }
2492 ida_init(&subsys->ns_ida); 2499 ida_init(&subsys->ns_ida);
@@ -2509,7 +2516,6 @@ out_put_subsystem:
2509 nvme_put_subsystem(subsys); 2516 nvme_put_subsystem(subsys);
2510out_unlock: 2517out_unlock:
2511 mutex_unlock(&nvme_subsystems_lock); 2518 mutex_unlock(&nvme_subsystems_lock);
2512 put_device(&subsys->dev);
2513 return ret; 2519 return ret;
2514} 2520}
2515 2521
@@ -3571,6 +3577,13 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
3571 struct nvme_ns *ns, *next; 3577 struct nvme_ns *ns, *next;
3572 LIST_HEAD(ns_list); 3578 LIST_HEAD(ns_list);
3573 3579
3580 /*
3581 * make sure to requeue I/O to all namespaces as these
3582 * might result from the scan itself and must complete
3583 * for the scan_work to make progress
3584 */
3585 nvme_mpath_clear_ctrl_paths(ctrl);
3586
3574 /* prevent racing with ns scanning */ 3587 /* prevent racing with ns scanning */
3575 flush_work(&ctrl->scan_work); 3588 flush_work(&ctrl->scan_work);
3576 3589
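
Condensed, the bracket that nvme_passthru_start() and nvme_passthru_end() now form around commands with LBCC/CSE effects looks like the sketch below (names taken from the hunks above; note the teardown releases in exact reverse order of acquisition, with multipath queues frozen before the per-namespace queues):

    static void demo_frozen_passthru(struct nvme_ctrl *ctrl)
    {
            mutex_lock(&ctrl->scan_lock);
            mutex_lock(&ctrl->subsys->lock);
            nvme_mpath_start_freeze(ctrl->subsys);  /* mpath queues first */
            nvme_mpath_wait_freeze(ctrl->subsys);
            nvme_start_freeze(ctrl);                /* then per-ns queues */
            nvme_wait_freeze(ctrl);

            /* ... issue the command, update formats ... */

            nvme_unfreeze(ctrl);
            nvme_mpath_unfreeze(ctrl->subsys);
            mutex_unlock(&ctrl->subsys->lock);
            mutex_unlock(&ctrl->scan_lock);
    }
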
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 4f0d0d12744e..888d4543894e 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -12,6 +12,36 @@ module_param(multipath, bool, 0444);
12MODULE_PARM_DESC(multipath, 12MODULE_PARM_DESC(multipath,
13 "turn on native support for multiple controllers per subsystem"); 13 "turn on native support for multiple controllers per subsystem");
14 14
15void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
16{
17 struct nvme_ns_head *h;
18
19 lockdep_assert_held(&subsys->lock);
20 list_for_each_entry(h, &subsys->nsheads, entry)
21 if (h->disk)
22 blk_mq_unfreeze_queue(h->disk->queue);
23}
24
25void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
26{
27 struct nvme_ns_head *h;
28
29 lockdep_assert_held(&subsys->lock);
30 list_for_each_entry(h, &subsys->nsheads, entry)
31 if (h->disk)
32 blk_mq_freeze_queue_wait(h->disk->queue);
33}
34
35void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
36{
37 struct nvme_ns_head *h;
38
39 lockdep_assert_held(&subsys->lock);
40 list_for_each_entry(h, &subsys->nsheads, entry)
41 if (h->disk)
42 blk_freeze_queue_start(h->disk->queue);
43}
44
15/* 45/*
16 * If multipathing is enabled we need to always use the subsystem instance 46 * If multipathing is enabled we need to always use the subsystem instance
17 * number for numbering our devices to avoid conflicts between subsystems that 47 * number for numbering our devices to avoid conflicts between subsystems that
@@ -104,18 +134,34 @@ static const char *nvme_ana_state_names[] = {
104 [NVME_ANA_CHANGE] = "change", 134 [NVME_ANA_CHANGE] = "change",
105}; 135};
106 136
107void nvme_mpath_clear_current_path(struct nvme_ns *ns) 137bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
108{ 138{
109 struct nvme_ns_head *head = ns->head; 139 struct nvme_ns_head *head = ns->head;
140 bool changed = false;
110 int node; 141 int node;
111 142
112 if (!head) 143 if (!head)
113 return; 144 goto out;
114 145
115 for_each_node(node) { 146 for_each_node(node) {
116 if (ns == rcu_access_pointer(head->current_path[node])) 147 if (ns == rcu_access_pointer(head->current_path[node])) {
117 rcu_assign_pointer(head->current_path[node], NULL); 148 rcu_assign_pointer(head->current_path[node], NULL);
149 changed = true;
150 }
118 } 151 }
152out:
153 return changed;
154}
155
156void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
157{
158 struct nvme_ns *ns;
159
160 mutex_lock(&ctrl->scan_lock);
161 list_for_each_entry(ns, &ctrl->namespaces, list)
162 if (nvme_mpath_clear_current_path(ns))
163 kblockd_schedule_work(&ns->head->requeue_work);
164 mutex_unlock(&ctrl->scan_lock);
119} 165}
120 166
121static bool nvme_path_is_disabled(struct nvme_ns *ns) 167static bool nvme_path_is_disabled(struct nvme_ns *ns)
@@ -226,6 +272,24 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
226 return ns; 272 return ns;
227} 273}
228 274
275static bool nvme_available_path(struct nvme_ns_head *head)
276{
277 struct nvme_ns *ns;
278
279 list_for_each_entry_rcu(ns, &head->list, siblings) {
280 switch (ns->ctrl->state) {
281 case NVME_CTRL_LIVE:
282 case NVME_CTRL_RESETTING:
283 case NVME_CTRL_CONNECTING:
284 /* fallthru */
285 return true;
286 default:
287 break;
288 }
289 }
290 return false;
291}
292
229static blk_qc_t nvme_ns_head_make_request(struct request_queue *q, 293static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
230 struct bio *bio) 294 struct bio *bio)
231{ 295{
@@ -252,14 +316,14 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
252 disk_devt(ns->head->disk), 316 disk_devt(ns->head->disk),
253 bio->bi_iter.bi_sector); 317 bio->bi_iter.bi_sector);
254 ret = direct_make_request(bio); 318 ret = direct_make_request(bio);
255 } else if (!list_empty_careful(&head->list)) { 319 } else if (nvme_available_path(head)) {
256 dev_warn_ratelimited(dev, "no path available - requeuing I/O\n"); 320 dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
257 321
258 spin_lock_irq(&head->requeue_lock); 322 spin_lock_irq(&head->requeue_lock);
259 bio_list_add(&head->requeue_list, bio); 323 bio_list_add(&head->requeue_list, bio);
260 spin_unlock_irq(&head->requeue_lock); 324 spin_unlock_irq(&head->requeue_lock);
261 } else { 325 } else {
262 dev_warn_ratelimited(dev, "no path - failing I/O\n"); 326 dev_warn_ratelimited(dev, "no available path - failing I/O\n");
263 327
264 bio->bi_status = BLK_STS_IOERR; 328 bio->bi_status = BLK_STS_IOERR;
265 bio_endio(bio); 329 bio_endio(bio);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 26b563f9985b..778b3a0b6adb 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -490,6 +490,9 @@ static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
490 return ctrl->ana_log_buf != NULL; 490 return ctrl->ana_log_buf != NULL;
491} 491}
492 492
493void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
494void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
495void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
493void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, 496void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
494 struct nvme_ctrl *ctrl, int *flags); 497 struct nvme_ctrl *ctrl, int *flags);
495void nvme_failover_req(struct request *req); 498void nvme_failover_req(struct request *req);
@@ -500,7 +503,8 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head);
500int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id); 503int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
501void nvme_mpath_uninit(struct nvme_ctrl *ctrl); 504void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
502void nvme_mpath_stop(struct nvme_ctrl *ctrl); 505void nvme_mpath_stop(struct nvme_ctrl *ctrl);
503void nvme_mpath_clear_current_path(struct nvme_ns *ns); 506bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
507void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
504struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); 508struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
505 509
506static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) 510static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
@@ -548,7 +552,11 @@ static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
548static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) 552static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
549{ 553{
550} 554}
551static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) 555static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
556{
557 return false;
558}
559static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
552{ 560{
553} 561}
554static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) 562static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
@@ -568,6 +576,15 @@ static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
568static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl) 576static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
569{ 577{
570} 578}
579static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
580{
581}
582static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
583{
584}
585static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
586{
587}
571#endif /* CONFIG_NVME_MULTIPATH */ 588#endif /* CONFIG_NVME_MULTIPATH */
572 589
573#ifdef CONFIG_NVM 590#ifdef CONFIG_NVM
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index db160cee42ad..6bd9b1033965 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2695,7 +2695,7 @@ static void nvme_async_probe(void *data, async_cookie_t cookie)
2695{ 2695{
2696 struct nvme_dev *dev = data; 2696 struct nvme_dev *dev = data;
2697 2697
2698 nvme_reset_ctrl_sync(&dev->ctrl); 2698 flush_work(&dev->ctrl.reset_work);
2699 flush_work(&dev->ctrl.scan_work); 2699 flush_work(&dev->ctrl.scan_work);
2700 nvme_put_ctrl(&dev->ctrl); 2700 nvme_put_ctrl(&dev->ctrl);
2701} 2701}
@@ -2761,6 +2761,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2761 2761
2762 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 2762 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
2763 2763
2764 nvme_reset_ctrl(&dev->ctrl);
2764 nvme_get_ctrl(&dev->ctrl); 2765 nvme_get_ctrl(&dev->ctrl);
2765 async_schedule(nvme_async_probe, dev); 2766 async_schedule(nvme_async_probe, dev);
2766 2767
@@ -2846,7 +2847,7 @@ static int nvme_resume(struct device *dev)
2846 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 2847 struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
2847 struct nvme_ctrl *ctrl = &ndev->ctrl; 2848 struct nvme_ctrl *ctrl = &ndev->ctrl;
2848 2849
2849 if (pm_resume_via_firmware() || !ctrl->npss || 2850 if (ndev->last_ps == U32_MAX ||
2850 nvme_set_power_state(ctrl, ndev->last_ps) != 0) 2851 nvme_set_power_state(ctrl, ndev->last_ps) != 0)
2851 nvme_reset_ctrl(ctrl); 2852 nvme_reset_ctrl(ctrl);
2852 return 0; 2853 return 0;
@@ -2859,6 +2860,8 @@ static int nvme_suspend(struct device *dev)
2859 struct nvme_ctrl *ctrl = &ndev->ctrl; 2860 struct nvme_ctrl *ctrl = &ndev->ctrl;
2860 int ret = -EBUSY; 2861 int ret = -EBUSY;
2861 2862
2863 ndev->last_ps = U32_MAX;
2864
2862 /* 2865 /*
2863 * The platform does not remove power for a kernel managed suspend so 2866 * The platform does not remove power for a kernel managed suspend so
2864 * use host managed nvme power settings for lowest idle power if 2867 * use host managed nvme power settings for lowest idle power if
@@ -2866,8 +2869,14 @@ static int nvme_suspend(struct device *dev)
2866 * shutdown. But if the firmware is involved after the suspend or the 2869 * shutdown. But if the firmware is involved after the suspend or the
2867 * device does not support any non-default power states, shut down the 2870 * device does not support any non-default power states, shut down the
2868 * device fully. 2871 * device fully.
2872 *
2873 * If ASPM is not enabled for the device, shut down the device and allow
2874 * the PCI bus layer to put it into D3 in order to take the PCIe link
2875 * down, so as to allow the platform to achieve its minimum low-power
2876 * state (which may not be possible if the link is up).
2869 */ 2877 */
2870 if (pm_suspend_via_firmware() || !ctrl->npss) { 2878 if (pm_suspend_via_firmware() || !ctrl->npss ||
2879 !pcie_aspm_enabled(pdev)) {
2871 nvme_dev_disable(ndev, true); 2880 nvme_dev_disable(ndev, true);
2872 return 0; 2881 return 0;
2873 } 2882 }
@@ -2880,7 +2889,6 @@ static int nvme_suspend(struct device *dev)
2880 ctrl->state != NVME_CTRL_ADMIN_ONLY) 2889 ctrl->state != NVME_CTRL_ADMIN_ONLY)
2881 goto unfreeze; 2890 goto unfreeze;
2882 2891
2883 ndev->last_ps = 0;
2884 ret = nvme_get_power_state(ctrl, &ndev->last_ps); 2892 ret = nvme_get_power_state(ctrl, &ndev->last_ps);
2885 if (ret < 0) 2893 if (ret < 0)
2886 goto unfreeze; 2894 goto unfreeze;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index a249db528d54..1a6449bc547b 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -562,13 +562,17 @@ out_destroy_cm_id:
562 return ret; 562 return ret;
563} 563}
564 564
565static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
566{
567 rdma_disconnect(queue->cm_id);
568 ib_drain_qp(queue->qp);
569}
570
565static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) 571static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
566{ 572{
567 if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) 573 if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
568 return; 574 return;
569 575 __nvme_rdma_stop_queue(queue);
570 rdma_disconnect(queue->cm_id);
571 ib_drain_qp(queue->qp);
572} 576}
573 577
574static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) 578static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
@@ -607,11 +611,13 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
607 else 611 else
608 ret = nvmf_connect_admin_queue(&ctrl->ctrl); 612 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
609 613
610 if (!ret) 614 if (!ret) {
611 set_bit(NVME_RDMA_Q_LIVE, &queue->flags); 615 set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
612 else 616 } else {
617 __nvme_rdma_stop_queue(queue);
613 dev_info(ctrl->ctrl.device, 618 dev_info(ctrl->ctrl.device,
614 "failed to connect queue: %d ret=%d\n", idx, ret); 619 "failed to connect queue: %d ret=%d\n", idx, ret);
620 }
615 return ret; 621 return ret;
616} 622}
617 623
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index cd52b9f15376..98613a45bd3b 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -675,6 +675,7 @@ static void nvmet_port_subsys_drop_link(struct config_item *parent,
675 675
676found: 676found:
677 list_del(&p->entry); 677 list_del(&p->entry);
678 nvmet_port_del_ctrls(port, subsys);
678 nvmet_port_disc_changed(port, subsys); 679 nvmet_port_disc_changed(port, subsys);
679 680
680 if (list_empty(&port->subsystems)) 681 if (list_empty(&port->subsystems))
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index dad0243c7c96..3a67e244e568 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -46,6 +46,9 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
46 u16 status; 46 u16 status;
47 47
48 switch (errno) { 48 switch (errno) {
49 case 0:
50 status = NVME_SC_SUCCESS;
51 break;
49 case -ENOSPC: 52 case -ENOSPC:
50 req->error_loc = offsetof(struct nvme_rw_command, length); 53 req->error_loc = offsetof(struct nvme_rw_command, length);
51 status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; 54 status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
@@ -280,6 +283,18 @@ void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
280} 283}
281EXPORT_SYMBOL_GPL(nvmet_unregister_transport); 284EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
282 285
286void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
287{
288 struct nvmet_ctrl *ctrl;
289
290 mutex_lock(&subsys->lock);
291 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
292 if (ctrl->port == port)
293 ctrl->ops->delete_ctrl(ctrl);
294 }
295 mutex_unlock(&subsys->lock);
296}
297
283int nvmet_enable_port(struct nvmet_port *port) 298int nvmet_enable_port(struct nvmet_port *port)
284{ 299{
285 const struct nvmet_fabrics_ops *ops; 300 const struct nvmet_fabrics_ops *ops;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index b16dc3981c69..0940c5024a34 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -654,6 +654,14 @@ static void nvme_loop_remove_port(struct nvmet_port *port)
654 mutex_lock(&nvme_loop_ports_mutex); 654 mutex_lock(&nvme_loop_ports_mutex);
655 list_del_init(&port->entry); 655 list_del_init(&port->entry);
656 mutex_unlock(&nvme_loop_ports_mutex); 656 mutex_unlock(&nvme_loop_ports_mutex);
657
658 /*
659 * Ensure any ctrls that are in the process of being
660 * deleted are in fact deleted before we return
661 * and free the port. This is to prevent active
662 * ctrls from using a port after it's freed.
663 */
664 flush_workqueue(nvme_delete_wq);
657} 665}
658 666
659static const struct nvmet_fabrics_ops nvme_loop_ops = { 667static const struct nvmet_fabrics_ops nvme_loop_ops = {
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 6ee66c610739..c51f8dd01dc4 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -418,6 +418,9 @@ void nvmet_port_send_ana_event(struct nvmet_port *port);
418int nvmet_register_transport(const struct nvmet_fabrics_ops *ops); 418int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
419void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops); 419void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);
420 420
421void nvmet_port_del_ctrls(struct nvmet_port *port,
422 struct nvmet_subsys *subsys);
423
421int nvmet_enable_port(struct nvmet_port *port); 424int nvmet_enable_port(struct nvmet_port *port);
422void nvmet_disable_port(struct nvmet_port *port); 425void nvmet_disable_port(struct nvmet_port *port);
423 426
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 7f84bb4903ca..a296eaf52a5b 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -277,7 +277,7 @@ EXPORT_SYMBOL_GPL(of_irq_parse_raw);
277 * of_irq_parse_one - Resolve an interrupt for a device 277 * of_irq_parse_one - Resolve an interrupt for a device
278 * @device: the device whose interrupt is to be resolved 278 * @device: the device whose interrupt is to be resolved
279 * @index: index of the interrupt to resolve 279 * @index: index of the interrupt to resolve
280 * @out_irq: structure of_irq filled by this function 280 * @out_irq: structure of_phandle_args filled by this function
281 * 281 *
282 * This function resolves an interrupt for a node by walking the interrupt tree, 282 * This function resolves an interrupt for a node by walking the interrupt tree,
283 * finding which interrupt controller node it is attached to, and returning the 283 * finding which interrupt controller node it is attached to, and returning the
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index c1b67dd7cd6e..83c766233181 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -206,16 +206,22 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
206 for_each_child_of_node(local_fixups, child) { 206 for_each_child_of_node(local_fixups, child) {
207 207
208 for_each_child_of_node(overlay, overlay_child) 208 for_each_child_of_node(overlay, overlay_child)
209 if (!node_name_cmp(child, overlay_child)) 209 if (!node_name_cmp(child, overlay_child)) {
210 of_node_put(overlay_child);
210 break; 211 break;
212 }
211 213
212 if (!overlay_child) 214 if (!overlay_child) {
215 of_node_put(child);
213 return -EINVAL; 216 return -EINVAL;
217 }
214 218
215 err = adjust_local_phandle_references(child, overlay_child, 219 err = adjust_local_phandle_references(child, overlay_child,
216 phandle_delta); 220 phandle_delta);
217 if (err) 221 if (err) {
222 of_node_put(child);
218 return err; 223 return err;
224 }
219 } 225 }
220 226
221 return 0; 227 return 0;
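
All three of_node_put() additions above enforce the same reference-count rule: for_each_child_of_node() takes a reference on each child it hands you, so leaving the loop early via break or return without dropping the current child's reference leaks it. Schematically (demo_matches() is a made-up predicate):

    static void demo_scan_children(struct device_node *parent)
    {
            struct device_node *child;

            for_each_child_of_node(parent, child) {
                    if (demo_matches(child)) {      /* hypothetical */
                            of_node_put(child);     /* balance the loop's ref */
                            break;
                    }
            }
    }

(In resolver.c the matched node is still used after the early put, which relies on the overlay tree itself keeping the node alive for the duration.)
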
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index e44af7f4d37f..464f8f92653f 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1170,6 +1170,26 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
1170module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy, 1170module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
1171 NULL, 0644); 1171 NULL, 0644);
1172 1172
1173/**
1174 * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
1175 * @pdev: Target device.
1176 */
1177bool pcie_aspm_enabled(struct pci_dev *pdev)
1178{
1179 struct pci_dev *bridge = pci_upstream_bridge(pdev);
1180 bool ret;
1181
1182 if (!bridge)
1183 return false;
1184
1185 mutex_lock(&aspm_lock);
1186 ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false;
1187 mutex_unlock(&aspm_lock);
1188
1189 return ret;
1190}
1191EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
1192
1173#ifdef CONFIG_PCIEASPM_DEBUG 1193#ifdef CONFIG_PCIEASPM_DEBUG
1174static ssize_t link_state_show(struct device *dev, 1194static ssize_t link_state_show(struct device *dev,
1175 struct device_attribute *attr, 1195 struct device_attribute *attr,
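
The new export's intended usage is visible in the nvme_suspend() hunk earlier in this merge: ask whether the upstream bridge has ASPM enabled, and fall back to a full shutdown when the link cannot reach a low-power state on its own. A sketch of a caller (the demo_* helpers are hypothetical):

    static int demo_suspend(struct pci_dev *pdev)
    {
            /* If ASPM cannot take the link down, power the device off and
             * let the PCI core put it into D3.
             */
            if (!pcie_aspm_enabled(pdev))
                    return demo_full_shutdown(pdev);        /* hypothetical */

            return demo_light_suspend(pdev);                /* hypothetical */
    }
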
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index faf43b1d3dbe..a7549ae32542 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -10776,12 +10776,31 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
10776 /* This loop sets up all CPUs that are affinitized with an 10786 /* This loop sets up all CPUs that are affinitized with an
10777 * irq vector assigned to the driver. All affinitized CPUs 10777 * irq vector assigned to the driver. All affinitized CPUs
10778 * will get a link to that vectors IRQ and EQ. 10778 * will get a link to that vectors IRQ and EQ.
10779 *
10780 * NULL affinity mask handling:
10781 * If irq count is greater than one, log an error message.
10782 * If the null mask is received for the first irq, find the
10783 * first present cpu, and assign the eq index to ensure at
10784 * least one EQ is assigned.
10779 */ 10785 */
10780 for (idx = 0; idx < phba->cfg_irq_chann; idx++) { 10786 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10781 /* Get a CPU mask for all CPUs affinitized to this vector */ 10787 /* Get a CPU mask for all CPUs affinitized to this vector */
10782 maskp = pci_irq_get_affinity(phba->pcidev, idx); 10788 maskp = pci_irq_get_affinity(phba->pcidev, idx);
10783 if (!maskp) 10789 if (!maskp) {
10784 continue; 10790 if (phba->cfg_irq_chann > 1)
10791 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10792 "3329 No affinity mask found "
10793 "for vector %d (%d)\n",
10794 idx, phba->cfg_irq_chann);
10795 if (!idx) {
10796 cpu = cpumask_first(cpu_present_mask);
10797 cpup = &phba->sli4_hba.cpu_map[cpu];
10798 cpup->eq = idx;
10799 cpup->irq = pci_irq_vector(phba->pcidev, idx);
10800 cpup->flag |= LPFC_CPU_FIRST_IRQ;
10801 }
10802 break;
10803 }
10785 10804
10786 i = 0; 10805 i = 0;
10787 /* Loop through all CPUs associated with vector idx */ 10806 /* Loop through all CPUs associated with vector idx */
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index 3a01cfd70fdc..f518273cfbe3 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -4,7 +4,7 @@
4# 4#
5 5
6menuconfig SOUNDWIRE 6menuconfig SOUNDWIRE
7 bool "SoundWire support" 7 tristate "SoundWire support"
8 help 8 help
9 SoundWire is a 2-Pin interface with data and clock line ratified 9 SoundWire is a 2-Pin interface with data and clock line ratified
10 by the MIPI Alliance. SoundWire is used for transporting data 10 by the MIPI Alliance. SoundWire is used for transporting data
@@ -17,17 +17,12 @@ if SOUNDWIRE
17 17
18comment "SoundWire Devices" 18comment "SoundWire Devices"
19 19
20config SOUNDWIRE_BUS
21 tristate
22 select REGMAP_SOUNDWIRE
23
24config SOUNDWIRE_CADENCE 20config SOUNDWIRE_CADENCE
25 tristate 21 tristate
26 22
27config SOUNDWIRE_INTEL 23config SOUNDWIRE_INTEL
28 tristate "Intel SoundWire Master driver" 24 tristate "Intel SoundWire Master driver"
29 select SOUNDWIRE_CADENCE 25 select SOUNDWIRE_CADENCE
30 select SOUNDWIRE_BUS
31 depends on X86 && ACPI && SND_SOC 26 depends on X86 && ACPI && SND_SOC
32 help 27 help
33 SoundWire Intel Master driver. 28 SoundWire Intel Master driver.
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index fd99a831b92a..45b7e5001653 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -5,7 +5,7 @@
5 5
6#Bus Objs 6#Bus Objs
7soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o 7soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o
8obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o 8obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o
9 9
10#Cadence Objs 10#Cadence Objs
11soundwire-cadence-objs := cadence_master.o 11soundwire-cadence-objs := cadence_master.o
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index ff4badc9b3de..60e8bdee5c75 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -81,8 +81,8 @@
81 81
82#define CDNS_MCP_INTSET 0x4C 82#define CDNS_MCP_INTSET 0x4C
83 83
84#define CDNS_SDW_SLAVE_STAT 0x50 84#define CDNS_MCP_SLAVE_STAT 0x50
85#define CDNS_MCP_SLAVE_STAT_MASK BIT(1, 0) 85#define CDNS_MCP_SLAVE_STAT_MASK GENMASK(1, 0)
86 86
87#define CDNS_MCP_SLAVE_INTSTAT0 0x54 87#define CDNS_MCP_SLAVE_INTSTAT0 0x54
88#define CDNS_MCP_SLAVE_INTSTAT1 0x58 88#define CDNS_MCP_SLAVE_INTSTAT1 0x58
@@ -96,8 +96,8 @@
96#define CDNS_MCP_SLAVE_INTMASK0 0x5C 96#define CDNS_MCP_SLAVE_INTMASK0 0x5C
97#define CDNS_MCP_SLAVE_INTMASK1 0x60 97#define CDNS_MCP_SLAVE_INTMASK1 0x60
98 98
99#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(30, 0) 99#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(31, 0)
100#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(16, 0) 100#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(15, 0)
101 101
102#define CDNS_MCP_PORT_INTSTAT 0x64 102#define CDNS_MCP_PORT_INTSTAT 0x64
103#define CDNS_MCP_PDI_STAT 0x6C 103#define CDNS_MCP_PDI_STAT 0x6C
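
The mask fixes above replace a mistyped BIT(1, 0) (BIT() takes a single bit index) and two off-by-one field widths with GENMASK(h, l), which builds a contiguous mask covering bits l through h inclusive. A userspace re-implementation for 32-bit values, written only to show the corrected constants (the kernel macro itself is more general):

    #include <stdio.h>

    #define DEMO_GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

    int main(void)
    {
            printf("%#010x\n", DEMO_GENMASK(1, 0));         /* 0x00000003 */
            printf("%#010x\n", DEMO_GENMASK(31, 0));        /* 0xffffffff */
            printf("%#010x\n", DEMO_GENMASK(15, 0));        /* 0x0000ffff */
            return 0;
    }
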
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 2edf3ee91300..caf4d4df4bd3 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
342static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec, 342static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
343 unsigned int flags) 343 unsigned int flags)
344{ 344{
345 int divider, base, prescale; 345 unsigned int divider, base, prescale;
346 346
347 /* This function needs improvment */ 347 /* This function needs improvement */
348 /* Don't know if divider==0 works. */ 348 /* Don't know if divider==0 works. */
349 349
350 for (prescale = 0; prescale < 16; prescale++) { 350 for (prescale = 0; prescale < 16; prescale++) {
@@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
358 divider = (*nanosec) / base; 358 divider = (*nanosec) / base;
359 break; 359 break;
360 case CMDF_ROUND_UP: 360 case CMDF_ROUND_UP:
361 divider = (*nanosec) / base; 361 divider = DIV_ROUND_UP(*nanosec, base);
362 break; 362 break;
363 } 363 }
364 if (divider < 65536) { 364 if (divider < 65536) {
@@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
368 } 368 }
369 369
370 prescale = 15; 370 prescale = 15;
371 base = timer_base * (1 << prescale); 371 base = timer_base * (prescale + 1);
372 divider = 65535; 372 divider = 65535;
373 *nanosec = divider * base; 373 *nanosec = divider * base;
374 return (prescale << 16) | (divider); 374 return (prescale << 16) | (divider);
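
The CMDF_ROUND_UP branch previously truncated, which rounds the divisor (and the resulting period) down rather than up. With illustrative numbers: for *nanosec = 1000 and base = 300, plain division gives 3, a 900 ns period, shorter than requested; DIV_ROUND_UP gives 4, a 1200 ns period, rounded up as the flag asks. A standalone check of the same arithmetic:

    #include <stdio.h>

    #define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int nanosec = 1000, base = 300;

            printf("%u %u\n", nanosec / base,
                   DEMO_DIV_ROUND_UP(nanosec, base));       /* prints: 3 4 */
            return 0;
    }
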
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index b5abfe89190c..df8812c30640 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -454,9 +454,11 @@ err_clk:
454 imx_disable_unprepare_clks(dev); 454 imx_disable_unprepare_clks(dev);
455disable_hsic_regulator: 455disable_hsic_regulator:
456 if (data->hsic_pad_regulator) 456 if (data->hsic_pad_regulator)
457 ret = regulator_disable(data->hsic_pad_regulator); 457 /* don't overwrite original ret (cf. EPROBE_DEFER) */
458 regulator_disable(data->hsic_pad_regulator);
458 if (pdata.flags & CI_HDRC_PMQOS) 459 if (pdata.flags & CI_HDRC_PMQOS)
459 pm_qos_remove_request(&data->pm_qos_req); 460 pm_qos_remove_request(&data->pm_qos_req);
461 data->ci_pdev = NULL;
460 return ret; 462 return ret;
461} 463}
462 464
@@ -469,14 +471,17 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
469 pm_runtime_disable(&pdev->dev); 471 pm_runtime_disable(&pdev->dev);
470 pm_runtime_put_noidle(&pdev->dev); 472 pm_runtime_put_noidle(&pdev->dev);
471 } 473 }
472 ci_hdrc_remove_device(data->ci_pdev); 474 if (data->ci_pdev)
475 ci_hdrc_remove_device(data->ci_pdev);
473 if (data->override_phy_control) 476 if (data->override_phy_control)
474 usb_phy_shutdown(data->phy); 477 usb_phy_shutdown(data->phy);
475 imx_disable_unprepare_clks(&pdev->dev); 478 if (data->ci_pdev) {
476 if (data->plat_data->flags & CI_HDRC_PMQOS) 479 imx_disable_unprepare_clks(&pdev->dev);
477 pm_qos_remove_request(&data->pm_qos_req); 480 if (data->plat_data->flags & CI_HDRC_PMQOS)
478 if (data->hsic_pad_regulator) 481 pm_qos_remove_request(&data->pm_qos_req);
479 regulator_disable(data->hsic_pad_regulator); 482 if (data->hsic_pad_regulator)
483 regulator_disable(data->hsic_pad_regulator);
484 }
480 485
481 return 0; 486 return 0;
482} 487}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 183b41753c98..62f4fb9b362f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1301,10 +1301,6 @@ made_compressed_probe:
1301 tty_port_init(&acm->port); 1301 tty_port_init(&acm->port);
1302 acm->port.ops = &acm_port_ops; 1302 acm->port.ops = &acm_port_ops;
1303 1303
1304 minor = acm_alloc_minor(acm);
1305 if (minor < 0)
1306 goto alloc_fail1;
1307
1308 ctrlsize = usb_endpoint_maxp(epctrl); 1304 ctrlsize = usb_endpoint_maxp(epctrl);
1309 readsize = usb_endpoint_maxp(epread) * 1305 readsize = usb_endpoint_maxp(epread) *
1310 (quirks == SINGLE_RX_URB ? 1 : 2); 1306 (quirks == SINGLE_RX_URB ? 1 : 2);
@@ -1312,6 +1308,13 @@ made_compressed_probe:
1312 acm->writesize = usb_endpoint_maxp(epwrite) * 20; 1308 acm->writesize = usb_endpoint_maxp(epwrite) * 20;
1313 acm->control = control_interface; 1309 acm->control = control_interface;
1314 acm->data = data_interface; 1310 acm->data = data_interface;
1311
1312 usb_get_intf(acm->control); /* undone in destruct() */
1313
1314 minor = acm_alloc_minor(acm);
1315 if (minor < 0)
1316 goto alloc_fail1;
1317
1315 acm->minor = minor; 1318 acm->minor = minor;
1316 acm->dev = usb_dev; 1319 acm->dev = usb_dev;
1317 if (h.usb_cdc_acm_descriptor) 1320 if (h.usb_cdc_acm_descriptor)
@@ -1458,7 +1461,6 @@ skip_countries:
1458 usb_driver_claim_interface(&acm_driver, data_interface, acm); 1461 usb_driver_claim_interface(&acm_driver, data_interface, acm);
1459 usb_set_intfdata(data_interface, acm); 1462 usb_set_intfdata(data_interface, acm);
1460 1463
1461 usb_get_intf(control_interface);
1462 tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor, 1464 tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
1463 &control_interface->dev); 1465 &control_interface->dev);
1464 if (IS_ERR(tty_dev)) { 1466 if (IS_ERR(tty_dev)) {
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 1359b78a624e..6cf22c27f2d2 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -66,9 +66,7 @@ int hcd_buffer_create(struct usb_hcd *hcd)
66 char name[16]; 66 char name[16];
67 int i, size; 67 int i, size;
68 68
69 if (!IS_ENABLED(CONFIG_HAS_DMA) || 69 if (hcd->localmem_pool || !hcd_uses_dma(hcd))
70 (!is_device_dma_capable(hcd->self.sysdev) &&
71 !hcd->localmem_pool))
72 return 0; 70 return 0;
73 71
74 for (i = 0; i < HCD_BUFFER_POOLS; i++) { 72 for (i = 0; i < HCD_BUFFER_POOLS; i++) {
@@ -129,8 +127,7 @@ void *hcd_buffer_alloc(
129 return gen_pool_dma_alloc(hcd->localmem_pool, size, dma); 127 return gen_pool_dma_alloc(hcd->localmem_pool, size, dma);
130 128
131 /* some USB hosts just use PIO */ 129 /* some USB hosts just use PIO */
132 if (!IS_ENABLED(CONFIG_HAS_DMA) || 130 if (!hcd_uses_dma(hcd)) {
133 !is_device_dma_capable(bus->sysdev)) {
134 *dma = ~(dma_addr_t) 0; 131 *dma = ~(dma_addr_t) 0;
135 return kmalloc(size, mem_flags); 132 return kmalloc(size, mem_flags);
136 } 133 }
@@ -160,8 +157,7 @@ void hcd_buffer_free(
160 return; 157 return;
161 } 158 }
162 159
163 if (!IS_ENABLED(CONFIG_HAS_DMA) || 160 if (!hcd_uses_dma(hcd)) {
164 !is_device_dma_capable(bus->sysdev)) {
165 kfree(addr); 161 kfree(addr);
166 return; 162 return;
167 } 163 }
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 65de6f73b672..558890ada0e5 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf,
193 intf->minor = minor; 193 intf->minor = minor;
194 break; 194 break;
195 } 195 }
196 up_write(&minor_rwsem); 196 if (intf->minor < 0) {
197 if (intf->minor < 0) 197 up_write(&minor_rwsem);
198 return -EXFULL; 198 return -EXFULL;
199 }
199 200
200 /* create a usb class device for this usb interface */ 201 /* create a usb class device for this usb interface */
201 snprintf(name, sizeof(name), class_driver->name, minor - minor_base); 202 snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf,
203 MKDEV(USB_MAJOR, minor), class_driver, 204 MKDEV(USB_MAJOR, minor), class_driver,
204 "%s", kbasename(name)); 205 "%s", kbasename(name));
205 if (IS_ERR(intf->usb_dev)) { 206 if (IS_ERR(intf->usb_dev)) {
206 down_write(&minor_rwsem);
207 usb_minors[minor] = NULL; 207 usb_minors[minor] = NULL;
208 intf->minor = -1; 208 intf->minor = -1;
209 up_write(&minor_rwsem);
210 retval = PTR_ERR(intf->usb_dev); 209 retval = PTR_ERR(intf->usb_dev);
211 } 210 }
211 up_write(&minor_rwsem);
212 return retval; 212 return retval;
213} 213}
214EXPORT_SYMBOL_GPL(usb_register_dev); 214EXPORT_SYMBOL_GPL(usb_register_dev);
@@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf,
234 return; 234 return;
235 235
236 dev_dbg(&intf->dev, "removing %d minor\n", intf->minor); 236 dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
237 device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
237 238
238 down_write(&minor_rwsem); 239 down_write(&minor_rwsem);
239 usb_minors[intf->minor] = NULL; 240 usb_minors[intf->minor] = NULL;
240 up_write(&minor_rwsem); 241 up_write(&minor_rwsem);
241 242
242 device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
243 intf->usb_dev = NULL; 243 intf->usb_dev = NULL;
244 intf->minor = -1; 244 intf->minor = -1;
245 destroy_usb_class(); 245 destroy_usb_class();
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 2ccbc2f83570..8592c0344fe8 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1412,7 +1412,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
1412 if (usb_endpoint_xfer_control(&urb->ep->desc)) { 1412 if (usb_endpoint_xfer_control(&urb->ep->desc)) {
1413 if (hcd->self.uses_pio_for_control) 1413 if (hcd->self.uses_pio_for_control)
1414 return ret; 1414 return ret;
1415 if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) { 1415 if (hcd_uses_dma(hcd)) {
1416 if (is_vmalloc_addr(urb->setup_packet)) { 1416 if (is_vmalloc_addr(urb->setup_packet)) {
1417 WARN_ONCE(1, "setup packet is not dma capable\n"); 1417 WARN_ONCE(1, "setup packet is not dma capable\n");
1418 return -EAGAIN; 1418 return -EAGAIN;
@@ -1446,7 +1446,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
1446 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 1446 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1447 if (urb->transfer_buffer_length != 0 1447 if (urb->transfer_buffer_length != 0
1448 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) { 1448 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
1449 if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) { 1449 if (hcd_uses_dma(hcd)) {
1450 if (urb->num_sgs) { 1450 if (urb->num_sgs) {
1451 int n; 1451 int n;
1452 1452
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index e844bb7b5676..5adf489428aa 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -2218,14 +2218,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
2218 (struct usb_cdc_dmm_desc *)buffer; 2218 (struct usb_cdc_dmm_desc *)buffer;
2219 break; 2219 break;
2220 case USB_CDC_MDLM_TYPE: 2220 case USB_CDC_MDLM_TYPE:
2221 if (elength < sizeof(struct usb_cdc_mdlm_desc *)) 2221 if (elength < sizeof(struct usb_cdc_mdlm_desc))
2222 goto next_desc; 2222 goto next_desc;
2223 if (desc) 2223 if (desc)
2224 return -EINVAL; 2224 return -EINVAL;
2225 desc = (struct usb_cdc_mdlm_desc *)buffer; 2225 desc = (struct usb_cdc_mdlm_desc *)buffer;
2226 break; 2226 break;
2227 case USB_CDC_MDLM_DETAIL_TYPE: 2227 case USB_CDC_MDLM_DETAIL_TYPE:
2228 if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *)) 2228 if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
2229 goto next_desc; 2229 goto next_desc;
2230 if (detail) 2230 if (detail)
2231 return -EINVAL; 2231 return -EINVAL;
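
The two one-character fixes above close a classic validation bug: sizeof(struct foo *) is the size of a pointer, not of the structure, so the length checks accepted descriptors far shorter than the structs later cast onto the buffer. A userspace illustration with a stand-in layout (21 bytes, matching the real usb_cdc_mdlm_desc):

    #include <stdio.h>

    struct demo_mdlm_desc {                 /* stand-in layout, 21 bytes */
            unsigned char bFunctionLength;
            unsigned char bDescriptorType;
            unsigned char bDescriptorSubType;
            unsigned char bcdVersion[2];
            unsigned char bGUID[16];
    };

    int main(void)
    {
            /* On LP64 this prints "8 vs 21": the old check compared the
             * element length against the pointer size, so 8-byte runts
             * passed validation.
             */
            printf("%zu vs %zu\n", sizeof(struct demo_mdlm_desc *),
                   sizeof(struct demo_mdlm_desc));
            return 0;
    }
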
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index ee144ff8af5b..111787a137ee 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4608,7 +4608,7 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
4608 4608
4609 buf = urb->transfer_buffer; 4609 buf = urb->transfer_buffer;
4610 4610
4611 if (hcd->self.uses_dma) { 4611 if (hcd_uses_dma(hcd)) {
4612 if (!buf && (urb->transfer_dma & 3)) { 4612 if (!buf && (urb->transfer_dma & 3)) {
4613 dev_err(hsotg->dev, 4613 dev_err(hsotg->dev,
4614 "%s: unaligned transfer with no transfer_buffer", 4614 "%s: unaligned transfer with no transfer_buffer",
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 9118b42c70b6..76883ff4f5bb 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1976,6 +1976,7 @@ void composite_disconnect(struct usb_gadget *gadget)
1976 * disconnect callbacks? 1976 * disconnect callbacks?
1977 */ 1977 */
1978 spin_lock_irqsave(&cdev->lock, flags); 1978 spin_lock_irqsave(&cdev->lock, flags);
1979 cdev->suspended = 0;
1979 if (cdev->config) 1980 if (cdev->config)
1980 reset_config(cdev); 1981 reset_config(cdev);
1981 if (cdev->driver->disconnect) 1982 if (cdev->driver->disconnect)
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 29cc5693e05c..7c96c4665178 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -261,7 +261,7 @@ struct fsg_common;
261struct fsg_common { 261struct fsg_common {
262 struct usb_gadget *gadget; 262 struct usb_gadget *gadget;
263 struct usb_composite_dev *cdev; 263 struct usb_composite_dev *cdev;
264 struct fsg_dev *fsg, *new_fsg; 264 struct fsg_dev *fsg;
265 wait_queue_head_t io_wait; 265 wait_queue_head_t io_wait;
266 wait_queue_head_t fsg_wait; 266 wait_queue_head_t fsg_wait;
267 267
@@ -290,6 +290,7 @@ struct fsg_common {
290 unsigned int bulk_out_maxpacket; 290 unsigned int bulk_out_maxpacket;
291 enum fsg_state state; /* For exception handling */ 291 enum fsg_state state; /* For exception handling */
292 unsigned int exception_req_tag; 292 unsigned int exception_req_tag;
293 void *exception_arg;
293 294
294 enum data_direction data_dir; 295 enum data_direction data_dir;
295 u32 data_size; 296 u32 data_size;
@@ -391,7 +392,8 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
391 392
392/* These routines may be called in process context or in_irq */ 393/* These routines may be called in process context or in_irq */
393 394
394static void raise_exception(struct fsg_common *common, enum fsg_state new_state) 395static void __raise_exception(struct fsg_common *common, enum fsg_state new_state,
396 void *arg)
395{ 397{
396 unsigned long flags; 398 unsigned long flags;
397 399
@@ -404,6 +406,7 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
 	if (common->state <= new_state) {
 		common->exception_req_tag = common->ep0_req_tag;
 		common->state = new_state;
+		common->exception_arg = arg;
 		if (common->thread_task)
 			send_sig_info(SIGUSR1, SEND_SIG_PRIV,
 				      common->thread_task);
@@ -411,6 +414,10 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
 	spin_unlock_irqrestore(&common->lock, flags);
 }
 
+static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+{
+	__raise_exception(common, new_state, NULL);
+}
 
 /*-------------------------------------------------------------------------*/
 
@@ -2285,16 +2292,16 @@ reset:
 static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 {
 	struct fsg_dev *fsg = fsg_from_func(f);
-	fsg->common->new_fsg = fsg;
-	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+
+	__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, fsg);
 	return USB_GADGET_DELAYED_STATUS;
 }
 
 static void fsg_disable(struct usb_function *f)
 {
 	struct fsg_dev *fsg = fsg_from_func(f);
-	fsg->common->new_fsg = NULL;
-	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+
+	__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
 }
 
 
@@ -2307,6 +2314,7 @@ static void handle_exception(struct fsg_common *common)
 	enum fsg_state old_state;
 	struct fsg_lun *curlun;
 	unsigned int exception_req_tag;
+	struct fsg_dev *new_fsg;
 
 	/*
 	 * Clear the existing signals.  Anything but SIGUSR1 is converted
@@ -2360,6 +2368,7 @@ static void handle_exception(struct fsg_common *common)
 	common->next_buffhd_to_fill = &common->buffhds[0];
 	common->next_buffhd_to_drain = &common->buffhds[0];
 	exception_req_tag = common->exception_req_tag;
+	new_fsg = common->exception_arg;
 	old_state = common->state;
 	common->state = FSG_STATE_NORMAL;
 
@@ -2413,8 +2422,8 @@ static void handle_exception(struct fsg_common *common)
 		break;
 
 	case FSG_STATE_CONFIG_CHANGE:
-		do_set_interface(common, common->new_fsg);
-		if (common->new_fsg)
+		do_set_interface(common, new_fsg);
+		if (new_fsg)
 			usb_composite_setup_continue(common->cdev);
 		break;
 
@@ -2989,8 +2998,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
 
 	DBG(fsg, "unbind\n");
 	if (fsg->common->fsg == fsg) {
-		fsg->common->new_fsg = NULL;
-		raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+		__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
 		/* FIXME: make interruptible or killable somehow? */
 		wait_event(common->fsg_wait, common->fsg != fsg);
 	}
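The f_mass_storage hunks above stop publishing the target through the shared common->new_fsg field and instead hand it to __raise_exception(), which records it under common->lock in the same step as the state change, so handle_exception() always snapshots the argument that belongs to the event it is processing. A minimal pthread sketch of that pattern (toy types and names, not the gadget code):

#include <pthread.h>
#include <stdio.h>

enum state { S_NORMAL, S_CONFIG_CHANGE };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static enum state state = S_NORMAL;
static void *exception_arg;

static void raise_event(enum state new_state, void *arg)
{
	pthread_mutex_lock(&lock);
	state = new_state;
	exception_arg = arg;	/* travels with the state change */
	pthread_mutex_unlock(&lock);
}

static void handle_event(void)
{
	void *arg;

	pthread_mutex_lock(&lock);
	arg = exception_arg;	/* snapshot before dropping the lock */
	state = S_NORMAL;
	pthread_mutex_unlock(&lock);
	printf("handled with arg %p\n", arg);
}

int main(void)
{
	int config = 1;

	raise_event(S_CONFIG_CHANGE, &config);
	handle_event();
	return 0;
}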
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 027a25694a68..e098f16c01cb 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -19,6 +19,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/sys_soc.h>
 #include <linux/uaccess.h>
 #include <linux/usb/ch9.h>
@@ -2450,9 +2451,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
 	if (usb3->forced_b_device)
 		return -EBUSY;
 
-	if (!strncmp(buf, "host", strlen("host")))
+	if (sysfs_streq(buf, "host"))
 		new_mode_is_host = true;
-	else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+	else if (sysfs_streq(buf, "peripheral"))
 		new_mode_is_host = false;
 	else
 		return -EINVAL;
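The role_store() change above matters because strncmp(buf, "host", strlen("host")) accepts any input that merely begins with "host", while sysfs_streq() requires an exact match but tolerates the trailing newline that "echo host > role" writes. A standalone sketch with a local reimplementation of the helper's semantics (my_sysfs_streq is hypothetical, written to mimic the kernel helper's documented behaviour):

#include <stdio.h>
#include <string.h>

static int my_sysfs_streq(const char *s1, const char *s2)
{
	while (*s1 && *s1 == *s2) {
		s1++;
		s2++;
	}
	if (*s1 == *s2)
		return 1;
	if (!strcmp(s1, "\n"))	/* s1 ends in a newline: ignore it */
		return !*s2;
	if (!strcmp(s2, "\n"))	/* s2 ends in a newline: ignore it */
		return !*s1;
	return 0;
}

int main(void)
{
	printf("%d\n", !strncmp("hostile", "host", strlen("host"))); /* 1: false accept */
	printf("%d\n", my_sysfs_streq("hostile", "host"));           /* 0: rejected */
	printf("%d\n", my_sysfs_streq("host\n", "host"));            /* 1: newline ok */
	return 0;
}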
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 77cc36efae95..0dbfa5c10703 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1629,6 +1629,10 @@ static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		/* see what we found out */
 		temp = check_reset_complete(fotg210, wIndex, status_reg,
 				fotg210_readl(fotg210, status_reg));
+
+		/* restart schedule */
+		fotg210->command |= CMD_RUN;
+		fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
 	}
 
 	if (!(temp & (PORT_RESUME|PORT_RESET))) {
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index c1582fbd1150..38e920ac7f82 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -968,6 +968,11 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
 
+	/* Motorola devices */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) },	/* mdm6600 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) },	/* mdm9600 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) },	/* mdm ram dl */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) },	/* mdm qc dl */
 
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
@@ -1549,6 +1554,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
 	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) },	/* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1952,11 +1958,15 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),	/* D-Link DWM-222 */
 	  .driver_info = RSVD(4) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff),	/* D-Link DWM-222 A2 */
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff),	/* Olicard 600 */
 	  .driver_info = RSVD(4) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff),	/* BroadMobi BM818 */
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },	/* OLICARD300 - MT6225 */
 	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
 	{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 4f1b6f466ff5..b86195e4dc6c 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -505,18 +505,14 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
 	struct afs_call *call = container_of(work, struct afs_call, work);
 	struct afs_uuid *r = call->request;
 
-	struct {
-		__be32 match;
-	} reply;
-
 	_enter("");
 
 	if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
-		reply.match = htonl(0);
+		afs_send_empty_reply(call);
 	else
-		reply.match = htonl(1);
+		rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+					1, 1, "K-1");
 
-	afs_send_simple_reply(call, &reply, sizeof(reply));
 	afs_put_call(call);
 	_leave("");
 }
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index e640d67274be..81207dc3c997 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -440,7 +440,7 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
  * iterate through the data blob that lists the contents of an AFS directory
  */
 static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
-			   struct key *key)
+			   struct key *key, afs_dataversion_t *_dir_version)
 {
 	struct afs_vnode *dvnode = AFS_FS_I(dir);
 	struct afs_xdr_dir_page *dbuf;
@@ -460,6 +460,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
 	req = afs_read_dir(dvnode, key);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
+	*_dir_version = req->data_version;
 
 	/* round the file position up to the next entry boundary */
 	ctx->pos += sizeof(union afs_xdr_dirent) - 1;
@@ -514,7 +515,10 @@ out:
  */
 static int afs_readdir(struct file *file, struct dir_context *ctx)
 {
-	return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file));
+	afs_dataversion_t dir_version;
+
+	return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file),
+			       &dir_version);
 }
 
 /*
@@ -555,7 +559,8 @@ static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name,
  * - just returns the FID the dentry name maps to if found
  */
 static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
-			     struct afs_fid *fid, struct key *key)
+			     struct afs_fid *fid, struct key *key,
+			     afs_dataversion_t *_dir_version)
 {
 	struct afs_super_info *as = dir->i_sb->s_fs_info;
 	struct afs_lookup_one_cookie cookie = {
@@ -568,7 +573,7 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
 	_enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
 
 	/* search the directory */
-	ret = afs_dir_iterate(dir, &cookie.ctx, key);
+	ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version);
 	if (ret < 0) {
 		_leave(" = %d [iter]", ret);
 		return ret;
@@ -642,6 +647,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
 	struct afs_server *server;
 	struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
 	struct inode *inode = NULL, *ti;
+	afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version);
 	int ret, i;
 
 	_enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
@@ -669,12 +675,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
 		cookie->fids[i].vid = as->volume->vid;
 
 	/* search the directory */
-	ret = afs_dir_iterate(dir, &cookie->ctx, key);
+	ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version);
 	if (ret < 0) {
 		inode = ERR_PTR(ret);
 		goto out;
 	}
 
+	dentry->d_fsdata = (void *)(unsigned long)data_version;
+
 	inode = ERR_PTR(-ENOENT);
 	if (!cookie->found)
 		goto out;
@@ -968,7 +976,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
 	struct dentry *parent;
 	struct inode *inode;
 	struct key *key;
-	long dir_version, de_version;
+	afs_dataversion_t dir_version;
+	long de_version;
 	int ret;
 
 	if (flags & LOOKUP_RCU)
@@ -1014,20 +1023,20 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
 	 * on a 32-bit system, we only have 32 bits in the dentry to store the
 	 * version.
 	 */
-	dir_version = (long)dir->status.data_version;
+	dir_version = dir->status.data_version;
 	de_version = (long)dentry->d_fsdata;
-	if (de_version == dir_version)
-		goto out_valid;
+	if (de_version == (long)dir_version)
+		goto out_valid_noupdate;
 
-	dir_version = (long)dir->invalid_before;
-	if (de_version - dir_version >= 0)
+	dir_version = dir->invalid_before;
+	if (de_version - (long)dir_version >= 0)
 		goto out_valid;
 
 	_debug("dir modified");
 	afs_stat_v(dir, n_reval);
 
 	/* search the directory for this vnode */
-	ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key);
+	ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version);
 	switch (ret) {
 	case 0:
 		/* the filename maps to something */
@@ -1080,7 +1089,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
 	}
 
 out_valid:
-	dentry->d_fsdata = (void *)dir_version;
+	dentry->d_fsdata = (void *)(unsigned long)dir_version;
+out_valid_noupdate:
 	dput(parent);
 	key_put(key);
 	_leave(" = 1 [valid]");
@@ -1186,6 +1196,20 @@ static void afs_prep_for_new_inode(struct afs_fs_cursor *fc,
 }
 
 /*
+ * Note that a dentry got changed.  We need to set d_fsdata to the data version
+ * number derived from the result of the operation.  It doesn't matter if
+ * d_fsdata goes backwards as we'll just revalidate.
+ */
+static void afs_update_dentry_version(struct afs_fs_cursor *fc,
+				      struct dentry *dentry,
+				      struct afs_status_cb *scb)
+{
+	if (fc->ac.error == 0)
+		dentry->d_fsdata =
+			(void *)(unsigned long)scb->status.data_version;
+}
+
+/*
  * create a directory on an AFS filesystem
  */
 static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
@@ -1227,6 +1251,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 		afs_check_for_remote_deletion(&fc, dvnode);
 		afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
 					&data_version, &scb[0]);
+		afs_update_dentry_version(&fc, dentry, &scb[0]);
 		afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
 		ret = afs_end_vnode_operation(&fc);
 		if (ret < 0)
@@ -1319,6 +1344,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
 
 		afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
 					&data_version, scb);
+		afs_update_dentry_version(&fc, dentry, scb);
 		ret = afs_end_vnode_operation(&fc);
 		if (ret == 0) {
 			afs_dir_remove_subdir(dentry);
@@ -1458,6 +1484,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
 					&data_version, &scb[0]);
 		afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
 					&data_version_2, &scb[1]);
+		afs_update_dentry_version(&fc, dentry, &scb[0]);
 		ret = afs_end_vnode_operation(&fc);
 		if (ret == 0 && !(scb[1].have_status || scb[1].have_error))
 			ret = afs_dir_remove_link(dvnode, dentry, key);
@@ -1526,6 +1553,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 		afs_check_for_remote_deletion(&fc, dvnode);
 		afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
 					&data_version, &scb[0]);
+		afs_update_dentry_version(&fc, dentry, &scb[0]);
 		afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
 		ret = afs_end_vnode_operation(&fc);
 		if (ret < 0)
@@ -1607,6 +1635,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
 		afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
 					NULL, &scb[1]);
 		ihold(&vnode->vfs_inode);
+		afs_update_dentry_version(&fc, dentry, &scb[0]);
 		d_instantiate(dentry, &vnode->vfs_inode);
 
 		mutex_unlock(&vnode->io_lock);
@@ -1686,6 +1715,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
 		afs_check_for_remote_deletion(&fc, dvnode);
 		afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
 					&data_version, &scb[0]);
+		afs_update_dentry_version(&fc, dentry, &scb[0]);
 		afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
 		ret = afs_end_vnode_operation(&fc);
 		if (ret < 0)
@@ -1791,6 +1821,17 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		}
 	}
 
+	/* This bit is potentially nasty as there's a potential race with
+	 * afs_d_revalidate{,_rcu}().  We have to change d_fsdata on the dentry
+	 * to reflect its new parent's new data_version after the op, but
+	 * d_revalidate may see old_dentry between the op having taken place
+	 * and the version being updated.
+	 *
+	 * So drop the old_dentry for now to make other threads go through
+	 * lookup instead - which we hold a lock against.
+	 */
+	d_drop(old_dentry);
+
 	ret = -ERESTARTSYS;
 	if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) {
 		afs_dataversion_t orig_data_version;
@@ -1802,9 +1843,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		if (orig_dvnode != new_dvnode) {
 			if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
 				afs_end_vnode_operation(&fc);
-				goto error_rehash;
+				goto error_rehash_old;
 			}
-			new_data_version = new_dvnode->status.data_version;
+			new_data_version = new_dvnode->status.data_version + 1;
 		} else {
 			new_data_version = orig_data_version;
 			new_scb = &scb[0];
@@ -1827,7 +1868,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		}
 		ret = afs_end_vnode_operation(&fc);
 		if (ret < 0)
-			goto error_rehash;
+			goto error_rehash_old;
 	}
 
 	if (ret == 0) {
@@ -1853,10 +1894,26 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
 			drop_nlink(new_inode);
 			spin_unlock(&new_inode->i_lock);
 		}
+
+		/* Now we can update d_fsdata on the dentries to reflect their
+		 * new parent's data_version.
+		 *
+		 * Note that if we ever implement RENAME_EXCHANGE, we'll have
+		 * to update both dentries with opposing dir versions.
+		 */
+		if (new_dvnode != orig_dvnode) {
+			afs_update_dentry_version(&fc, old_dentry, &scb[1]);
+			afs_update_dentry_version(&fc, new_dentry, &scb[1]);
+		} else {
+			afs_update_dentry_version(&fc, old_dentry, &scb[0]);
+			afs_update_dentry_version(&fc, new_dentry, &scb[0]);
+		}
 		d_move(old_dentry, new_dentry);
 		goto error_tmp;
 	}
 
+error_rehash_old:
+	d_rehash(new_dentry);
 error_rehash:
 	if (rehash)
 		d_rehash(rehash);
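The dir.c changes above keep an afs_dataversion_t in dentry->d_fsdata, a slot that is only pointer-sized, so on 32-bit hosts the stored version is truncated; equality on the low bits still detects "unchanged", and the signed difference test tolerates wraparound as long as the two values stay within LONG_MAX of each other. A userspace sketch of the encoding (the types here are stand-ins):

#include <stdio.h>

typedef unsigned long long dataversion_t;

int main(void)
{
	dataversion_t dir_version = 0x100000001ULL;	/* would truncate on 32-bit */
	void *d_fsdata = (void *)(unsigned long)dir_version;
	long de_version = (long)d_fsdata;

	if (de_version == (long)dir_version)
		printf("dentry still matches the directory version\n");

	/* wrap-safe ordering test: non-negative difference means "not older" */
	if (de_version - (long)dir_version >= 0)
		printf("dentry version is current or newer\n");
	return 0;
}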
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 56b69576274d..dd3c55c9101c 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -191,11 +191,13 @@ void afs_put_read(struct afs_read *req)
 	int i;
 
 	if (refcount_dec_and_test(&req->usage)) {
-		for (i = 0; i < req->nr_pages; i++)
-			if (req->pages[i])
-				put_page(req->pages[i]);
-		if (req->pages != req->array)
-			kfree(req->pages);
+		if (req->pages) {
+			for (i = 0; i < req->nr_pages; i++)
+				if (req->pages[i])
+					put_page(req->pages[i]);
+			if (req->pages != req->array)
+				kfree(req->pages);
+		}
 		kfree(req);
 	}
 }
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index d7e0fd3c00df..cfb0ac4bd039 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -56,23 +56,24 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
 			struct afs_uuid__xdr *xdr;
 			struct afs_uuid *uuid;
 			int j;
+			int n = entry->nr_servers;
 
 			tmp = ntohl(uvldb->serverFlags[i]);
 			if (tmp & AFS_VLSF_DONTUSE ||
 			    (new_only && !(tmp & AFS_VLSF_NEWREPSITE)))
 				continue;
 			if (tmp & AFS_VLSF_RWVOL) {
-				entry->fs_mask[i] |= AFS_VOL_VTM_RW;
+				entry->fs_mask[n] |= AFS_VOL_VTM_RW;
 				if (vlflags & AFS_VLF_BACKEXISTS)
-					entry->fs_mask[i] |= AFS_VOL_VTM_BAK;
+					entry->fs_mask[n] |= AFS_VOL_VTM_BAK;
 			}
 			if (tmp & AFS_VLSF_ROVOL)
-				entry->fs_mask[i] |= AFS_VOL_VTM_RO;
-			if (!entry->fs_mask[i])
+				entry->fs_mask[n] |= AFS_VOL_VTM_RO;
+			if (!entry->fs_mask[n])
 				continue;
 
 			xdr = &uvldb->serverNumber[i];
-			uuid = (struct afs_uuid *)&entry->fs_server[i];
+			uuid = (struct afs_uuid *)&entry->fs_server[n];
 			uuid->time_low = xdr->time_low;
 			uuid->time_mid = htons(ntohl(xdr->time_mid));
 			uuid->time_hi_and_version = htons(ntohl(xdr->time_hi_and_version));
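The vlclient fix above is the usual filter-and-compact idiom: the read index i walks every candidate entry, while a separate write index n (here seeded from entry->nr_servers) advances only for entries that are kept, so accepted servers land densely at the front of the output arrays. A standalone sketch of the same shape:

#include <stdio.h>

int main(void)
{
	int src[6] = { 3, -1, 7, -4, 2, -9 };
	int dst[6];
	int n = 0;	/* write index: advances only for kept entries */

	for (int i = 0; i < 6; i++) {
		if (src[i] < 0)
			continue;	/* skipped entries leave no hole */
		dst[n++] = src[i];
	}
	for (int i = 0; i < n; i++)
		printf("%d ", dst[i]);	/* prints: 3 7 2 */
	printf("\n");
	return 0;
}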
diff --git a/fs/block_dev.c b/fs/block_dev.c
index eb657ab94060..677cb364d33f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -345,24 +345,15 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	struct bio *bio;
 	bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
 	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
-	bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0;
 	loff_t pos = iocb->ki_pos;
 	blk_qc_t qc = BLK_QC_T_NONE;
-	gfp_t gfp;
-	int ret;
+	int ret = 0;
 
 	if ((pos | iov_iter_alignment(iter)) &
 	    (bdev_logical_block_size(bdev) - 1))
 		return -EINVAL;
 
-	if (nowait)
-		gfp = GFP_NOWAIT;
-	else
-		gfp = GFP_KERNEL;
-
-	bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool);
-	if (!bio)
-		return -EAGAIN;
+	bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
 
 	dio = container_of(bio, struct blkdev_dio, bio);
 	dio->is_sync = is_sync = is_sync_kiocb(iocb);
@@ -384,7 +375,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	if (!is_poll)
 		blk_start_plug(&plug);
 
-	ret = 0;
 	for (;;) {
 		bio_set_dev(bio, bdev);
 		bio->bi_iter.bi_sector = pos >> 9;
@@ -409,14 +399,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			task_io_account_write(bio->bi_iter.bi_size);
 		}
 
-		/*
-		 * Tell underlying layer to not block for resource shortage.
-		 * And if we would have blocked, return error inline instead
-		 * of through the bio->bi_end_io() callback.
-		 */
-		if (nowait)
-			bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE);
-
+		dio->size += bio->bi_iter.bi_size;
 		pos += bio->bi_iter.bi_size;
 
 		nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
@@ -428,13 +411,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 				polled = true;
 			}
 
-			dio->size += bio->bi_iter.bi_size;
 			qc = submit_bio(bio);
-			if (qc == BLK_QC_T_EAGAIN) {
-				dio->size -= bio->bi_iter.bi_size;
-				ret = -EAGAIN;
-				goto error;
-			}
 
 			if (polled)
 				WRITE_ONCE(iocb->ki_cookie, qc);
@@ -455,19 +432,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			atomic_inc(&dio->ref);
 		}
 
-		dio->size += bio->bi_iter.bi_size;
-		qc = submit_bio(bio);
-		if (qc == BLK_QC_T_EAGAIN) {
-			dio->size -= bio->bi_iter.bi_size;
-			ret = -EAGAIN;
-			goto error;
-		}
-
-		bio = bio_alloc(gfp, nr_pages);
-		if (!bio) {
-			ret = -EAGAIN;
-			goto error;
-		}
+		submit_bio(bio);
+		bio = bio_alloc(GFP_KERNEL, nr_pages);
 	}
 
 	if (!is_poll)
@@ -487,7 +453,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	}
 	__set_current_state(TASK_RUNNING);
 
-out:
 	if (!ret)
 		ret = blk_status_to_errno(dio->bio.bi_status);
 	if (likely(!ret))
@@ -495,10 +460,6 @@ out:
 
 	bio_put(&dio->bio);
 	return ret;
-error:
-	if (!is_poll)
-		blk_finish_plug(&plug);
-	goto out;
 }
 
 static ssize_t
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 299e11e6c554..94660063a162 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -401,7 +401,6 @@ struct btrfs_dev_replace {
 struct raid_kobject {
 	u64 flags;
 	struct kobject kobj;
-	struct list_head list;
 };
 
 /*
@@ -915,8 +914,6 @@ struct btrfs_fs_info {
 	u32 thread_pool_size;
 
 	struct kobject *space_info_kobj;
-	struct list_head pending_raid_kobjs;
-	spinlock_t pending_raid_kobjs_lock; /* uncontended */
 
 	u64 total_pinned;
 
@@ -2698,7 +2695,6 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 			   u64 bytes_used, u64 type, u64 chunk_offset,
 			   u64 size);
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info);
 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
 				struct btrfs_fs_info *fs_info,
 				const u64 chunk_offset);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5f7ee70b3d1a..97beb351a10c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2683,8 +2683,6 @@ int open_ctree(struct super_block *sb,
 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
-	INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
-	spin_lock_init(&fs_info->pending_raid_kobjs_lock);
 	spin_lock_init(&fs_info->delalloc_root_lock);
 	spin_lock_init(&fs_info->trans_lock);
 	spin_lock_init(&fs_info->fs_roots_radix_lock);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d3b58e388535..8b7eb22d508a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4,6 +4,7 @@
  */
 
 #include <linux/sched.h>
+#include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
 #include <linux/pagemap.h>
 #include <linux/writeback.h>
@@ -7888,33 +7889,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 	return 0;
 }
 
-/* link_block_group will queue up kobjects to add when we're reclaim-safe */
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info)
-{
-	struct btrfs_space_info *space_info;
-	struct raid_kobject *rkobj;
-	LIST_HEAD(list);
-	int ret = 0;
-
-	spin_lock(&fs_info->pending_raid_kobjs_lock);
-	list_splice_init(&fs_info->pending_raid_kobjs, &list);
-	spin_unlock(&fs_info->pending_raid_kobjs_lock);
-
-	list_for_each_entry(rkobj, &list, list) {
-		space_info = btrfs_find_space_info(fs_info, rkobj->flags);
-
-		ret = kobject_add(&rkobj->kobj, &space_info->kobj,
-				  "%s", btrfs_bg_type_to_raid_name(rkobj->flags));
-		if (ret) {
-			kobject_put(&rkobj->kobj);
-			break;
-		}
-	}
-	if (ret)
-		btrfs_warn(fs_info,
-			   "failed to add kobject for block cache, ignoring");
-}
-
 static void link_block_group(struct btrfs_block_group_cache *cache)
 {
 	struct btrfs_space_info *space_info = cache->space_info;
@@ -7929,18 +7903,36 @@ static void link_block_group(struct btrfs_block_group_cache *cache)
 	up_write(&space_info->groups_sem);
 
 	if (first) {
-		struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
+		struct raid_kobject *rkobj;
+		unsigned int nofs_flag;
+		int ret;
+
+		/*
+		 * Setup a NOFS context because kobject_add(), deep in its call
+		 * chain, does GFP_KERNEL allocations, and we are often called
+		 * in a context where if reclaim is triggered we can deadlock
+		 * (we are either holding a transaction handle or some lock
+		 * required for a transaction commit).
+		 */
+		nofs_flag = memalloc_nofs_save();
+		rkobj = kzalloc(sizeof(*rkobj), GFP_KERNEL);
 		if (!rkobj) {
+			memalloc_nofs_restore(nofs_flag);
 			btrfs_warn(cache->fs_info,
 				   "couldn't alloc memory for raid level kobject");
 			return;
 		}
 		rkobj->flags = cache->flags;
 		kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
-
-		spin_lock(&fs_info->pending_raid_kobjs_lock);
-		list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs);
-		spin_unlock(&fs_info->pending_raid_kobjs_lock);
+		ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s",
+				  btrfs_bg_type_to_raid_name(rkobj->flags));
+		memalloc_nofs_restore(nofs_flag);
+		if (ret) {
+			kobject_put(&rkobj->kobj);
+			btrfs_warn(fs_info,
+				   "failed to add kobject for block cache, ignoring");
+			return;
+		}
 		space_info->block_group_kobjs[index] = &rkobj->kobj;
 	}
 }
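The link_block_group() hunk above relies on memalloc_nofs_save()/memalloc_nofs_restore(): instead of forcing GFP_NOFS into every allocation that kobject_add() makes deep in its call chain, the caller marks the task so that ordinary GFP_KERNEL allocations are transparently demoted while transaction-related locks are held. A userspace model of that save/restore pattern (toy flag names, not the kernel API):

#include <stdio.h>

#define ALLOC_FS	0x1	/* allocation may recurse into the filesystem */

static __thread unsigned int alloc_mask = ALLOC_FS;

static unsigned int nofs_save(void)
{
	unsigned int old = alloc_mask;

	alloc_mask &= ~ALLOC_FS;	/* demote allocations on this thread */
	return old;
}

static void nofs_restore(unsigned int old)
{
	alloc_mask = old;
}

static void helper_that_allocates(void)
{
	/* a shared helper asks for ALLOC_FS, but the thread mask wins */
	unsigned int effective = ALLOC_FS & alloc_mask;

	printf("allocating %s\n",
	       effective & ALLOC_FS ? "with FS reclaim" : "NOFS");
}

int main(void)
{
	unsigned int flag = nofs_save();	/* e.g. we hold a transaction */

	helper_that_allocates();		/* prints "NOFS" */
	nofs_restore(flag);
	helper_that_allocates();		/* prints "with FS reclaim" */
	return 0;
}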
@@ -8206,7 +8198,6 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 			inc_block_group_ro(cache, 1);
 	}
 
-	btrfs_add_raid_kobjects(info);
 	btrfs_init_global_block_rsv(info);
 	ret = check_chunk_block_group_mappings(info);
 error:
@@ -8975,6 +8966,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 	struct btrfs_device *device;
 	struct list_head *devices;
 	u64 group_trimmed;
+	u64 range_end = U64_MAX;
 	u64 start;
 	u64 end;
 	u64 trimmed = 0;
@@ -8984,16 +8976,23 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 	int dev_ret = 0;
 	int ret = 0;
 
+	/*
+	 * Check range overflow if range->len is set.
+	 * The default range->len is U64_MAX.
+	 */
+	if (range->len != U64_MAX &&
+	    check_add_overflow(range->start, range->len, &range_end))
+		return -EINVAL;
+
 	cache = btrfs_lookup_first_block_group(fs_info, range->start);
 	for (; cache; cache = next_block_group(cache)) {
-		if (cache->key.objectid >= (range->start + range->len)) {
+		if (cache->key.objectid >= range_end) {
 			btrfs_put_block_group(cache);
 			break;
 		}
 
 		start = max(range->start, cache->key.objectid);
-		end = min(range->start + range->len,
-			  cache->key.objectid + cache->key.offset);
+		end = min(range_end, cache->key.objectid + cache->key.offset);
 
 		if (end - start >= range->minlen) {
 			if (!block_group_cache_done(cache)) {
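The btrfs_trim_fs() hunk uses check_add_overflow(), the kernel's wrapper around the compiler overflow builtins: it stores the wrapped sum and returns true on overflow, so a crafted range->start plus range->len can no longer wrap past U64_MAX and hand the loop a bogus small bound. A standalone sketch using the underlying GCC/clang builtin:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = UINT64_MAX - 10, len = 100, end;

	/* returns true if start + len overflowed; end holds the wrapped sum */
	if (__builtin_add_overflow(start, len, &end)) {
		fprintf(stderr, "range wraps: rejecting with -EINVAL\n");
		return 1;
	}
	printf("range end: %llu\n", (unsigned long long)end);
	return 0;
}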
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index d74b74ca07af..a447d3ec48d5 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3087,16 +3087,6 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
 	if (ret)
 		return ret;
 
-	/*
-	 * We add the kobjects here (and after forcing data chunk creation)
-	 * since relocation is the only place we'll create chunks of a new
-	 * type at runtime.  The only place where we'll remove the last
-	 * chunk of a type is the call immediately below this one.  Even
-	 * so, we're protected against races with the cleaner thread since
-	 * we're covered by the delete_unused_bgs_mutex.
-	 */
-	btrfs_add_raid_kobjects(fs_info);
-
 	trans = btrfs_start_trans_remove_block_group(root->fs_info,
 						     chunk_offset);
 	if (IS_ERR(trans)) {
@@ -3223,9 +3213,6 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
 			btrfs_end_transaction(trans);
 			if (ret < 0)
 				return ret;
-
-			btrfs_add_raid_kobjects(fs_info);
-
 			return 1;
 		}
 	}
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d542f1cf4428..24bbe3cb7ad4 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1097,10 +1097,8 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
 
 			iter->bvec = bvec + seg_skip;
 			iter->nr_segs -= seg_skip;
-			iter->count -= (seg_skip << PAGE_SHIFT);
+			iter->count -= bvec->bv_len + offset;
 			iter->iov_offset = offset & ~PAGE_MASK;
-			if (iter->iov_offset)
-				iter->count -= iter->iov_offset;
 		}
 	}
 
@@ -2025,6 +2023,15 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 {
 	int ret;
 
+	ret = io_req_defer(ctx, req, s->sqe);
+	if (ret) {
+		if (ret != -EIOCBQUEUED) {
+			io_free_req(req);
+			io_cqring_add_event(ctx, s->sqe->user_data, ret);
+		}
+		return 0;
+	}
+
 	ret = __io_submit_sqe(ctx, req, s, true);
 	if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
 		struct io_uring_sqe *sqe_copy;
@@ -2097,13 +2104,6 @@ err:
 		return;
 	}
 
-	ret = io_req_defer(ctx, req, s->sqe);
-	if (ret) {
-		if (ret != -EIOCBQUEUED)
-			goto err_req;
-		return;
-	}
-
 	/*
 	 * If we already have a head request, queue this one for async
 	 * submittal once the head completes. If we don't have a head but
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 04f09689cd6d..1600034a929b 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
 		}
 		if (seq_has_overflowed(m))
 			goto Eoverflow;
+		p = m->op->next(m, p, &m->index);
 		if (pos + m->count > offset) {
 			m->from = offset - pos;
 			m->count -= m->from;
@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
 		}
 		pos += m->count;
 		m->count = 0;
-		p = m->op->next(m, p, &m->index);
 		if (pos == offset)
 			break;
 	}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index baf0b72c0a37..07aad70f3931 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3835,15 +3835,28 @@ xfs_bmapi_read(
 	XFS_STATS_INC(mp, xs_blk_mapr);
 
 	ifp = XFS_IFORK_PTR(ip, whichfork);
+	if (!ifp) {
+		/* No CoW fork?  Return a hole. */
+		if (whichfork == XFS_COW_FORK) {
+			mval->br_startoff = bno;
+			mval->br_startblock = HOLESTARTBLOCK;
+			mval->br_blockcount = len;
+			mval->br_state = XFS_EXT_NORM;
+			*nmap = 1;
+			return 0;
+		}
 
-	/* No CoW fork?  Return a hole. */
-	if (whichfork == XFS_COW_FORK && !ifp) {
-		mval->br_startoff = bno;
-		mval->br_startblock = HOLESTARTBLOCK;
-		mval->br_blockcount = len;
-		mval->br_state = XFS_EXT_NORM;
-		*nmap = 1;
-		return 0;
+		/*
+		 * A missing attr ifork implies that the inode says we're in
+		 * extents or btree format but failed to pass the inode fork
+		 * verifier while trying to load it. Treat that as a file
+		 * corruption too.
+		 */
+#ifdef DEBUG
+		xfs_alert(mp, "%s: inode %llu missing fork %d",
+				__func__, ip->i_ino, whichfork);
+#endif /* DEBUG */
+		return -EFSCORRUPTED;
 	}
 
 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index d1c77fd0815d..0bf56e94bfe9 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -487,10 +487,8 @@ xfs_da3_split(
 		ASSERT(state->path.active == 0);
 		oldblk = &state->path.blk[0];
 		error = xfs_da3_root_split(state, oldblk, addblk);
-		if (error) {
-			addblk->bp = NULL;
-			return error;	/* GROT: dir is inconsistent */
-		}
+		if (error)
+			goto out;
 
 		/*
 		 * Update pointers to the node which used to be block 0 and just got
@@ -505,7 +503,10 @@ xfs_da3_split(
 		 */
 		node = oldblk->bp->b_addr;
 		if (node->hdr.info.forw) {
-			ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno);
+			if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
+				error = -EFSCORRUPTED;
+				goto out;
+			}
 			node = addblk->bp->b_addr;
 			node->hdr.info.back = cpu_to_be32(oldblk->blkno);
 			xfs_trans_log_buf(state->args->trans, addblk->bp,
@@ -514,15 +515,19 @@ xfs_da3_split(
 		}
 		node = oldblk->bp->b_addr;
 		if (node->hdr.info.back) {
-			ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno);
+			if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
+				error = -EFSCORRUPTED;
+				goto out;
+			}
 			node = addblk->bp->b_addr;
 			node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
 			xfs_trans_log_buf(state->args->trans, addblk->bp,
 					XFS_DA_LOGRANGE(node, &node->hdr.info,
 					sizeof(node->hdr.info)));
 		}
+out:
 	addblk->bp = NULL;
-	return 0;
+	return error;
 }
 
 /*
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index afcc6642690a..1fc44efc344d 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -741,7 +741,8 @@ xfs_dir2_leafn_lookup_for_entry(
 	ents = dp->d_ops->leaf_ents_p(leaf);
 
 	xfs_dir3_leaf_check(dp, bp);
-	ASSERT(leafhdr.count > 0);
+	if (leafhdr.count <= 0)
+		return -EFSCORRUPTED;
 
 	/*
 	 * Look up the hash value in the leaf entries.
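The xfs hunks above share one theme: ASSERT() only fires in debug builds, so corrupted on-disk metadata would sail straight past those checks in production, whereas returning -EFSCORRUPTED fails the operation safely in every build. A hedged sketch of the shape of that conversion (toy function, not the xfs code):

#include <stdio.h>

#define EFSCORRUPTED 117	/* matches the Linux errno value */

static int lookup_checked(int leaf_count)
{
	/* before: assert(leaf_count > 0), which compiles out with NDEBUG */
	if (leaf_count <= 0)
		return -EFSCORRUPTED;	/* after: checked in every build */
	return 0;
}

int main(void)
{
	printf("%d\n", lookup_checked(0));	/* -117 */
	printf("%d\n", lookup_checked(4));	/* 0 */
	return 0;
}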
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 00e9f5c388d3..7fc3c1ad36bc 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -429,10 +429,7 @@ xfs_log_reserve(
 
 	ASSERT(*ticp == NULL);
 	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
-				KM_SLEEP | KM_MAYFAIL);
-	if (!tic)
-		return -ENOMEM;
-
+				KM_SLEEP);
 	*ticp = tic;
 
 	xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index bb6cb347018c..f6947da70d71 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -19,9 +19,24 @@
 
 #define p4d_alloc(mm, pgd, address)	(pgd)
 #define p4d_offset(pgd, start)	(pgd)
-#define p4d_none(p4d)	0
-#define p4d_bad(p4d)	0
-#define p4d_present(p4d)	1
+
+#ifndef __ASSEMBLY__
+static inline int p4d_none(p4d_t p4d)
+{
+	return 0;
+}
+
+static inline int p4d_bad(p4d_t p4d)
+{
+	return 0;
+}
+
+static inline int p4d_present(p4d_t p4d)
+{
+	return 1;
+}
+#endif
+
 #define p4d_ERROR(p4d)	do { } while (0)
 #define p4d_clear(p4d)	pgd_clear(p4d)
 #define p4d_val(p4d)	pgd_val(p4d)
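The 5level-fixup change converts object-like macros into static inline functions guarded by #ifndef __ASSEMBLY__; unlike the macros, the inlines type-check their argument, so passing something that is not a p4d_t becomes a compile error instead of silently expanding to a constant. A standalone sketch of the difference (toy p4d_t, not the kernel's):

#include <stdio.h>

typedef struct { unsigned long v; } p4d_t;

#define P4D_NONE_MACRO(p4d)	0	/* accepts anything, even an int */

static inline int p4d_none_inline(p4d_t p4d)
{
	(void)p4d;
	return 0;	/* the argument must really be a p4d_t */
}

int main(void)
{
	p4d_t p4d = { 0 };

	printf("%d %d\n", P4D_NONE_MACRO(42), p4d_none_inline(p4d));
	/* p4d_none_inline(42) would be rejected at compile time */
	return 0;
}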
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 1b1fa1557e68..feff3fe4467e 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -311,7 +311,6 @@ enum req_flag_bits {
 	__REQ_RAHEAD,		/* read ahead, can fail anytime */
 	__REQ_BACKGROUND,	/* background IO */
 	__REQ_NOWAIT,		/* Don't wait if request will block */
-	__REQ_NOWAIT_INLINE,	/* Return would-block error inline */
 	/*
 	 * When a shared kthread needs to issue a bio for a cgroup, doing
 	 * so synchronously can lead to priority inversions as the kthread
@@ -346,7 +345,6 @@ enum req_flag_bits {
 #define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
 #define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
 #define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
-#define REQ_NOWAIT_INLINE	(1ULL << __REQ_NOWAIT_INLINE)
 #define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)
 
 #define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
@@ -420,13 +418,12 @@ static inline int op_stat_group(unsigned int op)
 
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE		-1U
-#define BLK_QC_T_EAGAIN		-2U
 #define BLK_QC_T_SHIFT		16
 #define BLK_QC_T_INTERNAL	(1U << 31)
 
 static inline bool blk_qc_t_valid(blk_qc_t cookie)
 {
-	return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
+	return cookie != BLK_QC_T_NONE;
 }
 
 static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 3813211a9aad..0bff3d7fac92 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -42,13 +42,18 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
 long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 		dma_addr_t dma_addr);
-
-#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 		unsigned long attrs);
+
+#ifdef CONFIG_MMU
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
 #else
-# define arch_dma_mmap_pgprot(dev, prot, attrs)	pgprot_noncached(prot)
-#endif
+static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
+		unsigned long attrs)
+{
+	return prot;	/* no protection bits supported without page tables */
+}
+#endif /* CONFIG_MMU */
 
 #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index fb07b503dc45..f33881688f42 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -510,22 +510,18 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
 }
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
 			struct vm_area_struct *vma, unsigned long addr,
-			int node, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
-	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
+			int node);
 #else
 #define alloc_pages(gfp_mask, order) \
 		alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
-	alloc_pages(gfp_mask, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\
 	alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 #define alloc_page_vma(gfp_mask, vma, addr) \
-	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
+	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
 #define alloc_page_vma_node(gfp_mask, vma, addr, node) \
-	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
+	alloc_pages_vma(gfp_mask, 0, vma, addr, node)
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 44c41462be33..2cd4359cb38c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -668,6 +668,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 
 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 			int val);
+void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
 
 static inline void mod_lruvec_state(struct lruvec *lruvec,
 				    enum node_stat_item idx, int val)
@@ -1072,6 +1073,14 @@ static inline void mod_lruvec_page_state(struct page *page,
 	mod_node_page_state(page_pgdat(page), idx, val);
 }
 
+static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+					   int val)
+{
+	struct page *page = virt_to_head_page(p);
+
+	__mod_node_page_state(page_pgdat(page), idx, val);
+}
+
 static inline
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 					    gfp_t gfp_mask,
@@ -1159,6 +1168,16 @@ static inline void __dec_lruvec_page_state(struct page *page,
 	__mod_lruvec_page_state(page, idx, -1);
 }
 
+static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
+{
+	__mod_lruvec_slab_state(p, idx, 1);
+}
+
+static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
+{
+	__mod_lruvec_slab_state(p, idx, -1);
+}
+
 /* idx can be of type enum memcg_stat_item or node_stat_item */
 static inline void inc_memcg_state(struct mem_cgroup *memcg,
 				   int idx)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5228c62af416..bac395f1d00a 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -139,6 +139,8 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
139struct mempolicy *get_task_policy(struct task_struct *p); 139struct mempolicy *get_task_policy(struct task_struct *p);
140struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 140struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
141 unsigned long addr); 141 unsigned long addr);
142struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
143 unsigned long addr);
142bool vma_policy_mof(struct vm_area_struct *vma); 144bool vma_policy_mof(struct vm_area_struct *vma);
143 145
144extern void numa_default_policy(void); 146extern void numa_default_policy(void);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3a37a89eb7a7..6a7a1083b6fb 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -159,7 +159,16 @@ struct page {
159 /** @pgmap: Points to the hosting device page map. */ 159 /** @pgmap: Points to the hosting device page map. */
160 struct dev_pagemap *pgmap; 160 struct dev_pagemap *pgmap;
161 void *zone_device_data; 161 void *zone_device_data;
162 unsigned long _zd_pad_1; /* uses mapping */ 162 /*
163 * ZONE_DEVICE private pages are counted as being
164 * mapped so the next 3 words hold the mapping, index,
165 * and private fields from the source anonymous or
166 * page cache page while the page is migrated to device
167 * private memory.
168 * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
169 * use the mapping, index, and private fields when
170 * pmem backed DAX files are mapped.
171 */
163 }; 172 };
164 173
165 /** @rcu_head: You can use this to free a page by RCU. */ 174 /** @rcu_head: You can use this to free a page by RCU. */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 9e700d9f9f28..82e4cd1b7ac3 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1567,8 +1567,10 @@ extern bool pcie_ports_native;
1567 1567
1568#ifdef CONFIG_PCIEASPM 1568#ifdef CONFIG_PCIEASPM
1569bool pcie_aspm_support_enabled(void); 1569bool pcie_aspm_support_enabled(void);
1570bool pcie_aspm_enabled(struct pci_dev *pdev);
1570#else 1571#else
1571static inline bool pcie_aspm_support_enabled(void) { return false; } 1572static inline bool pcie_aspm_support_enabled(void) { return false; }
1573static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1572#endif 1574#endif
1573 1575
1574#ifdef CONFIG_PCIEAER 1576#ifdef CONFIG_PCIEAER
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 57f667cad3ec..b548c530f988 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1463,7 +1463,7 @@ typedef void (*usb_complete_t)(struct urb *);
1463 * field rather than determining a dma address themselves. 1463 * field rather than determining a dma address themselves.
1464 * 1464 *
1465 * Note that transfer_buffer must still be set if the controller 1465 * Note that transfer_buffer must still be set if the controller
1466 * does not support DMA (as indicated by bus.uses_dma) and when talking 1466 * does not support DMA (as indicated by hcd_uses_dma()) and when talking
1467 * to root hub. If you have to transfer between highmem zone and the device 1467 * to root hub. If you have to transfer between highmem zone and the device
1468 * on such controller, create a bounce buffer or bail out with an error. 1468 * on such controller, create a bounce buffer or bail out with an error.
1469 * If transfer_buffer cannot be set (is in highmem) and the controller is DMA 1469 * If transfer_buffer cannot be set (is in highmem) and the controller is DMA
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index bab27ccc8ff5..a20e7815d814 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -422,6 +422,9 @@ static inline bool hcd_periodic_completion_in_progress(struct usb_hcd *hcd,
422 return hcd->high_prio_bh.completing_ep == ep; 422 return hcd->high_prio_bh.completing_ep == ep;
423} 423}
424 424
425#define hcd_uses_dma(hcd) \
426 (IS_ENABLED(CONFIG_HAS_DMA) && (hcd)->self.uses_dma)
427
425extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); 428extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
426extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb, 429extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
427 int status); 430 int status);
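
The new hcd_uses_dma() macro combines a build-time gate with a runtime flag, so hosts built without CONFIG_HAS_DMA constant-fold every hcd_uses_dma() test to false and the DMA paths compile out. A standalone sketch of the same pattern, with the kconfig symbol reduced to a plain macro (names here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* stand-in for CONFIG_HAS_DMA=y; set to 0 to model a no-DMA build */
#define CONFIG_HAS_DMA_ENABLED 1

struct usb_bus { bool uses_dma; };
struct usb_hcd { struct usb_bus self; };

/* compile-time gate && runtime flag, as in hcd_uses_dma() */
#define hcd_uses_dma(hcd) \
	(CONFIG_HAS_DMA_ENABLED && (hcd)->self.uses_dma)

int main(void)
{
	struct usb_hcd pio = { .self = { .uses_dma = false } };
	struct usb_hcd dma = { .self = { .uses_dma = true } };

	printf("pio hcd: %d\n", hcd_uses_dma(&pio)); /* 0 */
	printf("dma hcd: %d\n", hcd_uses_dma(&dma)); /* 1 */
	return 0;
}

Because && short-circuits, the uses_dma load is dead code whenever the config gate is 0, which is why the usb.h documentation above can now point at hcd_uses_dma() instead of the raw bus.uses_dma field.
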
diff --git a/include/uapi/rdma/siw-abi.h b/include/uapi/rdma/siw-abi.h
index 7de68f1dc707..af735f55b291 100644
--- a/include/uapi/rdma/siw-abi.h
+++ b/include/uapi/rdma/siw-abi.h
@@ -180,6 +180,7 @@ struct siw_cqe {
180 * to control CQ arming. 180 * to control CQ arming.
181 */ 181 */
182struct siw_cq_ctrl { 182struct siw_cq_ctrl {
183 __aligned_u64 notify; 183 __u32 flags;
184 __u32 pad;
184}; 185};
185#endif 186#endif
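
The siw_cq_ctrl change swaps one 64-bit field for a 32-bit flags word plus explicit padding, which keeps the userspace-visible struct at the same 8-byte size and alignment. A minimal check of that invariant, using plain stdint types in place of the UAPI __u32/__aligned_u64 typedefs:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct siw_cq_ctrl_old { uint64_t notify; };
struct siw_cq_ctrl_new { uint32_t flags; uint32_t pad; };

int main(void)
{
	/* explicit padding keeps the ABI size unchanged */
	assert(sizeof(struct siw_cq_ctrl_old) ==
	       sizeof(struct siw_cq_ctrl_new));
	printf("both layouts are %zu bytes\n",
	       sizeof(struct siw_cq_ctrl_new));
	return 0;
}
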
diff --git a/kernel/configs.c b/kernel/configs.c
index b062425ccf8d..c09ea4c995e1 100644
--- a/kernel/configs.c
+++ b/kernel/configs.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
1/* 2/*
2 * kernel/configs.c 3 * kernel/configs.c
3 * Echo the kernel .config file used to build the kernel 4 * Echo the kernel .config file used to build the kernel
@@ -6,21 +7,6 @@
6 * Copyright (C) 2002 Randy Dunlap <rdunlap@xenotime.net> 7 * Copyright (C) 2002 Randy Dunlap <rdunlap@xenotime.net>
7 * Copyright (C) 2002 Al Stone <ahs3@fc.hp.com> 8 * Copyright (C) 2002 Al Stone <ahs3@fc.hp.com>
8 * Copyright (C) 2002 Hewlett-Packard Company 9 * Copyright (C) 2002 Hewlett-Packard Company
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
18 * NON INFRINGEMENT. See the GNU General Public License for more
19 * details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */ 10 */
25 11
26#include <linux/kernel.h> 12#include <linux/kernel.h>
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 59bdceea3737..795c9b095d75 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -47,9 +47,6 @@ u64 dma_direct_get_required_mask(struct device *dev)
47{ 47{
48 u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT); 48 u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
49 49
50 if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
51 max_dma = dev->bus_dma_mask;
52
53 return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; 50 return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
54} 51}
55 52
@@ -130,10 +127,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
130 if (!page) 127 if (!page)
131 return NULL; 128 return NULL;
132 129
133 if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) { 130 if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
131 !force_dma_unencrypted(dev)) {
134 /* remove any dirty cache lines on the kernel alias */ 132 /* remove any dirty cache lines on the kernel alias */
135 if (!PageHighMem(page)) 133 if (!PageHighMem(page))
136 arch_dma_prep_coherent(page, size); 134 arch_dma_prep_coherent(page, size);
135 *dma_handle = phys_to_dma(dev, page_to_phys(page));
137 /* return the page pointer as the opaque cookie */ 136 /* return the page pointer as the opaque cookie */
138 return page; 137 return page;
139 } 138 }
@@ -178,7 +177,8 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
178{ 177{
179 unsigned int page_order = get_order(size); 178 unsigned int page_order = get_order(size);
180 179
181 if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) { 180 if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
181 !force_dma_unencrypted(dev)) {
182 /* cpu_addr is a struct page cookie, not a kernel address */ 182 /* cpu_addr is a struct page cookie, not a kernel address */
183 __dma_direct_free_pages(dev, size, cpu_addr); 183 __dma_direct_free_pages(dev, size, cpu_addr);
184 return; 184 return;
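
Both hunks gate the DMA_ATTR_NO_KERNEL_MAPPING shortcut on the device not requiring unencrypted memory: with SEV-style memory encryption the buffer must still be remapped, so the opaque-cookie path only applies when force_dma_unencrypted() is false, and the alloc side now also fills in *dma_handle on that path. A reduced model of the gate, with the predicates stubbed out and an illustrative attr bit value (not the kernel's):

#include <stdbool.h>
#include <stdio.h>

#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 2)	/* illustrative bit */

/* stand-in for force_dma_unencrypted(dev) */
static bool force_dma_unencrypted(bool sev_active) { return sev_active; }

static const char *alloc_path(unsigned long attrs, bool sev_active)
{
	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(sev_active))
		return "return page as opaque cookie, no kernel mapping";
	return "map (and possibly decrypt) in the kernel as usual";
}

int main(void)
{
	printf("%s\n", alloc_path(DMA_ATTR_NO_KERNEL_MAPPING, false));
	printf("%s\n", alloc_path(DMA_ATTR_NO_KERNEL_MAPPING, true));
	return 0;
}
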
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index b945239621d8..b0038ca3aa92 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -150,6 +150,23 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
150} 150}
151EXPORT_SYMBOL(dma_get_sgtable_attrs); 151EXPORT_SYMBOL(dma_get_sgtable_attrs);
152 152
153#ifdef CONFIG_MMU
154/*
155 * Return the page attributes used for mapping dma_alloc_* memory, either in
156 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
157 */
158pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
159{
160 if (dev_is_dma_coherent(dev) ||
161 (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
162 (attrs & DMA_ATTR_NON_CONSISTENT)))
163 return prot;
164 if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT))
165 return arch_dma_mmap_pgprot(dev, prot, attrs);
166 return pgprot_noncached(prot);
167}
168#endif /* CONFIG_MMU */
169
153/* 170/*
154 * Create userspace mapping for the DMA-coherent memory. 171 * Create userspace mapping for the DMA-coherent memory.
155 */ 172 */
@@ -164,7 +181,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
164 unsigned long pfn; 181 unsigned long pfn;
165 int ret = -ENXIO; 182 int ret = -ENXIO;
166 183
167 vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs); 184 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
168 185
169 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) 186 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
170 return ret; 187 return ret;
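
The new dma_pgprot() helper centralizes a decision ladder that callers previously open-coded: keep the prot for coherent devices or non-consistent allocations, let an architecture hook override it where one exists, and fall back to a noncached mapping otherwise. A boolean model of the ladder (the IS_ENABLED() config checks are folded into plain booleans, and strings stand in for pgprot_t):

#include <stdbool.h>
#include <stdio.h>

static const char *dma_pgprot_model(bool dev_coherent,
				    bool attr_non_consistent,
				    bool arch_has_mmap_pgprot)
{
	if (dev_coherent || attr_non_consistent)
		return "prot unchanged";
	if (arch_has_mmap_pgprot)
		return "arch_dma_mmap_pgprot(prot)";
	return "pgprot_noncached(prot)";
}

int main(void)
{
	printf("%s\n", dma_pgprot_model(true, false, false));
	printf("%s\n", dma_pgprot_model(false, false, true));
	printf("%s\n", dma_pgprot_model(false, false, false));
	return 0;
}

The dma_common_mmap() hunk above and the remap.c hunk below then consume this one helper instead of calling arch_dma_mmap_pgprot() directly.
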
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index a594aec07882..ffe78f0b2fe4 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -218,7 +218,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
218 218
219 /* create a coherent mapping */ 219 /* create a coherent mapping */
220 ret = dma_common_contiguous_remap(page, size, VM_USERMAP, 220 ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
221 arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs), 221 dma_pgprot(dev, PAGE_KERNEL, attrs),
222 __builtin_return_address(0)); 222 __builtin_return_address(0));
223 if (!ret) { 223 if (!ret) {
224 __dma_direct_free_pages(dev, size, page); 224 __dma_direct_free_pages(dev, size, page);
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 636ca6f88c8e..867b4bb6d4be 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -40,6 +40,7 @@ struct sugov_policy {
40 struct task_struct *thread; 40 struct task_struct *thread;
41 bool work_in_progress; 41 bool work_in_progress;
42 42
43 bool limits_changed;
43 bool need_freq_update; 44 bool need_freq_update;
44}; 45};
45 46
@@ -89,8 +90,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
89 !cpufreq_this_cpu_can_update(sg_policy->policy)) 90 !cpufreq_this_cpu_can_update(sg_policy->policy))
90 return false; 91 return false;
91 92
92 if (unlikely(sg_policy->need_freq_update)) 93 if (unlikely(sg_policy->limits_changed)) {
94 sg_policy->limits_changed = false;
95 sg_policy->need_freq_update = true;
93 return true; 96 return true;
97 }
94 98
95 delta_ns = time - sg_policy->last_freq_update_time; 99 delta_ns = time - sg_policy->last_freq_update_time;
96 100
@@ -437,7 +441,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
437static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy) 441static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
438{ 442{
439 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) 443 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
440 sg_policy->need_freq_update = true; 444 sg_policy->limits_changed = true;
441} 445}
442 446
443static void sugov_update_single(struct update_util_data *hook, u64 time, 447static void sugov_update_single(struct update_util_data *hook, u64 time,
@@ -457,7 +461,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
457 if (!sugov_should_update_freq(sg_policy, time)) 461 if (!sugov_should_update_freq(sg_policy, time))
458 return; 462 return;
459 463
460 busy = sugov_cpu_is_busy(sg_cpu); 464 /* Limits may have changed, don't skip frequency update */
465 busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
461 466
462 util = sugov_get_util(sg_cpu); 467 util = sugov_get_util(sg_cpu);
463 max = sg_cpu->max; 468 max = sg_cpu->max;
@@ -831,6 +836,7 @@ static int sugov_start(struct cpufreq_policy *policy)
831 sg_policy->last_freq_update_time = 0; 836 sg_policy->last_freq_update_time = 0;
832 sg_policy->next_freq = 0; 837 sg_policy->next_freq = 0;
833 sg_policy->work_in_progress = false; 838 sg_policy->work_in_progress = false;
839 sg_policy->limits_changed = false;
834 sg_policy->need_freq_update = false; 840 sg_policy->need_freq_update = false;
835 sg_policy->cached_raw_freq = 0; 841 sg_policy->cached_raw_freq = 0;
836 842
@@ -879,7 +885,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
879 mutex_unlock(&sg_policy->work_lock); 885 mutex_unlock(&sg_policy->work_lock);
880 } 886 }
881 887
882 sg_policy->need_freq_update = true; 888 sg_policy->limits_changed = true;
883} 889}
884 890
885struct cpufreq_governor schedutil_gov = { 891struct cpufreq_governor schedutil_gov = {
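
The schedutil fix splits the overloaded need_freq_update flag in two: sugov_limits() and the deadline path only raise limits_changed, and sugov_should_update_freq() converts that into need_freq_update while bypassing the rate limit, so the "CPU is busy" shortcut in sugov_update_single() can no longer swallow a limits change. A condensed model of the handshake (rate-limit bookkeeping elided):

#include <stdbool.h>
#include <stdio.h>

struct sugov { bool limits_changed; bool need_freq_update; };

static bool should_update_freq(struct sugov *sg)
{
	if (sg->limits_changed) {
		sg->limits_changed = false;
		sg->need_freq_update = true;
		return true;	/* bypass the rate limit */
	}
	return false;	/* rate-limit checks elided in this model */
}

static void update_single(struct sugov *sg, bool cpu_is_busy)
{
	if (!should_update_freq(sg))
		return;
	/* limits may have changed: don't let "busy" skip the update */
	bool busy = !sg->need_freq_update && cpu_is_busy;
	printf("freq update runs, busy shortcut %s\n",
	       busy ? "allowed" : "suppressed");
}

int main(void)
{
	struct sugov sg = { false, false };

	sg.limits_changed = true;	/* as set by sugov_limits() */
	update_single(&sg, true);	/* busy CPU, update still happens */
	return 0;
}
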
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1334ede667a8..738065f765ab 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -644,30 +644,40 @@ release:
644 * available 644 * available
645 * never: never stall for any thp allocation 645 * never: never stall for any thp allocation
646 */ 646 */
647static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma) 647static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr)
648{ 648{
649 const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); 649 const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
650 gfp_t this_node = 0;
651
652#ifdef CONFIG_NUMA
653 struct mempolicy *pol;
654 /*
655 * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not
656 * specified, to express a general desire to stay on the current
657 * node for optimistic allocation attempts. If the defrag mode
658 * and/or madvise hint requires the direct reclaim then we prefer
659 * to fallback to other node rather than node reclaim because that
660 * can lead to excessive reclaim even though there is free memory
661 * on other nodes. We expect that NUMA preferences are specified
662 * by memory policies.
663 */
664 pol = get_vma_policy(vma, addr);
665 if (pol->mode != MPOL_BIND)
666 this_node = __GFP_THISNODE;
667 mpol_cond_put(pol);
668#endif
650 669
651 /* Always do synchronous compaction */
652 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) 670 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
653 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); 671 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
654
655 /* Kick kcompactd and fail quickly */
656 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) 672 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
657 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; 673 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node;
658
659 /* Synchronous compaction if madvised, otherwise kick kcompactd */
660 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) 674 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
661 return GFP_TRANSHUGE_LIGHT | 675 return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
662 (vma_madvised ? __GFP_DIRECT_RECLAIM : 676 __GFP_KSWAPD_RECLAIM | this_node);
663 __GFP_KSWAPD_RECLAIM);
664
665 /* Only do synchronous compaction if madvised */
666 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) 677 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
667 return GFP_TRANSHUGE_LIGHT | 678 return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
668 (vma_madvised ? __GFP_DIRECT_RECLAIM : 0); 679 this_node);
669 680 return GFP_TRANSHUGE_LIGHT | this_node;
670 return GFP_TRANSHUGE_LIGHT;
671} 681}
672 682
673/* Caller must hold page table lock. */ 683/* Caller must hold page table lock. */
@@ -739,8 +749,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
739 pte_free(vma->vm_mm, pgtable); 749 pte_free(vma->vm_mm, pgtable);
740 return ret; 750 return ret;
741 } 751 }
742 gfp = alloc_hugepage_direct_gfpmask(vma); 752 gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
743 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); 753 page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id());
744 if (unlikely(!page)) { 754 if (unlikely(!page)) {
745 count_vm_event(THP_FAULT_FALLBACK); 755 count_vm_event(THP_FAULT_FALLBACK);
746 return VM_FAULT_FALLBACK; 756 return VM_FAULT_FALLBACK;
@@ -1347,8 +1357,9 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
1347alloc: 1357alloc:
1348 if (__transparent_hugepage_enabled(vma) && 1358 if (__transparent_hugepage_enabled(vma) &&
1349 !transparent_hugepage_debug_cow()) { 1359 !transparent_hugepage_debug_cow()) {
1350 huge_gfp = alloc_hugepage_direct_gfpmask(vma); 1360 huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
1351 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); 1361 new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma,
1362 haddr, numa_node_id());
1352 } else 1363 } else
1353 new_page = NULL; 1364 new_page = NULL;
1354 1365
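
alloc_hugepage_direct_gfpmask() now resolves the NUMA decision itself: unless the VMA policy is MPOL_BIND it adds __GFP_THISNODE, but only on allocation modes that avoid direct reclaim, so cheap THP attempts stay node-local while reclaiming modes may fall back to other nodes. A heavily simplified model of the flag selection; the GFP bit values and the reduction of the GFP_TRANSHUGE* bases to bare reclaim bits are illustrative only:

#include <stdio.h>

enum defrag { DEFRAG_ALWAYS, DEFRAG_KSWAPD, DEFRAG_MADV_ONLY };

#define GFP_DIRECT_RECLAIM (1u << 0)	/* illustrative bits */
#define GFP_KSWAPD_RECLAIM (1u << 1)
#define GFP_THISNODE       (1u << 2)

static unsigned thp_gfp(enum defrag mode, int madvised, int mpol_bind)
{
	unsigned this_node = mpol_bind ? 0 : GFP_THISNODE;

	switch (mode) {
	case DEFRAG_ALWAYS:	/* sync compaction: may reclaim, no THISNODE */
		return GFP_DIRECT_RECLAIM;
	case DEFRAG_KSWAPD:	/* async only: safe to pin to this node */
		return GFP_KSWAPD_RECLAIM | this_node;
	case DEFRAG_MADV_ONLY:
		return madvised ? GFP_DIRECT_RECLAIM : this_node;
	}
	return this_node;
}

int main(void)
{
	printf("kswapd, no bind: %#x\n", thp_gfp(DEFRAG_KSWAPD, 0, 0));
	printf("madvised       : %#x\n", thp_gfp(DEFRAG_MADV_ONLY, 1, 0));
	return 0;
}

With the node choice made inside the gfp mask, the call sites above can use plain alloc_pages_vma() and the alloc_hugepage_vma() wrapper goes away.
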
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ede7e7f5d1ab..6d7296dd11b8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3856,6 +3856,25 @@ retry:
3856 3856
3857 page = alloc_huge_page(vma, haddr, 0); 3857 page = alloc_huge_page(vma, haddr, 0);
3858 if (IS_ERR(page)) { 3858 if (IS_ERR(page)) {
3859 /*
3860 * Returning error will result in faulting task being
3861 * sent SIGBUS. The hugetlb fault mutex prevents two
3862 * tasks from racing to fault in the same page which
3863 * could result in false unable to allocate errors.
3864 * Page migration does not take the fault mutex, but
3865 * does a clear then write of pte's under page table
3866 * lock. Page fault code could race with migration,
3867 * notice the clear pte and try to allocate a page
3868 * here. Before returning error, get ptl and make
3869 * sure there really is no pte entry.
3870 */
3871 ptl = huge_pte_lock(h, mm, ptep);
3872 if (!huge_pte_none(huge_ptep_get(ptep))) {
3873 ret = 0;
3874 spin_unlock(ptl);
3875 goto out;
3876 }
3877 spin_unlock(ptl);
3859 ret = vmf_error(PTR_ERR(page)); 3878 ret = vmf_error(PTR_ERR(page));
3860 goto out; 3879 goto out;
3861 } 3880 }
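
Rather than sending SIGBUS straight away, the hugetlb fault path now retakes the page-table lock and rechecks the PTE, since a racing migration (which clears and rewrites PTEs under that lock without holding the fault mutex) may already have reinstalled a valid entry; returning 0 then simply retries the fault. The shape of the pattern, reduced to a lock-protected recheck with a pthread mutex standing in for the huge-PTE lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;
static bool pte_present;	/* stand-in for !huge_pte_none(...) */

/* allocation failed: decide between "retry fault" and "SIGBUS" */
static int handle_alloc_failure(void)
{
	int ret;

	pthread_mutex_lock(&ptl);
	if (pte_present)
		ret = 0;	/* racing migration filled the PTE: retry */
	else
		ret = -1;	/* genuinely empty: report the error */
	pthread_mutex_unlock(&ptl);
	return ret;
}

int main(void)
{
	pte_present = true;	/* simulate the race having happened */
	printf("alloc failure -> %s\n",
	       handle_alloc_failure() == 0 ? "retry" : "SIGBUS");
	return 0;
}
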
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 6e9e8cca663e..f6e602918dac 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1966,6 +1966,7 @@ static void kmemleak_disable(void)
1966 1966
1967 /* stop any memory operation tracing */ 1967 /* stop any memory operation tracing */
1968 kmemleak_enabled = 0; 1968 kmemleak_enabled = 0;
1969 kmemleak_early_log = 0;
1969 1970
1970 /* check whether it is too early for a kernel thread */ 1971 /* check whether it is too early for a kernel thread */
1971 if (kmemleak_initialized) 1972 if (kmemleak_initialized)
@@ -2009,7 +2010,6 @@ void __init kmemleak_init(void)
2009 2010
2010#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF 2011#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
2011 if (!kmemleak_skip_disable) { 2012 if (!kmemleak_skip_disable) {
2012 kmemleak_early_log = 0;
2013 kmemleak_disable(); 2013 kmemleak_disable();
2014 return; 2014 return;
2015 } 2015 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cdbb7a84cb6e..6f5c0c517c49 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -768,6 +768,26 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
768 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); 768 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
769} 769}
770 770
771void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
772{
773 struct page *page = virt_to_head_page(p);
774 pg_data_t *pgdat = page_pgdat(page);
775 struct mem_cgroup *memcg;
776 struct lruvec *lruvec;
777
778 rcu_read_lock();
779 memcg = memcg_from_slab_page(page);
780
781 /* Untracked pages have no memcg, no lruvec. Update only the node */
782 if (!memcg || memcg == root_mem_cgroup) {
783 __mod_node_page_state(pgdat, idx, val);
784 } else {
785 lruvec = mem_cgroup_lruvec(pgdat, memcg);
786 __mod_lruvec_state(lruvec, idx, val);
787 }
788 rcu_read_unlock();
789}
790
771/** 791/**
772 * __count_memcg_events - account VM events in a cgroup 792 * __count_memcg_events - account VM events in a cgroup
773 * @memcg: the memory cgroup 793 * @memcg: the memory cgroup
@@ -1130,26 +1150,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
1130 css_put(&prev->css); 1150 css_put(&prev->css);
1131} 1151}
1132 1152
1133static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1153static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1154 struct mem_cgroup *dead_memcg)
1134{ 1155{
1135 struct mem_cgroup *memcg = dead_memcg;
1136 struct mem_cgroup_reclaim_iter *iter; 1156 struct mem_cgroup_reclaim_iter *iter;
1137 struct mem_cgroup_per_node *mz; 1157 struct mem_cgroup_per_node *mz;
1138 int nid; 1158 int nid;
1139 int i; 1159 int i;
1140 1160
1141 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1161 for_each_node(nid) {
1142 for_each_node(nid) { 1162 mz = mem_cgroup_nodeinfo(from, nid);
1143 mz = mem_cgroup_nodeinfo(memcg, nid); 1163 for (i = 0; i <= DEF_PRIORITY; i++) {
1144 for (i = 0; i <= DEF_PRIORITY; i++) { 1164 iter = &mz->iter[i];
1145 iter = &mz->iter[i]; 1165 cmpxchg(&iter->position,
1146 cmpxchg(&iter->position, 1166 dead_memcg, NULL);
1147 dead_memcg, NULL);
1148 }
1149 } 1167 }
1150 } 1168 }
1151} 1169}
1152 1170
1171static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1172{
1173 struct mem_cgroup *memcg = dead_memcg;
1174 struct mem_cgroup *last;
1175
1176 do {
1177 __invalidate_reclaim_iterators(memcg, dead_memcg);
1178 last = memcg;
1179 } while ((memcg = parent_mem_cgroup(memcg)));
1180
1181 /*
1182 * When cgroup1 non-hierarchy mode is used,
1183 * parent_mem_cgroup() does not walk all the way up to the
1184 * cgroup root (root_mem_cgroup). So we have to handle
1185 * dead_memcg from cgroup root separately.
1186 */
1187 if (last != root_mem_cgroup)
1188 __invalidate_reclaim_iterators(root_mem_cgroup,
1189 dead_memcg);
1190}
1191
1153/** 1192/**
1154 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy 1193 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1155 * @memcg: hierarchy root 1194 * @memcg: hierarchy root
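
The reclaim-iterator invalidation is refactored into a per-memcg helper plus a parent walk, and the walk now explicitly sweeps root_mem_cgroup too, because in cgroup1 non-hierarchy mode parent_mem_cgroup() can stop before reaching the root. A toy model of that walk with the root fixup (hypothetical structs, not the kernel's):

#include <stdio.h>

struct cg { const char *name; struct cg *parent; };

static struct cg root = { "root", NULL };

static void invalidate_one(struct cg *cg, struct cg *dead)
{
	printf("clearing iterators of %s that point at %s\n",
	       cg->name, dead->name);
}

static void invalidate(struct cg *dead)
{
	struct cg *cg = dead, *last = dead;

	do {
		invalidate_one(cg, dead);
		last = cg;
	} while ((cg = cg->parent));

	/* non-hierarchy mode: the walk may stop short of the root */
	if (last != &root)
		invalidate_one(&root, dead);
}

int main(void)
{
	/* parent chain does not reach root, as in non-hierarchy mode */
	struct cg orphan = { "orphan", NULL };

	invalidate(&orphan);
	return 0;
}
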
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f48693f75b37..65e0874fce17 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -403,7 +403,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
403 }, 403 },
404}; 404};
405 405
406static void migrate_page_add(struct page *page, struct list_head *pagelist, 406static int migrate_page_add(struct page *page, struct list_head *pagelist,
407 unsigned long flags); 407 unsigned long flags);
408 408
409struct queue_pages { 409struct queue_pages {
@@ -429,11 +429,14 @@ static inline bool queue_pages_required(struct page *page,
429} 429}
430 430
431/* 431/*
432 * queue_pages_pmd() has three possible return values: 432 * queue_pages_pmd() has four possible return values:
433 * 1 - pages are placed on the right node or queued successfully. 433 * 0 - pages are placed on the right node or queued successfully.
434 * 0 - THP was split. 434 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
435 * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing 435 * specified.
436 * page was already on a node that does not follow the policy. 436 * 2 - THP was split.
437 * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
438 * existing page was already on a node that does not follow the
439 * policy.
437 */ 440 */
438static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, 441static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
439 unsigned long end, struct mm_walk *walk) 442 unsigned long end, struct mm_walk *walk)
@@ -451,23 +454,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
451 if (is_huge_zero_page(page)) { 454 if (is_huge_zero_page(page)) {
452 spin_unlock(ptl); 455 spin_unlock(ptl);
453 __split_huge_pmd(walk->vma, pmd, addr, false, NULL); 456 __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
457 ret = 2;
454 goto out; 458 goto out;
455 } 459 }
456 if (!queue_pages_required(page, qp)) { 460 if (!queue_pages_required(page, qp))
457 ret = 1;
458 goto unlock; 461 goto unlock;
459 }
460 462
461 ret = 1;
462 flags = qp->flags; 463 flags = qp->flags;
463 /* go to thp migration */ 464 /* go to thp migration */
464 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 465 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
465 if (!vma_migratable(walk->vma)) { 466 if (!vma_migratable(walk->vma) ||
466 ret = -EIO; 467 migrate_page_add(page, qp->pagelist, flags)) {
468 ret = 1;
467 goto unlock; 469 goto unlock;
468 } 470 }
469
470 migrate_page_add(page, qp->pagelist, flags);
471 } else 471 } else
472 ret = -EIO; 472 ret = -EIO;
473unlock: 473unlock:
@@ -479,6 +479,13 @@ out:
479/* 479/*
480 * Scan through pages checking if pages follow certain conditions, 480 * Scan through pages checking if pages follow certain conditions,
481 * and move them to the pagelist if they do. 481 * and move them to the pagelist if they do.
482 *
483 * queue_pages_pte_range() has three possible return values:
484 * 0 - pages are placed on the right node or queued successfully.
485 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
486 * specified.
487 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
488 * on a node that does not follow the policy.
482 */ 489 */
483static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, 490static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
484 unsigned long end, struct mm_walk *walk) 491 unsigned long end, struct mm_walk *walk)
@@ -488,17 +495,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
488 struct queue_pages *qp = walk->private; 495 struct queue_pages *qp = walk->private;
489 unsigned long flags = qp->flags; 496 unsigned long flags = qp->flags;
490 int ret; 497 int ret;
498 bool has_unmovable = false;
491 pte_t *pte; 499 pte_t *pte;
492 spinlock_t *ptl; 500 spinlock_t *ptl;
493 501
494 ptl = pmd_trans_huge_lock(pmd, vma); 502 ptl = pmd_trans_huge_lock(pmd, vma);
495 if (ptl) { 503 if (ptl) {
496 ret = queue_pages_pmd(pmd, ptl, addr, end, walk); 504 ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
497 if (ret > 0) 505 if (ret != 2)
498 return 0;
499 else if (ret < 0)
500 return ret; 506 return ret;
501 } 507 }
508 /* THP was split, fall through to pte walk */
502 509
503 if (pmd_trans_unstable(pmd)) 510 if (pmd_trans_unstable(pmd))
504 return 0; 511 return 0;
@@ -519,14 +526,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
519 if (!queue_pages_required(page, qp)) 526 if (!queue_pages_required(page, qp))
520 continue; 527 continue;
521 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 528 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
522 if (!vma_migratable(vma)) 529 /* MPOL_MF_STRICT must be specified if we get here */
530 if (!vma_migratable(vma)) {
531 has_unmovable = true;
523 break; 532 break;
524 migrate_page_add(page, qp->pagelist, flags); 533 }
534
535 /*
536 * Do not abort immediately since there may be
537 * temporary off LRU pages in the range. Still
538 * need migrate other LRU pages.
539 */
540 if (migrate_page_add(page, qp->pagelist, flags))
541 has_unmovable = true;
525 } else 542 } else
526 break; 543 break;
527 } 544 }
528 pte_unmap_unlock(pte - 1, ptl); 545 pte_unmap_unlock(pte - 1, ptl);
529 cond_resched(); 546 cond_resched();
547
548 if (has_unmovable)
549 return 1;
550
530 return addr != end ? -EIO : 0; 551 return addr != end ? -EIO : 0;
531} 552}
532 553
@@ -639,7 +660,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
639 * 660 *
640 * If pages found in a given range are on a set of nodes (determined by 661 * If pages found in a given range are on a set of nodes (determined by
641 * @nodes and @flags,) it's isolated and queued to the pagelist which is 662 * @nodes and @flags,) it's isolated and queued to the pagelist which is
642 * passed via @private.) 663 * passed via @private.
664 *
665 * queue_pages_range() has three possible return values:
666 * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
667 * specified.
668 * 0 - queue pages successfully or no misplaced page.
669 * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
643 */ 670 */
644static int 671static int
645queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 672queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
@@ -940,7 +967,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
940/* 967/*
941 * page migration, thp tail pages can be passed. 968 * page migration, thp tail pages can be passed.
942 */ 969 */
943static void migrate_page_add(struct page *page, struct list_head *pagelist, 970static int migrate_page_add(struct page *page, struct list_head *pagelist,
944 unsigned long flags) 971 unsigned long flags)
945{ 972{
946 struct page *head = compound_head(page); 973 struct page *head = compound_head(page);
@@ -953,8 +980,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
953 mod_node_page_state(page_pgdat(head), 980 mod_node_page_state(page_pgdat(head),
954 NR_ISOLATED_ANON + page_is_file_cache(head), 981 NR_ISOLATED_ANON + page_is_file_cache(head),
955 hpage_nr_pages(head)); 982 hpage_nr_pages(head));
983 } else if (flags & MPOL_MF_STRICT) {
984 /*
985 * Non-movable page may reach here. And, there may be
986 * temporary off LRU pages or non-LRU movable pages.
987 * Treat them as unmovable pages since they can't be
988 * isolated, so they can't be moved at the moment. It
989 * should return -EIO for this case too.
990 */
991 return -EIO;
956 } 992 }
957 } 993 }
994
995 return 0;
958} 996}
959 997
960/* page allocation callback for NUMA node migration */ 998/* page allocation callback for NUMA node migration */
@@ -1142,8 +1180,8 @@ static struct page *new_page(struct page *page, unsigned long start)
1142 } else if (PageTransHuge(page)) { 1180 } else if (PageTransHuge(page)) {
1143 struct page *thp; 1181 struct page *thp;
1144 1182
1145 thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, 1183 thp = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma,
1146 HPAGE_PMD_ORDER); 1184 address, numa_node_id());
1147 if (!thp) 1185 if (!thp)
1148 return NULL; 1186 return NULL;
1149 prep_transhuge_page(thp); 1187 prep_transhuge_page(thp);
@@ -1157,9 +1195,10 @@ static struct page *new_page(struct page *page, unsigned long start)
1157} 1195}
1158#else 1196#else
1159 1197
1160static void migrate_page_add(struct page *page, struct list_head *pagelist, 1198static int migrate_page_add(struct page *page, struct list_head *pagelist,
1161 unsigned long flags) 1199 unsigned long flags)
1162{ 1200{
1201 return -EIO;
1163} 1202}
1164 1203
1165int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 1204int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
@@ -1182,6 +1221,7 @@ static long do_mbind(unsigned long start, unsigned long len,
1182 struct mempolicy *new; 1221 struct mempolicy *new;
1183 unsigned long end; 1222 unsigned long end;
1184 int err; 1223 int err;
1224 int ret;
1185 LIST_HEAD(pagelist); 1225 LIST_HEAD(pagelist);
1186 1226
1187 if (flags & ~(unsigned long)MPOL_MF_VALID) 1227 if (flags & ~(unsigned long)MPOL_MF_VALID)
@@ -1243,10 +1283,15 @@ static long do_mbind(unsigned long start, unsigned long len,
1243 if (err) 1283 if (err)
1244 goto mpol_out; 1284 goto mpol_out;
1245 1285
1246 err = queue_pages_range(mm, start, end, nmask, 1286 ret = queue_pages_range(mm, start, end, nmask,
1247 flags | MPOL_MF_INVERT, &pagelist); 1287 flags | MPOL_MF_INVERT, &pagelist);
1248 if (!err) 1288
1249 err = mbind_range(mm, start, end, new); 1289 if (ret < 0) {
1290 err = -EIO;
1291 goto up_out;
1292 }
1293
1294 err = mbind_range(mm, start, end, new);
1250 1295
1251 if (!err) { 1296 if (!err) {
1252 int nr_failed = 0; 1297 int nr_failed = 0;
@@ -1259,13 +1304,14 @@ static long do_mbind(unsigned long start, unsigned long len,
1259 putback_movable_pages(&pagelist); 1304 putback_movable_pages(&pagelist);
1260 } 1305 }
1261 1306
1262 if (nr_failed && (flags & MPOL_MF_STRICT)) 1307 if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1263 err = -EIO; 1308 err = -EIO;
1264 } else 1309 } else
1265 putback_movable_pages(&pagelist); 1310 putback_movable_pages(&pagelist);
1266 1311
1312up_out:
1267 up_write(&mm->mmap_sem); 1313 up_write(&mm->mmap_sem);
1268 mpol_out: 1314mpol_out:
1269 mpol_put(new); 1315 mpol_put(new);
1270 return err; 1316 return err;
1271} 1317}
@@ -1688,7 +1734,7 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1688 * freeing by another task. It is the caller's responsibility to free the 1734 * freeing by another task. It is the caller's responsibility to free the
1689 * extra reference for shared policies. 1735 * extra reference for shared policies.
1690 */ 1736 */
1691static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1737struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1692 unsigned long addr) 1738 unsigned long addr)
1693{ 1739{
1694 struct mempolicy *pol = __get_vma_policy(vma, addr); 1740 struct mempolicy *pol = __get_vma_policy(vma, addr);
@@ -2037,7 +2083,6 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2037 * @vma: Pointer to VMA or NULL if not available. 2083 * @vma: Pointer to VMA or NULL if not available.
2038 * @addr: Virtual Address of the allocation. Must be inside the VMA. 2084 * @addr: Virtual Address of the allocation. Must be inside the VMA.
2039 * @node: Which node to prefer for allocation (modulo policy). 2085 * @node: Which node to prefer for allocation (modulo policy).
2040 * @hugepage: for hugepages try only the preferred node if possible
2041 * 2086 *
2042 * This function allocates a page from the kernel page pool and applies 2087 * This function allocates a page from the kernel page pool and applies
2043 * a NUMA policy associated with the VMA or the current process. 2088 * a NUMA policy associated with the VMA or the current process.
@@ -2048,7 +2093,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2048 */ 2093 */
2049struct page * 2094struct page *
2050alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 2095alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2051 unsigned long addr, int node, bool hugepage) 2096 unsigned long addr, int node)
2052{ 2097{
2053 struct mempolicy *pol; 2098 struct mempolicy *pol;
2054 struct page *page; 2099 struct page *page;
@@ -2066,31 +2111,6 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2066 goto out; 2111 goto out;
2067 } 2112 }
2068 2113
2069 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2070 int hpage_node = node;
2071
2072 /*
2073 * For hugepage allocation and non-interleave policy which
2074 * allows the current node (or other explicitly preferred
2075 * node) we only try to allocate from the current/preferred
2076 * node and don't fall back to other nodes, as the cost of
2077 * remote accesses would likely offset THP benefits.
2078 *
2079 * If the policy is interleave, or does not allow the current
2080 * node in its nodemask, we allocate the standard way.
2081 */
2082 if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2083 hpage_node = pol->v.preferred_node;
2084
2085 nmask = policy_nodemask(gfp, pol);
2086 if (!nmask || node_isset(hpage_node, *nmask)) {
2087 mpol_cond_put(pol);
2088 page = __alloc_pages_node(hpage_node,
2089 gfp | __GFP_THISNODE, order);
2090 goto out;
2091 }
2092 }
2093
2094 nmask = policy_nodemask(gfp, pol); 2114 nmask = policy_nodemask(gfp, pol);
2095 preferred_nid = policy_node(gfp, pol, node); 2115 preferred_nid = policy_node(gfp, pol, node);
2096 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); 2116 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
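
Across mempolicy.c, the queue_pages_* helpers gain an explicit contract (0 queued, 1 unmovable page seen with MPOL_MF_MOVE* and MPOL_MF_STRICT, 2 THP split, -EIO strict violation), and do_mbind() folds both failure sources into one -EIO decision: a positive walk result, or leftover migration failures under MPOL_MF_STRICT. A compact model of that final combination (with -5 written out for -EIO):

#include <stdio.h>

#define MPOL_MF_STRICT 1u

/* ret: result of queue_pages_range(); nr_failed: pages not migrated */
static int mbind_status(int ret, int nr_failed, unsigned flags)
{
	if (ret < 0)
		return -5;	/* -EIO: strict violation during the walk */
	if (ret > 0 || (nr_failed && (flags & MPOL_MF_STRICT)))
		return -5;	/* unmovable or failed pages under STRICT */
	return 0;
}

int main(void)
{
	printf("clean walk, all migrated : %d\n",
	       mbind_status(0, 0, MPOL_MF_STRICT));
	printf("unmovable page found     : %d\n",
	       mbind_status(1, 0, MPOL_MF_STRICT));
	printf("migration failures+STRICT: %d\n",
	       mbind_status(0, 3, MPOL_MF_STRICT));
	return 0;
}
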
diff --git a/mm/memremap.c b/mm/memremap.c
index 86432650f829..ed70c4e8e52a 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -403,6 +403,30 @@ void __put_devmap_managed_page(struct page *page)
403 403
404 mem_cgroup_uncharge(page); 404 mem_cgroup_uncharge(page);
405 405
406 /*
407 * When a device_private page is freed, the page->mapping field
408 * may still contain a (stale) mapping value. For example, the
409 * lower bits of page->mapping may still identify the page as
410 * an anonymous page. Ultimately, this entire field is just
411 * stale and wrong, and it will cause errors if not cleared.
412 * One example is:
413 *
414 * migrate_vma_pages()
415 * migrate_vma_insert_page()
416 * page_add_new_anon_rmap()
417 * __page_set_anon_rmap()
418 * ...checks page->mapping, via PageAnon(page) call,
419 * and incorrectly concludes that the page is an
420 * anonymous page. Therefore, it incorrectly,
421 * silently fails to set up the new anon rmap.
422 *
423 * For other types of ZONE_DEVICE pages, migration is either
424 * handled differently or not done at all, so there is no need
425 * to clear page->mapping.
426 */
427 if (is_device_private_page(page))
428 page->mapping = NULL;
429
406 page->pgmap->ops->page_free(page); 430 page->pgmap->ops->page_free(page);
407 } else if (!count) 431 } else if (!count)
408 __put_page(page); 432 __put_page(page);
diff --git a/mm/rmap.c b/mm/rmap.c
index e5dfe2ae6b0d..003377e24232 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1475,7 +1475,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1475 /* 1475 /*
1476 * No need to invalidate here it will synchronize on 1476 * No need to invalidate here it will synchronize on
1477 * against the special swap migration pte. 1477 * against the special swap migration pte.
1478 *
1479 * The assignment to subpage above was computed from a
1480 * swap PTE which results in an invalid pointer.
1481 * Since only PAGE_SIZE pages can currently be
1482 * migrated, just set it to page. This will need to be
1483 * changed when hugepage migrations to device private
1484 * memory are supported.
1478 */ 1485 */
1486 subpage = page;
1479 goto discard; 1487 goto discard;
1480 } 1488 }
1481 1489
diff --git a/mm/shmem.c b/mm/shmem.c
index 626d8c74b973..2bed4761f279 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1466,7 +1466,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
1466 1466
1467 shmem_pseudo_vma_init(&pvma, info, hindex); 1467 shmem_pseudo_vma_init(&pvma, info, hindex);
1468 page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, 1468 page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1469 HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); 1469 HPAGE_PMD_ORDER, &pvma, 0, numa_node_id());
1470 shmem_pseudo_vma_destroy(&pvma); 1470 shmem_pseudo_vma_destroy(&pvma);
1471 if (page) 1471 if (page)
1472 prep_transhuge_page(page); 1472 prep_transhuge_page(page);
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 2a09796edef8..98e924864554 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -147,7 +147,7 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
147 bool to_user) 147 bool to_user)
148{ 148{
149 /* Reject if object wraps past end of memory. */ 149 /* Reject if object wraps past end of memory. */
150 if (ptr + n < ptr) 150 if (ptr + (n - 1) < ptr)
151 usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n); 151 usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
152 152
153 /* Reject if NULL or ZERO-allocation. */ 153 /* Reject if NULL or ZERO-allocation. */
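
The usercopy change fixes an off-by-one in the wrap check: ptr + n is one past the object's last byte, so an object ending exactly at the top of the address space wrapped to 0 and was falsely rejected, while ptr + (n - 1) tests the last byte itself. A userspace demonstration using uintptr_t, whose unsigned wraparound is well defined (n is assumed nonzero, as zero-size copies are rejected earlier on the kernel path):

#include <stdint.h>
#include <stdio.h>

static int wraps_old(uintptr_t ptr, uintptr_t n) { return ptr + n < ptr; }
static int wraps_new(uintptr_t ptr, uintptr_t n) { return ptr + (n - 1) < ptr; }

int main(void)
{
	/* an 8-byte object whose last byte is the last valid address */
	uintptr_t ptr = UINTPTR_MAX - 7, n = 8;

	printf("old check rejects: %d (false positive)\n", wraps_old(ptr, n));
	printf("new check rejects: %d\n", wraps_new(ptr, n));

	/* a genuinely wrapping object is still caught */
	printf("real wrap caught : %d\n", wraps_new(UINTPTR_MAX - 3, 8));
	return 0;
}
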
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e0fc963acc41..7ba11e12a11f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3279,9 +3279,19 @@ retry:
3279 goto overflow; 3279 goto overflow;
3280 3280
3281 /* 3281 /*
3282 * If required width exceeds current VA block, move
3283 * base downwards and then recheck.
3284 */
3285 if (base + end > va->va_end) {
3286 base = pvm_determine_end_from_reverse(&va, align) - end;
3287 term_area = area;
3288 continue;
3289 }
3290
3291 /*
3282 * If this VA does not fit, move base downwards and recheck. 3292 * If this VA does not fit, move base downwards and recheck.
3283 */ 3293 */
3284 if (base + start < va->va_start || base + end > va->va_end) { 3294 if (base + start < va->va_start) {
3285 va = node_to_va(rb_prev(&va->rb_node)); 3295 va = node_to_va(rb_prev(&va->rb_node));
3286 base = pvm_determine_end_from_reverse(&va, align) - end; 3296 base = pvm_determine_end_from_reverse(&va, align) - end;
3287 term_area = area; 3297 term_area = area;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dbdc46a84f63..c77d1e3761a7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -88,9 +88,6 @@ struct scan_control {
88 /* Can pages be swapped as part of reclaim? */ 88 /* Can pages be swapped as part of reclaim? */
89 unsigned int may_swap:1; 89 unsigned int may_swap:1;
90 90
91 /* e.g. boosted watermark reclaim leaves slabs alone */
92 unsigned int may_shrinkslab:1;
93
94 /* 91 /*
95 * Cgroups are not reclaimed below their configured memory.low, 92 * Cgroups are not reclaimed below their configured memory.low,
96 * unless we threaten to OOM. If any cgroups are skipped due to 93 * unless we threaten to OOM. If any cgroups are skipped due to
@@ -2714,10 +2711,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
2714 shrink_node_memcg(pgdat, memcg, sc, &lru_pages); 2711 shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
2715 node_lru_pages += lru_pages; 2712 node_lru_pages += lru_pages;
2716 2713
2717 if (sc->may_shrinkslab) { 2714 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
2718 shrink_slab(sc->gfp_mask, pgdat->node_id, 2715 sc->priority);
2719 memcg, sc->priority);
2720 }
2721 2716
2722 /* Record the group's reclaim efficiency */ 2717 /* Record the group's reclaim efficiency */
2723 vmpressure(sc->gfp_mask, memcg, false, 2718 vmpressure(sc->gfp_mask, memcg, false,
@@ -3194,7 +3189,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
3194 .may_writepage = !laptop_mode, 3189 .may_writepage = !laptop_mode,
3195 .may_unmap = 1, 3190 .may_unmap = 1,
3196 .may_swap = 1, 3191 .may_swap = 1,
3197 .may_shrinkslab = 1,
3198 }; 3192 };
3199 3193
3200 /* 3194 /*
@@ -3238,7 +3232,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3238 .may_unmap = 1, 3232 .may_unmap = 1,
3239 .reclaim_idx = MAX_NR_ZONES - 1, 3233 .reclaim_idx = MAX_NR_ZONES - 1,
3240 .may_swap = !noswap, 3234 .may_swap = !noswap,
3241 .may_shrinkslab = 1,
3242 }; 3235 };
3243 unsigned long lru_pages; 3236 unsigned long lru_pages;
3244 3237
@@ -3286,7 +3279,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
3286 .may_writepage = !laptop_mode, 3279 .may_writepage = !laptop_mode,
3287 .may_unmap = 1, 3280 .may_unmap = 1,
3288 .may_swap = may_swap, 3281 .may_swap = may_swap,
3289 .may_shrinkslab = 1,
3290 }; 3282 };
3291 3283
3292 set_task_reclaim_state(current, &sc.reclaim_state); 3284 set_task_reclaim_state(current, &sc.reclaim_state);
@@ -3598,7 +3590,6 @@ restart:
3598 */ 3590 */
3599 sc.may_writepage = !laptop_mode && !nr_boost_reclaim; 3591 sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
3600 sc.may_swap = !nr_boost_reclaim; 3592 sc.may_swap = !nr_boost_reclaim;
3601 sc.may_shrinkslab = !nr_boost_reclaim;
3602 3593
3603 /* 3594 /*
3604 * Do some background aging of the anon list, to give 3595 * Do some background aging of the anon list, to give
diff --git a/mm/workingset.c b/mm/workingset.c
index e0b4edcb88c8..c963831d354f 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -380,14 +380,12 @@ void workingset_update_node(struct xa_node *node)
380 if (node->count && node->count == node->nr_values) { 380 if (node->count && node->count == node->nr_values) {
381 if (list_empty(&node->private_list)) { 381 if (list_empty(&node->private_list)) {
382 list_lru_add(&shadow_nodes, &node->private_list); 382 list_lru_add(&shadow_nodes, &node->private_list);
383 __inc_lruvec_page_state(virt_to_page(node), 383 __inc_lruvec_slab_state(node, WORKINGSET_NODES);
384 WORKINGSET_NODES);
385 } 384 }
386 } else { 385 } else {
387 if (!list_empty(&node->private_list)) { 386 if (!list_empty(&node->private_list)) {
388 list_lru_del(&shadow_nodes, &node->private_list); 387 list_lru_del(&shadow_nodes, &node->private_list);
389 __dec_lruvec_page_state(virt_to_page(node), 388 __dec_lruvec_slab_state(node, WORKINGSET_NODES);
390 WORKINGSET_NODES);
391 } 389 }
392 } 390 }
393} 391}
@@ -480,7 +478,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
480 } 478 }
481 479
482 list_lru_isolate(lru, item); 480 list_lru_isolate(lru, item);
483 __dec_lruvec_page_state(virt_to_page(node), WORKINGSET_NODES); 481 __dec_lruvec_slab_state(node, WORKINGSET_NODES);
484 482
485 spin_unlock(lru_lock); 483 spin_unlock(lru_lock);
486 484
@@ -503,7 +501,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
503 * shadow entries we were tracking ... 501 * shadow entries we were tracking ...
504 */ 502 */
505 xas_store(&xas, NULL); 503 xas_store(&xas, NULL);
506 __inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM); 504 __inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);
507 505
508out_invalid: 506out_invalid:
509 xa_unlock_irq(&mapping->i_pages); 507 xa_unlock_irq(&mapping->i_pages);
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 1a029a7432ee..ed19d98c9dcd 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -817,9 +817,19 @@ out:
817static void z3fold_destroy_pool(struct z3fold_pool *pool) 817static void z3fold_destroy_pool(struct z3fold_pool *pool)
818{ 818{
819 kmem_cache_destroy(pool->c_handle); 819 kmem_cache_destroy(pool->c_handle);
820 z3fold_unregister_migration(pool); 820
821 destroy_workqueue(pool->release_wq); 821 /*
822 * We need to destroy pool->compact_wq before pool->release_wq,
823 * as any pending work on pool->compact_wq will call
824 * queue_work(pool->release_wq, &pool->work).
825 *
826 * There are still outstanding pages until both workqueues are drained,
827 * so we cannot unregister migration until then.
828 */
829
822 destroy_workqueue(pool->compact_wq); 830 destroy_workqueue(pool->compact_wq);
831 destroy_workqueue(pool->release_wq);
832 z3fold_unregister_migration(pool);
823 kfree(pool); 833 kfree(pool);
824} 834}
825 835
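
As the new comment explains, pending work on compact_wq can call queue_work() on release_wq, so compact_wq must be drained and destroyed first, and migration stays registered until both queues are empty. A miniature model of why the producer queue has to go before its consumer (counters stand in for workqueues):

#include <stdbool.h>
#include <stdio.h>

struct wq { const char *name; int pending; bool dead; struct wq *feeds; };

static void drain_and_destroy(struct wq *q)
{
	while (q->pending > 0) {
		q->pending--;
		/* pending compact work may enqueue release work */
		if (q->feeds) {
			if (q->feeds->dead)
				printf("BUG: queue_work() on destroyed %s\n",
				       q->feeds->name);
			else
				q->feeds->pending++;
		}
	}
	q->dead = true;
	printf("%s drained and destroyed\n", q->name);
}

int main(void)
{
	struct wq release = { "release_wq", 0, false, NULL };
	struct wq compact = { "compact_wq", 2, false, &release };

	/* correct order: drain the producer before its consumer */
	drain_and_destroy(&compact);
	drain_and_destroy(&release);
	return 0;
}
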
diff --git a/samples/auxdisplay/cfag12864b-example.c b/samples/auxdisplay/cfag12864b-example.c
index 85571e90191f..bfeab44f81d0 100644
--- a/samples/auxdisplay/cfag12864b-example.c
+++ b/samples/auxdisplay/cfag12864b-example.c
@@ -245,7 +245,7 @@ int main(int argc, char *argv[])
245 245
246 if (argc != 2) { 246 if (argc != 2) {
247 printf( 247 printf(
248 "Sintax: %s fbdev\n" 248 "Syntax: %s fbdev\n"
249 "Usually: /dev/fb0, /dev/fb1...\n", argv[0]); 249 "Usually: /dev/fb0, /dev/fb1...\n", argv[0]);
250 return -1; 250 return -1;
251 } 251 }
diff --git a/scripts/coccinelle/api/atomic_as_refcounter.cocci b/scripts/coccinelle/api/atomic_as_refcounter.cocci
index 988120e0fd67..0f78d94abc35 100644
--- a/scripts/coccinelle/api/atomic_as_refcounter.cocci
+++ b/scripts/coccinelle/api/atomic_as_refcounter.cocci
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1// Check if refcount_t type and API should be used 2// Check if refcount_t type and API should be used
2// instead of atomic_t type when dealing with refcounters 3// instead of atomic_t type when dealing with refcounters
3// 4//
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 9a94672e7adc..ade699131065 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1228,24 +1228,11 @@ hashalg_fail:
1228 1228
1229static int __init init_digests(void) 1229static int __init init_digests(void)
1230{ 1230{
1231 u8 digest[TPM_MAX_DIGEST_SIZE];
1232 int ret;
1233 int i;
1234
1235 ret = tpm_get_random(chip, digest, TPM_MAX_DIGEST_SIZE);
1236 if (ret < 0)
1237 return ret;
1238 if (ret < TPM_MAX_DIGEST_SIZE)
1239 return -EFAULT;
1240
1241 digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests), 1231 digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
1242 GFP_KERNEL); 1232 GFP_KERNEL);
1243 if (!digests) 1233 if (!digests)
1244 return -ENOMEM; 1234 return -ENOMEM;
1245 1235
1246 for (i = 0; i < chip->nr_allocated_banks; i++)
1247 memcpy(digests[i].digest, digest, TPM_MAX_DIGEST_SIZE);
1248
1249 return 0; 1236 return 0;
1250} 1237}
1251 1238
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 485edaba0037..5bf24fb819d2 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -6051,6 +6051,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
6051} 6051}
6052EXPORT_SYMBOL_GPL(snd_hda_gen_free); 6052EXPORT_SYMBOL_GPL(snd_hda_gen_free);
6053 6053
6054/**
6055 * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
6056 * @codec: the HDA codec
6057 *
6058 * This can be put as patch_ops reboot_notify function.
6059 */
6060void snd_hda_gen_reboot_notify(struct hda_codec *codec)
6061{
6062 /* Make the codec enter D3 to avoid spurious noises from the internal
6063 * speaker during (and after) reboot
6064 */
6065 snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
6066 snd_hda_codec_write(codec, codec->core.afg, 0,
6067 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
6068 msleep(10);
6069}
6070EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
6071
6054#ifdef CONFIG_PM 6072#ifdef CONFIG_PM
6055/** 6073/**
6056 * snd_hda_gen_check_power_status - check the loopback power save state 6074 * snd_hda_gen_check_power_status - check the loopback power save state
@@ -6078,6 +6096,7 @@ static const struct hda_codec_ops generic_patch_ops = {
6078 .init = snd_hda_gen_init, 6096 .init = snd_hda_gen_init,
6079 .free = snd_hda_gen_free, 6097 .free = snd_hda_gen_free,
6080 .unsol_event = snd_hda_jack_unsol_event, 6098 .unsol_event = snd_hda_jack_unsol_event,
6099 .reboot_notify = snd_hda_gen_reboot_notify,
6081#ifdef CONFIG_PM 6100#ifdef CONFIG_PM
6082 .check_power_status = snd_hda_gen_check_power_status, 6101 .check_power_status = snd_hda_gen_check_power_status,
6083#endif 6102#endif
@@ -6100,7 +6119,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
6100 6119
6101 err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0); 6120 err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
6102 if (err < 0) 6121 if (err < 0)
6103 return err; 6122 goto error;
6104 6123
6105 err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg); 6124 err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
6106 if (err < 0) 6125 if (err < 0)
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 35a670a71c42..5f199dcb0d18 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -332,6 +332,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
332 struct auto_pin_cfg *cfg); 332 struct auto_pin_cfg *cfg);
333int snd_hda_gen_build_controls(struct hda_codec *codec); 333int snd_hda_gen_build_controls(struct hda_codec *codec);
334int snd_hda_gen_build_pcms(struct hda_codec *codec); 334int snd_hda_gen_build_pcms(struct hda_codec *codec);
335void snd_hda_gen_reboot_notify(struct hda_codec *codec);
335 336
336/* standard jack event callbacks */ 337/* standard jack event callbacks */
337void snd_hda_gen_hp_automute(struct hda_codec *codec, 338void snd_hda_gen_hp_automute(struct hda_codec *codec,
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index a6d8c0d77b84..99fc0917339b 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2508,6 +2508,9 @@ static const struct pci_device_id azx_ids[] = {
2508 /* AMD, X370 & co */ 2508 /* AMD, X370 & co */
2509 { PCI_DEVICE(0x1022, 0x1457), 2509 { PCI_DEVICE(0x1022, 0x1457),
2510 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB }, 2510 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
2511 /* AMD, X570 & co */
2512 { PCI_DEVICE(0x1022, 0x1487),
2513 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
2511 /* AMD Stoney */ 2514 /* AMD Stoney */
2512 { PCI_DEVICE(0x1022, 0x157a), 2515 { PCI_DEVICE(0x1022, 0x157a),
2513 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB | 2516 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index f299f137eaea..14298ef45b21 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -163,23 +163,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
163{ 163{
164 struct conexant_spec *spec = codec->spec; 164 struct conexant_spec *spec = codec->spec;
165 165
166 switch (codec->core.vendor_id) {
167 case 0x14f12008: /* CX8200 */
168 case 0x14f150f2: /* CX20722 */
169 case 0x14f150f4: /* CX20724 */
170 break;
171 default:
172 return;
173 }
174
175 /* Turn the problematic codec into D3 to avoid spurious noises 166 /* Turn the problematic codec into D3 to avoid spurious noises
176 from the internal speaker during (and after) reboot */ 167 from the internal speaker during (and after) reboot */
177 cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false); 168 cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
178 169 snd_hda_gen_reboot_notify(codec);
179 snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
180 snd_hda_codec_write(codec, codec->core.afg, 0,
181 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
182 msleep(10);
183} 170}
184 171
185static void cx_auto_free(struct hda_codec *codec) 172static void cx_auto_free(struct hda_codec *codec)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index de224cbea7a0..e333b3e30e31 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -869,15 +869,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
869 alc_shutup(codec); 869 alc_shutup(codec);
870} 870}
871 871
872/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
873static void alc_d3_at_reboot(struct hda_codec *codec)
874{
875 snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
876 snd_hda_codec_write(codec, codec->core.afg, 0,
877 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
878 msleep(10);
879}
880
881#define alc_free snd_hda_gen_free 872#define alc_free snd_hda_gen_free
882 873
883#ifdef CONFIG_PM 874#ifdef CONFIG_PM
@@ -5152,7 +5143,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
5152 struct alc_spec *spec = codec->spec; 5143 struct alc_spec *spec = codec->spec;
5153 5144
5154 if (action == HDA_FIXUP_ACT_PRE_PROBE) { 5145 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
5155 spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */ 5146 spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
5156 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; 5147 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
5157 codec->power_save_node = 0; /* avoid click noises */ 5148 codec->power_save_node = 0; /* avoid click noises */
5158 snd_hda_apply_pincfgs(codec, pincfgs); 5149 snd_hda_apply_pincfgs(codec, pincfgs);
@@ -6987,6 +6978,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+	SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 7498b5191b68..b5927c3d5bc0 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -68,6 +68,7 @@ struct mixer_build {
 	unsigned char *buffer;
 	unsigned int buflen;
 	DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
+	DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
 	struct usb_audio_term oterm;
 	const struct usbmix_name_map *map;
 	const struct usbmix_selector_map *selector_map;
@@ -744,6 +745,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
 		return -EINVAL;
 	if (!desc->bNrInPins)
 		return -EINVAL;
+	if (desc->bLength < sizeof(*desc) + desc->bNrInPins)
+		return -EINVAL;
 
 	switch (state->mixer->protocol) {
 	case UAC_VERSION_1:
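
The added bLength check above closes an out-of-bounds read: a mixer unit descriptor is a fixed header followed by bNrInPins one-byte source IDs, so a descriptor whose declared length cannot hold that array must be rejected before the pins are walked. A standalone sketch of the same validation (the struct below is a simplified illustration, not the kernel's uac_mixer_unit_descriptor):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified mixer-unit descriptor header: bLength counts the whole
 * descriptor, including the bNrInPins source-ID bytes that follow it. */
struct mixer_unit_hdr {
	uint8_t bLength;
	uint8_t bDescriptorType;
	uint8_t bDescriptorSubtype;
	uint8_t bUnitID;
	uint8_t bNrInPins;	/* number of baSourceID bytes after the header */
};

/* Same shape as the check added above: the declared length must cover
 * the fixed header plus every claimed input pin. */
static bool mixer_unit_len_ok(const struct mixer_unit_hdr *desc)
{
	return desc->bLength >= sizeof(*desc) + desc->bNrInPins;
}
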
@@ -773,16 +776,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
  * parse the source unit recursively until it reaches to a terminal
  * or a branched unit.
  */
-static int check_input_term(struct mixer_build *state, int id,
-			    struct usb_audio_term *term)
+static int __check_input_term(struct mixer_build *state, int id,
+			      struct usb_audio_term *term)
 {
 	int protocol = state->mixer->protocol;
 	int err;
 	void *p1;
+	unsigned char *hdr;
 
 	memset(term, 0, sizeof(*term));
-	while ((p1 = find_audio_control_unit(state, id)) != NULL) {
-		unsigned char *hdr = p1;
+	for (;;) {
+		/* a loop in the terminal chain? */
+		if (test_and_set_bit(id, state->termbitmap))
+			return -EINVAL;
+
+		p1 = find_audio_control_unit(state, id);
+		if (!p1)
+			break;
+
+		hdr = p1;
 		term->id = id;
 
 		if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) {
@@ -800,7 +812,7 @@ static int check_input_term(struct mixer_build *state, int id,
 
 		/* call recursively to verify that the
 		 * referenced clock entity is valid */
-		err = check_input_term(state, d->bCSourceID, term);
+		err = __check_input_term(state, d->bCSourceID, term);
 		if (err < 0)
 			return err;
 
@@ -834,7 +846,7 @@ static int check_input_term(struct mixer_build *state, int id,
 	case UAC2_CLOCK_SELECTOR: {
 		struct uac_selector_unit_descriptor *d = p1;
 		/* call recursively to retrieve the channel info */
-		err = check_input_term(state, d->baSourceID[0], term);
+		err = __check_input_term(state, d->baSourceID[0], term);
 		if (err < 0)
 			return err;
 		term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -897,7 +909,7 @@ static int check_input_term(struct mixer_build *state, int id,
 
 		/* call recursively to verify that the
 		 * referenced clock entity is valid */
-		err = check_input_term(state, d->bCSourceID, term);
+		err = __check_input_term(state, d->bCSourceID, term);
 		if (err < 0)
 			return err;
 
@@ -948,7 +960,7 @@ static int check_input_term(struct mixer_build *state, int id,
 	case UAC3_CLOCK_SELECTOR: {
 		struct uac_selector_unit_descriptor *d = p1;
 		/* call recursively to retrieve the channel info */
-		err = check_input_term(state, d->baSourceID[0], term);
+		err = __check_input_term(state, d->baSourceID[0], term);
 		if (err < 0)
 			return err;
 		term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -964,7 +976,7 @@ static int check_input_term(struct mixer_build *state, int id,
 			return -EINVAL;
 
 		/* call recursively to retrieve the channel info */
-		err = check_input_term(state, d->baSourceID[0], term);
+		err = __check_input_term(state, d->baSourceID[0], term);
 		if (err < 0)
 			return err;
 
@@ -982,6 +994,15 @@ static int check_input_term(struct mixer_build *state, int id,
 	return -ENODEV;
 }
 
+
+static int check_input_term(struct mixer_build *state, int id,
+			    struct usb_audio_term *term)
+{
+	memset(term, 0, sizeof(*term));
+	memset(state->termbitmap, 0, sizeof(state->termbitmap));
+	return __check_input_term(state, id, term);
+}
+
 /*
  * Feature Unit
  */
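
Taken together, the mixer.c hunks add cycle detection to the descriptor walk: __check_input_term() marks each unit ID in termbitmap via test_and_set_bit() before following its source link and bails out with -EINVAL if an ID repeats, while the new check_input_term() wrapper clears the bitmap so every top-level lookup starts with a fresh visited set. A self-contained userspace sketch of the same visited-bitmap technique (next[] stands in for the device-supplied descriptor graph):

#include <stdio.h>
#include <string.h>

#define MAX_IDS 256

/* Follow a chain of unit IDs, refusing to visit any node twice --
 * the same guard the termbitmap provides in __check_input_term(). */
static int walk_chain(const int next[MAX_IDS], int id)
{
	unsigned char visited[MAX_IDS / 8] = { 0 };

	while (id >= 0 && id < MAX_IDS) {
		/* a loop in the terminal chain? */
		if (visited[id / 8] & (1u << (id % 8)))
			return -1;	/* the kernel code returns -EINVAL here */
		visited[id / 8] |= 1u << (id % 8);
		id = next[id];		/* follow the unit's source link */
	}
	return 0;			/* chain terminated normally */
}

int main(void)
{
	int next[MAX_IDS];

	memset(next, -1, sizeof(next));	/* -1: no further source */
	next[1] = 2;
	next[2] = 1;	/* 1 -> 2 -> 1: the kind of cycle a crafted
			 * USB descriptor can contain */
	printf("%d\n", walk_chain(next, 1));	/* prints -1 */
	return 0;
}
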
diff --git a/tools/hv/hv_get_dhcp_info.sh b/tools/hv/hv_get_dhcp_info.sh
index c38686c44656..2f2a3c7df3de 100755
--- a/tools/hv/hv_get_dhcp_info.sh
+++ b/tools/hv/hv_get_dhcp_info.sh
@@ -13,7 +13,7 @@
 # the script prints the string "Disabled" to stdout.
 #
 # Each Distro is expected to implement this script in a distro specific
-# fashion. For instance on Distros that ship with Network Manager enabled,
+# fashion. For instance, on Distros that ship with Network Manager enabled,
 # this script can be based on the Network Manager APIs for retrieving DHCP
 # information.
 
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index d7e06fe0270e..f5597503c771 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -700,7 +700,7 @@ static void kvp_get_ipconfig_info(char *if_name,
 
 
 	/*
-	 * Gather the DNS state. 
+	 * Gather the DNS state.
 	 * Since there is no standard way to get this information
 	 * across various distributions of interest; we just invoke
 	 * an external script that needs to be ported across distros
@@ -1051,7 +1051,7 @@ static int parse_ip_val_buffer(char *in_buf, int *offset,
 	char *start;
 
 	/*
-	 * in_buf has sequence of characters that are seperated by
+	 * in_buf has sequence of characters that are separated by
 	 * the character ';'. The last sequence does not have the
 	 * terminating ";" character.
 	 */
@@ -1386,6 +1386,8 @@ int main(int argc, char *argv[])
 			daemonize = 0;
 			break;
 		case 'h':
+			print_usage(argv);
+			exit(0);
 		default:
 			print_usage(argv);
 			exit(EXIT_FAILURE);
@@ -1490,7 +1492,7 @@ int main(int argc, char *argv[])
 		case KVP_OP_GET_IP_INFO:
 			kvp_ip_val = &hv_msg->body.kvp_ip_val;
 
-			error =  kvp_mac_to_ip(kvp_ip_val);
+			error = kvp_mac_to_ip(kvp_ip_val);
 
 			if (error)
 				hv_msg->error = error;
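
The getopt change here (mirrored in hv_vss_daemon.c below) makes -h a success path: previously it fell through to the default arm and exited with EXIT_FAILURE, so merely asking for usage reported an error to the caller. A minimal standalone sketch of the corrected pattern (the option string and -n flag are illustrative, not the daemon's full option set):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void print_usage(char *argv[])
{
	fprintf(stderr, "Usage: %s [-n] [-h]\n", argv[0]);
}

int main(int argc, char *argv[])
{
	int opt;

	while ((opt = getopt(argc, argv, "nh")) != -1) {
		switch (opt) {
		case 'n':	/* e.g. don't daemonize */
			break;
		case 'h':	/* an explicit help request is not an error */
			print_usage(argv);
			exit(0);
		default:	/* unknown option: usage plus failure code */
			print_usage(argv);
			exit(EXIT_FAILURE);
		}
	}
	return 0;
}
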
diff --git a/tools/hv/hv_set_ifconfig.sh b/tools/hv/hv_set_ifconfig.sh
index 7ed9f85ef908..d10fe35b7f25 100755
--- a/tools/hv/hv_set_ifconfig.sh
+++ b/tools/hv/hv_set_ifconfig.sh
@@ -12,7 +12,7 @@
 # be used to configure the interface.
 #
 # Each Distro is expected to implement this script in a distro specific
-# fashion. For instance on Distros that ship with Network Manager enabled,
+# fashion. For instance, on Distros that ship with Network Manager enabled,
 # this script can be based on the Network Manager APIs for configuring the
 # interface.
 #
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index efe1e34dd91b..92902a88f671 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -42,7 +42,7 @@ static int vss_do_freeze(char *dir, unsigned int cmd)
 	 * If a partition is mounted more than once, only the first
 	 * FREEZE/THAW can succeed and the later ones will get
 	 * EBUSY/EINVAL respectively: there could be 2 cases:
-	 * 1) a user may mount the same partition to differnt directories
+	 * 1) a user may mount the same partition to different directories
 	 *    by mistake or on purpose;
 	 * 2) The subvolume of btrfs appears to have the same partition
 	 *    mounted more than once.
@@ -218,6 +218,8 @@ int main(int argc, char *argv[])
 			daemonize = 0;
 			break;
 		case 'h':
+			print_usage(argv);
+			exit(0);
 		default:
 			print_usage(argv);
 			exit(EXIT_FAILURE);
diff --git a/tools/hv/lsvmbus b/tools/hv/lsvmbus
index 55e7374bade0..099f2c44dbed 100644
--- a/tools/hv/lsvmbus
+++ b/tools/hv/lsvmbus
@@ -4,10 +4,10 @@
 import os
 from optparse import OptionParser
 
+help_msg = "print verbose messages. Try -vv, -vvv for more verbose messages"
 parser = OptionParser()
-parser.add_option("-v", "--verbose", dest="verbose",
-		  help="print verbose messages. Try -vv, -vvv for \
-			more verbose messages", action="count")
+parser.add_option(
+	"-v", "--verbose", dest="verbose", help=help_msg, action="count")
 
 (options, args) = parser.parse_args()
 
@@ -21,27 +21,28 @@ if not os.path.isdir(vmbus_sys_path):
 	exit(-1)
 
 vmbus_dev_dict = {
-	'{0e0b6031-5213-4934-818b-38d90ced39db}' : '[Operating system shutdown]',
-	'{9527e630-d0ae-497b-adce-e80ab0175caf}' : '[Time Synchronization]',
-	'{57164f39-9115-4e78-ab55-382f3bd5422d}' : '[Heartbeat]',
-	'{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}' : '[Data Exchange]',
-	'{35fa2e29-ea23-4236-96ae-3a6ebacba440}' : '[Backup (volume checkpoint)]',
-	'{34d14be3-dee4-41c8-9ae7-6b174977c192}' : '[Guest services]',
-	'{525074dc-8985-46e2-8057-a307dc18a502}' : '[Dynamic Memory]',
-	'{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}' : 'Synthetic mouse',
-	'{f912ad6d-2b17-48ea-bd65-f927a61c7684}' : 'Synthetic keyboard',
-	'{da0a7802-e377-4aac-8e77-0558eb1073f8}' : 'Synthetic framebuffer adapter',
-	'{f8615163-df3e-46c5-913f-f2d2f965ed0e}' : 'Synthetic network adapter',
-	'{32412632-86cb-44a2-9b5c-50d1417354f5}' : 'Synthetic IDE Controller',
-	'{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}' : 'Synthetic SCSI Controller',
-	'{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}' : 'Synthetic fiber channel adapter',
-	'{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}' : 'Synthetic RDMA adapter',
-	'{44c4f61d-4444-4400-9d52-802e27ede19f}' : 'PCI Express pass-through',
-	'{276aacf4-ac15-426c-98dd-7521ad3f01fe}' : '[Reserved system device]',
-	'{f8e65716-3cb3-4a06-9a60-1889c5cccab5}' : '[Reserved system device]',
-	'{3375baf4-9e15-4b30-b765-67acb10d607b}' : '[Reserved system device]',
+	'{0e0b6031-5213-4934-818b-38d90ced39db}': '[Operating system shutdown]',
+	'{9527e630-d0ae-497b-adce-e80ab0175caf}': '[Time Synchronization]',
+	'{57164f39-9115-4e78-ab55-382f3bd5422d}': '[Heartbeat]',
+	'{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}': '[Data Exchange]',
+	'{35fa2e29-ea23-4236-96ae-3a6ebacba440}': '[Backup (volume checkpoint)]',
+	'{34d14be3-dee4-41c8-9ae7-6b174977c192}': '[Guest services]',
+	'{525074dc-8985-46e2-8057-a307dc18a502}': '[Dynamic Memory]',
+	'{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}': 'Synthetic mouse',
+	'{f912ad6d-2b17-48ea-bd65-f927a61c7684}': 'Synthetic keyboard',
+	'{da0a7802-e377-4aac-8e77-0558eb1073f8}': 'Synthetic framebuffer adapter',
+	'{f8615163-df3e-46c5-913f-f2d2f965ed0e}': 'Synthetic network adapter',
+	'{32412632-86cb-44a2-9b5c-50d1417354f5}': 'Synthetic IDE Controller',
+	'{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}': 'Synthetic SCSI Controller',
+	'{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}': 'Synthetic fiber channel adapter',
+	'{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}': 'Synthetic RDMA adapter',
+	'{44c4f61d-4444-4400-9d52-802e27ede19f}': 'PCI Express pass-through',
+	'{276aacf4-ac15-426c-98dd-7521ad3f01fe}': '[Reserved system device]',
+	'{f8e65716-3cb3-4a06-9a60-1889c5cccab5}': '[Reserved system device]',
+	'{3375baf4-9e15-4b30-b765-67acb10d607b}': '[Reserved system device]',
 }
 
+
 def get_vmbus_dev_attr(dev_name, attr):
 	try:
 		f = open('%s/%s/%s' % (vmbus_sys_path, dev_name, attr), 'r')
@@ -52,6 +53,7 @@ def get_vmbus_dev_attr(dev_name, attr):
 
 	return lines
 
+
 class VMBus_Dev:
 	pass
 
@@ -66,12 +68,13 @@ for f in os.listdir(vmbus_sys_path):
 
 	chn_vp_mapping = get_vmbus_dev_attr(f, 'channel_vp_mapping')
 	chn_vp_mapping = [c.strip() for c in chn_vp_mapping]
-	chn_vp_mapping = sorted(chn_vp_mapping,
-		key = lambda c : int(c.split(':')[0]))
+	chn_vp_mapping = sorted(
+		chn_vp_mapping, key=lambda c: int(c.split(':')[0]))
 
-	chn_vp_mapping = ['\tRel_ID=%s, target_cpu=%s' %
-		(c.split(':')[0], c.split(':')[1])
-		for c in chn_vp_mapping]
+	chn_vp_mapping = [
+		'\tRel_ID=%s, target_cpu=%s' %
+		(c.split(':')[0], c.split(':')[1]) for c in chn_vp_mapping
+	]
 	d = VMBus_Dev()
 	d.sysfs_path = '%s/%s' % (vmbus_sys_path, f)
 	d.vmbus_id = vmbus_id
@@ -85,7 +88,7 @@ for f in os.listdir(vmbus_sys_path):
 	vmbus_dev_list.append(d)
 
 
-vmbus_dev_list = sorted(vmbus_dev_list, key = lambda d : int(d.vmbus_id))
+vmbus_dev_list = sorted(vmbus_dev_list, key=lambda d: int(d.vmbus_id))
 
 format0 = '%2s: %s'
 format1 = '%2s: Class_ID = %s - %s\n%s'
@@ -95,9 +98,15 @@ for d in vmbus_dev_list:
 	if verbose == 0:
 		print(('VMBUS ID ' + format0) % (d.vmbus_id, d.dev_desc))
 	elif verbose == 1:
-		print (('VMBUS ID ' + format1) % \
-			(d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping))
+		print(
+			('VMBUS ID ' + format1) %
+			(d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping)
+		)
 	else:
-		print (('VMBUS ID ' + format2) % \
-			(d.vmbus_id, d.class_id, d.dev_desc, \
-			d.device_id, d.sysfs_path, d.chn_vp_mapping))
+		print(
+			('VMBUS ID ' + format2) %
+			(
+				d.vmbus_id, d.class_id, d.dev_desc,
+				d.device_id, d.sysfs_path, d.chn_vp_mapping
+			)
+		)