author    Ingo Molnar <mingo@kernel.org>    2019-07-08 12:04:41 -0400
committer Ingo Molnar <mingo@kernel.org>    2019-07-08 12:04:41 -0400
commit    552a031ba12a4236be107a5b082a399237758a5d (patch)
tree      a2bcdeb651b360013dbb654bbcd61dbaf51e0331
parent    f584dd32edc5d4400d7ceb92111a89f0c1f6651f (diff)
parent    0ecfebd2b52404ae0c54a878c872bb93363ada36 (diff)
Merge tag 'v5.2' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--Documentation/devicetree/bindings/riscv/cpus.yaml26
-rw-r--r--MAINTAINERS13
-rw-r--r--Makefile4
-rw-r--r--arch/arc/Makefile4
-rw-r--r--arch/arc/plat-hsdk/platform.c161
-rw-r--r--arch/arm/boot/dts/armada-xp-98dx3236.dtsi8
-rw-r--r--arch/arm/boot/dts/gemini-dlink-dir-685.dts2
-rw-r--r--arch/arm/boot/dts/gemini-dlink-dns-313.dts2
-rw-r--r--arch/arm/boot/dts/imx6ul.dtsi8
-rw-r--r--arch/arm/boot/dts/meson8.dtsi5
-rw-r--r--arch/arm/boot/dts/meson8b.dtsi11
-rw-r--r--arch/arm/mach-davinci/board-da830-evm.c5
-rw-r--r--arch/arm/mach-davinci/board-omapl138-hawk.c3
-rw-r--r--arch/arm/mach-omap2/prm3xxx.c2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi18
-rw-r--r--arch/arm64/configs/defconfig1
-rw-r--r--arch/arm64/kernel/image.h6
-rw-r--r--arch/arm64/kernel/module.c8
-rw-r--r--arch/arm64/kvm/guest.c2
-rw-r--r--arch/csky/kernel/signal.c5
-rw-r--r--arch/mips/Makefile3
-rw-r--r--arch/mips/boot/compressed/Makefile2
-rw-r--r--arch/mips/boot/compressed/calc_vmlinuz_load_addr.c2
-rw-r--r--arch/mips/include/asm/mach-ath79/ar933x_uart.h4
-rw-r--r--arch/mips/include/asm/mips-gic.h30
-rw-r--r--arch/mips/mm/mmap.c2
-rw-r--r--arch/mips/mm/tlbex.c29
-rw-r--r--arch/parisc/kernel/module.c4
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S2
-rw-r--r--arch/powerpc/mm/book3s64/mmu_context.c55
-rw-r--r--arch/riscv/boot/dts/sifive/fu540-c000.dtsi6
-rw-r--r--arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts13
-rw-r--r--arch/riscv/configs/defconfig5
-rw-r--r--arch/riscv/mm/fault.c3
-rw-r--r--arch/x86/events/core.c16
-rw-r--r--arch/x86/events/intel/ds.c9
-rw-r--r--arch/x86/events/perf_event.h21
-rw-r--r--arch/x86/include/uapi/asm/perf_regs.h3
-rw-r--r--arch/x86/kernel/apic/apic.c3
-rw-r--r--arch/x86/kernel/cpu/bugs.c11
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c15
-rw-r--r--arch/x86/kernel/cpu/resctrl/rdtgroup.c35
-rw-r--r--arch/x86/kernel/ftrace.c10
-rw-r--r--arch/x86/kernel/head64.c20
-rw-r--r--arch/x86/kernel/perf_regs.c7
-rw-r--r--arch/x86/kernel/unwind_orc.c26
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/vmx/nested.c30
-rw-r--r--arch/x86/kvm/x86.c6
-rw-r--r--arch/x86/mm/init_64.c24
-rw-r--r--arch/x86/platform/efi/quirks.c2
-rw-r--r--block/bfq-iosched.c2
-rw-r--r--block/blk-mq-debugfs.c7
-rw-r--r--crypto/cryptd.c1
-rw-r--r--crypto/crypto_user_base.c3
-rw-r--r--drivers/auxdisplay/cfag12864bfb.c5
-rw-r--r--drivers/auxdisplay/ht16k33.c4
-rw-r--r--drivers/clk/clk.c2
-rw-r--r--drivers/clk/meson/g12a.c4
-rw-r--r--drivers/clk/meson/g12a.h2
-rw-r--r--drivers/clk/meson/meson8b.c10
-rw-r--r--drivers/clk/socfpga/clk-s10.c4
-rw-r--r--drivers/clk/tegra/clk-tegra210.c2
-rw-r--r--drivers/clk/ti/clkctrl.c7
-rw-r--r--drivers/dma/dma-jz4780.c5
-rw-r--r--drivers/dma/imx-sdma.c52
-rw-r--r--drivers/dma/qcom/bam_dma.c3
-rw-r--r--drivers/firmware/efi/efi-bgrt.c5
-rw-r--r--drivers/firmware/efi/efi.c12
-rw-r--r--drivers/firmware/efi/efibc.c12
-rw-r--r--drivers/gpio/gpiolib-of.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c19
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c7
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c6
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c2
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-logitech-dj.c4
-rw-r--r--drivers/hid/hid-multitouch.c4
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/hid-uclogic-core.c2
-rw-r--r--drivers/hid/hid-uclogic-params.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-fw-loader.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c4
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c15
-rw-r--r--drivers/irqchip/irq-csky-mpintc.c15
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c35
-rw-r--r--drivers/irqchip/irq-mips-gic.c4
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c4
-rw-r--r--drivers/md/dm-init.c10
-rw-r--r--drivers/md/dm-log-writes.c23
-rw-r--r--drivers/md/dm-table.c2
-rw-r--r--drivers/md/dm-verity-target.c4
-rw-r--r--drivers/mfd/stmfx.c12
-rw-r--r--drivers/mtd/nand/raw/ingenic/Kconfig2
-rw-r--r--drivers/mtd/nand/raw/ingenic/Makefile4
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_ecc.c9
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c (renamed from drivers/mtd/nand/raw/ingenic/ingenic_nand.c)0
-rw-r--r--drivers/mtd/nand/raw/nand_base.c3
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c40
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c2
-rw-r--r--drivers/mtd/nand/spi/macronix.c4
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c119
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.c10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c19
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c28
-rw-r--r--drivers/net/ethernet/sis/sis900.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c22
-rw-r--r--drivers/net/ppp/ppp_mppe.c1
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/vrf.c2
-rw-r--r--drivers/pci/pci-driver.c8
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c34
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c8
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c18
-rw-r--r--drivers/scsi/vmw_pvscsi.c6
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/ti/Kconfig4
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c16
-rw-r--r--drivers/target/target_core_iblock.c2
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/afs/callback.c4
-rw-r--r--fs/afs/inode.c31
-rw-r--r--fs/afs/internal.h8
-rw-r--r--fs/afs/volume.c1
-rw-r--r--fs/aio.c28
-rw-r--r--fs/binfmt_flat.c23
-rw-r--r--fs/ceph/mds_client.c3
-rw-r--r--fs/cifs/smb2ops.c64
-rw-r--r--fs/cifs/smb2pdu.h14
-rw-r--r--fs/dax.c9
-rw-r--r--fs/eventpoll.c4
-rw-r--r--fs/inode.c2
-rw-r--r--fs/io_uring.c12
-rw-r--r--fs/namespace.c7
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c2
-rw-r--r--fs/nfsd/nfs4state.c2
-rw-r--r--fs/proc/array.c2
-rw-r--r--fs/proc/base.c3
-rw-r--r--fs/select.c18
-rw-r--r--fs/userfaultfd.c42
-rw-r--r--include/dt-bindings/clock/g12a-clkc.h2
-rw-r--r--include/dt-bindings/clock/sifive-fu540-prci.h2
-rw-r--r--include/linux/device.h3
-rw-r--r--include/linux/intel-ish-client-if.h1
-rw-r--r--include/linux/kernel.h3
-rw-r--r--include/linux/mtd/spi-nor.h3
-rw-r--r--include/linux/pagemap.h13
-rw-r--r--include/linux/perf_event.h1
-rw-r--r--include/linux/perf_regs.h8
-rw-r--r--include/linux/pfn_t.h2
-rw-r--r--include/linux/signal.h2
-rw-r--r--include/linux/suspend.h26
-rw-r--r--include/linux/xarray.h1
-rw-r--r--include/net/ip6_route.h4
-rw-r--r--include/net/route.h1
-rw-r--r--include/net/tls.h15
-rw-r--r--init/initramfs.c4
-rw-r--r--kernel/cpu.c6
-rw-r--r--kernel/events/core.c23
-rw-r--r--kernel/fork.c65
-rw-r--r--kernel/power/suspend.c3
-rw-r--r--kernel/ptrace.c4
-rw-r--r--kernel/signal.c5
-rw-r--r--kernel/trace/ftrace.c10
-rw-r--r--kernel/trace/trace.c24
-rw-r--r--lib/devres.c3
-rw-r--r--lib/idr.c14
-rw-r--r--lib/mpi/mpi-pow.c6
-rw-r--r--lib/test_xarray.c38
-rw-r--r--lib/xarray.c12
-rw-r--r--mm/filemap.c146
-rw-r--r--mm/huge_memory.c3
-rw-r--r--mm/hugetlb.c29
-rw-r--r--mm/khugepaged.c4
-rw-r--r--mm/memfd.c2
-rw-r--r--mm/memory-failure.c7
-rw-r--r--mm/mempolicy.c2
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/oom_kill.c12
-rw-r--r--mm/page_alloc.c3
-rw-r--r--mm/page_idle.c4
-rw-r--r--mm/page_io.c20
-rw-r--r--mm/shmem.c2
-rw-r--r--mm/swap_state.c4
-rw-r--r--mm/vmalloc.c4
-rw-r--r--mm/vmscan.c27
-rw-r--r--net/bluetooth/6lowpan.c4
-rw-r--r--net/bluetooth/l2cap_core.c2
-rw-r--r--net/ipv4/ip_output.c12
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/route.c33
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/route.c5
-rw-r--r--net/netfilter/nf_flow_table_ip.c2
-rw-r--r--net/packet/af_packet.c23
-rw-r--r--net/packet/internal.h1
-rw-r--r--net/sched/sch_cbs.c9
-rw-r--r--net/sctp/endpointola.c8
-rw-r--r--net/smc/af_smc.c5
-rw-r--r--net/smc/smc_core.c3
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c7
-rw-r--r--net/sunrpc/xprtsock.c16
-rw-r--r--net/tipc/core.c12
-rw-r--r--net/tipc/netlink_compat.c18
-rw-r--r--net/tls/tls_main.c3
-rw-r--r--samples/pidfd/pidfd-metadata.c8
-rw-r--r--sound/core/seq/oss/seq_oss_ioctl.c2
-rw-r--r--sound/core/seq/oss/seq_oss_rw.c2
-rw-r--r--sound/firewire/amdtp-am824.c2
-rw-r--r--sound/hda/hdac_device.c18
-rw-r--r--sound/pci/hda/patch_realtek.c8
-rw-r--r--sound/usb/line6/pcm.c5
-rw-r--r--sound/usb/mixer_quirks.c4
-rw-r--r--tools/arch/x86/include/uapi/asm/perf_regs.h3
-rw-r--r--tools/perf/arch/x86/include/perf_regs.h1
-rw-r--r--tools/perf/arch/x86/util/perf_regs.c4
-rw-r--r--tools/testing/radix-tree/idr-test.c46
-rw-r--r--tools/testing/selftests/kvm/x86_64/evmcs_test.c1
-rw-r--r--tools/testing/selftests/powerpc/mm/.gitignore3
-rw-r--r--tools/testing/selftests/powerpc/mm/Makefile4
-rw-r--r--tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c87
234 files changed, 1832 insertions, 829 deletions
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
index 27f02ec4bb45..f97a4ecd7b91 100644
--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
+++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
@@ -152,17 +152,19 @@ examples:
   - |
     // Example 2: Spike ISA Simulator with 1 Hart
     cpus {
+        #address-cells = <1>;
+        #size-cells = <0>;
         cpu@0 {
             device_type = "cpu";
             reg = <0>;
             compatible = "riscv";
             riscv,isa = "rv64imafdc";
             mmu-type = "riscv,sv48";
             interrupt-controller {
                 #interrupt-cells = <1>;
                 interrupt-controller;
                 compatible = "riscv,cpu-intc";
             };
         };
     };
 ...
diff --git a/MAINTAINERS b/MAINTAINERS
index 4be48eb5473d..c80bd74681cc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3122,6 +3122,7 @@ F: arch/arm/mach-bcm/
 BROADCOM BCM2835 ARM ARCHITECTURE
 M:	Eric Anholt <eric@anholt.net>
 M:	Stefan Wahren <wahrenst@gmx.net>
+L:	bcm-kernel-feedback-list@broadcom.com
 L:	linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:	git git://github.com/anholt/linux
@@ -3151,6 +3152,7 @@ F: arch/arm/boot/dts/bcm953012*
 
 BROADCOM BCM53573 ARM ARCHITECTURE
 M:	Rafał Miłecki <rafal@milecki.pl>
+L:	bcm-kernel-feedback-list@broadcom.com
 L:	linux-arm-kernel@lists.infradead.org
 S:	Maintained
 F:	arch/arm/boot/dts/bcm53573*
@@ -3940,6 +3942,14 @@ M: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
 S:	Maintained
 F:	.clang-format
 
+CLANG/LLVM BUILD SUPPORT
+L:	clang-built-linux@googlegroups.com
+W:	https://clangbuiltlinux.github.io/
+B:	https://github.com/ClangBuiltLinux/linux/issues
+C:	irc://chat.freenode.net/clangbuiltlinux
+S:	Supported
+K:	\b(?i:clang|llvm)\b
+
 CLEANCACHE API
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:	linux-kernel@vger.kernel.org
@@ -7800,7 +7810,7 @@ INGENIC JZ4780 NAND DRIVER
 M:	Harvey Hunt <harveyhuntnexus@gmail.com>
 L:	linux-mtd@lists.infradead.org
 S:	Maintained
-F:	drivers/mtd/nand/raw/jz4780_*
+F:	drivers/mtd/nand/raw/ingenic/
 
 INOTIFY
 M:	Jan Kara <jack@suse.cz>
@@ -15493,6 +15503,7 @@ F: drivers/dma/tegra*
 
 TEGRA I2C DRIVER
 M:	Laxman Dewangan <ldewangan@nvidia.com>
+R:	Dmitry Osipenko <digetx@gmail.com>
 S:	Supported
 F:	drivers/i2c/busses/i2c-tegra.c
 
diff --git a/Makefile b/Makefile
index 7a7c17eb0cbf..3e4868a6498b 100644
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
 VERSION = 5
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
-NAME = Golden Lions
+EXTRAVERSION =
+NAME = Bobtail Squid
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 480af1af9e63..03a0b19c92cd 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -5,6 +5,10 @@
 
 KBUILD_DEFCONFIG := nsim_hs_defconfig
 
+ifeq ($(CROSS_COMPILE),)
+CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux-)
+endif
+
 cflags-y	+= -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT)	+= -mA7
 cflags-$(CONFIG_ISA_ARCV2)	+= -mcpu=hs38
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index 6a91a742ab3d..7dd2dd335cf6 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -32,8 +32,6 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
 
 #define ARC_PERIPHERAL_BASE		0xf0000000
 #define CREG_BASE			(ARC_PERIPHERAL_BASE + 0x1000)
-#define CREG_PAE			(CREG_BASE + 0x180)
-#define CREG_PAE_UPDATE			(CREG_BASE + 0x194)
 
 #define SDIO_BASE			(ARC_PERIPHERAL_BASE + 0xA000)
 #define SDIO_UHS_REG_EXT		(SDIO_BASE + 0x108)
@@ -99,20 +97,167 @@ static void __init hsdk_enable_gpio_intc_wire(void)
99 iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN); 97 iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN);
100} 98}
101 99
102static void __init hsdk_init_early(void) 100enum hsdk_axi_masters {
101 M_HS_CORE = 0,
102 M_HS_RTT,
103 M_AXI_TUN,
104 M_HDMI_VIDEO,
105 M_HDMI_AUDIO,
106 M_USB_HOST,
107 M_ETHERNET,
108 M_SDIO,
109 M_GPU,
110 M_DMAC_0,
111 M_DMAC_1,
112 M_DVFS
113};
114
115#define UPDATE_VAL 1
116
117/*
118 * This is modified configuration of AXI bridge. Default settings
119 * are specified in "Table 111 CREG Address Decoder register reset values".
120 *
121 * AXI_M_m_SLV{0|1} - Slave Select register for master 'm'.
122 * Possible slaves are:
123 * - 0 => no slave selected
124 * - 1 => DDR controller port #1
125 * - 2 => SRAM controller
126 * - 3 => AXI tunnel
127 * - 4 => EBI controller
128 * - 5 => ROM controller
129 * - 6 => AXI2APB bridge
130 * - 7 => DDR controller port #2
131 * - 8 => DDR controller port #3
132 * - 9 => HS38x4 IOC
133 * - 10 => HS38x4 DMI
134 * AXI_M_m_OFFSET{0|1} - Addr Offset register for master 'm'
135 *
136 * Please read ARC HS Development IC Specification, section 17.2 for more
137 * information about apertures configuration.
138 *
139 * m master AXI_M_m_SLV0 AXI_M_m_SLV1 AXI_M_m_OFFSET0 AXI_M_m_OFFSET1
140 * 0 HS (CBU) 0x11111111 0x63111111 0xFEDCBA98 0x0E543210
141 * 1 HS (RTT) 0x77777777 0x77777777 0xFEDCBA98 0x76543210
142 * 2 AXI Tunnel 0x88888888 0x88888888 0xFEDCBA98 0x76543210
143 * 3 HDMI-VIDEO 0x77777777 0x77777777 0xFEDCBA98 0x76543210
144 * 4 HDMI-ADUIO 0x77777777 0x77777777 0xFEDCBA98 0x76543210
145 * 5 USB-HOST 0x77777777 0x77999999 0xFEDCBA98 0x76DCBA98
146 * 6 ETHERNET 0x77777777 0x77999999 0xFEDCBA98 0x76DCBA98
147 * 7 SDIO 0x77777777 0x77999999 0xFEDCBA98 0x76DCBA98
148 * 8 GPU 0x77777777 0x77777777 0xFEDCBA98 0x76543210
149 * 9 DMAC (port #1) 0x77777777 0x77777777 0xFEDCBA98 0x76543210
150 * 10 DMAC (port #2) 0x77777777 0x77777777 0xFEDCBA98 0x76543210
151 * 11 DVFS 0x00000000 0x60000000 0x00000000 0x00000000
152 */
153
154#define CREG_AXI_M_SLV0(m) ((void __iomem *)(CREG_BASE + 0x20 * (m)))
155#define CREG_AXI_M_SLV1(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x04))
156#define CREG_AXI_M_OFT0(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x08))
157#define CREG_AXI_M_OFT1(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x0C))
158#define CREG_AXI_M_UPDT(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x14))
159
160#define CREG_AXI_M_HS_CORE_BOOT ((void __iomem *)(CREG_BASE + 0x010))
161
162#define CREG_PAE ((void __iomem *)(CREG_BASE + 0x180))
163#define CREG_PAE_UPDT ((void __iomem *)(CREG_BASE + 0x194))
164
165static void __init hsdk_init_memory_bridge(void)
103{ 166{
167 u32 reg;
168
169 /*
170 * M_HS_CORE has one unique register - BOOT.
171 * We need to clean boot mirror (BOOT[1:0]) bits in them to avoid first
172 * aperture to be masked by 'boot mirror'.
173 */
174 reg = readl(CREG_AXI_M_HS_CORE_BOOT) & (~0x3);
175 writel(reg, CREG_AXI_M_HS_CORE_BOOT);
176 writel(0x11111111, CREG_AXI_M_SLV0(M_HS_CORE));
177 writel(0x63111111, CREG_AXI_M_SLV1(M_HS_CORE));
178 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HS_CORE));
179 writel(0x0E543210, CREG_AXI_M_OFT1(M_HS_CORE));
180 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HS_CORE));
181
182 writel(0x77777777, CREG_AXI_M_SLV0(M_HS_RTT));
183 writel(0x77777777, CREG_AXI_M_SLV1(M_HS_RTT));
184 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HS_RTT));
185 writel(0x76543210, CREG_AXI_M_OFT1(M_HS_RTT));
186 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HS_RTT));
187
188 writel(0x88888888, CREG_AXI_M_SLV0(M_AXI_TUN));
189 writel(0x88888888, CREG_AXI_M_SLV1(M_AXI_TUN));
190 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_AXI_TUN));
191 writel(0x76543210, CREG_AXI_M_OFT1(M_AXI_TUN));
192 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_AXI_TUN));
193
194 writel(0x77777777, CREG_AXI_M_SLV0(M_HDMI_VIDEO));
195 writel(0x77777777, CREG_AXI_M_SLV1(M_HDMI_VIDEO));
196 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HDMI_VIDEO));
197 writel(0x76543210, CREG_AXI_M_OFT1(M_HDMI_VIDEO));
198 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HDMI_VIDEO));
199
200 writel(0x77777777, CREG_AXI_M_SLV0(M_HDMI_AUDIO));
201 writel(0x77777777, CREG_AXI_M_SLV1(M_HDMI_AUDIO));
202 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HDMI_AUDIO));
203 writel(0x76543210, CREG_AXI_M_OFT1(M_HDMI_AUDIO));
204 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HDMI_AUDIO));
205
206 writel(0x77777777, CREG_AXI_M_SLV0(M_USB_HOST));
207 writel(0x77999999, CREG_AXI_M_SLV1(M_USB_HOST));
208 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_USB_HOST));
209 writel(0x76DCBA98, CREG_AXI_M_OFT1(M_USB_HOST));
210 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_USB_HOST));
211
212 writel(0x77777777, CREG_AXI_M_SLV0(M_ETHERNET));
213 writel(0x77999999, CREG_AXI_M_SLV1(M_ETHERNET));
214 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_ETHERNET));
215 writel(0x76DCBA98, CREG_AXI_M_OFT1(M_ETHERNET));
216 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_ETHERNET));
217
218 writel(0x77777777, CREG_AXI_M_SLV0(M_SDIO));
219 writel(0x77999999, CREG_AXI_M_SLV1(M_SDIO));
220 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_SDIO));
221 writel(0x76DCBA98, CREG_AXI_M_OFT1(M_SDIO));
222 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_SDIO));
223
224 writel(0x77777777, CREG_AXI_M_SLV0(M_GPU));
225 writel(0x77777777, CREG_AXI_M_SLV1(M_GPU));
226 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_GPU));
227 writel(0x76543210, CREG_AXI_M_OFT1(M_GPU));
228 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_GPU));
229
230 writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
231 writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_0));
232 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
233 writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_0));
234 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
235
236 writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
237 writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_1));
238 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
239 writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_1));
240 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
241
242 writel(0x00000000, CREG_AXI_M_SLV0(M_DVFS));
243 writel(0x60000000, CREG_AXI_M_SLV1(M_DVFS));
244 writel(0x00000000, CREG_AXI_M_OFT0(M_DVFS));
245 writel(0x00000000, CREG_AXI_M_OFT1(M_DVFS));
246 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DVFS));
247
104 /* 248 /*
105 * PAE remapping for DMA clients does not work due to an RTL bug, so 249 * PAE remapping for DMA clients does not work due to an RTL bug, so
106 * CREG_PAE register must be programmed to all zeroes, otherwise it 250 * CREG_PAE register must be programmed to all zeroes, otherwise it
107 * will cause problems with DMA to/from peripherals even if PAE40 is 251 * will cause problems with DMA to/from peripherals even if PAE40 is
108 * not used. 252 * not used.
109 */ 253 */
254 writel(0x00000000, CREG_PAE);
255 writel(UPDATE_VAL, CREG_PAE_UPDT);
256}
110 257
111 /* Default is 1, which means "PAE offset = 4GByte" */ 258static void __init hsdk_init_early(void)
112 writel_relaxed(0, (void __iomem *) CREG_PAE); 259{
113 260 hsdk_init_memory_bridge();
114 /* Really apply settings made above */
115 writel(1, (void __iomem *) CREG_PAE_UPDATE);
116 261
117 /* 262 /*
118 * Switch SDIO external ciu clock divider from default div-by-8 to 263 * Switch SDIO external ciu clock divider from default div-by-8 to
diff --git a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
index 59753470cd34..267d0c178e55 100644
--- a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
+++ b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
@@ -336,3 +336,11 @@
 	status = "disabled";
 };
 
+&uart0 {
+	compatible = "marvell,armada-38x-uart";
+};
+
+&uart1 {
+	compatible = "marvell,armada-38x-uart";
+};
+
diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
index cfbfbc91a1e1..3613f05f8a80 100644
--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
@@ -20,7 +20,7 @@
 	};
 
 	chosen {
-		bootargs = "console=ttyS0,19200n8 root=/dev/sda1 rw rootwait";
+		bootargs = "console=ttyS0,19200n8 root=/dev/sda1 rw rootwait consoleblank=300";
 		stdout-path = "uart0:19200n8";
 	};
 
diff --git a/arch/arm/boot/dts/gemini-dlink-dns-313.dts b/arch/arm/boot/dts/gemini-dlink-dns-313.dts
index b12504e10f0b..360642a02a48 100644
--- a/arch/arm/boot/dts/gemini-dlink-dns-313.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dns-313.dts
@@ -11,7 +11,7 @@
 
 / {
 	model = "D-Link DNS-313 1-Bay Network Storage Enclosure";
-	compatible = "dlink,dir-313", "cortina,gemini";
+	compatible = "dlink,dns-313", "cortina,gemini";
 	#address-cells = <1>;
 	#size-cells = <1>;
 
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index bbf010c73336..a7f6d1d58e20 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -358,7 +358,7 @@
 			pwm1: pwm@2080000 {
 				compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
 				reg = <0x02080000 0x4000>;
-				interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX6UL_CLK_PWM1>,
 					 <&clks IMX6UL_CLK_PWM1>;
 				clock-names = "ipg", "per";
@@ -369,7 +369,7 @@
 			pwm2: pwm@2084000 {
 				compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
 				reg = <0x02084000 0x4000>;
-				interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX6UL_CLK_PWM2>,
 					 <&clks IMX6UL_CLK_PWM2>;
 				clock-names = "ipg", "per";
@@ -380,7 +380,7 @@
 			pwm3: pwm@2088000 {
 				compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
 				reg = <0x02088000 0x4000>;
-				interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX6UL_CLK_PWM3>,
 					 <&clks IMX6UL_CLK_PWM3>;
 				clock-names = "ipg", "per";
@@ -391,7 +391,7 @@
 			pwm4: pwm@208c000 {
 				compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
 				reg = <0x0208c000 0x4000>;
-				interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX6UL_CLK_PWM4>,
 					 <&clks IMX6UL_CLK_PWM4>;
 				clock-names = "ipg", "per";
diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
index 7ef442462ea4..40c11b6b217a 100644
--- a/arch/arm/boot/dts/meson8.dtsi
+++ b/arch/arm/boot/dts/meson8.dtsi
@@ -248,8 +248,8 @@
 				     <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
-				     <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
@@ -264,7 +264,6 @@
 			clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
 			clock-names = "bus", "core";
 			operating-points-v2 = <&gpu_opp_table>;
-			switch-delay = <0xffff>;
 		};
 	};
 }; /* end of / */
diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
index 800cd65fc50a..ec67f49116d9 100644
--- a/arch/arm/boot/dts/meson8b.dtsi
+++ b/arch/arm/boot/dts/meson8b.dtsi
@@ -163,23 +163,23 @@
 
 		opp-255000000 {
 			opp-hz = /bits/ 64 <255000000>;
-			opp-microvolt = <1150000>;
+			opp-microvolt = <1100000>;
 		};
 		opp-364300000 {
 			opp-hz = /bits/ 64 <364300000>;
-			opp-microvolt = <1150000>;
+			opp-microvolt = <1100000>;
 		};
 		opp-425000000 {
 			opp-hz = /bits/ 64 <425000000>;
-			opp-microvolt = <1150000>;
+			opp-microvolt = <1100000>;
 		};
 		opp-510000000 {
 			opp-hz = /bits/ 64 <510000000>;
-			opp-microvolt = <1150000>;
+			opp-microvolt = <1100000>;
 		};
 		opp-637500000 {
 			opp-hz = /bits/ 64 <637500000>;
-			opp-microvolt = <1150000>;
+			opp-microvolt = <1100000>;
 			turbo-mode;
 		};
 	};
@@ -229,7 +229,6 @@
 			clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
 			clock-names = "bus", "core";
 			operating-points-v2 = <&gpu_opp_table>;
-			switch-delay = <0xffff>;
 		};
 	};
 }; /* end of / */
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index 51a892702e27..a273ab25c668 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -61,6 +61,9 @@ static struct regulator_consumer_supply da830_evm_usb_supplies[] = {
 static struct regulator_init_data da830_evm_usb_vbus_data = {
 	.consumer_supplies	= da830_evm_usb_supplies,
 	.num_consumer_supplies	= ARRAY_SIZE(da830_evm_usb_supplies),
+	.constraints		= {
+		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+	},
 };
 
 static struct fixed_voltage_config da830_evm_usb_vbus = {
@@ -88,7 +91,7 @@ static struct gpiod_lookup_table da830_evm_usb_oc_gpio_lookup = {
 static struct gpiod_lookup_table da830_evm_usb_vbus_gpio_lookup = {
 	.dev_id = "reg-fixed-voltage.0",
 	.table = {
-		GPIO_LOOKUP("davinci_gpio", ON_BD_USB_DRV, "vbus", 0),
+		GPIO_LOOKUP("davinci_gpio", ON_BD_USB_DRV, NULL, 0),
 		{ }
 	},
 };
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index db177a6a7e48..5390a8630cf0 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -306,6 +306,9 @@ static struct regulator_consumer_supply hawk_usb_supplies[] = {
 static struct regulator_init_data hawk_usb_vbus_data = {
 	.consumer_supplies	= hawk_usb_supplies,
 	.num_consumer_supplies	= ARRAY_SIZE(hawk_usb_supplies),
+	.constraints		= {
+		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+	},
 };
 
 static struct fixed_voltage_config hawk_usb_vbus = {
diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
index fd4a3bf27993..1b442b128569 100644
--- a/arch/arm/mach-omap2/prm3xxx.c
+++ b/arch/arm/mach-omap2/prm3xxx.c
@@ -430,7 +430,7 @@ static void omap3_prm_reconfigure_io_chain(void)
  * registers, and omap3xxx_prm_reconfigure_io_chain() must be called.
  * No return value.
  */
-static void __init omap3xxx_prm_enable_io_wakeup(void)
+static void omap3xxx_prm_enable_io_wakeup(void)
 {
 	if (prm_features & PRM_HAS_IO_WAKEUP)
 		omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index b04581249f0b..bf7f845447ed 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -28,7 +28,7 @@
 			enable-method = "psci";
 			clocks = <&clockgen 1 0>;
 			next-level-cache = <&l2>;
-			cpu-idle-states = <&CPU_PH20>;
+			cpu-idle-states = <&CPU_PW20>;
 		};
 
 		cpu1: cpu@1 {
@@ -38,7 +38,7 @@
 			enable-method = "psci";
 			clocks = <&clockgen 1 0>;
 			next-level-cache = <&l2>;
-			cpu-idle-states = <&CPU_PH20>;
+			cpu-idle-states = <&CPU_PW20>;
 		};
 
 		l2: l2-cache {
@@ -53,13 +53,13 @@
 		 */
 		entry-method = "arm,psci";
 
-		CPU_PH20: cpu-ph20 {
+		CPU_PW20: cpu-pw20 {
 			compatible = "arm,idle-state";
-			idle-state-name = "PH20";
-			arm,psci-suspend-param = <0x00010000>;
-			entry-latency-us = <1000>;
-			exit-latency-us = <1000>;
-			min-residency-us = <3000>;
+			idle-state-name = "PW20";
+			arm,psci-suspend-param = <0x0>;
+			entry-latency-us = <2000>;
+			exit-latency-us = <2000>;
+			min-residency-us = <6000>;
 		};
 	};
 
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 4d583514258c..6bca5b082ea4 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -613,6 +613,7 @@ CONFIG_RTC_DRV_TEGRA=y
 CONFIG_RTC_DRV_IMX_SC=m
 CONFIG_RTC_DRV_XGENE=y
 CONFIG_DMADEVICES=y
+CONFIG_FSL_EDMA=y
 CONFIG_DMA_BCM2835=m
 CONFIG_K3_DMA=y
 CONFIG_MV_XOR=y
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index 04ca08086d35..2b85c0d6fa3d 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -67,7 +67,11 @@
 
 #ifdef CONFIG_EFI
 
-__efistub_stext_offset = stext - _text;
+/*
+ * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol:
+ * https://github.com/ClangBuiltLinux/linux/issues/561
+ */
+__efistub_stext_offset = ABSOLUTE(stext - _text);
 
 /*
  * The EFI stub has its own symbol namespace prefixed by __efistub_, to
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index e23a68a5808f..71530e080ecc 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -21,6 +21,7 @@
 
 void *module_alloc(unsigned long size)
 {
+	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
 	gfp_t gfp_mask = GFP_KERNEL;
 	void *p;
 
@@ -28,9 +29,12 @@ void *module_alloc(unsigned long size)
 	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
 		gfp_mask |= __GFP_NOWARN;
 
+	if (IS_ENABLED(CONFIG_KASAN))
+		/* don't exceed the static module region - see below */
+		module_alloc_end = MODULES_END;
+
 	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
-				module_alloc_base + MODULES_VSIZE,
-				gfp_mask, PAGE_KERNEL_EXEC, 0,
+				module_alloc_end, gfp_mask, PAGE_KERNEL_EXEC, 0,
 				NUMA_NO_NODE, __builtin_return_address(0));
 
 	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index c2afa7982047..dfd626447482 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -208,7 +208,7 @@ out:
 
 #define vq_word(vq)	(((vq) - SVE_VQ_MIN) / 64)
 #define vq_mask(vq)	((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
-#define vq_present(vqs, vq) ((vqs)[vq_word(vq)] & vq_mask(vq))
+#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))
 
 static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c
index 04a43cfd4e09..d47a3381aad8 100644
--- a/arch/csky/kernel/signal.c
+++ b/arch/csky/kernel/signal.c
@@ -39,6 +39,11 @@ static int save_fpu_state(struct sigcontext __user *sc)
 #endif
 
 struct rt_sigframe {
+	/*
+	 * pad[3] is compatible with the same struct defined in
+	 * gcc/libgcc/config/csky/linux-unwind.h
+	 */
+	int pad[3];
 	struct siginfo info;
 	struct ucontext uc;
 };
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 8f4486c4415b..eceff9b75b22 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -17,6 +17,7 @@ archscripts: scripts_basic
 	$(Q)$(MAKE) $(build)=arch/mips/boot/tools relocs
 
 KBUILD_DEFCONFIG := 32r2el_defconfig
+KBUILD_DTBS := dtbs
 
 #
 # Select the object file format to substitute into the linker script.
@@ -384,7 +385,7 @@ quiet_cmd_64 = OBJCOPY $@
 vmlinux.64: vmlinux
 	$(call cmd,64)
 
-all: $(all-y)
+all: $(all-y) $(KBUILD_DTBS)
 
 # boot
 $(boot-y): $(vmlinux-32) FORCE
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 3c453a1f1ff1..172801ed35b8 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -78,6 +78,8 @@ OBJCOPYFLAGS_piggy.o := --add-section=.image=$(obj)/vmlinux.bin.z \
 $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
 	$(call if_changed,objcopy)
 
+HOSTCFLAGS_calc_vmlinuz_load_addr.o += $(LINUXINCLUDE)
+
 # Calculate the load address of the compressed kernel image
 hostprogs-y := calc_vmlinuz_load_addr
 
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
index 240f1d12df75..080b926d2623 100644
--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
+++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
@@ -9,7 +9,7 @@
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include "../../../../include/linux/sizes.h"
+#include <linux/sizes.h>
 
 int main(int argc, char *argv[])
 {
diff --git a/arch/mips/include/asm/mach-ath79/ar933x_uart.h b/arch/mips/include/asm/mach-ath79/ar933x_uart.h
index b8f8af7dc47c..cacf3545e018 100644
--- a/arch/mips/include/asm/mach-ath79/ar933x_uart.h
+++ b/arch/mips/include/asm/mach-ath79/ar933x_uart.h
@@ -24,8 +24,8 @@
 #define AR933X_UART_CS_PARITY_S		0
 #define AR933X_UART_CS_PARITY_M		0x3
 #define AR933X_UART_CS_PARITY_NONE	0
-#define AR933X_UART_CS_PARITY_ODD	1
-#define AR933X_UART_CS_PARITY_EVEN	2
+#define AR933X_UART_CS_PARITY_ODD	2
+#define AR933X_UART_CS_PARITY_EVEN	3
 #define AR933X_UART_CS_IF_MODE_S	2
 #define AR933X_UART_CS_IF_MODE_M	0x3
 #define AR933X_UART_CS_IF_MODE_NONE	0
diff --git a/arch/mips/include/asm/mips-gic.h b/arch/mips/include/asm/mips-gic.h
index 75a1cdee1331..084cac1c5ea2 100644
--- a/arch/mips/include/asm/mips-gic.h
+++ b/arch/mips/include/asm/mips-gic.h
@@ -311,6 +311,36 @@ static inline bool mips_gic_present(void)
 }
 
 /**
+ * mips_gic_vx_map_reg() - Return GIC_Vx_<intr>_MAP register offset
+ * @intr: A GIC local interrupt
+ *
+ * Determine the index of the GIC_VL_<intr>_MAP or GIC_VO_<intr>_MAP register
+ * within the block of GIC map registers. This is almost the same as the order
+ * of interrupts in the pending & mask registers, as used by enum
+ * mips_gic_local_interrupt, but moves the FDC interrupt & thus offsets the
+ * interrupts after it...
+ *
+ * Return: The map register index corresponding to @intr.
+ *
+ * The return value is suitable for use with the (read|write)_gic_v[lo]_map
+ * accessor functions.
+ */
+static inline unsigned int
+mips_gic_vx_map_reg(enum mips_gic_local_interrupt intr)
+{
+	/* WD, Compare & Timer are 1:1 */
+	if (intr <= GIC_LOCAL_INT_TIMER)
+		return intr;
+
+	/* FDC moves to after Timer... */
+	if (intr == GIC_LOCAL_INT_FDC)
+		return GIC_LOCAL_INT_TIMER + 1;
+
+	/* As a result everything else is offset by 1 */
+	return intr + 1;
+}
+
+/**
  * gic_get_c0_compare_int() - Return cp0 count/compare interrupt virq
  *
  * Determine the virq number to use for the coprocessor 0 count/compare
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 50ee7213b432..d79f2b432318 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -203,7 +203,7 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 
 bool __virt_addr_valid(const volatile void *kaddr)
 {
-	unsigned long vaddr = (unsigned long)vaddr;
+	unsigned long vaddr = (unsigned long)kaddr;
 
 	if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
 		return false;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 65b6e85447b1..144ceb0fba88 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -391,6 +391,7 @@ static struct work_registers build_get_work_registers(u32 **p)
 static void build_restore_work_registers(u32 **p)
 {
 	if (scratch_reg >= 0) {
+		uasm_i_ehb(p);
 		UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
 		return;
 	}
@@ -668,10 +669,12 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
 		uasm_i_mtc0(p, 0, C0_PAGEMASK);
 		uasm_il_b(p, r, lid);
 	}
-	if (scratch_reg >= 0)
+	if (scratch_reg >= 0) {
+		uasm_i_ehb(p);
 		UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-	else
+	} else {
 		UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+	}
 	} else {
 		/* Reset default page size */
 		if (PM_DEFAULT_MASK >> 16) {
@@ -938,10 +941,12 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	uasm_i_jr(p, ptr);
 
 	if (mode == refill_scratch) {
-		if (scratch_reg >= 0)
+		if (scratch_reg >= 0) {
+			uasm_i_ehb(p);
 			UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-		else
+		} else {
 			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+		}
 	} else {
 		uasm_i_nop(p);
 	}
@@ -1258,6 +1263,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 	UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
 
 	if (c0_scratch_reg >= 0) {
+		uasm_i_ehb(p);
 		UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
 		build_tlb_write_entry(p, l, r, tlb_random);
 		uasm_l_leave(l, *p);
@@ -1603,15 +1609,17 @@ static void build_setup_pgd(void)
 		uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
 		uasm_l_tlbl_goaround1(&l, p);
 		UASM_i_SLL(&p, a0, a0, 11);
-		uasm_i_jr(&p, 31);
 		UASM_i_MTC0(&p, a0, C0_CONTEXT);
+		uasm_i_jr(&p, 31);
+		uasm_i_ehb(&p);
 	} else {
 		/* PGD in c0_KScratch */
-		uasm_i_jr(&p, 31);
 		if (cpu_has_ldpte)
 			UASM_i_MTC0(&p, a0, C0_PWBASE);
 		else
 			UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
+		uasm_i_jr(&p, 31);
+		uasm_i_ehb(&p);
 	}
 #else
 #ifdef CONFIG_SMP
@@ -1625,13 +1633,16 @@ static void build_setup_pgd(void)
 	UASM_i_LA_mostly(&p, a2, pgdc);
 	UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
 #endif /* SMP */
-	uasm_i_jr(&p, 31);
 
 	/* if pgd_reg is allocated, save PGD also to scratch register */
-	if (pgd_reg != -1)
+	if (pgd_reg != -1) {
 		UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
-	else
+		uasm_i_jr(&p, 31);
+		uasm_i_ehb(&p);
+	} else {
+		uasm_i_jr(&p, 31);
 		uasm_i_nop(&p);
+	}
 #endif
 	if (p >= (u32 *)tlbmiss_handler_setup_pgd_end)
 		panic("tlbmiss_handler_setup_pgd space exceeded");
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index f241ded9239b..1f0f29a289d3 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -786,6 +786,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
 		/* 32-bit PC relative address */
 		*loc = val - dot - 8 + addend;
 		break;
+	case R_PARISC_PCREL64:
+		/* 64-bit PC relative address */
+		*loc64 = val - dot - 8 + addend;
+		break;
 	case R_PARISC_DIR64:
 		/* 64-bit effective address */
 		*loc64 = val + addend;
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 6b86055e5251..73ba246ca11d 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -315,7 +315,7 @@ TRAMP_REAL_BEGIN(machine_check_common_early)
 	mfspr	r11,SPRN_DSISR		/* Save DSISR */
 	std	r11,_DSISR(r1)
 	std	r9,_CCR(r1)		/* Save CR in stackframe */
-	kuap_save_amr_and_lock r9, r10, cr1
+	/* We don't touch AMR here, we never go to virtual mode */
 	/* Save r9 through r13 from EXMC save area to stack frame. */
 	EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
 	mfmsr	r11			/* get MSR value */
diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
index bb70391401f7..794404d50a85 100644
--- a/arch/powerpc/mm/book3s64/mmu_context.c
+++ b/arch/powerpc/mm/book3s64/mmu_context.c
@@ -50,20 +50,52 @@ EXPORT_SYMBOL_GPL(hash__alloc_context_id);
 
 void slb_setup_new_exec(void);
 
+static int realloc_context_ids(mm_context_t *ctx)
+{
+	int i, id;
+
+	/*
+	 * id 0 (aka. ctx->id) is special, we always allocate a new one, even if
+	 * there wasn't one allocated previously (which happens in the exec
+	 * case where ctx is newly allocated).
+	 *
+	 * We have to be a bit careful here. We must keep the existing ids in
+	 * the array, so that we can test if they're non-zero to decide if we
+	 * need to allocate a new one. However in case of error we must free the
+	 * ids we've allocated but *not* any of the existing ones (or risk a
+	 * UAF). That's why we decrement i at the start of the error handling
+	 * loop, to skip the id that we just tested but couldn't reallocate.
+	 */
+	for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
+		if (i == 0 || ctx->extended_id[i]) {
+			id = hash__alloc_context_id();
+			if (id < 0)
+				goto error;
+
+			ctx->extended_id[i] = id;
+		}
+	}
+
+	/* The caller expects us to return id */
+	return ctx->id;
+
+error:
+	for (i--; i >= 0; i--) {
+		if (ctx->extended_id[i])
+			ida_free(&mmu_context_ida, ctx->extended_id[i]);
+	}
+
+	return id;
+}
+
 static int hash__init_new_context(struct mm_struct *mm)
 {
 	int index;
 
-	index = hash__alloc_context_id();
-	if (index < 0)
-		return index;
-
 	mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
 					   GFP_KERNEL);
-	if (!mm->context.hash_context) {
-		ida_free(&mmu_context_ida, index);
+	if (!mm->context.hash_context)
 		return -ENOMEM;
-	}
 
 	/*
 	 * The old code would re-promote on fork, we don't do that when using
@@ -91,13 +123,20 @@ static int hash__init_new_context(struct mm_struct *mm)
 		mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
 						GFP_KERNEL);
 		if (!mm->context.hash_context->spt) {
-			ida_free(&mmu_context_ida, index);
 			kfree(mm->context.hash_context);
 			return -ENOMEM;
 		}
 	}
 #endif
+	}
 
+	index = realloc_context_ids(&mm->context);
+	if (index < 0) {
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+		kfree(mm->context.hash_context->spt);
+#endif
+		kfree(mm->context.hash_context);
+		return index;
 	}
 
 	pkey_mm_init(mm);
diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
index 3c06ee4b2b29..40983491b95f 100644
--- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
@@ -163,6 +163,7 @@
 			interrupt-parent = <&plic0>;
 			interrupts = <4>;
 			clocks = <&prci PRCI_CLK_TLCLK>;
+			status = "disabled";
 		};
 		uart1: serial@10011000 {
 			compatible = "sifive,fu540-c000-uart", "sifive,uart0";
@@ -170,6 +171,7 @@
 			interrupt-parent = <&plic0>;
 			interrupts = <5>;
 			clocks = <&prci PRCI_CLK_TLCLK>;
+			status = "disabled";
 		};
 		i2c0: i2c@10030000 {
 			compatible = "sifive,fu540-c000-i2c", "sifive,i2c0";
@@ -181,6 +183,7 @@
 			reg-io-width = <1>;
 			#address-cells = <1>;
 			#size-cells = <0>;
+			status = "disabled";
 		};
 		qspi0: spi@10040000 {
 			compatible = "sifive,fu540-c000-spi", "sifive,spi0";
@@ -191,6 +194,7 @@
 			clocks = <&prci PRCI_CLK_TLCLK>;
 			#address-cells = <1>;
 			#size-cells = <0>;
+			status = "disabled";
 		};
 		qspi1: spi@10041000 {
 			compatible = "sifive,fu540-c000-spi", "sifive,spi0";
@@ -201,6 +205,7 @@
 			clocks = <&prci PRCI_CLK_TLCLK>;
 			#address-cells = <1>;
 			#size-cells = <0>;
+			status = "disabled";
 		};
 		qspi2: spi@10050000 {
 			compatible = "sifive,fu540-c000-spi", "sifive,spi0";
@@ -210,6 +215,7 @@
 			clocks = <&prci PRCI_CLK_TLCLK>;
 			#address-cells = <1>;
 			#size-cells = <0>;
+			status = "disabled";
 		};
 	};
 };
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
index 4da88707e28f..0b55c53c08c7 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
@@ -42,7 +42,20 @@
 	};
 };
 
+&uart0 {
+	status = "okay";
+};
+
+&uart1 {
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+};
+
 &qspi0 {
+	status = "okay";
 	flash@0 {
 		compatible = "issi,is25wp256", "jedec,spi-nor";
 		reg = <0>;
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 4f02967e55de..04944fb4fa7a 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -69,6 +69,7 @@ CONFIG_VIRTIO_MMIO=y
 CONFIG_CLK_SIFIVE=y
 CONFIG_CLK_SIFIVE_FU540_PRCI=y
 CONFIG_SIFIVE_PLIC=y
+CONFIG_SPI_SIFIVE=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_AUTOFS4_FS=y
@@ -84,4 +85,8 @@ CONFIG_ROOT_NFS=y
 CONFIG_CRYPTO_USER_API_HASH=y
 CONFIG_CRYPTO_DEV_VIRTIO=y
 CONFIG_PRINTK_TIME=y
+CONFIG_SPI=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_RCU_TRACE is not set
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 3e2708c626a8..f960c3f4ce47 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -272,9 +272,6 @@ vmalloc_fault:
 		 * entries, but in RISC-V, SFENCE.VMA specifies an
 		 * ordering constraint, not a cache flush; it is
 		 * necessary even after writing invalid entries.
-		 * Relying on flush_tlb_fix_spurious_fault would
-		 * suffice, but the extra traps reduce
-		 * performance. So, eagerly SFENCE.VMA.
 		 */
 		local_flush_tlb_page(addr);
 
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index f0e4804515d8..ffc015bd257e 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -561,14 +561,14 @@ int x86_pmu_hw_config(struct perf_event *event)
 	}
 
 	/* sample_regs_user never support XMM registers */
-	if (unlikely(event->attr.sample_regs_user & PEBS_XMM_REGS))
+	if (unlikely(event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK))
 		return -EINVAL;
 	/*
 	 * Besides the general purpose registers, XMM registers may
 	 * be collected in PEBS on some platforms, e.g. Icelake
 	 */
-	if (unlikely(event->attr.sample_regs_intr & PEBS_XMM_REGS)) {
-		if (x86_pmu.pebs_no_xmm_regs)
+	if (unlikely(event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK)) {
+		if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS))
 			return -EINVAL;
 
 		if (!event->attr.precise_ip)
@@ -2328,13 +2328,13 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
2328 return; 2328 return;
2329 } 2329 }
2330 2330
2331 if (perf_hw_regs(regs)) { 2331 if (perf_callchain_store(entry, regs->ip))
2332 if (perf_callchain_store(entry, regs->ip)) 2332 return;
2333 return; 2333
2334 if (perf_hw_regs(regs))
2334 unwind_start(&state, current, regs, NULL); 2335 unwind_start(&state, current, regs, NULL);
2335 } else { 2336 else
2336 unwind_start(&state, current, NULL, (void *)regs->sp); 2337 unwind_start(&state, current, NULL, (void *)regs->sp);
2337 }
2338 2338
2339 for (; !unwind_done(&state); unwind_next_frame(&state)) { 2339 for (; !unwind_done(&state); unwind_next_frame(&state)) {
2340 addr = unwind_get_return_address(&state); 2340 addr = unwind_get_return_address(&state);
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 7acc526b4ad2..505c73dc6a73 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -987,7 +987,7 @@ static u64 pebs_update_adaptive_cfg(struct perf_event *event)
987 pebs_data_cfg |= PEBS_DATACFG_GP; 987 pebs_data_cfg |= PEBS_DATACFG_GP;
988 988
989 if ((sample_type & PERF_SAMPLE_REGS_INTR) && 989 if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
990 (attr->sample_regs_intr & PEBS_XMM_REGS)) 990 (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK))
991 pebs_data_cfg |= PEBS_DATACFG_XMMS; 991 pebs_data_cfg |= PEBS_DATACFG_XMMS;
992 992
993 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 993 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
@@ -1964,10 +1964,9 @@ void __init intel_ds_init(void)
1964 x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS); 1964 x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
1965 x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS); 1965 x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
1966 x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE; 1966 x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
1967 if (x86_pmu.version <= 4) { 1967 if (x86_pmu.version <= 4)
1968 x86_pmu.pebs_no_isolation = 1; 1968 x86_pmu.pebs_no_isolation = 1;
1969 x86_pmu.pebs_no_xmm_regs = 1; 1969
1970 }
1971 if (x86_pmu.pebs) { 1970 if (x86_pmu.pebs) {
1972 char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-'; 1971 char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
1973 char *pebs_qual = ""; 1972 char *pebs_qual = "";
@@ -2020,9 +2019,9 @@ void __init intel_ds_init(void)
2020 PERF_SAMPLE_TIME; 2019 PERF_SAMPLE_TIME;
2021 x86_pmu.flags |= PMU_FL_PEBS_ALL; 2020 x86_pmu.flags |= PMU_FL_PEBS_ALL;
2022 pebs_qual = "-baseline"; 2021 pebs_qual = "-baseline";
2022 x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
2023 } else { 2023 } else {
2024 /* Only basic record supported */ 2024 /* Only basic record supported */
2025 x86_pmu.pebs_no_xmm_regs = 1;
2026 x86_pmu.large_pebs_flags &= 2025 x86_pmu.large_pebs_flags &=
2027 ~(PERF_SAMPLE_ADDR | 2026 ~(PERF_SAMPLE_ADDR |
2028 PERF_SAMPLE_TIME | 2027 PERF_SAMPLE_TIME |
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 9bcec3f99e4a..8751008fc170 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -121,24 +121,6 @@ struct amd_nb {
121 (1ULL << PERF_REG_X86_R14) | \ 121 (1ULL << PERF_REG_X86_R14) | \
122 (1ULL << PERF_REG_X86_R15)) 122 (1ULL << PERF_REG_X86_R15))
123 123
124#define PEBS_XMM_REGS \
125 ((1ULL << PERF_REG_X86_XMM0) | \
126 (1ULL << PERF_REG_X86_XMM1) | \
127 (1ULL << PERF_REG_X86_XMM2) | \
128 (1ULL << PERF_REG_X86_XMM3) | \
129 (1ULL << PERF_REG_X86_XMM4) | \
130 (1ULL << PERF_REG_X86_XMM5) | \
131 (1ULL << PERF_REG_X86_XMM6) | \
132 (1ULL << PERF_REG_X86_XMM7) | \
133 (1ULL << PERF_REG_X86_XMM8) | \
134 (1ULL << PERF_REG_X86_XMM9) | \
135 (1ULL << PERF_REG_X86_XMM10) | \
136 (1ULL << PERF_REG_X86_XMM11) | \
137 (1ULL << PERF_REG_X86_XMM12) | \
138 (1ULL << PERF_REG_X86_XMM13) | \
139 (1ULL << PERF_REG_X86_XMM14) | \
140 (1ULL << PERF_REG_X86_XMM15))
141
142/* 124/*
143 * Per register state. 125 * Per register state.
144 */ 126 */
@@ -665,8 +647,7 @@ struct x86_pmu {
665 pebs_broken :1, 647 pebs_broken :1,
666 pebs_prec_dist :1, 648 pebs_prec_dist :1,
667 pebs_no_tlb :1, 649 pebs_no_tlb :1,
668 pebs_no_isolation :1, 650 pebs_no_isolation :1;
669 pebs_no_xmm_regs :1;
670 int pebs_record_size; 651 int pebs_record_size;
671 int pebs_buffer_size; 652 int pebs_buffer_size;
672 int max_pebs_events; 653 int max_pebs_events;
diff --git a/arch/x86/include/uapi/asm/perf_regs.h b/arch/x86/include/uapi/asm/perf_regs.h
index ac67bbea10ca..7c9d2bb3833b 100644
--- a/arch/x86/include/uapi/asm/perf_regs.h
+++ b/arch/x86/include/uapi/asm/perf_regs.h
@@ -52,4 +52,7 @@ enum perf_event_x86_regs {
52 /* These include both GPRs and XMMX registers */ 52 /* These include both GPRs and XMMX registers */
53 PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2, 53 PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2,
54}; 54};
55
56#define PERF_REG_EXTENDED_MASK (~((1ULL << PERF_REG_X86_XMM0) - 1))
57
55#endif /* _ASM_X86_PERF_REGS_H */ 58#endif /* _ASM_X86_PERF_REGS_H */
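
The new uapi macro treats every register index at or above XMM0 as "extended", which is what both sample_regs checks in events/core.c now test against. Below is a minimal userspace sketch of that test; PERF_REG_X86_XMM0 is redefined locally (32 is assumed here, consistent with PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2 above) purely for illustration — real code should include <asm/perf_regs.h>.

/*
 * Sketch of how PERF_REG_EXTENDED_MASK is meant to be used when filling
 * perf_event_attr::sample_regs_intr. The XMM0 index is assumed, not taken
 * from the real uapi header.
 */
#include <stdint.h>
#include <stdio.h>

#define PERF_REG_X86_XMM0 32 /* assumed value; see uapi/asm/perf_regs.h */
#define PERF_REG_EXTENDED_MASK (~((1ULL << PERF_REG_X86_XMM0) - 1))

int main(void)
{
    uint64_t gprs_only = 0x00000000000000ffULL;               /* low (GPR) bits only */
    uint64_t with_xmm  = gprs_only | (1ULL << PERF_REG_X86_XMM0);

    /* sample_regs_user may never carry extended bits ... */
    printf("gprs_only requests XMM regs: %s\n",
           (gprs_only & PERF_REG_EXTENDED_MASK) ? "yes" : "no");
    /* ... sample_regs_intr may, if the PMU advertises PERF_PMU_CAP_EXTENDED_REGS. */
    printf("with_xmm  requests XMM regs: %s\n",
           (with_xmm & PERF_REG_EXTENDED_MASK) ? "yes" : "no");
    return 0;
}
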
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 177aa8ef2afa..85be316665b4 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1464,7 +1464,8 @@ static void apic_pending_intr_clear(void)
1464 if (queued) { 1464 if (queued) {
1465 if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) { 1465 if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
1466 ntsc = rdtsc(); 1466 ntsc = rdtsc();
1467 max_loops = (cpu_khz << 10) - (ntsc - tsc); 1467 max_loops = (long long)cpu_khz << 10;
1468 max_loops -= ntsc - tsc;
1468 } else { 1469 } else {
1469 max_loops--; 1470 max_loops--;
1470 } 1471 }
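
The apic.c hunk widens the shift to 64 bits before the subtraction. A standalone illustration (with a made-up 4.2 GHz clock) of how `cpu_khz << 10` evaluated in 32-bit arithmetic wraps before it ever reaches the 64-bit max_loops:

/* Why the cast matters: shifting a 32-bit cpu_khz left by 10 is evaluated
 * in 32-bit arithmetic and can wrap before the result is stored in a
 * 64-bit max_loops. The clock value is illustrative. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t cpu_khz = 4200000;              /* hypothetical 4.2 GHz in kHz */

    int64_t wrong = cpu_khz << 10;           /* wraps in 32 bits first       */
    int64_t right = (int64_t)cpu_khz << 10;  /* widened before the shift     */

    printf("32-bit shift: %lld\n", (long long)wrong);
    printf("64-bit shift: %lld\n", (long long)right);
    return 0;
}
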
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 03b4cc0ec3a7..66ca906aa790 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -836,6 +836,16 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
836 } 836 }
837 837
838 /* 838 /*
839 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
840 * bit in the mask to allow guests to use the mitigation even in the
841 * case where the host does not enable it.
842 */
843 if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
844 static_cpu_has(X86_FEATURE_AMD_SSBD)) {
845 x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
846 }
847
848 /*
839 * We have three CPU feature flags that are in play here: 849 * We have three CPU feature flags that are in play here:
840 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. 850 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
841 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass 851 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
@@ -852,7 +862,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
852 x86_amd_ssb_disable(); 862 x86_amd_ssb_disable();
853 } else { 863 } else {
854 x86_spec_ctrl_base |= SPEC_CTRL_SSBD; 864 x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
855 x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
856 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); 865 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
857 } 866 }
858 } 867 }
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index a813987b5552..cb0fdcaf1415 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -789,13 +789,16 @@ static struct syscore_ops mc_syscore_ops = {
789 .resume = mc_bp_resume, 789 .resume = mc_bp_resume,
790}; 790};
791 791
792static int mc_cpu_online(unsigned int cpu) 792static int mc_cpu_starting(unsigned int cpu)
793{ 793{
794 struct device *dev;
795
796 dev = get_cpu_device(cpu);
797 microcode_update_cpu(cpu); 794 microcode_update_cpu(cpu);
798 pr_debug("CPU%d added\n", cpu); 795 pr_debug("CPU%d added\n", cpu);
796 return 0;
797}
798
799static int mc_cpu_online(unsigned int cpu)
800{
801 struct device *dev = get_cpu_device(cpu);
799 802
800 if (sysfs_create_group(&dev->kobj, &mc_attr_group)) 803 if (sysfs_create_group(&dev->kobj, &mc_attr_group))
801 pr_err("Failed to create group for CPU%d\n", cpu); 804 pr_err("Failed to create group for CPU%d\n", cpu);
@@ -872,7 +875,9 @@ int __init microcode_init(void)
872 goto out_ucode_group; 875 goto out_ucode_group;
873 876
874 register_syscore_ops(&mc_syscore_ops); 877 register_syscore_ops(&mc_syscore_ops);
875 cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online", 878 cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
879 mc_cpu_starting, NULL);
880 cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
876 mc_cpu_online, mc_cpu_down_prep); 881 mc_cpu_online, mc_cpu_down_prep);
877 882
878 pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION); 883 pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 2131b8bbaad7..2f4824793798 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -796,8 +796,12 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
796 struct seq_file *seq, void *v) 796 struct seq_file *seq, void *v)
797{ 797{
798 struct rdt_resource *r = of->kn->parent->priv; 798 struct rdt_resource *r = of->kn->parent->priv;
799 u32 sw_shareable = 0, hw_shareable = 0; 799 /*
800 u32 exclusive = 0, pseudo_locked = 0; 800 * Use unsigned long even though only 32 bits are used to ensure
801 * test_bit() is used safely.
802 */
803 unsigned long sw_shareable = 0, hw_shareable = 0;
804 unsigned long exclusive = 0, pseudo_locked = 0;
801 struct rdt_domain *dom; 805 struct rdt_domain *dom;
802 int i, hwb, swb, excl, psl; 806 int i, hwb, swb, excl, psl;
803 enum rdtgrp_mode mode; 807 enum rdtgrp_mode mode;
@@ -842,10 +846,10 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
842 } 846 }
843 for (i = r->cache.cbm_len - 1; i >= 0; i--) { 847 for (i = r->cache.cbm_len - 1; i >= 0; i--) {
844 pseudo_locked = dom->plr ? dom->plr->cbm : 0; 848 pseudo_locked = dom->plr ? dom->plr->cbm : 0;
845 hwb = test_bit(i, (unsigned long *)&hw_shareable); 849 hwb = test_bit(i, &hw_shareable);
846 swb = test_bit(i, (unsigned long *)&sw_shareable); 850 swb = test_bit(i, &sw_shareable);
847 excl = test_bit(i, (unsigned long *)&exclusive); 851 excl = test_bit(i, &exclusive);
848 psl = test_bit(i, (unsigned long *)&pseudo_locked); 852 psl = test_bit(i, &pseudo_locked);
849 if (hwb && swb) 853 if (hwb && swb)
850 seq_putc(seq, 'X'); 854 seq_putc(seq, 'X');
851 else if (hwb && !swb) 855 else if (hwb && !swb)
@@ -2486,26 +2490,19 @@ out_destroy:
2486 */ 2490 */
2487static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r) 2491static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
2488{ 2492{
2489 /* 2493 unsigned long val = *_val;
2490 * Convert the u32 _val to an unsigned long required by all the bit
2491 * operations within this function. No more than 32 bits of this
2492 * converted value can be accessed because all bit operations are
2493 * additionally provided with cbm_len that is initialized during
2494 * hardware enumeration using five bits from the EAX register and
2495 * thus never can exceed 32 bits.
2496 */
2497 unsigned long *val = (unsigned long *)_val;
2498 unsigned int cbm_len = r->cache.cbm_len; 2494 unsigned int cbm_len = r->cache.cbm_len;
2499 unsigned long first_bit, zero_bit; 2495 unsigned long first_bit, zero_bit;
2500 2496
2501 if (*val == 0) 2497 if (val == 0)
2502 return; 2498 return;
2503 2499
2504 first_bit = find_first_bit(val, cbm_len); 2500 first_bit = find_first_bit(&val, cbm_len);
2505 zero_bit = find_next_zero_bit(val, cbm_len, first_bit); 2501 zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
2506 2502
2507 /* Clear any remaining bits to ensure contiguous region */ 2503 /* Clear any remaining bits to ensure contiguous region */
2508 bitmap_clear(val, zero_bit, cbm_len - zero_bit); 2504 bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
2505 *_val = (u32)val;
2509} 2506}
2510 2507
2511/* 2508/*
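
Both resctrl hunks stop aliasing a u32 through an `unsigned long *` for the bit helpers — on 64-bit that pointer cast lets the helpers touch memory beyond the 32-bit variable — and instead copy the value into a local unsigned long. A userspace analogue of the reworked cbm_ensure_valid(), with __builtin_ctzl and a plain loop standing in for find_first_bit()/find_next_zero_bit():

/* Userspace analogue of the cbm_ensure_valid() change: operate on a local
 * unsigned long copy of the 32-bit mask instead of casting a u32 pointer.
 * Clears everything from the first gap upward (the kernel clears only up
 * to cbm_len, which is equivalent for in-range masks). */
#include <stdio.h>
#include <stdint.h>

static void cbm_ensure_valid(uint32_t *_val, unsigned int cbm_len)
{
    unsigned long val = *_val;
    unsigned long first_bit, zero_bit;

    if (val == 0)
        return;

    first_bit = __builtin_ctzl(val);                 /* lowest set bit       */
    zero_bit = first_bit;
    while (zero_bit < cbm_len && (val & (1UL << zero_bit)))
        zero_bit++;                                  /* first gap after it   */

    val &= (1UL << zero_bit) - 1;                    /* keep contiguous run  */
    *_val = (uint32_t)val;
}

int main(void)
{
    uint32_t cbm = 0x0000f0f0;         /* non-contiguous capacity bitmask */

    cbm_ensure_valid(&cbm, 20);
    printf("contiguous cbm: 0x%08x\n", cbm);   /* prints 0x000000f0 */
    return 0;
}
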
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 0927bb158ffc..76228525acd0 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -22,6 +22,7 @@
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/list.h> 23#include <linux/list.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/memory.h>
25 26
26#include <trace/syscall.h> 27#include <trace/syscall.h>
27 28
@@ -34,16 +35,25 @@
34#ifdef CONFIG_DYNAMIC_FTRACE 35#ifdef CONFIG_DYNAMIC_FTRACE
35 36
36int ftrace_arch_code_modify_prepare(void) 37int ftrace_arch_code_modify_prepare(void)
38 __acquires(&text_mutex)
37{ 39{
40 /*
41 * Need to grab text_mutex to prevent a race from module loading
42 * and live kernel patching from changing the text permissions while
43 * ftrace has it set to "read/write".
44 */
45 mutex_lock(&text_mutex);
38 set_kernel_text_rw(); 46 set_kernel_text_rw();
39 set_all_modules_text_rw(); 47 set_all_modules_text_rw();
40 return 0; 48 return 0;
41} 49}
42 50
43int ftrace_arch_code_modify_post_process(void) 51int ftrace_arch_code_modify_post_process(void)
52 __releases(&text_mutex)
44{ 53{
45 set_all_modules_text_ro(); 54 set_all_modules_text_ro();
46 set_kernel_text_ro(); 55 set_kernel_text_ro();
56 mutex_unlock(&text_mutex);
47 return 0; 57 return 0;
48} 58}
49 59
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 16b1cbd3a61e..29ffa495bd1c 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -184,24 +184,25 @@ unsigned long __head __startup_64(unsigned long physaddr,
184 pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask(); 184 pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
185 185
186 if (la57) { 186 if (la57) {
187 p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr); 187 p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++],
188 physaddr);
188 189
189 i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; 190 i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
190 pgd[i + 0] = (pgdval_t)p4d + pgtable_flags; 191 pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
191 pgd[i + 1] = (pgdval_t)p4d + pgtable_flags; 192 pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
192 193
193 i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D; 194 i = physaddr >> P4D_SHIFT;
194 p4d[i + 0] = (pgdval_t)pud + pgtable_flags; 195 p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
195 p4d[i + 1] = (pgdval_t)pud + pgtable_flags; 196 p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
196 } else { 197 } else {
197 i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; 198 i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
198 pgd[i + 0] = (pgdval_t)pud + pgtable_flags; 199 pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
199 pgd[i + 1] = (pgdval_t)pud + pgtable_flags; 200 pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
200 } 201 }
201 202
202 i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD; 203 i = physaddr >> PUD_SHIFT;
203 pud[i + 0] = (pudval_t)pmd + pgtable_flags; 204 pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
204 pud[i + 1] = (pudval_t)pmd + pgtable_flags; 205 pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
205 206
206 pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL; 207 pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
207 /* Filter out unsupported __PAGE_KERNEL_* bits: */ 208 /* Filter out unsupported __PAGE_KERNEL_* bits: */
@@ -211,8 +212,9 @@ unsigned long __head __startup_64(unsigned long physaddr,
211 pmd_entry += physaddr; 212 pmd_entry += physaddr;
212 213
213 for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) { 214 for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
214 int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD; 215 int idx = i + (physaddr >> PMD_SHIFT);
215 pmd[idx] = pmd_entry + i * PMD_SIZE; 216
217 pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
216 } 218 }
217 219
218 /* 220 /*
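
The head64.c change moves the `% PTRS_PER_*` reduction so it is applied to the final index rather than only to the base term; otherwise `base + i` can step past the end of a 512-entry page table. The arithmetic, with illustrative numbers:

/* The indexing bug the hunk fixes: reducing only the base index modulo
 * PTRS_PER_PMD and then adding i can still run past the 512-entry table;
 * reducing the sum keeps every access in range (and wraps, as intended). */
#include <stdio.h>

#define PTRS_PER_PMD 512

int main(void)
{
    unsigned long base = 510;   /* e.g. (physaddr >> PMD_SHIFT) % PTRS_PER_PMD */
    unsigned long i;

    for (i = 0; i < 4; i++) {
        unsigned long old_idx = base + i;                   /* can reach 513 */
        unsigned long new_idx = (base + i) % PTRS_PER_PMD;  /* always 0..511 */

        printf("i=%lu  old idx=%lu%s  new idx=%lu\n",
               i, old_idx, old_idx >= PTRS_PER_PMD ? " (out of bounds!)" : "",
               new_idx);
    }
    return 0;
}
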
diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c
index 07c30ee17425..bb7e1132290b 100644
--- a/arch/x86/kernel/perf_regs.c
+++ b/arch/x86/kernel/perf_regs.c
@@ -74,6 +74,9 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
74 return regs_get_register(regs, pt_regs_offset[idx]); 74 return regs_get_register(regs, pt_regs_offset[idx]);
75} 75}
76 76
77#define PERF_REG_X86_RESERVED (((1ULL << PERF_REG_X86_XMM0) - 1) & \
78 ~((1ULL << PERF_REG_X86_MAX) - 1))
79
77#ifdef CONFIG_X86_32 80#ifdef CONFIG_X86_32
78#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \ 81#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \
79 (1ULL << PERF_REG_X86_R9) | \ 82 (1ULL << PERF_REG_X86_R9) | \
@@ -86,7 +89,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
86 89
87int perf_reg_validate(u64 mask) 90int perf_reg_validate(u64 mask)
88{ 91{
89 if (!mask || (mask & REG_NOSUPPORT)) 92 if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
90 return -EINVAL; 93 return -EINVAL;
91 94
92 return 0; 95 return 0;
@@ -112,7 +115,7 @@ void perf_get_regs_user(struct perf_regs *regs_user,
112 115
113int perf_reg_validate(u64 mask) 116int perf_reg_validate(u64 mask)
114{ 117{
115 if (!mask || (mask & REG_NOSUPPORT)) 118 if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
116 return -EINVAL; 119 return -EINVAL;
117 120
118 return 0; 121 return 0;
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index 33b66b5c5aec..72b997eaa1fc 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -82,9 +82,9 @@ static struct orc_entry *orc_find(unsigned long ip);
82 * But they are copies of the ftrace entries that are static and 82 * But they are copies of the ftrace entries that are static and
83 * defined in ftrace_*.S, which do have orc entries. 83 * defined in ftrace_*.S, which do have orc entries.
84 * 84 *
85 * If the undwinder comes across a ftrace trampoline, then find the 85 * If the unwinder comes across a ftrace trampoline, then find the
86 * ftrace function that was used to create it, and use that ftrace 86 * ftrace function that was used to create it, and use that ftrace
87 * function's orc entrie, as the placement of the return code in 87 * function's orc entry, as the placement of the return code in
88 * the stack will be identical. 88 * the stack will be identical.
89 */ 89 */
90static struct orc_entry *orc_ftrace_find(unsigned long ip) 90static struct orc_entry *orc_ftrace_find(unsigned long ip)
@@ -128,6 +128,16 @@ static struct orc_entry null_orc_entry = {
128 .type = ORC_TYPE_CALL 128 .type = ORC_TYPE_CALL
129}; 129};
130 130
131/* Fake frame pointer entry -- used as a fallback for generated code */
132static struct orc_entry orc_fp_entry = {
133 .type = ORC_TYPE_CALL,
134 .sp_reg = ORC_REG_BP,
135 .sp_offset = 16,
136 .bp_reg = ORC_REG_PREV_SP,
137 .bp_offset = -16,
138 .end = 0,
139};
140
131static struct orc_entry *orc_find(unsigned long ip) 141static struct orc_entry *orc_find(unsigned long ip)
132{ 142{
133 static struct orc_entry *orc; 143 static struct orc_entry *orc;
@@ -392,8 +402,16 @@ bool unwind_next_frame(struct unwind_state *state)
392 * calls and calls to noreturn functions. 402 * calls and calls to noreturn functions.
393 */ 403 */
394 orc = orc_find(state->signal ? state->ip : state->ip - 1); 404 orc = orc_find(state->signal ? state->ip : state->ip - 1);
395 if (!orc) 405 if (!orc) {
396 goto err; 406 /*
407 * As a fallback, try to assume this code uses a frame pointer.
408 * This is useful for generated code, like BPF, which ORC
409 * doesn't know about. This is just a guess, so the rest of
410 * the unwind is no longer considered reliable.
411 */
412 orc = &orc_fp_entry;
413 state->error = true;
414 }
397 415
398 /* End-of-stack check for kernel threads: */ 416 /* End-of-stack check for kernel threads: */
399 if (orc->sp_reg == ORC_REG_UNDEFINED) { 417 if (orc->sp_reg == ORC_REG_UNDEFINED) {
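
The fallback entry added above encodes the conventional x86-64 frame-pointer layout: the previous stack pointer is BP + 16, the return address sits just below it, and the saved BP is found at that address minus 16. The snippet below interprets those two offsets against a synthetic stack to show what the unwinder now assumes for generated code such as BPF; the struct is a trimmed illustration, not the kernel's orc_entry.

/* What the orc_fp_entry fallback encodes, simulated on a synthetic stack.
 * Assumed frame layout: [bp] = saved rbp, [bp+8] = return address,
 * previous rsp = bp + 16. */
#include <stdio.h>
#include <stdint.h>

struct fake_orc_entry {
    int sp_offset;   /* previous SP = BP + sp_offset              */
    int bp_offset;   /* saved BP sits at previous SP + bp_offset  */
};

static const struct fake_orc_entry orc_fp_entry = {
    .sp_offset = 16,
    .bp_offset = -16,
};

int main(void)
{
    /* Synthetic stack: two frames chained through their saved BPs. */
    uint64_t stack[8] = { 0 };
    uint64_t *outer_bp = &stack[4];
    uint64_t *inner_bp = &stack[0];

    inner_bp[0] = (uint64_t)(uintptr_t)outer_bp;  /* saved rbp of inner frame */
    inner_bp[1] = 0x1111;                         /* inner return address     */
    outer_bp[0] = 0;                              /* end of the BP chain      */
    outer_bp[1] = 0x2222;                         /* outer return address     */

    uint64_t *bp = inner_bp;
    while (bp) {
        uint64_t *prev_sp = (uint64_t *)((char *)bp + orc_fp_entry.sp_offset);
        uint64_t ret = prev_sp[-1];               /* word just below prev SP  */
        uint64_t saved_bp =
            *(uint64_t *)((char *)prev_sp + orc_fp_entry.bp_offset);

        printf("return address: 0x%llx\n", (unsigned long long)ret);
        bp = (uint64_t *)(uintptr_t)saved_bp;
    }
    return 0;
}
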
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index a21c440ff356..4dabc318adb8 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2339,7 +2339,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
2339 struct kvm_lapic *apic = vcpu->arch.apic; 2339 struct kvm_lapic *apic = vcpu->arch.apic;
2340 u32 ppr; 2340 u32 ppr;
2341 2341
2342 if (!apic_enabled(apic)) 2342 if (!kvm_apic_hw_enabled(apic))
2343 return -1; 2343 return -1;
2344 2344
2345 __apic_update_ppr(apic, &ppr); 2345 __apic_update_ppr(apic, &ppr);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 5f9c1a200201..46af3a5e9209 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -5240,9 +5240,6 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
5240 vmx = to_vmx(vcpu); 5240 vmx = to_vmx(vcpu);
5241 vmcs12 = get_vmcs12(vcpu); 5241 vmcs12 = get_vmcs12(vcpu);
5242 5242
5243 if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled)
5244 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
5245
5246 if (nested_vmx_allowed(vcpu) && 5243 if (nested_vmx_allowed(vcpu) &&
5247 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { 5244 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
5248 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; 5245 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
@@ -5251,6 +5248,9 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
5251 if (vmx_has_valid_vmcs12(vcpu)) { 5248 if (vmx_has_valid_vmcs12(vcpu)) {
5252 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12); 5249 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
5253 5250
5251 if (vmx->nested.hv_evmcs)
5252 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
5253
5254 if (is_guest_mode(vcpu) && 5254 if (is_guest_mode(vcpu) &&
5255 nested_cpu_has_shadow_vmcs(vmcs12) && 5255 nested_cpu_has_shadow_vmcs(vmcs12) &&
5256 vmcs12->vmcs_link_pointer != -1ull) 5256 vmcs12->vmcs_link_pointer != -1ull)
@@ -5350,6 +5350,15 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
5350 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) 5350 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
5351 return -EINVAL; 5351 return -EINVAL;
5352 5352
5353 /*
5354 * KVM_STATE_NESTED_EVMCS used to signal that KVM should
5355 * enable eVMCS capability on vCPU. However, since then
5356 * code was changed such that flag signals vmcs12 should
5357 * be copied into eVMCS in guest memory.
5358 *
5359 * To preserve backwards compatibility, allow user
5360 * to set this flag even when there is no VMXON region.
5361 */
5353 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS) 5362 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
5354 return -EINVAL; 5363 return -EINVAL;
5355 } else { 5364 } else {
@@ -5358,7 +5367,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
5358 5367
5359 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) 5368 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
5360 return -EINVAL; 5369 return -EINVAL;
5361 } 5370 }
5362 5371
5363 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 5372 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5364 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 5373 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
@@ -5373,20 +5382,21 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
5373 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags 5382 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
5374 * must be zero. 5383 * must be zero.
5375 */ 5384 */
5376 if (is_smm(vcpu) ? kvm_state->flags : kvm_state->hdr.vmx.smm.flags) 5385 if (is_smm(vcpu) ?
5386 (kvm_state->flags &
5387 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
5388 : kvm_state->hdr.vmx.smm.flags)
5377 return -EINVAL; 5389 return -EINVAL;
5378 5390
5379 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 5391 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5380 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) 5392 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
5381 return -EINVAL; 5393 return -EINVAL;
5382 5394
5383 vmx_leave_nested(vcpu); 5395 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
5384 if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { 5396 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
5385 if (!nested_vmx_allowed(vcpu))
5386 return -EINVAL; 5397 return -EINVAL;
5387 5398
5388 nested_enable_evmcs(vcpu, NULL); 5399 vmx_leave_nested(vcpu);
5389 }
5390 5400
5391 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) 5401 if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
5392 return 0; 5402 return 0;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9857992d4e58..fafd81d2c9ea 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1554,7 +1554,7 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
1554 vcpu->arch.tsc_always_catchup = 1; 1554 vcpu->arch.tsc_always_catchup = 1;
1555 return 0; 1555 return 0;
1556 } else { 1556 } else {
1557 WARN(1, "user requested TSC rate below hardware speed\n"); 1557 pr_warn_ratelimited("user requested TSC rate below hardware speed\n");
1558 return -1; 1558 return -1;
1559 } 1559 }
1560 } 1560 }
@@ -1564,8 +1564,8 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
1564 user_tsc_khz, tsc_khz); 1564 user_tsc_khz, tsc_khz);
1565 1565
1566 if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) { 1566 if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
1567 WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", 1567 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
1568 user_tsc_khz); 1568 user_tsc_khz);
1569 return -1; 1569 return -1;
1570 } 1570 }
1571 1571
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 693aaf28d5fe..0f01c7b1d217 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -671,23 +671,25 @@ static unsigned long __meminit
671phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, 671phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
672 unsigned long page_size_mask, bool init) 672 unsigned long page_size_mask, bool init)
673{ 673{
674 unsigned long paddr_next, paddr_last = paddr_end; 674 unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;
675 unsigned long vaddr = (unsigned long)__va(paddr); 675
676 int i = p4d_index(vaddr); 676 paddr_last = paddr_end;
677 vaddr = (unsigned long)__va(paddr);
678 vaddr_end = (unsigned long)__va(paddr_end);
677 679
678 if (!pgtable_l5_enabled()) 680 if (!pgtable_l5_enabled())
679 return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, 681 return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
680 page_size_mask, init); 682 page_size_mask, init);
681 683
682 for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) { 684 for (; vaddr < vaddr_end; vaddr = vaddr_next) {
683 p4d_t *p4d; 685 p4d_t *p4d = p4d_page + p4d_index(vaddr);
684 pud_t *pud; 686 pud_t *pud;
685 687
686 vaddr = (unsigned long)__va(paddr); 688 vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE;
687 p4d = p4d_page + p4d_index(vaddr); 689 paddr = __pa(vaddr);
688 paddr_next = (paddr & P4D_MASK) + P4D_SIZE;
689 690
690 if (paddr >= paddr_end) { 691 if (paddr >= paddr_end) {
692 paddr_next = __pa(vaddr_next);
691 if (!after_bootmem && 693 if (!after_bootmem &&
692 !e820__mapped_any(paddr & P4D_MASK, paddr_next, 694 !e820__mapped_any(paddr & P4D_MASK, paddr_next,
693 E820_TYPE_RAM) && 695 E820_TYPE_RAM) &&
@@ -699,13 +701,13 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
699 701
700 if (!p4d_none(*p4d)) { 702 if (!p4d_none(*p4d)) {
701 pud = pud_offset(p4d, 0); 703 pud = pud_offset(p4d, 0);
702 paddr_last = phys_pud_init(pud, paddr, paddr_end, 704 paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
703 page_size_mask, init); 705 page_size_mask, init);
704 continue; 706 continue;
705 } 707 }
706 708
707 pud = alloc_low_page(); 709 pud = alloc_low_page();
708 paddr_last = phys_pud_init(pud, paddr, paddr_end, 710 paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
709 page_size_mask, init); 711 page_size_mask, init);
710 712
711 spin_lock(&init_mm.page_table_lock); 713 spin_lock(&init_mm.page_table_lock);
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 632b83885867..3b9fd679cea9 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -728,7 +728,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr)
728 * Address range 0x0000 - 0x0fff is always mapped in the efi_pgd, so 728 * Address range 0x0000 - 0x0fff is always mapped in the efi_pgd, so
729 * page faulting on these addresses isn't expected. 729 * page faulting on these addresses isn't expected.
730 */ 730 */
731 if (phys_addr >= 0x0000 && phys_addr <= 0x0fff) 731 if (phys_addr <= 0x0fff)
732 return; 732 return;
733 733
734 /* 734 /*
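
The dropped half of the range check was dead code: an unsigned phys_addr is always >= 0, and gcc's -Wtype-limits (part of -Wextra) can flag exactly this pattern. A two-line illustration:

/* An unsigned value is always >= 0, so the lower-bound test was a tautology. */
#include <stdio.h>

int main(void)
{
    unsigned long phys_addr = 0x2000;

    if (phys_addr >= 0x0000 && phys_addr <= 0x0fff)  /* left test always true */
        printf("in the always-mapped low range\n");
    else
        printf("outside the low range\n");
    return 0;
}
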
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index f8d430f88d25..f9269ae6da9c 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -240,7 +240,7 @@ static struct kmem_cache *bfq_pool;
240 * containing only random (seeky) I/O are prevented from being tagged 240 * containing only random (seeky) I/O are prevented from being tagged
241 * as soft real-time. 241 * as soft real-time.
242 */ 242 */
243#define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history & -1) 243#define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history == -1)
244 244
245/* Min number of samples required to perform peak-rate update */ 245/* Min number of samples required to perform peak-rate update */
246#define BFQ_RATE_MIN_SAMPLES 32 246#define BFQ_RATE_MIN_SAMPLES 32
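
The one-character change matters because `x & -1` simply yields `x` again, so the macro fired for any queue with at least one seeky sample, while `x == -1` requires the whole history to be seeky. Shown on a narrow type for brevity:

/* Why "& -1" was wrong: it reproduces the value, so any non-zero history
 * counted as "totally seeky"; "== -1" requires every history bit set. */
#include <stdio.h>

int main(void)
{
    unsigned char partly_seeky  = 0x10;  /* one seeky sample  */
    unsigned char totally_seeky = 0xff;  /* all samples seeky */

    printf("& -1 : partly=%d totally=%d\n",
           (partly_seeky & -1) != 0, (totally_seeky & -1) != 0);
    printf("== -1: partly=%d totally=%d\n",
           partly_seeky == (unsigned char)-1,
           totally_seeky == (unsigned char)-1);
    return 0;
}
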
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 2489ddbb21db..3afe327f816f 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -934,6 +934,13 @@ void blk_mq_debugfs_register_sched(struct request_queue *q)
934{ 934{
935 struct elevator_type *e = q->elevator->type; 935 struct elevator_type *e = q->elevator->type;
936 936
937 /*
938 * If the parent directory has not been created yet, return, we will be
939 * called again later on and the directory/files will be created then.
940 */
941 if (!q->debugfs_dir)
942 return;
943
937 if (!e->queue_debugfs_attrs) 944 if (!e->queue_debugfs_attrs)
938 return; 945 return;
939 946
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 1ce1bf6d3bab..5f76c6e222c6 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -388,6 +388,7 @@ static void cryptd_skcipher_free(struct skcipher_instance *inst)
388 struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst); 388 struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
389 389
390 crypto_drop_skcipher(&ctx->spawn); 390 crypto_drop_skcipher(&ctx->spawn);
391 kfree(inst);
391} 392}
392 393
393static int cryptd_create_skcipher(struct crypto_template *tmpl, 394static int cryptd_create_skcipher(struct crypto_template *tmpl,
diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user_base.c
index d5d5d155340b..c65e39005ce2 100644
--- a/crypto/crypto_user_base.c
+++ b/crypto/crypto_user_base.c
@@ -44,6 +44,9 @@ struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
44 list_for_each_entry(q, &crypto_alg_list, cra_list) { 44 list_for_each_entry(q, &crypto_alg_list, cra_list) {
45 int match = 0; 45 int match = 0;
46 46
47 if (crypto_is_larval(q))
48 continue;
49
47 if ((q->cra_flags ^ p->cru_type) & p->cru_mask) 50 if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
48 continue; 51 continue;
49 52
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 40c8a552a478..4074886b7bc8 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -52,8 +52,9 @@ static const struct fb_var_screeninfo cfag12864bfb_var = {
52 52
53static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma) 53static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
54{ 54{
55 return vm_insert_page(vma, vma->vm_start, 55 struct page *pages = virt_to_page(cfag12864b_buffer);
56 virt_to_page(cfag12864b_buffer)); 56
57 return vm_map_pages_zero(vma, &pages, 1);
57} 58}
58 59
59static struct fb_ops cfag12864bfb_ops = { 60static struct fb_ops cfag12864bfb_ops = {
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 21393ec3b9a4..9c0bb771751d 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -223,9 +223,9 @@ static const struct backlight_ops ht16k33_bl_ops = {
223static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma) 223static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma)
224{ 224{
225 struct ht16k33_priv *priv = info->par; 225 struct ht16k33_priv *priv = info->par;
226 struct page *pages = virt_to_page(priv->fbdev.buffer);
226 227
227 return vm_insert_page(vma, vma->vm_start, 228 return vm_map_pages_zero(vma, &pages, 1);
228 virt_to_page(priv->fbdev.buffer));
229} 229}
230 230
231static struct fb_ops ht16k33_fb_ops = { 231static struct fb_ops ht16k33_fb_ops = {
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index aa51756fd4d6..87b410d6e51d 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -368,7 +368,7 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
368 const char *dev_id = dev ? dev_name(dev) : NULL; 368 const char *dev_id = dev ? dev_name(dev) : NULL;
369 struct device_node *np = core->of_node; 369 struct device_node *np = core->of_node;
370 370
371 if (np && index >= 0) 371 if (np && (name || index >= 0))
372 hw = of_clk_get_hw(np, index, name); 372 hw = of_clk_get_hw(np, index, name);
373 373
374 /* 374 /*
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index 739f64fdf1e3..206fafd299ea 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -2734,8 +2734,8 @@ static struct clk_hw_onecell_data g12a_hw_onecell_data = {
2734 [CLKID_MALI_1_DIV] = &g12a_mali_1_div.hw, 2734 [CLKID_MALI_1_DIV] = &g12a_mali_1_div.hw,
2735 [CLKID_MALI_1] = &g12a_mali_1.hw, 2735 [CLKID_MALI_1] = &g12a_mali_1.hw,
2736 [CLKID_MALI] = &g12a_mali.hw, 2736 [CLKID_MALI] = &g12a_mali.hw,
2737 [CLKID_MPLL_5OM_DIV] = &g12a_mpll_50m_div.hw, 2737 [CLKID_MPLL_50M_DIV] = &g12a_mpll_50m_div.hw,
2738 [CLKID_MPLL_5OM] = &g12a_mpll_50m.hw, 2738 [CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
2739 [CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw, 2739 [CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
2740 [CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw, 2740 [CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
2741 [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw, 2741 [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
index 39c41af70804..bcc05cd9882f 100644
--- a/drivers/clk/meson/g12a.h
+++ b/drivers/clk/meson/g12a.h
@@ -166,7 +166,7 @@
166#define CLKID_HDMI_DIV 167 166#define CLKID_HDMI_DIV 167
167#define CLKID_MALI_0_DIV 170 167#define CLKID_MALI_0_DIV 170
168#define CLKID_MALI_1_DIV 173 168#define CLKID_MALI_1_DIV 173
169#define CLKID_MPLL_5OM_DIV 176 169#define CLKID_MPLL_50M_DIV 176
170#define CLKID_SYS_PLL_DIV16_EN 178 170#define CLKID_SYS_PLL_DIV16_EN 178
171#define CLKID_SYS_PLL_DIV16 179 171#define CLKID_SYS_PLL_DIV16 179
172#define CLKID_CPU_CLK_DYN0_SEL 180 172#define CLKID_CPU_CLK_DYN0_SEL 180
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 37cf0f01bb5d..62cd3a7f1f65 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -1761,7 +1761,7 @@ static struct clk_regmap meson8m2_gp_pll = {
1761 }, 1761 },
1762}; 1762};
1763 1763
1764static const char * const mmeson8b_vpu_0_1_parent_names[] = { 1764static const char * const meson8b_vpu_0_1_parent_names[] = {
1765 "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7" 1765 "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
1766}; 1766};
1767 1767
@@ -1778,8 +1778,8 @@ static struct clk_regmap meson8b_vpu_0_sel = {
1778 .hw.init = &(struct clk_init_data){ 1778 .hw.init = &(struct clk_init_data){
1779 .name = "vpu_0_sel", 1779 .name = "vpu_0_sel",
1780 .ops = &clk_regmap_mux_ops, 1780 .ops = &clk_regmap_mux_ops,
1781 .parent_names = mmeson8b_vpu_0_1_parent_names, 1781 .parent_names = meson8b_vpu_0_1_parent_names,
1782 .num_parents = ARRAY_SIZE(mmeson8b_vpu_0_1_parent_names), 1782 .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_names),
1783 .flags = CLK_SET_RATE_PARENT, 1783 .flags = CLK_SET_RATE_PARENT,
1784 }, 1784 },
1785}; 1785};
@@ -1837,8 +1837,8 @@ static struct clk_regmap meson8b_vpu_1_sel = {
1837 .hw.init = &(struct clk_init_data){ 1837 .hw.init = &(struct clk_init_data){
1838 .name = "vpu_1_sel", 1838 .name = "vpu_1_sel",
1839 .ops = &clk_regmap_mux_ops, 1839 .ops = &clk_regmap_mux_ops,
1840 .parent_names = mmeson8b_vpu_0_1_parent_names, 1840 .parent_names = meson8b_vpu_0_1_parent_names,
1841 .num_parents = ARRAY_SIZE(mmeson8b_vpu_0_1_parent_names), 1841 .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_names),
1842 .flags = CLK_SET_RATE_PARENT, 1842 .flags = CLK_SET_RATE_PARENT,
1843 }, 1843 },
1844}; 1844};
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 8281dfbf38c2..5bed36e12951 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -103,9 +103,9 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = {
103 { STRATIX10_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux), 103 { STRATIX10_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux),
104 0, 0, 0, 0x3C, 1}, 104 0, 0, 0, 0x3C, 1},
105 { STRATIX10_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux), 105 { STRATIX10_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux),
106 0, 0, 4, 0xB0, 0}, 106 0, 0, 2, 0xB0, 0},
107 { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux), 107 { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
108 0, 0, 4, 0xB0, 1}, 108 0, 0, 2, 0xB0, 1},
109 { STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux, 109 { STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux,
110 ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2}, 110 ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2},
111 { STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux, 111 { STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index e1ba62d2b1a0..ac1d27a8c650 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -3366,6 +3366,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
3366 { TEGRA210_CLK_I2S3_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 }, 3366 { TEGRA210_CLK_I2S3_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
3367 { TEGRA210_CLK_I2S4_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 }, 3367 { TEGRA210_CLK_I2S4_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
3368 { TEGRA210_CLK_VIMCLK_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 }, 3368 { TEGRA210_CLK_VIMCLK_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
3369 { TEGRA210_CLK_HDA, TEGRA210_CLK_PLL_P, 51000000, 0 },
3370 { TEGRA210_CLK_HDA2CODEC_2X, TEGRA210_CLK_PLL_P, 48000000, 0 },
3369 /* This MUST be the last entry. */ 3371 /* This MUST be the last entry. */
3370 { TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 }, 3372 { TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 },
3371}; 3373};
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 8e834317c97d..975995eea15c 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -229,6 +229,7 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
229{ 229{
230 struct omap_clkctrl_provider *provider = data; 230 struct omap_clkctrl_provider *provider = data;
231 struct omap_clkctrl_clk *entry; 231 struct omap_clkctrl_clk *entry;
232 bool found = false;
232 233
233 if (clkspec->args_count != 2) 234 if (clkspec->args_count != 2)
234 return ERR_PTR(-EINVAL); 235 return ERR_PTR(-EINVAL);
@@ -238,11 +239,13 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
238 239
239 list_for_each_entry(entry, &provider->clocks, node) { 240 list_for_each_entry(entry, &provider->clocks, node) {
240 if (entry->reg_offset == clkspec->args[0] && 241 if (entry->reg_offset == clkspec->args[0] &&
241 entry->bit_offset == clkspec->args[1]) 242 entry->bit_offset == clkspec->args[1]) {
243 found = true;
242 break; 244 break;
245 }
243 } 246 }
244 247
245 if (!entry) 248 if (!found)
246 return ERR_PTR(-EINVAL); 249 return ERR_PTR(-EINVAL);
247 250
248 return entry->clk; 251 return entry->clk;
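
The clkctrl fix replaces a check that could never fire: a list_for_each_entry()-style loop never leaves the cursor NULL, so `if (!entry)` after a full pass is dead and the function could hand back garbage. Tracking an explicit found flag is the usual idiom, shown here on a small local stand-in for the kernel's list iterator (the real macro leaves the cursor pointing at the list head's container; this simplified loop leaves it at the last element — either way it is never NULL):

/* Userspace analogue of the pitfall fixed here. The two-element list and
 * container_of() are local stand-ins, not the kernel's list API. */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct clk_entry {
    int reg_offset;
    struct list_head node;
};

int main(void)
{
    struct list_head head = { &head };
    struct clk_entry a = { .reg_offset = 0x20 };
    struct clk_entry b = { .reg_offset = 0x28 };

    /* build head -> a -> b -> head */
    a.node.next = &b.node;
    b.node.next = &head;
    head.next = &a.node;

    int wanted = 0x30;                /* not in the list */
    struct clk_entry *entry = NULL;
    int found = 0;

    for (struct list_head *pos = head.next; pos != &head; pos = pos->next) {
        entry = container_of(pos, struct clk_entry, node);
        if (entry->reg_offset == wanted) {
            found = 1;
            break;
        }
    }

    /* entry is non-NULL even though nothing matched; only the flag is safe. */
    printf("entry == NULL? %s, found=%d\n", entry ? "no" : "yes", found);
    return 0;
}
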
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 263bee76ef0d..6b8c4c458e8a 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -718,12 +718,13 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
718{ 718{
719 struct jz4780_dma_dev *jzdma = data; 719 struct jz4780_dma_dev *jzdma = data;
720 unsigned int nb_channels = jzdma->soc_data->nb_channels; 720 unsigned int nb_channels = jzdma->soc_data->nb_channels;
721 uint32_t pending, dmac; 721 unsigned long pending;
722 uint32_t dmac;
722 int i; 723 int i;
723 724
724 pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP); 725 pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
725 726
726 for_each_set_bit(i, (unsigned long *)&pending, nb_channels) { 727 for_each_set_bit(i, &pending, nb_channels) {
727 if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i])) 728 if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
728 pending &= ~BIT(i); 729 pending &= ~BIT(i);
729 } 730 }
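
The jz4780 handler now reads the pending-IRQ register into an unsigned long because for_each_set_bit() walks unsigned long words; casting a 32-bit variable's address can let it read past the variable on 64-bit builds. A compact userspace sketch of the same iteration, with __builtin_ctzl (a GCC/Clang builtin) standing in for the kernel helper and an illustrative register value:

/* Iterating set bits of an unsigned long pending mask, the shape the
 * for_each_set_bit() loop now has. DIRQP value is made up. */
#include <stdio.h>

int main(void)
{
    unsigned long pending = 0x05;          /* pretend DIRQP: channels 0 and 2 */

    while (pending) {
        int i = __builtin_ctzl(pending);   /* lowest pending channel */
        printf("servicing DMA channel %d\n", i);
        pending &= ~(1UL << i);            /* acknowledge it */
    }
    return 0;
}
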
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 99d9f431ae2c..4ec84a633bd3 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -703,7 +703,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
703 spin_lock_irqsave(&sdma->channel_0_lock, flags); 703 spin_lock_irqsave(&sdma->channel_0_lock, flags);
704 704
705 bd0->mode.command = C0_SETPM; 705 bd0->mode.command = C0_SETPM;
706 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; 706 bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
707 bd0->mode.count = size / 2; 707 bd0->mode.count = size / 2;
708 bd0->buffer_addr = buf_phys; 708 bd0->buffer_addr = buf_phys;
709 bd0->ext_buffer_addr = address; 709 bd0->ext_buffer_addr = address;
@@ -1025,7 +1025,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
1025 context->gReg[7] = sdmac->watermark_level; 1025 context->gReg[7] = sdmac->watermark_level;
1026 1026
1027 bd0->mode.command = C0_SETDM; 1027 bd0->mode.command = C0_SETDM;
1028 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; 1028 bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
1029 bd0->mode.count = sizeof(*context) / 4; 1029 bd0->mode.count = sizeof(*context) / 4;
1030 bd0->buffer_addr = sdma->context_phys; 1030 bd0->buffer_addr = sdma->context_phys;
1031 bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel; 1031 bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
@@ -2096,27 +2096,6 @@ static int sdma_probe(struct platform_device *pdev)
2096 if (pdata && pdata->script_addrs) 2096 if (pdata && pdata->script_addrs)
2097 sdma_add_scripts(sdma, pdata->script_addrs); 2097 sdma_add_scripts(sdma, pdata->script_addrs);
2098 2098
2099 if (pdata) {
2100 ret = sdma_get_firmware(sdma, pdata->fw_name);
2101 if (ret)
2102 dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
2103 } else {
2104 /*
2105 * Because that device tree does not encode ROM script address,
2106 * the RAM script in firmware is mandatory for device tree
2107 * probe, otherwise it fails.
2108 */
2109 ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
2110 &fw_name);
2111 if (ret)
2112 dev_warn(&pdev->dev, "failed to get firmware name\n");
2113 else {
2114 ret = sdma_get_firmware(sdma, fw_name);
2115 if (ret)
2116 dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
2117 }
2118 }
2119
2120 sdma->dma_device.dev = &pdev->dev; 2099 sdma->dma_device.dev = &pdev->dev;
2121 2100
2122 sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources; 2101 sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
@@ -2161,6 +2140,33 @@ static int sdma_probe(struct platform_device *pdev)
2161 of_node_put(spba_bus); 2140 of_node_put(spba_bus);
2162 } 2141 }
2163 2142
2143 /*
2144 * Kick off firmware loading as the very last step:
2145 * attempt to load firmware only if we're not on the error path, because
2146 * the firmware callback requires a fully functional and allocated sdma
2147 * instance.
2148 */
2149 if (pdata) {
2150 ret = sdma_get_firmware(sdma, pdata->fw_name);
2151 if (ret)
2152 dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
2153 } else {
2154 /*
2155 * Because that device tree does not encode ROM script address,
2156 * the RAM script in firmware is mandatory for device tree
2157 * probe, otherwise it fails.
2158 */
2159 ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
2160 &fw_name);
2161 if (ret) {
2162 dev_warn(&pdev->dev, "failed to get firmware name\n");
2163 } else {
2164 ret = sdma_get_firmware(sdma, fw_name);
2165 if (ret)
2166 dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
2167 }
2168 }
2169
2164 return 0; 2170 return 0;
2165 2171
2166err_register: 2172err_register:
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 4b43844f6af5..8e90a405939d 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -799,6 +799,9 @@ static u32 process_channel_irqs(struct bam_device *bdev)
799 /* Number of bytes available to read */ 799 /* Number of bytes available to read */
800 avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1); 800 avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);
801 801
802 if (offset < bchan->head)
803 avail--;
804
802 list_for_each_entry_safe(async_desc, tmp, 805 list_for_each_entry_safe(async_desc, tmp,
803 &bchan->desc_list, desc_node) { 806 &bchan->desc_list, desc_node) {
804 /* Not enough data to read */ 807 /* Not enough data to read */
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
index a2384184a7de..b07c17643210 100644
--- a/drivers/firmware/efi/efi-bgrt.c
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -47,11 +47,6 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
47 bgrt->version); 47 bgrt->version);
48 goto out; 48 goto out;
49 } 49 }
50 if (bgrt->status & 0xfe) {
51 pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n",
52 bgrt->status);
53 goto out;
54 }
55 if (bgrt->image_type != 0) { 50 if (bgrt->image_type != 0) {
56 pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n", 51 pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
57 bgrt->image_type); 52 bgrt->image_type);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 16b2137d117c..4b7cf7bc0ded 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -1009,14 +1009,16 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
1009 1009
1010 /* first try to find a slot in an existing linked list entry */ 1010 /* first try to find a slot in an existing linked list entry */
1011 for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) { 1011 for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
1012 rsv = __va(prsv); 1012 rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
1013 index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size); 1013 index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
1014 if (index < rsv->size) { 1014 if (index < rsv->size) {
1015 rsv->entry[index].base = addr; 1015 rsv->entry[index].base = addr;
1016 rsv->entry[index].size = size; 1016 rsv->entry[index].size = size;
1017 1017
1018 memunmap(rsv);
1018 return 0; 1019 return 0;
1019 } 1020 }
1021 memunmap(rsv);
1020 } 1022 }
1021 1023
1022 /* no slot found - allocate a new linked list entry */ 1024 /* no slot found - allocate a new linked list entry */
@@ -1024,7 +1026,13 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
1024 if (!rsv) 1026 if (!rsv)
1025 return -ENOMEM; 1027 return -ENOMEM;
1026 1028
1027 rsv->size = EFI_MEMRESERVE_COUNT(PAGE_SIZE); 1029 /*
1030 * The memremap() call above assumes that a linux_efi_memreserve entry
1031 * never crosses a page boundary, so let's ensure that this remains true
1032 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
1033 * using SZ_4K explicitly in the size calculation below.
1034 */
1035 rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
1028 atomic_set(&rsv->count, 1); 1036 atomic_set(&rsv->count, 1);
1029 rsv->entry[0].base = addr; 1037 rsv->entry[0].base = addr;
1030 rsv->entry[0].size = size; 1038 rsv->entry[0].size = size;
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 61e099826cbb..35dccc88ac0a 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -43,11 +43,13 @@ static int efibc_set_variable(const char *name, const char *value)
43 efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data); 43 efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data);
44 memcpy(&entry->var.VendorGuid, &guid, sizeof(guid)); 44 memcpy(&entry->var.VendorGuid, &guid, sizeof(guid));
45 45
46 ret = efivar_entry_set(entry, 46 ret = efivar_entry_set_safe(entry->var.VariableName,
47 EFI_VARIABLE_NON_VOLATILE 47 entry->var.VendorGuid,
48 | EFI_VARIABLE_BOOTSERVICE_ACCESS 48 EFI_VARIABLE_NON_VOLATILE
49 | EFI_VARIABLE_RUNTIME_ACCESS, 49 | EFI_VARIABLE_BOOTSERVICE_ACCESS
50 size, entry->var.Data, NULL); 50 | EFI_VARIABLE_RUNTIME_ACCESS,
51 false, size, entry->var.Data);
52
51 if (ret) 53 if (ret)
52 pr_err("failed to set %s EFI variable: 0x%x\n", 54 pr_err("failed to set %s EFI variable: 0x%x\n",
53 name, ret); 55 name, ret);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index aec7bd86ae7e..9c9b965d7d6d 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -118,8 +118,15 @@ static void of_gpio_flags_quirks(struct device_node *np,
118 * Legacy handling of SPI active high chip select. If we have a 118 * Legacy handling of SPI active high chip select. If we have a
119 * property named "cs-gpios" we need to inspect the child node 119 * property named "cs-gpios" we need to inspect the child node
120 * to determine if the flags should have inverted semantics. 120 * to determine if the flags should have inverted semantics.
121 *
122 * This does not apply to an SPI device named "spi-gpio", because
123 * these have traditionally obtained their own GPIOs by parsing
124 * the device tree directly and did not respect any "spi-cs-high"
125 * property on the SPI bus children.
121 */ 126 */
122 if (IS_ENABLED(CONFIG_SPI_MASTER) && !strcmp(propname, "cs-gpios") && 127 if (IS_ENABLED(CONFIG_SPI_MASTER) &&
128 !strcmp(propname, "cs-gpios") &&
129 !of_device_is_compatible(np, "spi-gpio") &&
123 of_property_read_bool(np, "cs-gpios")) { 130 of_property_read_bool(np, "cs-gpios")) {
124 struct device_node *child; 131 struct device_node *child;
125 u32 cs; 132 u32 cs;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index b610e3b30d95..2f18c64d531f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1959,25 +1959,6 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
1959 mutex_unlock(&adev->srbm_mutex); 1959 mutex_unlock(&adev->srbm_mutex);
1960 1960
1961 gfx_v9_0_init_compute_vmid(adev); 1961 gfx_v9_0_init_compute_vmid(adev);
1962
1963 mutex_lock(&adev->grbm_idx_mutex);
1964 /*
1965 * making sure that the following register writes will be broadcasted
1966 * to all the shaders
1967 */
1968 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1969
1970 WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
1971 (adev->gfx.config.sc_prim_fifo_size_frontend <<
1972 PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1973 (adev->gfx.config.sc_prim_fifo_size_backend <<
1974 PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1975 (adev->gfx.config.sc_hiz_tile_fifo_size <<
1976 PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
1977 (adev->gfx.config.sc_earlyz_tile_fifo_size <<
1978 PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
1979 mutex_unlock(&adev->grbm_idx_mutex);
1980
1981} 1962}
1982 1963
1983static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) 1964static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index f1d326caf69e..a7e8340baf90 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -326,7 +326,7 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr)
326 if (ret) 326 if (ret)
327 return ret; 327 return ret;
328 328
329 ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL); 329 ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
330 330
331 return ret; 331 return ret;
332} 332}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index ae64ff7153d6..1cd5a8b5cdc1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -916,8 +916,10 @@ static int init_thermal_controller(
916 PHM_PlatformCaps_ThermalController 916 PHM_PlatformCaps_ThermalController
917 ); 917 );
918 918
919 if (0 == powerplay_table->usFanTableOffset) 919 if (0 == powerplay_table->usFanTableOffset) {
920 hwmgr->thermal_controller.use_hw_fan_control = 1;
920 return 0; 921 return 0;
922 }
921 923
922 fan_table = (const PPTable_Generic_SubTable_Header *) 924 fan_table = (const PPTable_Generic_SubTable_Header *)
923 (((unsigned long)powerplay_table) + 925 (((unsigned long)powerplay_table) +
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index c92999aac07c..eccb26fddbd0 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -694,6 +694,7 @@ struct pp_thermal_controller_info {
694 uint8_t ucType; 694 uint8_t ucType;
695 uint8_t ucI2cLine; 695 uint8_t ucI2cLine;
696 uint8_t ucI2cAddress; 696 uint8_t ucI2cAddress;
697 uint8_t use_hw_fan_control;
697 struct pp_fan_info fanInfo; 698 struct pp_fan_info fanInfo;
698 struct pp_advance_fan_control_parameters advanceFanControlParameters; 699 struct pp_advance_fan_control_parameters advanceFanControlParameters;
699}; 700};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 2d4cfe14f72e..29e641c6a5db 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -2092,6 +2092,10 @@ static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2092 return 0; 2092 return 0;
2093 } 2093 }
2094 2094
2095 /* use hardware fan control */
2096 if (hwmgr->thermal_controller.use_hw_fan_control)
2097 return 0;
2098
2095 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters. 2099 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
2096 usPWMMin * duty100; 2100 usPWMMin * duty100;
2097 do_div(tmp64, 10000); 2101 do_div(tmp64, 10000);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 72d01e873160..5418a1a87b2c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -760,7 +760,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
760 if (IS_ERR(gpu->cmdbuf_suballoc)) { 760 if (IS_ERR(gpu->cmdbuf_suballoc)) {
761 dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n"); 761 dev_err(gpu->dev, "Failed to create cmdbuf suballocator\n");
762 ret = PTR_ERR(gpu->cmdbuf_suballoc); 762 ret = PTR_ERR(gpu->cmdbuf_suballoc);
763 goto fail; 763 goto destroy_iommu;
764 } 764 }
765 765
766 /* Create buffer: */ 766 /* Create buffer: */
@@ -768,7 +768,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
768 PAGE_SIZE); 768 PAGE_SIZE);
769 if (ret) { 769 if (ret) {
770 dev_err(gpu->dev, "could not create command buffer\n"); 770 dev_err(gpu->dev, "could not create command buffer\n");
771 goto destroy_iommu; 771 goto destroy_suballoc;
772 } 772 }
773 773
774 if (gpu->mmu->version == ETNAVIV_IOMMU_V1 && 774 if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
@@ -800,6 +800,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
800free_buffer: 800free_buffer:
801 etnaviv_cmdbuf_free(&gpu->buffer); 801 etnaviv_cmdbuf_free(&gpu->buffer);
802 gpu->buffer.suballoc = NULL; 802 gpu->buffer.suballoc = NULL;
803destroy_suballoc:
804 etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
805 gpu->cmdbuf_suballoc = NULL;
803destroy_iommu: 806destroy_iommu:
804 etnaviv_iommu_destroy(gpu->mmu); 807 etnaviv_iommu_destroy(gpu->mmu);
805 gpu->mmu = NULL; 808 gpu->mmu = NULL;
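The etnaviv fix above re-chains the error labels so that a failure at each stage unwinds exactly what was acquired before it, in reverse order. A minimal userspace sketch of that goto-unwind pattern, with hypothetical resource names rather than the etnaviv API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical three-stage init: on failure, jump to the label that
 * releases everything acquired so far, in reverse order. */
static int demo_init(int fail_at)
{
	char *iommu, *suballoc, *buffer;
	int ret = -1;

	iommu = (fail_at == 1) ? NULL : malloc(16);
	if (!iommu)
		goto out;

	suballoc = (fail_at == 2) ? NULL : malloc(16);
	if (!suballoc)
		goto destroy_iommu;

	buffer = (fail_at == 3) ? NULL : malloc(16);
	if (!buffer)
		goto destroy_suballoc;

	printf("fail_at=%d: init ok\n", fail_at);
	free(buffer);
	free(suballoc);
	free(iommu);
	return 0;

destroy_suballoc:
	free(suballoc);
destroy_iommu:
	free(iommu);
out:
	return ret;
}

int main(void)
{
	for (int i = 0; i <= 3; i++)
		printf("fail_at=%d -> %d\n", i, demo_init(i));
	return 0;
}

Each label releases one more layer, so a single exit chain covers every failure point without leaking the earlier allocations.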
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 029fd8ec1857..f0d45ccc1aac 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1888,12 +1888,12 @@ static int ring_request_alloc(struct i915_request *request)
1888 */ 1888 */
1889 request->reserved_space += LEGACY_REQUEST_SIZE; 1889 request->reserved_space += LEGACY_REQUEST_SIZE;
1890 1890
1891 ret = switch_context(request); 1891 /* Unconditionally invalidate GPU caches and TLBs. */
1892 ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
1892 if (ret) 1893 if (ret)
1893 return ret; 1894 return ret;
1894 1895
1895 /* Unconditionally invalidate GPU caches and TLBs. */ 1896 ret = switch_context(request);
1896 ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
1897 if (ret) 1897 if (ret)
1898 return ret; 1898 return ret;
1899 1899
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 9cc1d678674f..c436a28d50e4 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -91,14 +91,14 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
91 ipu_dc_disable(ipu); 91 ipu_dc_disable(ipu);
92 ipu_prg_disable(ipu); 92 ipu_prg_disable(ipu);
93 93
94 drm_crtc_vblank_off(crtc);
95
94 spin_lock_irq(&crtc->dev->event_lock); 96 spin_lock_irq(&crtc->dev->event_lock);
95 if (crtc->state->event) { 97 if (crtc->state->event && !crtc->state->active) {
96 drm_crtc_send_vblank_event(crtc, crtc->state->event); 98 drm_crtc_send_vblank_event(crtc, crtc->state->event);
97 crtc->state->event = NULL; 99 crtc->state->event = NULL;
98 } 100 }
99 spin_unlock_irq(&crtc->dev->event_lock); 101 spin_unlock_irq(&crtc->dev->event_lock);
100
101 drm_crtc_vblank_off(crtc);
102} 102}
103 103
104static void imx_drm_crtc_reset(struct drm_crtc *crtc) 104static void imx_drm_crtc_reset(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index d11e2281dde6..7e43b25785f7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -63,7 +63,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
63 return 0; 63 return 0;
64 64
65err_free: 65err_free:
66 drm_gem_object_put_unlocked(&shmem->base); 66 drm_gem_handle_delete(file, args->handle);
67 return ret; 67 return ret;
68} 68}
69 69
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index e62fe24b1a2e..5bb0f0a084e9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -619,11 +619,11 @@ static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
619 output = vgdev->outputs + scanout; 619 output = vgdev->outputs + scanout;
620 620
621 new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp); 621 new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
622 drm_connector_update_edid_property(&output->conn, new_edid);
622 623
623 spin_lock(&vgdev->display_info_lock); 624 spin_lock(&vgdev->display_info_lock);
624 old_edid = output->edid; 625 old_edid = output->edid;
625 output->edid = new_edid; 626 output->edid = new_edid;
626 drm_connector_update_edid_property(&output->conn, output->edid);
627 spin_unlock(&vgdev->display_info_lock); 627 spin_unlock(&vgdev->display_info_lock);
628 628
629 kfree(old_edid); 629 kfree(old_edid);
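The virtio-gpu hunk above updates the connector property from the freshly parsed EDID before publishing it, keeps only the pointer swap inside the spinlock, and frees the old buffer after the lock is dropped. A small pthread-based sketch of that publish pattern (generic names, not the virtio-gpu API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *current_edid;	/* shared, protected by 'lock' */

/* Build the new object outside the lock, swap the pointer under the
 * lock, and free the replaced object only after unlocking. */
static void publish_edid(const char *data)
{
	char *new_edid = strdup(data);	/* potentially slow: keep it unlocked */
	char *old_edid;

	pthread_mutex_lock(&lock);
	old_edid = current_edid;
	current_edid = new_edid;
	pthread_mutex_unlock(&lock);

	free(old_edid);			/* never free while holding the lock */
}

int main(void)
{
	publish_edid("edid-v1");
	publish_edid("edid-v2");
	printf("current: %s\n", current_edid);
	free(current_edid);
	return 0;
}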
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index eac0c54c5970..b032d3899fa3 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -80,6 +80,7 @@
80#define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220 80#define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220
81#define HID_DEVICE_ID_ALPS_U1 0x1215 81#define HID_DEVICE_ID_ALPS_U1 0x1215
82#define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C 82#define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C
83#define HID_DEVICE_ID_ALPS_1222 0x1222
83 84
84 85
85#define USB_VENDOR_ID_AMI 0x046b 86#define USB_VENDOR_ID_AMI 0x046b
@@ -269,6 +270,7 @@
269#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d 270#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
270#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618 271#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
271#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE 0x1053 272#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE 0x1053
273#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2 0x0939
272#define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123 274#define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
273#define USB_DEVICE_ID_ASUS_AK1D 0x1125 275#define USB_DEVICE_ID_ASUS_AK1D 0x1125
274#define USB_DEVICE_ID_CHICONY_TOSHIBA_WT10A 0x1408 276#define USB_DEVICE_ID_CHICONY_TOSHIBA_WT10A 0x1408
@@ -569,6 +571,7 @@
569 571
570#define USB_VENDOR_ID_HUION 0x256c 572#define USB_VENDOR_ID_HUION 0x256c
571#define USB_DEVICE_ID_HUION_TABLET 0x006e 573#define USB_DEVICE_ID_HUION_TABLET 0x006e
574#define USB_DEVICE_ID_HUION_HS64 0x006d
572 575
573#define USB_VENDOR_ID_IBM 0x04b3 576#define USB_VENDOR_ID_IBM 0x04b3
574#define USB_DEVICE_ID_IBM_SCROLLPOINT_III 0x3100 577#define USB_DEVICE_ID_IBM_SCROLLPOINT_III 0x3100
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index e564bff86515..bfcf2ee58d14 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -30,6 +30,7 @@
30 30
31#define REPORT_ID_HIDPP_SHORT 0x10 31#define REPORT_ID_HIDPP_SHORT 0x10
32#define REPORT_ID_HIDPP_LONG 0x11 32#define REPORT_ID_HIDPP_LONG 0x11
33#define REPORT_ID_HIDPP_VERY_LONG 0x12
33 34
34#define HIDPP_REPORT_SHORT_LENGTH 7 35#define HIDPP_REPORT_SHORT_LENGTH 7
35#define HIDPP_REPORT_LONG_LENGTH 20 36#define HIDPP_REPORT_LONG_LENGTH 20
@@ -1242,7 +1243,8 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
1242 int ret; 1243 int ret;
1243 1244
1244 if ((buf[0] == REPORT_ID_HIDPP_SHORT) || 1245 if ((buf[0] == REPORT_ID_HIDPP_SHORT) ||
1245 (buf[0] == REPORT_ID_HIDPP_LONG)) { 1246 (buf[0] == REPORT_ID_HIDPP_LONG) ||
1247 (buf[0] == REPORT_ID_HIDPP_VERY_LONG)) {
1246 if (count < 2) 1248 if (count < 2)
1247 return -EINVAL; 1249 return -EINVAL;
1248 1250
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 5df5dd56ecc8..b603c14d043b 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1776,6 +1776,10 @@ static const struct hid_device_id mt_devices[] = {
1776 HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, 1776 HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
1777 USB_VENDOR_ID_ALPS_JP, 1777 USB_VENDOR_ID_ALPS_JP,
1778 HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) }, 1778 HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) },
1779 { .driver_data = MT_CLS_WIN_8_DUAL,
1780 HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
1781 USB_VENDOR_ID_ALPS_JP,
1782 HID_DEVICE_ID_ALPS_1222) },
1779 1783
1780 /* Lenovo X1 TAB Gen 2 */ 1784 /* Lenovo X1 TAB Gen 2 */
1781 { .driver_data = MT_CLS_WIN_8_DUAL, 1785 { .driver_data = MT_CLS_WIN_8_DUAL,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index e5ca6fe2ca57..671a285724f9 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -42,6 +42,7 @@ static const struct hid_device_id hid_quirks[] = {
42 { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM), HID_QUIRK_NOGET }, 42 { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM), HID_QUIRK_NOGET },
43 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH), HID_QUIRK_MULTI_INPUT }, 43 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH), HID_QUIRK_MULTI_INPUT },
44 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, 44 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
45 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2), HID_QUIRK_ALWAYS_POLL },
45 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS), HID_QUIRK_MULTI_INPUT }, 46 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS), HID_QUIRK_MULTI_INPUT },
46 { HID_USB_DEVICE(USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD), HID_QUIRK_BADPAD }, 47 { HID_USB_DEVICE(USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD), HID_QUIRK_BADPAD },
47 { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK), HID_QUIRK_NOGET }, 48 { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK), HID_QUIRK_NOGET },
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index 8fe02d81265d..914fb527ae7a 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -369,6 +369,8 @@ static const struct hid_device_id uclogic_devices[] = {
369 USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) }, 369 USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
370 { HID_USB_DEVICE(USB_VENDOR_ID_HUION, 370 { HID_USB_DEVICE(USB_VENDOR_ID_HUION,
371 USB_DEVICE_ID_HUION_TABLET) }, 371 USB_DEVICE_ID_HUION_TABLET) },
372 { HID_USB_DEVICE(USB_VENDOR_ID_HUION,
373 USB_DEVICE_ID_HUION_HS64) },
372 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, 374 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
373 USB_DEVICE_ID_HUION_TABLET) }, 375 USB_DEVICE_ID_HUION_TABLET) },
374 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, 376 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 0187c9f8fc22..273d784fff66 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -977,6 +977,8 @@ int uclogic_params_init(struct uclogic_params *params,
977 /* FALL THROUGH */ 977 /* FALL THROUGH */
978 case VID_PID(USB_VENDOR_ID_HUION, 978 case VID_PID(USB_VENDOR_ID_HUION,
979 USB_DEVICE_ID_HUION_TABLET): 979 USB_DEVICE_ID_HUION_TABLET):
980 case VID_PID(USB_VENDOR_ID_HUION,
981 USB_DEVICE_ID_HUION_HS64):
980 case VID_PID(USB_VENDOR_ID_UCLOGIC, 982 case VID_PID(USB_VENDOR_ID_UCLOGIC,
981 USB_DEVICE_ID_HUION_TABLET): 983 USB_DEVICE_ID_HUION_TABLET):
982 case VID_PID(USB_VENDOR_ID_UCLOGIC, 984 case VID_PID(USB_VENDOR_ID_UCLOGIC,
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
index 22ba21457035..aa2dbed30fc3 100644
--- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -816,9 +816,9 @@ static int load_fw_from_host(struct ishtp_cl_data *client_data)
816 goto end_err_fw_release; 816 goto end_err_fw_release;
817 817
818 release_firmware(fw); 818 release_firmware(fw);
819 kfree(filename);
820 dev_info(cl_data_to_dev(client_data), "ISH firmware %s loaded\n", 819 dev_info(cl_data_to_dev(client_data), "ISH firmware %s loaded\n",
821 filename); 820 filename);
821 kfree(filename);
822 return 0; 822 return 0;
823 823
824end_err_fw_release: 824end_err_fw_release:
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index c0487b34d2cf..6ba944b40fdb 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -891,7 +891,7 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
891 */ 891 */
892static int hid_ishtp_cl_suspend(struct device *device) 892static int hid_ishtp_cl_suspend(struct device *device)
893{ 893{
894 struct ishtp_cl_device *cl_device = dev_get_drvdata(device); 894 struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
895 struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device); 895 struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
896 struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl); 896 struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
897 897
@@ -912,7 +912,7 @@ static int hid_ishtp_cl_suspend(struct device *device)
912 */ 912 */
913static int hid_ishtp_cl_resume(struct device *device) 913static int hid_ishtp_cl_resume(struct device *device)
914{ 914{
915 struct ishtp_cl_device *cl_device = dev_get_drvdata(device); 915 struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
916 struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device); 916 struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
917 struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl); 917 struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
918 918
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 794e700d65f7..c47c3328a0f4 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -471,7 +471,6 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
471 } 471 }
472 472
473 ishtp_device_ready = true; 473 ishtp_device_ready = true;
474 dev_set_drvdata(&device->dev, device);
475 474
476 return device; 475 return device;
477} 476}
@@ -640,6 +639,20 @@ void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device)
640EXPORT_SYMBOL(ishtp_get_drvdata); 639EXPORT_SYMBOL(ishtp_get_drvdata);
641 640
642/** 641/**
642 * ishtp_dev_to_cl_device() - get ishtp_cl_device instance from device instance
643 * @device: device instance
644 *
645 * Get ish_cl_device instance which embeds device instance in it.
646 *
647 * Return: pointer to ishtp_cl_device instance
648 */
649struct ishtp_cl_device *ishtp_dev_to_cl_device(struct device *device)
650{
651 return to_ishtp_cl_device(device);
652}
653EXPORT_SYMBOL(ishtp_dev_to_cl_device);
654
655/**
643 * ishtp_bus_new_client() - Create a new client 656 * ishtp_bus_new_client() - Create a new client
644 * @dev: ISHTP device instance 657 * @dev: ISHTP device instance
645 * 658 *
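ishtp_dev_to_cl_device() lets client drivers map a struct device back to the ishtp_cl_device that embeds it, replacing the drvdata lookup removed above; to_ishtp_cl_device() is presumably a container_of() wrapper. A standalone sketch of that idiom with made-up structure names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device {
	const char *name;
};

struct cl_device {
	int id;
	struct device dev;	/* embedded member */
};

/* Recover the containing object from a pointer to its embedded member. */
static struct cl_device *dev_to_cl_device(struct device *dev)
{
	return container_of(dev, struct cl_device, dev);
}

int main(void)
{
	struct cl_device cl = { .id = 42, .dev = { .name = "ish" } };
	struct device *dev = &cl.dev;

	printf("id via container_of: %d\n", dev_to_cl_device(dev)->id);
	return 0;
}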
diff --git a/drivers/irqchip/irq-csky-mpintc.c b/drivers/irqchip/irq-csky-mpintc.c
index c67c961ab6cc..a4c1aacba1ff 100644
--- a/drivers/irqchip/irq-csky-mpintc.c
+++ b/drivers/irqchip/irq-csky-mpintc.c
@@ -89,8 +89,19 @@ static int csky_irq_set_affinity(struct irq_data *d,
89 if (cpu >= nr_cpu_ids) 89 if (cpu >= nr_cpu_ids)
90 return -EINVAL; 90 return -EINVAL;
91 91
92 /* Enable interrupt destination */ 92 /*
93 cpu |= BIT(31); 93 * The csky,mpintc could support auto irq deliver, but it only
94 * could deliver external irq to one cpu or all cpus. So it
95 * doesn't support deliver external irq to a group of cpus
96 * with cpu_mask.
97 * SO we only use auto deliver mode when affinity mask_val is
98 * equal to cpu_present_mask.
99 *
100 */
101 if (cpumask_equal(mask_val, cpu_present_mask))
102 cpu = 0;
103 else
104 cpu |= BIT(31);
94 105
95 writel_relaxed(cpu, INTCG_base + INTCG_CIDSTR + offset); 106 writel_relaxed(cpu, INTCG_base + INTCG_CIDSTR + offset);
96 107
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d29b44b677e4..35500801dc2b 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -733,32 +733,43 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
733} 733}
734 734
735static int its_wait_for_range_completion(struct its_node *its, 735static int its_wait_for_range_completion(struct its_node *its,
736 struct its_cmd_block *from, 736 u64 prev_idx,
737 struct its_cmd_block *to) 737 struct its_cmd_block *to)
738{ 738{
739 u64 rd_idx, from_idx, to_idx; 739 u64 rd_idx, to_idx, linear_idx;
740 u32 count = 1000000; /* 1s! */ 740 u32 count = 1000000; /* 1s! */
741 741
742 from_idx = its_cmd_ptr_to_offset(its, from); 742 /* Linearize to_idx if the command set has wrapped around */
743 to_idx = its_cmd_ptr_to_offset(its, to); 743 to_idx = its_cmd_ptr_to_offset(its, to);
744 if (to_idx < prev_idx)
745 to_idx += ITS_CMD_QUEUE_SZ;
746
747 linear_idx = prev_idx;
744 748
745 while (1) { 749 while (1) {
750 s64 delta;
751
746 rd_idx = readl_relaxed(its->base + GITS_CREADR); 752 rd_idx = readl_relaxed(its->base + GITS_CREADR);
747 753
748 /* Direct case */ 754 /*
749 if (from_idx < to_idx && rd_idx >= to_idx) 755 * Compute the read pointer progress, taking the
750 break; 756 * potential wrap-around into account.
757 */
758 delta = rd_idx - prev_idx;
759 if (rd_idx < prev_idx)
760 delta += ITS_CMD_QUEUE_SZ;
751 761
752 /* Wrapped case */ 762 linear_idx += delta;
753 if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx) 763 if (linear_idx >= to_idx)
754 break; 764 break;
755 765
756 count--; 766 count--;
757 if (!count) { 767 if (!count) {
758 pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n", 768 pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
759 from_idx, to_idx, rd_idx); 769 to_idx, linear_idx);
760 return -1; 770 return -1;
761 } 771 }
772 prev_idx = rd_idx;
762 cpu_relax(); 773 cpu_relax();
763 udelay(1); 774 udelay(1);
764 } 775 }
@@ -775,6 +786,7 @@ void name(struct its_node *its, \
775 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ 786 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
776 synctype *sync_obj; \ 787 synctype *sync_obj; \
777 unsigned long flags; \ 788 unsigned long flags; \
789 u64 rd_idx; \
778 \ 790 \
779 raw_spin_lock_irqsave(&its->lock, flags); \ 791 raw_spin_lock_irqsave(&its->lock, flags); \
780 \ 792 \
@@ -796,10 +808,11 @@ void name(struct its_node *its, \
796 } \ 808 } \
797 \ 809 \
798post: \ 810post: \
811 rd_idx = readl_relaxed(its->base + GITS_CREADR); \
799 next_cmd = its_post_commands(its); \ 812 next_cmd = its_post_commands(its); \
800 raw_spin_unlock_irqrestore(&its->lock, flags); \ 813 raw_spin_unlock_irqrestore(&its->lock, flags); \
801 \ 814 \
802 if (its_wait_for_range_completion(its, cmd, next_cmd)) \ 815 if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
803 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ 816 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
804} 817}
805 818
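The its_wait_for_range_completion() rework above tracks how far the read pointer has advanced instead of comparing raw queue offsets, adding the command-queue size whenever the hardware index wraps. A self-contained sketch of that linearization arithmetic (the queue size here is picked arbitrarily for the demo):

#include <stdio.h>
#include <stdint.h>

#define QUEUE_SZ 0x10000ULL	/* arbitrary command queue size for the demo */

/* Accumulate the progress of a wrapping read index into a monotonically
 * increasing "linear" index. */
static uint64_t advance(uint64_t linear_idx, uint64_t prev_idx, uint64_t rd_idx)
{
	int64_t delta = (int64_t)(rd_idx - prev_idx);

	if (rd_idx < prev_idx)		/* the hardware index wrapped */
		delta += QUEUE_SZ;

	return linear_idx + delta;
}

int main(void)
{
	/* read pointer samples, including one wrap-around */
	uint64_t samples[] = { 0xff00, 0xffc0, 0x0040, 0x0100 };
	uint64_t prev = 0xfe00, linear = prev;

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		linear = advance(linear, prev, samples[i]);
		prev = samples[i];
		printf("rd=0x%04llx linear=0x%05llx\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)linear);
	}
	return 0;
}

Because the linear index never decreases, a single "have we reached to_idx yet" comparison works whether or not the queue wrapped between polls.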
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index d32268cc1174..f3985469c221 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -388,7 +388,7 @@ static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
388 intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); 388 intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
389 cd = irq_data_get_irq_chip_data(d); 389 cd = irq_data_get_irq_chip_data(d);
390 390
391 write_gic_vl_map(intr, cd->map); 391 write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
392 if (cd->mask) 392 if (cd->mask)
393 write_gic_vl_smask(BIT(intr)); 393 write_gic_vl_smask(BIT(intr));
394} 394}
@@ -517,7 +517,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
517 spin_lock_irqsave(&gic_lock, flags); 517 spin_lock_irqsave(&gic_lock, flags);
518 for_each_online_cpu(cpu) { 518 for_each_online_cpu(cpu) {
519 write_gic_vl_other(mips_cm_vp_id(cpu)); 519 write_gic_vl_other(mips_cm_vp_id(cpu));
520 write_gic_vo_map(intr, map); 520 write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
521 } 521 }
522 spin_unlock_irqrestore(&gic_lock, flags); 522 spin_unlock_irqrestore(&gic_lock, flags);
523 523
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 011b60a49e3f..ef4d625d2d80 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -159,9 +159,9 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom
159 parent_fwspec.param[1] = vint_desc->vint_id; 159 parent_fwspec.param[1] = vint_desc->vint_id;
160 160
161 parent_virq = irq_create_fwspec_mapping(&parent_fwspec); 161 parent_virq = irq_create_fwspec_mapping(&parent_fwspec);
162 if (parent_virq <= 0) { 162 if (parent_virq == 0) {
163 kfree(vint_desc); 163 kfree(vint_desc);
164 return ERR_PTR(parent_virq); 164 return ERR_PTR(-EINVAL);
165 } 165 }
166 vint_desc->parent_virq = parent_virq; 166 vint_desc->parent_virq = parent_virq;
167 167
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index 352e803f566e..728733a514c7 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -140,8 +140,8 @@ static char __init *dm_parse_table_entry(struct dm_device *dev, char *str)
140 return ERR_PTR(-EINVAL); 140 return ERR_PTR(-EINVAL);
141 } 141 }
142 /* target_args */ 142 /* target_args */
143 dev->target_args_array[n] = kstrndup(field[3], GFP_KERNEL, 143 dev->target_args_array[n] = kstrndup(field[3], DM_MAX_STR_SIZE,
144 DM_MAX_STR_SIZE); 144 GFP_KERNEL);
145 if (!dev->target_args_array[n]) 145 if (!dev->target_args_array[n])
146 return ERR_PTR(-ENOMEM); 146 return ERR_PTR(-ENOMEM);
147 147
@@ -272,10 +272,10 @@ static int __init dm_init_init(void)
272 return 0; 272 return 0;
273 273
274 if (strlen(create) >= DM_MAX_STR_SIZE) { 274 if (strlen(create) >= DM_MAX_STR_SIZE) {
275 DMERR("Argument is too big. Limit is %d\n", DM_MAX_STR_SIZE); 275 DMERR("Argument is too big. Limit is %d", DM_MAX_STR_SIZE);
276 return -EINVAL; 276 return -EINVAL;
277 } 277 }
278 str = kstrndup(create, GFP_KERNEL, DM_MAX_STR_SIZE); 278 str = kstrndup(create, DM_MAX_STR_SIZE, GFP_KERNEL);
279 if (!str) 279 if (!str)
280 return -ENOMEM; 280 return -ENOMEM;
281 281
@@ -283,7 +283,7 @@ static int __init dm_init_init(void)
283 if (r) 283 if (r)
284 goto out; 284 goto out;
285 285
286 DMINFO("waiting for all devices to be available before creating mapped devices\n"); 286 DMINFO("waiting for all devices to be available before creating mapped devices");
287 wait_for_device_probe(); 287 wait_for_device_probe();
288 288
289 list_for_each_entry(dev, &devices, list) { 289 list_for_each_entry(dev, &devices, list) {
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 9ea2b0291f20..e549392e0ea5 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -60,6 +60,7 @@
60 60
61#define WRITE_LOG_VERSION 1ULL 61#define WRITE_LOG_VERSION 1ULL
62#define WRITE_LOG_MAGIC 0x6a736677736872ULL 62#define WRITE_LOG_MAGIC 0x6a736677736872ULL
63#define WRITE_LOG_SUPER_SECTOR 0
63 64
64/* 65/*
65 * The disk format for this is braindead simple. 66 * The disk format for this is braindead simple.
@@ -115,6 +116,7 @@ struct log_writes_c {
115 struct list_head logging_blocks; 116 struct list_head logging_blocks;
116 wait_queue_head_t wait; 117 wait_queue_head_t wait;
117 struct task_struct *log_kthread; 118 struct task_struct *log_kthread;
119 struct completion super_done;
118}; 120};
119 121
120struct pending_block { 122struct pending_block {
@@ -180,6 +182,14 @@ static void log_end_io(struct bio *bio)
180 bio_put(bio); 182 bio_put(bio);
181} 183}
182 184
185static void log_end_super(struct bio *bio)
186{
187 struct log_writes_c *lc = bio->bi_private;
188
189 complete(&lc->super_done);
190 log_end_io(bio);
191}
192
183/* 193/*
184 * Meant to be called if there is an error, it will free all the pages 194 * Meant to be called if there is an error, it will free all the pages
185 * associated with the block. 195 * associated with the block.
@@ -215,7 +225,8 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
215 bio->bi_iter.bi_size = 0; 225 bio->bi_iter.bi_size = 0;
216 bio->bi_iter.bi_sector = sector; 226 bio->bi_iter.bi_sector = sector;
217 bio_set_dev(bio, lc->logdev->bdev); 227 bio_set_dev(bio, lc->logdev->bdev);
218 bio->bi_end_io = log_end_io; 228 bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
229 log_end_super : log_end_io;
219 bio->bi_private = lc; 230 bio->bi_private = lc;
220 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 231 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
221 232
@@ -418,11 +429,18 @@ static int log_super(struct log_writes_c *lc)
418 super.nr_entries = cpu_to_le64(lc->logged_entries); 429 super.nr_entries = cpu_to_le64(lc->logged_entries);
419 super.sectorsize = cpu_to_le32(lc->sectorsize); 430 super.sectorsize = cpu_to_le32(lc->sectorsize);
420 431
421 if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) { 432 if (write_metadata(lc, &super, sizeof(super), NULL, 0,
433 WRITE_LOG_SUPER_SECTOR)) {
422 DMERR("Couldn't write super"); 434 DMERR("Couldn't write super");
423 return -1; 435 return -1;
424 } 436 }
425 437
438 /*
439 * Super sector should be writen in-order, otherwise the
440 * nr_entries could be rewritten incorrectly by an old bio.
441 */
442 wait_for_completion_io(&lc->super_done);
443
426 return 0; 444 return 0;
427} 445}
428 446
@@ -531,6 +549,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
531 INIT_LIST_HEAD(&lc->unflushed_blocks); 549 INIT_LIST_HEAD(&lc->unflushed_blocks);
532 INIT_LIST_HEAD(&lc->logging_blocks); 550 INIT_LIST_HEAD(&lc->logging_blocks);
533 init_waitqueue_head(&lc->wait); 551 init_waitqueue_head(&lc->wait);
552 init_completion(&lc->super_done);
534 atomic_set(&lc->io_blocks, 0); 553 atomic_set(&lc->io_blocks, 0);
535 atomic_set(&lc->pending_blocks, 0); 554 atomic_set(&lc->pending_blocks, 0);
536 555
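The dm-log-writes change waits on a completion after submitting the super-sector write so a stale in-flight super write cannot land after a newer one. A small pthread sketch of the wait/complete pairing, a generic stand-in rather than the kernel's completion API:

#include <pthread.h>
#include <stdio.h>

/* Minimal completion: complete() flips 'done', wait_for_completion()
 * blocks until it does. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion super_done = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

/* Stands in for the bio end_io callback running in another context. */
static void *io_end(void *arg)
{
	printf("super write finished\n");
	complete(&super_done);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, io_end, NULL);	/* "submit" the super write */
	wait_for_completion(&super_done);	/* block until it completed */
	printf("safe to issue the next super write\n");
	pthread_join(t, NULL);
	return 0;
}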
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 350cf0451456..ec8b27e20de3 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -561,7 +561,7 @@ static char **realloc_argv(unsigned *size, char **old_argv)
561 gfp = GFP_NOIO; 561 gfp = GFP_NOIO;
562 } 562 }
563 argv = kmalloc_array(new_size, sizeof(*argv), gfp); 563 argv = kmalloc_array(new_size, sizeof(*argv), gfp);
564 if (argv) { 564 if (argv && old_argv) {
565 memcpy(argv, old_argv, *size * sizeof(*argv)); 565 memcpy(argv, old_argv, *size * sizeof(*argv));
566 *size = new_size; 566 *size = new_size;
567 } 567 }
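The dm-table change only copies the previous argv when one actually exists, since the very first growth passes a NULL old pointer. A generic grow-and-copy sketch with the same guard (not a copy of realloc_argv):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow a pointer array; copy the previous contents only if there are
 * any. memcpy() from a NULL source is undefined behaviour even for a
 * zero length, hence the explicit guard. */
static char **grow_argv(unsigned *size, char **old_argv)
{
	unsigned new_size = *size ? *size * 2 : 8;
	char **argv = malloc(new_size * sizeof(*argv));

	if (argv && old_argv)
		memcpy(argv, old_argv, *size * sizeof(*argv));
	if (argv)
		*size = new_size;

	free(old_argv);
	return argv;
}

int main(void)
{
	unsigned size = 0;
	char **argv = grow_argv(&size, NULL);	/* first call: nothing to copy */

	argv[0] = "hello";
	argv = grow_argv(&size, argv);		/* later calls copy old entries */
	printf("size=%u argv[0]=%s\n", size, argv[0]);
	free(argv);
	return 0;
}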
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 720d06531aa3..ea24ff0612e3 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -235,8 +235,8 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
235 BUG(); 235 BUG();
236 } 236 }
237 237
238 DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str, 238 DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
239 block); 239 type_str, block);
240 240
241 if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS) 241 if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
242 DMERR("%s: reached maximum errors", v->data_dev->name); 242 DMERR("%s: reached maximum errors", v->data_dev->name);
diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
index fe8efba2d45f..857991cb3cbb 100644
--- a/drivers/mfd/stmfx.c
+++ b/drivers/mfd/stmfx.c
@@ -204,12 +204,11 @@ static struct irq_chip stmfx_irq_chip = {
204static irqreturn_t stmfx_irq_handler(int irq, void *data) 204static irqreturn_t stmfx_irq_handler(int irq, void *data)
205{ 205{
206 struct stmfx *stmfx = data; 206 struct stmfx *stmfx = data;
207 unsigned long n, pending; 207 unsigned long bits;
208 u32 ack; 208 u32 pending, ack;
209 int ret; 209 int n, ret;
210 210
211 ret = regmap_read(stmfx->map, STMFX_REG_IRQ_PENDING, 211 ret = regmap_read(stmfx->map, STMFX_REG_IRQ_PENDING, &pending);
212 (u32 *)&pending);
213 if (ret) 212 if (ret)
214 return IRQ_NONE; 213 return IRQ_NONE;
215 214
@@ -224,7 +223,8 @@ static irqreturn_t stmfx_irq_handler(int irq, void *data)
224 return IRQ_NONE; 223 return IRQ_NONE;
225 } 224 }
226 225
227 for_each_set_bit(n, &pending, STMFX_REG_IRQ_SRC_MAX) 226 bits = pending;
227 for_each_set_bit(n, &bits, STMFX_REG_IRQ_SRC_MAX)
228 handle_nested_irq(irq_find_mapping(stmfx->irq_domain, n)); 228 handle_nested_irq(irq_find_mapping(stmfx->irq_domain, n));
229 229
230 return IRQ_HANDLED; 230 return IRQ_HANDLED;
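The stmfx handler previously cast the address of a u32 to unsigned long * for the bit iterator, which over-reads past the 4-byte object on 64-bit targets; the fix widens the pending word into a local unsigned long first. An open-coded sketch of scanning set bits from such a widened copy:

#include <stdio.h>

int main(void)
{
	unsigned int pending = 0x00050012;	/* 32-bit status register value */
	unsigned long bits = pending;		/* widen before scanning bits */

	/* Open-coded equivalent of for_each_set_bit(n, &bits, 32); casting
	 * &pending to unsigned long * instead would read 8 bytes from a
	 * 4-byte object on LP64 targets. */
	for (int n = 0; n < 32; n++)
		if (bits & (1UL << n))
			printf("bit %d set\n", n);

	return 0;
}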
diff --git a/drivers/mtd/nand/raw/ingenic/Kconfig b/drivers/mtd/nand/raw/ingenic/Kconfig
index 19a96ce515c1..66b7cffdb0c2 100644
--- a/drivers/mtd/nand/raw/ingenic/Kconfig
+++ b/drivers/mtd/nand/raw/ingenic/Kconfig
@@ -16,7 +16,7 @@ config MTD_NAND_JZ4780
16if MTD_NAND_JZ4780 16if MTD_NAND_JZ4780
17 17
18config MTD_NAND_INGENIC_ECC 18config MTD_NAND_INGENIC_ECC
19 tristate 19 bool
20 20
21config MTD_NAND_JZ4740_ECC 21config MTD_NAND_JZ4740_ECC
22 tristate "Hardware BCH support for JZ4740 SoC" 22 tristate "Hardware BCH support for JZ4740 SoC"
diff --git a/drivers/mtd/nand/raw/ingenic/Makefile b/drivers/mtd/nand/raw/ingenic/Makefile
index 1ac4f455baea..b63d36889263 100644
--- a/drivers/mtd/nand/raw/ingenic/Makefile
+++ b/drivers/mtd/nand/raw/ingenic/Makefile
@@ -2,7 +2,9 @@
2obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o 2obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
3obj-$(CONFIG_MTD_NAND_JZ4780) += ingenic_nand.o 3obj-$(CONFIG_MTD_NAND_JZ4780) += ingenic_nand.o
4 4
5obj-$(CONFIG_MTD_NAND_INGENIC_ECC) += ingenic_ecc.o 5ingenic_nand-y += ingenic_nand_drv.o
6ingenic_nand-$(CONFIG_MTD_NAND_INGENIC_ECC) += ingenic_ecc.o
7
6obj-$(CONFIG_MTD_NAND_JZ4740_ECC) += jz4740_ecc.o 8obj-$(CONFIG_MTD_NAND_JZ4740_ECC) += jz4740_ecc.o
7obj-$(CONFIG_MTD_NAND_JZ4725B_BCH) += jz4725b_bch.o 9obj-$(CONFIG_MTD_NAND_JZ4725B_BCH) += jz4725b_bch.o
8obj-$(CONFIG_MTD_NAND_JZ4780_BCH) += jz4780_bch.o 10obj-$(CONFIG_MTD_NAND_JZ4780_BCH) += jz4780_bch.o
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index d3e085c5685a..c954189606f6 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -30,7 +30,6 @@ int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
30{ 30{
31 return ecc->ops->calculate(ecc, params, buf, ecc_code); 31 return ecc->ops->calculate(ecc, params, buf, ecc_code);
32} 32}
33EXPORT_SYMBOL(ingenic_ecc_calculate);
34 33
35/** 34/**
36 * ingenic_ecc_correct() - detect and correct bit errors 35 * ingenic_ecc_correct() - detect and correct bit errors
@@ -51,7 +50,6 @@ int ingenic_ecc_correct(struct ingenic_ecc *ecc,
51{ 50{
52 return ecc->ops->correct(ecc, params, buf, ecc_code); 51 return ecc->ops->correct(ecc, params, buf, ecc_code);
53} 52}
54EXPORT_SYMBOL(ingenic_ecc_correct);
55 53
56/** 54/**
57 * ingenic_ecc_get() - get the ECC controller device 55 * ingenic_ecc_get() - get the ECC controller device
@@ -111,7 +109,6 @@ struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *of_node)
111 } 109 }
112 return ecc; 110 return ecc;
113} 111}
114EXPORT_SYMBOL(of_ingenic_ecc_get);
115 112
116/** 113/**
117 * ingenic_ecc_release() - release the ECC controller device 114 * ingenic_ecc_release() - release the ECC controller device
@@ -122,7 +119,6 @@ void ingenic_ecc_release(struct ingenic_ecc *ecc)
122 clk_disable_unprepare(ecc->clk); 119 clk_disable_unprepare(ecc->clk);
123 put_device(ecc->dev); 120 put_device(ecc->dev);
124} 121}
125EXPORT_SYMBOL(ingenic_ecc_release);
126 122
127int ingenic_ecc_probe(struct platform_device *pdev) 123int ingenic_ecc_probe(struct platform_device *pdev)
128{ 124{
@@ -159,8 +155,3 @@ int ingenic_ecc_probe(struct platform_device *pdev)
159 return 0; 155 return 0;
160} 156}
161EXPORT_SYMBOL(ingenic_ecc_probe); 157EXPORT_SYMBOL(ingenic_ecc_probe);
162
163MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
164MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
165MODULE_DESCRIPTION("Ingenic ECC common driver");
166MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
index d7b7c0f13909..d7b7c0f13909 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index b5b68aa16eb3..6eb131292eb2 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -4662,7 +4662,6 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
4662 memorg = nanddev_get_memorg(&chip->base); 4662 memorg = nanddev_get_memorg(&chip->base);
4663 memorg->planes_per_lun = 1; 4663 memorg->planes_per_lun = 1;
4664 memorg->luns_per_target = 1; 4664 memorg->luns_per_target = 1;
4665 memorg->ntargets = 1;
4666 4665
4667 /* 4666 /*
4668 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) 4667 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
@@ -5027,6 +5026,8 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
5027 if (ret) 5026 if (ret)
5028 return ret; 5027 return ret;
5029 5028
5029 memorg->ntargets = maxchips;
5030
5030 /* Read the flash type */ 5031 /* Read the flash type */
5031 ret = nand_detect(chip, table); 5032 ret = nand_detect(chip, table);
5032 if (ret) { 5033 if (ret) {
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index b021a5720b42..89773293c64d 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -51,6 +51,7 @@
51#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4)) 51#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4))
52#define NFC_REG_SPARE_AREA 0x00A0 52#define NFC_REG_SPARE_AREA 0x00A0
53#define NFC_REG_PAT_ID 0x00A4 53#define NFC_REG_PAT_ID 0x00A4
54#define NFC_REG_MDMA_CNT 0x00C4
54#define NFC_RAM0_BASE 0x0400 55#define NFC_RAM0_BASE 0x0400
55#define NFC_RAM1_BASE 0x0800 56#define NFC_RAM1_BASE 0x0800
56 57
@@ -69,6 +70,7 @@
69#define NFC_PAGE_SHIFT(x) (((x) < 10 ? 0 : (x) - 10) << 8) 70#define NFC_PAGE_SHIFT(x) (((x) < 10 ? 0 : (x) - 10) << 8)
70#define NFC_SAM BIT(12) 71#define NFC_SAM BIT(12)
71#define NFC_RAM_METHOD BIT(14) 72#define NFC_RAM_METHOD BIT(14)
73#define NFC_DMA_TYPE_NORMAL BIT(15)
72#define NFC_DEBUG_CTL BIT(31) 74#define NFC_DEBUG_CTL BIT(31)
73 75
74/* define bit use in NFC_ST */ 76/* define bit use in NFC_ST */
@@ -205,14 +207,13 @@ static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
205 * NAND Controller capabilities structure: stores NAND controller capabilities 207 * NAND Controller capabilities structure: stores NAND controller capabilities
206 * for distinction between compatible strings. 208 * for distinction between compatible strings.
207 * 209 *
208 * @sram_through_ahb: On A23, we choose to access the internal RAM through AHB 210 * @extra_mbus_conf: Contrary to A10, A10s and A13, accessing internal RAM
209 * instead of MBUS (less configuration). A10, A10s, A13 and 211 * through MBUS on A23/A33 needs extra configuration.
210 * A20 use the MBUS but no extra configuration is needed.
211 * @reg_io_data: I/O data register 212 * @reg_io_data: I/O data register
212 * @dma_maxburst: DMA maxburst 213 * @dma_maxburst: DMA maxburst
213 */ 214 */
214struct sunxi_nfc_caps { 215struct sunxi_nfc_caps {
215 bool sram_through_ahb; 216 bool extra_mbus_conf;
216 unsigned int reg_io_data; 217 unsigned int reg_io_data;
217 unsigned int dma_maxburst; 218 unsigned int dma_maxburst;
218}; 219};
@@ -368,28 +369,12 @@ static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf,
368 goto err_unmap_buf; 369 goto err_unmap_buf;
369 } 370 }
370 371
371 /* 372 writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
372 * On A23, we suppose the "internal RAM" (p.12 of the NFC user manual) 373 nfc->regs + NFC_REG_CTL);
373 * refers to the NAND controller's internal SRAM. This memory is mapped
374 * and so is accessible from the AHB. It seems that it can also be
375 * accessed by the MBUS. MBUS accesses are mandatory when using the
376 * internal DMA instead of the external DMA engine.
377 *
378 * During DMA I/O operation, either we access this memory from the AHB
379 * by clearing the NFC_RAM_METHOD bit, or we set the bit and use the
380 * MBUS. In this case, we should also configure the MBUS DMA length
381 * NFC_REG_MDMA_CNT(0xC4) to be chunksize * nchunks. NAND I/O over MBUS
382 * are also limited to 32kiB pages.
383 */
384 if (nfc->caps->sram_through_ahb)
385 writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
386 nfc->regs + NFC_REG_CTL);
387 else
388 writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
389 nfc->regs + NFC_REG_CTL);
390
391 writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM); 374 writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
392 writel(chunksize, nfc->regs + NFC_REG_CNT); 375 writel(chunksize, nfc->regs + NFC_REG_CNT);
376 if (nfc->caps->extra_mbus_conf)
377 writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT);
393 378
394 dmat = dmaengine_submit(dmad); 379 dmat = dmaengine_submit(dmad);
395 380
@@ -2151,6 +2136,11 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
2151 dmac_cfg.src_maxburst = nfc->caps->dma_maxburst; 2136 dmac_cfg.src_maxburst = nfc->caps->dma_maxburst;
2152 dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst; 2137 dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst;
2153 dmaengine_slave_config(nfc->dmac, &dmac_cfg); 2138 dmaengine_slave_config(nfc->dmac, &dmac_cfg);
2139
2140 if (nfc->caps->extra_mbus_conf)
2141 writel(readl(nfc->regs + NFC_REG_CTL) |
2142 NFC_DMA_TYPE_NORMAL, nfc->regs + NFC_REG_CTL);
2143
2154 } else { 2144 } else {
2155 dev_warn(dev, "failed to request rxtx DMA channel\n"); 2145 dev_warn(dev, "failed to request rxtx DMA channel\n");
2156 } 2146 }
@@ -2200,7 +2190,7 @@ static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = {
2200}; 2190};
2201 2191
2202static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = { 2192static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = {
2203 .sram_through_ahb = true, 2193 .extra_mbus_conf = true,
2204 .reg_io_data = NFC_REG_A23_IO_DATA, 2194 .reg_io_data = NFC_REG_A23_IO_DATA,
2205 .dma_maxburst = 8, 2195 .dma_maxburst = 8,
2206}; 2196};
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index e5586390026a..e6c646007cda 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -180,7 +180,7 @@ static const struct spinand_info gigadevice_spinand_table[] = {
180 SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout, 180 SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
181 gd5fxgq4xa_ecc_get_status)), 181 gd5fxgq4xa_ecc_get_status)),
182 SPINAND_INFO("GD5F4GQ4xA", 0xF4, 182 SPINAND_INFO("GD5F4GQ4xA", 0xF4,
183 NAND_MEMORG(1, 2048, 64, 64, 4096, 40, 1, 1, 1), 183 NAND_MEMORG(1, 2048, 64, 64, 4096, 80, 1, 1, 1),
184 NAND_ECCREQ(8, 512), 184 NAND_ECCREQ(8, 512),
185 SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 185 SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
186 &write_cache_variants, 186 &write_cache_variants,
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 6502727049a8..21def3f8fb36 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -100,7 +100,7 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
100 100
101static const struct spinand_info macronix_spinand_table[] = { 101static const struct spinand_info macronix_spinand_table[] = {
102 SPINAND_INFO("MX35LF1GE4AB", 0x12, 102 SPINAND_INFO("MX35LF1GE4AB", 0x12,
103 NAND_MEMORG(1, 2048, 64, 64, 1024, 40, 1, 1, 1), 103 NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
104 NAND_ECCREQ(4, 512), 104 NAND_ECCREQ(4, 512),
105 SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 105 SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
106 &write_cache_variants, 106 &write_cache_variants,
@@ -109,7 +109,7 @@ static const struct spinand_info macronix_spinand_table[] = {
109 SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, 109 SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
110 mx35lf1ge4ab_ecc_get_status)), 110 mx35lf1ge4ab_ecc_get_status)),
111 SPINAND_INFO("MX35LF2GE4AB", 0x22, 111 SPINAND_INFO("MX35LF2GE4AB", 0x22,
112 NAND_MEMORG(1, 2048, 64, 64, 2048, 20, 2, 1, 1), 112 NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
113 NAND_ECCREQ(4, 512), 113 NAND_ECCREQ(4, 512),
114 SPINAND_INFO_OP_VARIANTS(&read_cache_variants, 114 SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
115 &write_cache_variants, 115 &write_cache_variants,
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 73172d7f512b..0c2ec1c21434 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -1636,6 +1636,95 @@ static int sr2_bit7_quad_enable(struct spi_nor *nor)
1636 return 0; 1636 return 0;
1637} 1637}
1638 1638
1639/**
1640 * spi_nor_clear_sr_bp() - clear the Status Register Block Protection bits.
1641 * @nor: pointer to a 'struct spi_nor'
1642 *
1643 * Read-modify-write function that clears the Block Protection bits from the
1644 * Status Register without affecting other bits.
1645 *
1646 * Return: 0 on success, -errno otherwise.
1647 */
1648static int spi_nor_clear_sr_bp(struct spi_nor *nor)
1649{
1650 int ret;
1651 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1652
1653 ret = read_sr(nor);
1654 if (ret < 0) {
1655 dev_err(nor->dev, "error while reading status register\n");
1656 return ret;
1657 }
1658
1659 write_enable(nor);
1660
1661 ret = write_sr(nor, ret & ~mask);
1662 if (ret) {
1663 dev_err(nor->dev, "write to status register failed\n");
1664 return ret;
1665 }
1666
1667 ret = spi_nor_wait_till_ready(nor);
1668 if (ret)
1669 dev_err(nor->dev, "timeout while writing status register\n");
1670 return ret;
1671}
1672
1673/**
1674 * spi_nor_spansion_clear_sr_bp() - clear the Status Register Block Protection
1675 * bits on spansion flashes.
1676 * @nor: pointer to a 'struct spi_nor'
1677 *
1678 * Read-modify-write function that clears the Block Protection bits from the
1679 * Status Register without affecting other bits. The function is tightly
1680 * coupled with the spansion_quad_enable() function. Both assume that the Write
1681 * Register with 16 bits, together with the Read Configuration Register (35h)
1682 * instructions are supported.
1683 *
1684 * Return: 0 on success, -errno otherwise.
1685 */
1686static int spi_nor_spansion_clear_sr_bp(struct spi_nor *nor)
1687{
1688 int ret;
1689 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1690 u8 sr_cr[2] = {0};
1691
1692 /* Check current Quad Enable bit value. */
1693 ret = read_cr(nor);
1694 if (ret < 0) {
1695 dev_err(nor->dev,
1696 "error while reading configuration register\n");
1697 return ret;
1698 }
1699
1700 /*
1701 * When the configuration register Quad Enable bit is one, only the
1702 * Write Status (01h) command with two data bytes may be used.
1703 */
1704 if (ret & CR_QUAD_EN_SPAN) {
1705 sr_cr[1] = ret;
1706
1707 ret = read_sr(nor);
1708 if (ret < 0) {
1709 dev_err(nor->dev,
1710 "error while reading status register\n");
1711 return ret;
1712 }
1713 sr_cr[0] = ret & ~mask;
1714
1715 ret = write_sr_cr(nor, sr_cr);
1716 if (ret)
1717 dev_err(nor->dev, "16-bit write register failed\n");
1718 return ret;
1719 }
1720
1721 /*
1722 * If the Quad Enable bit is zero, use the Write Status (01h) command
1723 * with one data byte.
1724 */
1725 return spi_nor_clear_sr_bp(nor);
1726}
1727
1639/* Used when the "_ext_id" is two bytes at most */ 1728/* Used when the "_ext_id" is two bytes at most */
1640#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ 1729#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
1641 .id = { \ 1730 .id = { \
@@ -3660,6 +3749,8 @@ static int spi_nor_init_params(struct spi_nor *nor,
3660 default: 3749 default:
3661 /* Kept only for backward compatibility purpose. */ 3750 /* Kept only for backward compatibility purpose. */
3662 params->quad_enable = spansion_quad_enable; 3751 params->quad_enable = spansion_quad_enable;
3752 if (nor->clear_sr_bp)
3753 nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
3663 break; 3754 break;
3664 } 3755 }
3665 3756
@@ -3912,17 +4003,13 @@ static int spi_nor_init(struct spi_nor *nor)
3912{ 4003{
3913 int err; 4004 int err;
3914 4005
3915 /* 4006 if (nor->clear_sr_bp) {
3916 * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up 4007 err = nor->clear_sr_bp(nor);
3917 * with the software protection bits set 4008 if (err) {
3918 */ 4009 dev_err(nor->dev,
3919 if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL || 4010 "fail to clear block protection bits\n");
3920 JEDEC_MFR(nor->info) == SNOR_MFR_INTEL || 4011 return err;
3921 JEDEC_MFR(nor->info) == SNOR_MFR_SST || 4012 }
3922 nor->info->flags & SPI_NOR_HAS_LOCK) {
3923 write_enable(nor);
3924 write_sr(nor, 0);
3925 spi_nor_wait_till_ready(nor);
3926 } 4013 }
3927 4014
3928 if (nor->quad_enable) { 4015 if (nor->quad_enable) {
@@ -4047,6 +4134,16 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
4047 if (info->flags & SPI_S3AN) 4134 if (info->flags & SPI_S3AN)
4048 nor->flags |= SNOR_F_READY_XSR_RDY; 4135 nor->flags |= SNOR_F_READY_XSR_RDY;
4049 4136
4137 /*
4138 * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
4139 * with the software protection bits set.
4140 */
4141 if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
4142 JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
4143 JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
4144 nor->info->flags & SPI_NOR_HAS_LOCK)
4145 nor->clear_sr_bp = spi_nor_clear_sr_bp;
4146
4050 /* Parse the Serial Flash Discoverable Parameters table. */ 4147 /* Parse the Serial Flash Discoverable Parameters table. */
4051 ret = spi_nor_init_params(nor, &params); 4148 ret = spi_nor_init_params(nor, &params);
4052 if (ret) 4149 if (ret)
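spi_nor_clear_sr_bp() above is a read-modify-write: it reads the Status Register, clears only BP2/BP1/BP0, and writes the result back so the remaining bits survive. A standalone sketch of the masking step on a plain byte (the sample register value is illustrative, not from a real flash):

#include <stdio.h>
#include <stdint.h>

#define SR_BP0	0x04	/* block protection bits, as laid out in the status register */
#define SR_BP1	0x08
#define SR_BP2	0x10

/* Clear only the block-protection bits, preserving everything else. */
static uint8_t clear_sr_bp(uint8_t sr)
{
	const uint8_t mask = SR_BP2 | SR_BP1 | SR_BP0;

	return sr & ~mask;
}

int main(void)
{
	uint8_t sr = 0x9c;	/* BP bits set alongside other flags */

	printf("before: 0x%02x after: 0x%02x\n", sr, clear_sr_bp(sr));
	return 0;
}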
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 407f4095a37a..799fc38c5c34 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4320,12 +4320,12 @@ void bond_setup(struct net_device *bond_dev)
4320 bond_dev->features |= NETIF_F_NETNS_LOCAL; 4320 bond_dev->features |= NETIF_F_NETNS_LOCAL;
4321 4321
4322 bond_dev->hw_features = BOND_VLAN_FEATURES | 4322 bond_dev->hw_features = BOND_VLAN_FEATURES |
4323 NETIF_F_HW_VLAN_CTAG_TX |
4324 NETIF_F_HW_VLAN_CTAG_RX | 4323 NETIF_F_HW_VLAN_CTAG_RX |
4325 NETIF_F_HW_VLAN_CTAG_FILTER; 4324 NETIF_F_HW_VLAN_CTAG_FILTER;
4326 4325
4327 bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; 4326 bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
4328 bond_dev->features |= bond_dev->hw_features; 4327 bond_dev->features |= bond_dev->hw_features;
4328 bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
4329} 4329}
4330 4330
4331/* Destroy a bonding device. 4331/* Destroy a bonding device.
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index f46086fa9064..db91b213eae1 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -436,9 +436,9 @@ int ksz_switch_register(struct ksz_device *dev,
436 return PTR_ERR(dev->reset_gpio); 436 return PTR_ERR(dev->reset_gpio);
437 437
438 if (dev->reset_gpio) { 438 if (dev->reset_gpio) {
439 gpiod_set_value(dev->reset_gpio, 1); 439 gpiod_set_value_cansleep(dev->reset_gpio, 1);
440 mdelay(10); 440 mdelay(10);
441 gpiod_set_value(dev->reset_gpio, 0); 441 gpiod_set_value_cansleep(dev->reset_gpio, 0);
442 } 442 }
443 443
444 mutex_init(&dev->dev_mutex); 444 mutex_init(&dev->dev_mutex);
@@ -487,7 +487,7 @@ void ksz_switch_remove(struct ksz_device *dev)
487 dsa_unregister_switch(dev->ds); 487 dsa_unregister_switch(dev->ds);
488 488
489 if (dev->reset_gpio) 489 if (dev->reset_gpio)
490 gpiod_set_value(dev->reset_gpio, 1); 490 gpiod_set_value_cansleep(dev->reset_gpio, 1);
491 491
492} 492}
493EXPORT_SYMBOL(ksz_switch_remove); 493EXPORT_SYMBOL(ksz_switch_remove);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 18bc035da850..1fff462a4175 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -843,9 +843,14 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
843 return err; 843 return err;
844 844
845 if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { 845 if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
846 if (hweight < AQ_VLAN_MAX_FILTERS) 846 if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) {
847 err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, true); 847 err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
848 !(aq_nic->packet_filter & IFF_PROMISC));
849 aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
850 } else {
848 /* otherwise left in promiscue mode */ 851 /* otherwise left in promiscue mode */
852 aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
853 }
849 } 854 }
850 855
851 return err; 856 return err;
@@ -866,6 +871,7 @@ int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
866 if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl)) 871 if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
867 return -EOPNOTSUPP; 872 return -EOPNOTSUPP;
868 873
874 aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
869 err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false); 875 err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
870 if (err) 876 if (err)
871 return err; 877 return err;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 0da5e161ec5d..41172fbebddd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -126,6 +126,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
126 126
127 cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk; 127 cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
128 cfg->features = cfg->aq_hw_caps->hw_features; 128 cfg->features = cfg->aq_hw_caps->hw_features;
129 cfg->is_vlan_force_promisc = true;
129} 130}
130 131
131static int aq_nic_update_link_status(struct aq_nic_s *self) 132static int aq_nic_update_link_status(struct aq_nic_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index eb2e3c7c36f9..0f22f5d5691b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -35,6 +35,7 @@ struct aq_nic_cfg_s {
35 u32 flow_control; 35 u32 flow_control;
36 u32 link_speed_msk; 36 u32 link_speed_msk;
37 u32 wol; 37 u32 wol;
38 bool is_vlan_force_promisc;
38 u16 is_mc_list_enabled; 39 u16 is_mc_list_enabled;
39 u16 mc_list_count; 40 u16 mc_list_count;
40 bool is_autoneg; 41 bool is_autoneg;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 1c7593d54035..13ac2661a473 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -778,8 +778,15 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
778 unsigned int packet_filter) 778 unsigned int packet_filter)
779{ 779{
780 unsigned int i = 0U; 780 unsigned int i = 0U;
781 struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
782
783 hw_atl_rpfl2promiscuous_mode_en_set(self,
784 IS_FILTER_ENABLED(IFF_PROMISC));
785
786 hw_atl_rpf_vlan_prom_mode_en_set(self,
787 IS_FILTER_ENABLED(IFF_PROMISC) ||
788 cfg->is_vlan_force_promisc);
781 789
782 hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
783 hw_atl_rpfl2multicast_flr_en_set(self, 790 hw_atl_rpfl2multicast_flr_en_set(self,
784 IS_FILTER_ENABLED(IFF_ALLMULTI), 0); 791 IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
785 792
@@ -788,13 +795,13 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
788 795
789 hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); 796 hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
790 797
791 self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST); 798 cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
792 799
793 for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i) 800 for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
794 hw_atl_rpfl2_uc_flr_en_set(self, 801 hw_atl_rpfl2_uc_flr_en_set(self,
795 (self->aq_nic_cfg->is_mc_list_enabled && 802 (cfg->is_mc_list_enabled &&
796 (i <= self->aq_nic_cfg->mc_list_count)) ? 803 (i <= cfg->mc_list_count)) ?
797 1U : 0U, i); 804 1U : 0U, i);
798 805
799 return aq_hw_err_from_flags(self); 806 return aq_hw_err_from_flags(self);
800} 807}
@@ -1086,7 +1093,7 @@ static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
1086static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable) 1093static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
1087{ 1094{
1088 /* set promisc in case of disabing the vland filter */ 1095 /* set promisc in case of disabing the vland filter */
1089 hw_atl_rpf_vlan_prom_mode_en_set(self, !!!enable); 1096 hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
1090 1097
1091 return aq_hw_err_from_flags(self); 1098 return aq_hw_err_from_flags(self);
1092} 1099}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 2375a13bb446..262a28ff81fc 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4180,7 +4180,7 @@ static int macb_probe(struct platform_device *pdev)
4180 if (PTR_ERR(mac) == -EPROBE_DEFER) { 4180 if (PTR_ERR(mac) == -EPROBE_DEFER) {
4181 err = -EPROBE_DEFER; 4181 err = -EPROBE_DEFER;
4182 goto err_out_free_netdev; 4182 goto err_out_free_netdev;
4183 } else if (!IS_ERR(mac)) { 4183 } else if (!IS_ERR_OR_NULL(mac)) {
4184 ether_addr_copy(bp->dev->dev_addr, mac); 4184 ether_addr_copy(bp->dev->dev_addr, mac);
4185 } else { 4185 } else {
4186 macb_get_hwaddr(bp); 4186 macb_get_hwaddr(bp);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 8a6785173228..492f8769ac12 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -891,7 +891,7 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
891 u64 *data) 891 u64 *data)
892{ 892{
893 struct be_adapter *adapter = netdev_priv(netdev); 893 struct be_adapter *adapter = netdev_priv(netdev);
894 int status; 894 int status, cnt;
895 u8 link_status = 0; 895 u8 link_status = 0;
896 896
897 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) { 897 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
@@ -902,6 +902,9 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
902 902
903 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); 903 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
904 904
905 /* check link status before offline tests */
906 link_status = netif_carrier_ok(netdev);
907
905 if (test->flags & ETH_TEST_FL_OFFLINE) { 908 if (test->flags & ETH_TEST_FL_OFFLINE) {
906 if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0) 909 if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
907 test->flags |= ETH_TEST_FL_FAILED; 910 test->flags |= ETH_TEST_FL_FAILED;
@@ -922,13 +925,26 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
922 test->flags |= ETH_TEST_FL_FAILED; 925 test->flags |= ETH_TEST_FL_FAILED;
923 } 926 }
924 927
925 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0); 928 /* link status was down prior to test */
926 if (status) { 929 if (!link_status) {
927 test->flags |= ETH_TEST_FL_FAILED;
928 data[4] = -1;
929 } else if (!link_status) {
930 test->flags |= ETH_TEST_FL_FAILED; 930 test->flags |= ETH_TEST_FL_FAILED;
931 data[4] = 1; 931 data[4] = 1;
932 return;
933 }
934
935 for (cnt = 10; cnt; cnt--) {
936 status = be_cmd_link_status_query(adapter, NULL, &link_status,
937 0);
938 if (status) {
939 test->flags |= ETH_TEST_FL_FAILED;
940 data[4] = -1;
941 break;
942 }
943
944 if (link_status)
945 break;
946
947 msleep_interruptible(500);
932 } 948 }
933} 949}
934 950
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 67f9bb6e941b..9b036c857b1d 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1057,7 +1057,7 @@ sis900_open(struct net_device *net_dev)
1057 sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); 1057 sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
1058 1058
1059 /* Enable all known interrupts by setting the interrupt mask. */ 1059 /* Enable all known interrupts by setting the interrupt mask. */
1060 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); 1060 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
1061 sw32(cr, RxENA | sr32(cr)); 1061 sw32(cr, RxENA | sr32(cr));
1062 sw32(ier, IE); 1062 sw32(ier, IE);
1063 1063
@@ -1578,7 +1578,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
1578 sw32(txdp, sis_priv->tx_ring_dma); 1578 sw32(txdp, sis_priv->tx_ring_dma);
1579 1579
1580 /* Enable all known interrupts by setting the interrupt mask. */ 1580 /* Enable all known interrupts by setting the interrupt mask. */
1581 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); 1581 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
1582} 1582}
1583 1583
1584/** 1584/**
@@ -1618,7 +1618,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1618 spin_unlock_irqrestore(&sis_priv->lock, flags); 1618 spin_unlock_irqrestore(&sis_priv->lock, flags);
1619 return NETDEV_TX_OK; 1619 return NETDEV_TX_OK;
1620 } 1620 }
1621 sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len); 1621 sis_priv->tx_ring[entry].cmdsts = (OWN | INTR | skb->len);
1622 sw32(cr, TxENA | sr32(cr)); 1622 sw32(cr, TxENA | sr32(cr));
1623 1623
1624 sis_priv->cur_tx ++; 1624 sis_priv->cur_tx ++;
@@ -1674,7 +1674,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
1674 do { 1674 do {
1675 status = sr32(isr); 1675 status = sr32(isr);
1676 1676
1677 if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0) 1677 if ((status & (HIBERR|TxURN|TxERR|TxIDLE|TxDESC|RxORN|RxERR|RxOK)) == 0)
1678 			/* nothing interesting happened */ 1678 			/* nothing interesting happened */
1679 break; 1679 break;
1680 handled = 1; 1680 handled = 1;
@@ -1684,7 +1684,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
1684 /* Rx interrupt */ 1684 /* Rx interrupt */
1685 sis900_rx(net_dev); 1685 sis900_rx(net_dev);
1686 1686
1687 if (status & (TxURN | TxERR | TxIDLE)) 1687 if (status & (TxURN | TxERR | TxIDLE | TxDESC))
1688 /* Tx interrupt */ 1688 /* Tx interrupt */
1689 sis900_finish_xmit(net_dev); 1689 sis900_finish_xmit(net_dev);
1690 1690
@@ -1896,8 +1896,8 @@ static void sis900_finish_xmit (struct net_device *net_dev)
1896 1896
1897 if (tx_status & OWN) { 1897 if (tx_status & OWN) {
1898 /* The packet is not transmitted yet (owned by hardware) ! 1898 /* The packet is not transmitted yet (owned by hardware) !
1899 * Note: the interrupt is generated only when Tx Machine 1899 * Note: this is an almost impossible condition
1900 * is idle, so this is an almost impossible case */ 1900 * in case of TxDESC ('descriptor interrupt') */
1901 break; 1901 break;
1902 } 1902 }
1903 1903
@@ -2473,7 +2473,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
2473 sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); 2473 sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
2474 2474
2475 /* Enable all known interrupts by setting the interrupt mask. */ 2475 /* Enable all known interrupts by setting the interrupt mask. */
2476 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); 2476 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
2477 sw32(cr, RxENA | sr32(cr)); 2477 sw32(cr, RxENA | sr32(cr));
2478 sw32(ier, IE); 2478 sw32(ier, IE);
2479 2479
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 2dcdf761d525..020159622559 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -112,7 +112,7 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
112 * programmed with (2^32 – <new_sec_value>) 112 * programmed with (2^32 – <new_sec_value>)
113 */ 113 */
114 if (gmac4) 114 if (gmac4)
115 sec = (100000000ULL - sec); 115 sec = -sec;
116 116
117 value = readl(ioaddr + PTP_TCR); 117 value = readl(ioaddr + PTP_TCR);
118 if (value & PTP_TCR_TSCTRLSSR) 118 if (value & PTP_TCR_TSCTRLSSR)
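
The stmmac_hwtstamp change leans on unsigned wrap-around: negating a 32-bit unsigned value yields exactly 2^32 minus that value, which is what the comment above it asks for, while the old 100000000ULL literal did not. A standalone check of that identity (the value 7 is arbitrary):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t sec = 7;                       /* example subtrahend */
        uint32_t wrapped = -sec;                /* unsigned negation: 2^32 - sec */
        uint64_t expected = (1ULL << 32) - sec;

        printf("-sec as u32 = %" PRIu32 "\n", wrapped);
        printf("2^32 - sec  = %" PRIu64 "\n", expected);
        printf("match: %s\n", (uint64_t)wrapped == expected ? "yes" : "no");
        return 0;
}
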
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 06dd51f47cfd..06358fe5b245 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2947,12 +2947,15 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2947 2947
2948 /* Manage tx mitigation */ 2948 /* Manage tx mitigation */
2949 tx_q->tx_count_frames += nfrags + 1; 2949 tx_q->tx_count_frames += nfrags + 1;
2950 if (priv->tx_coal_frames <= tx_q->tx_count_frames) { 2950 if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
2951 !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
2952 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2953 priv->hwts_tx_en)) {
2954 stmmac_tx_timer_arm(priv, queue);
2955 } else {
2956 tx_q->tx_count_frames = 0;
2951 stmmac_set_tx_ic(priv, desc); 2957 stmmac_set_tx_ic(priv, desc);
2952 priv->xstats.tx_set_ic_bit++; 2958 priv->xstats.tx_set_ic_bit++;
2953 tx_q->tx_count_frames = 0;
2954 } else {
2955 stmmac_tx_timer_arm(priv, queue);
2956 } 2959 }
2957 2960
2958 skb_tx_timestamp(skb); 2961 skb_tx_timestamp(skb);
@@ -3166,12 +3169,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3166 * element in case of no SG. 3169 * element in case of no SG.
3167 */ 3170 */
3168 tx_q->tx_count_frames += nfrags + 1; 3171 tx_q->tx_count_frames += nfrags + 1;
3169 if (priv->tx_coal_frames <= tx_q->tx_count_frames) { 3172 if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3173 !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
3174 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3175 priv->hwts_tx_en)) {
3176 stmmac_tx_timer_arm(priv, queue);
3177 } else {
3178 tx_q->tx_count_frames = 0;
3170 stmmac_set_tx_ic(priv, desc); 3179 stmmac_set_tx_ic(priv, desc);
3171 priv->xstats.tx_set_ic_bit++; 3180 priv->xstats.tx_set_ic_bit++;
3172 tx_q->tx_count_frames = 0;
3173 } else {
3174 stmmac_tx_timer_arm(priv, queue);
3175 } 3181 }
3176 3182
3177 skb_tx_timestamp(skb); 3183 skb_tx_timestamp(skb);
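
Both stmmac_main hunks restructure the same decision: keep coalescing, i.e. arm the tx timer, only while the queued-frame count stays below the threshold and the packet does not need a hardware timestamp on a core at or above DWMAC_CORE_4_00 with tx timestamping enabled; otherwise set the interrupt-on-completion bit and reset the counter. The toy model below mirrors that branch with invented names (txq_model, want_completion_irq) and folds the three-part timestamp condition into a single hw_tstamp_pkt flag:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model of the coalescing decision; not the driver's state. */
struct txq_model {
        unsigned int coal_frames;       /* threshold, like tx_coal_frames */
        unsigned int count_frames;      /* frames queued since the last IC bit */
};

/* Returns true when the descriptor should request a completion interrupt. */
static bool want_completion_irq(struct txq_model *q, unsigned int nfrags,
                                bool hw_tstamp_pkt)
{
        q->count_frames += nfrags + 1;

        if (q->coal_frames > q->count_frames && !hw_tstamp_pkt)
                return false;           /* keep coalescing, (re)arm the timer */

        q->count_frames = 0;            /* threshold hit or timestamp wanted */
        return true;
}

int main(void)
{
        struct txq_model q = { .coal_frames = 4, .count_frames = 0 };

        for (int i = 0; i < 6; i++)
                printf("packet %d -> %s\n", i,
                       want_completion_irq(&q, 0, i == 2) ? "IC bit" : "timer");
        return 0;
}
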
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index ff61dd8748de..66c8e65f6872 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -63,6 +63,7 @@ MODULE_AUTHOR("Frank Cusack <fcusack@fcusack.com>");
63MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point Encryption support"); 63MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point Encryption support");
64MODULE_LICENSE("Dual BSD/GPL"); 64MODULE_LICENSE("Dual BSD/GPL");
65MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE)); 65MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
66MODULE_SOFTDEP("pre: arc4");
66MODULE_VERSION("1.0.2"); 67MODULE_VERSION("1.0.2");
67 68
68static unsigned int 69static unsigned int
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index b48006e7fa2f..36916bf51ee6 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2128,12 +2128,12 @@ static void team_setup(struct net_device *dev)
2128 dev->features |= NETIF_F_NETNS_LOCAL; 2128 dev->features |= NETIF_F_NETNS_LOCAL;
2129 2129
2130 dev->hw_features = TEAM_VLAN_FEATURES | 2130 dev->hw_features = TEAM_VLAN_FEATURES |
2131 NETIF_F_HW_VLAN_CTAG_TX |
2132 NETIF_F_HW_VLAN_CTAG_RX | 2131 NETIF_F_HW_VLAN_CTAG_RX |
2133 NETIF_F_HW_VLAN_CTAG_FILTER; 2132 NETIF_F_HW_VLAN_CTAG_FILTER;
2134 2133
2135 dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; 2134 dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
2136 dev->features |= dev->hw_features; 2135 dev->features |= dev->hw_features;
2136 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2137} 2137}
2138 2138
2139static int team_newlink(struct net *src_net, struct net_device *dev, 2139static int team_newlink(struct net *src_net, struct net_device *dev,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index d080f8048e52..8b4ad10cf940 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1482,7 +1482,7 @@ static int qmi_wwan_probe(struct usb_interface *intf,
1482 * different. Ignore the current interface if the number of endpoints 1482 * different. Ignore the current interface if the number of endpoints
1483 * equals the number for the diag interface (two). 1483 * equals the number for the diag interface (two).
1484 */ 1484 */
1485 info = (void *)&id->driver_info; 1485 info = (void *)id->driver_info;
1486 1486
1487 if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) { 1487 if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
1488 if (desc->bNumEndpoints == 2) 1488 if (desc->bNumEndpoints == 2)
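
The qmi_wwan fix hinges on what driver_info holds: it is an unsigned long that stores a pointer value, so the correct recovery is to cast the stored value back to a pointer, not to take the address of the field. The sketch below reproduces the difference with stand-in types (usb_device_id_model and quirk_info are not the real USB core structures):

#include <stdio.h>

struct quirk_info { unsigned long data; };

struct usb_device_id_model {
        unsigned long driver_info;      /* holds a pointer cast to an integer */
};

int main(void)
{
        static const struct quirk_info quirks = { .data = 0x1 };
        struct usb_device_id_model id = {
                .driver_info = (unsigned long)&quirks,
        };

        /* Correct: turn the stored integer back into the pointer it encodes. */
        const struct quirk_info *ok = (const struct quirk_info *)id.driver_info;

        printf("quirk table       %p\n", (void *)&quirks);
        printf("correct cast      %p\n", (void *)ok);
        printf("address of field  %p  (the wrong thing entirely)\n",
               (void *)&id.driver_info);
        printf("quirk data via correct cast: %#lx\n", ok->data);
        return 0;
}
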
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 11b9525dff27..311b0cc6eb98 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -350,8 +350,8 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
350{ 350{
351 struct dst_entry *dst = skb_dst(skb); 351 struct dst_entry *dst = skb_dst(skb);
352 struct net_device *dev = dst->dev; 352 struct net_device *dev = dst->dev;
353 const struct in6_addr *nexthop;
353 struct neighbour *neigh; 354 struct neighbour *neigh;
354 struct in6_addr *nexthop;
355 int ret; 355 int ret;
356 356
357 nf_reset(skb); 357 nf_reset(skb);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 98af9ecd4a90..ca3793002e2f 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -859,7 +859,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
859 pci_dev->bus->self->skip_bus_pm = true; 859 pci_dev->bus->self->skip_bus_pm = true;
860 } 860 }
861 861
862 if (pci_dev->skip_bus_pm && !pm_suspend_via_firmware()) { 862 if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) {
863 dev_dbg(dev, "PCI PM: Skipped\n"); 863 dev_dbg(dev, "PCI PM: Skipped\n");
864 goto Fixup; 864 goto Fixup;
865 } 865 }
@@ -914,10 +914,10 @@ static int pci_pm_resume_noirq(struct device *dev)
914 /* 914 /*
915 * In the suspend-to-idle case, devices left in D0 during suspend will 915 * In the suspend-to-idle case, devices left in D0 during suspend will
916 * stay in D0, so it is not necessary to restore or update their 916 * stay in D0, so it is not necessary to restore or update their
917 * configuration here and attempting to put them into D0 again may 917 * configuration here and attempting to put them into D0 again is
918 * confuse some firmware, so avoid doing that. 918 * pointless, so avoid doing that.
919 */ 919 */
920 if (!pci_dev->skip_bus_pm || pm_suspend_via_firmware()) 920 if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform()))
921 pci_pm_default_resume_early(pci_dev); 921 pci_pm_default_resume_early(pci_dev);
922 922
923 pci_fixup_device(pci_fixup_resume_early, pci_dev); 923 pci_fixup_device(pci_fixup_resume_early, pci_dev);
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index f464f8cd274b..7e526bcf5e0b 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -113,6 +113,8 @@ static void mtk_eint_mask(struct irq_data *d)
113 void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq, 113 void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
114 eint->regs->mask_set); 114 eint->regs->mask_set);
115 115
116 eint->cur_mask[d->hwirq >> 5] &= ~mask;
117
116 writel(mask, reg); 118 writel(mask, reg);
117} 119}
118 120
@@ -123,6 +125,8 @@ static void mtk_eint_unmask(struct irq_data *d)
123 void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq, 125 void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
124 eint->regs->mask_clr); 126 eint->regs->mask_clr);
125 127
128 eint->cur_mask[d->hwirq >> 5] |= mask;
129
126 writel(mask, reg); 130 writel(mask, reg);
127 131
128 if (eint->dual_edge[d->hwirq]) 132 if (eint->dual_edge[d->hwirq])
@@ -217,19 +221,6 @@ static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
217 } 221 }
218} 222}
219 223
220static void mtk_eint_chip_read_mask(const struct mtk_eint *eint,
221 void __iomem *base, u32 *buf)
222{
223 int port;
224 void __iomem *reg;
225
226 for (port = 0; port < eint->hw->ports; port++) {
227 reg = base + eint->regs->mask + (port << 2);
228 buf[port] = ~readl_relaxed(reg);
229 /* Mask is 0 when irq is enabled, and 1 when disabled. */
230 }
231}
232
233static int mtk_eint_irq_request_resources(struct irq_data *d) 224static int mtk_eint_irq_request_resources(struct irq_data *d)
234{ 225{
235 struct mtk_eint *eint = irq_data_get_irq_chip_data(d); 226 struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
@@ -318,7 +309,7 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
318 struct irq_chip *chip = irq_desc_get_chip(desc); 309 struct irq_chip *chip = irq_desc_get_chip(desc);
319 struct mtk_eint *eint = irq_desc_get_handler_data(desc); 310 struct mtk_eint *eint = irq_desc_get_handler_data(desc);
320 unsigned int status, eint_num; 311 unsigned int status, eint_num;
321 int offset, index, virq; 312 int offset, mask_offset, index, virq;
322 void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat); 313 void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat);
323 int dual_edge, start_level, curr_level; 314 int dual_edge, start_level, curr_level;
324 315
@@ -328,10 +319,24 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
328 status = readl(reg); 319 status = readl(reg);
329 while (status) { 320 while (status) {
330 offset = __ffs(status); 321 offset = __ffs(status);
322 mask_offset = eint_num >> 5;
331 index = eint_num + offset; 323 index = eint_num + offset;
332 virq = irq_find_mapping(eint->domain, index); 324 virq = irq_find_mapping(eint->domain, index);
333 status &= ~BIT(offset); 325 status &= ~BIT(offset);
334 326
327 /*
 328	 * If we get an interrupt on a pin that was only required
 329	 * for wake (but no real interrupt requested), mask the
 330	 * interrupt (as mtk_eint_resume would do anyway later
331 * in the resume sequence).
332 */
333 if (eint->wake_mask[mask_offset] & BIT(offset) &&
334 !(eint->cur_mask[mask_offset] & BIT(offset))) {
335 writel_relaxed(BIT(offset), reg -
336 eint->regs->stat +
337 eint->regs->mask_set);
338 }
339
335 dual_edge = eint->dual_edge[index]; 340 dual_edge = eint->dual_edge[index];
336 if (dual_edge) { 341 if (dual_edge) {
337 /* 342 /*
@@ -370,7 +375,6 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
370 375
371int mtk_eint_do_suspend(struct mtk_eint *eint) 376int mtk_eint_do_suspend(struct mtk_eint *eint)
372{ 377{
373 mtk_eint_chip_read_mask(eint, eint->base, eint->cur_mask);
374 mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask); 378 mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask);
375 379
376 return 0; 380 return 0;
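
The mtk-eint rework stops reading the hardware mask registers back at suspend time and instead keeps cur_mask current in software whenever a line is masked or unmasked. A minimal sketch of that shadow-bitmap bookkeeping; NR_LINES, line_mask() and line_unmask() are illustrative names, not the driver's interface:

#include <inttypes.h>
#include <stdio.h>

#define NR_LINES 64
static uint32_t cur_mask[NR_LINES / 32];        /* bit set = line enabled */

static void line_mask(unsigned int hwirq)
{
        cur_mask[hwirq >> 5] &= ~(1u << (hwirq & 31));
        /* ...then write the hardware mask-set register... */
}

static void line_unmask(unsigned int hwirq)
{
        cur_mask[hwirq >> 5] |= 1u << (hwirq & 31);
        /* ...then write the hardware mask-clear register... */
}

int main(void)
{
        line_unmask(3);
        line_unmask(40);
        line_mask(3);
        printf("word0=%#" PRIx32 " word1=%#" PRIx32 "\n",
               cur_mask[0], cur_mask[1]);
        return 0;
}
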
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 568ca96cdb6d..3a235487e38d 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -771,6 +771,10 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
771 if (ret < 0) 771 if (ret < 0)
772 goto fail; 772 goto fail;
773 773
774 ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
775 if (ret < 0)
776 goto fail;
777
774 mcp->irq_controller = 778 mcp->irq_controller =
775 device_property_read_bool(dev, "interrupt-controller"); 779 device_property_read_bool(dev, "interrupt-controller");
776 if (mcp->irq && mcp->irq_controller) { 780 if (mcp->irq && mcp->irq_controller) {
@@ -812,10 +816,6 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
812 goto fail; 816 goto fail;
813 } 817 }
814 818
815 ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
816 if (ret < 0)
817 goto fail;
818
819 if (one_regmap_config) { 819 if (one_regmap_config) {
820 mcp->pinctrl_desc.name = devm_kasprintf(dev, GFP_KERNEL, 820 mcp->pinctrl_desc.name = devm_kasprintf(dev, GFP_KERNEL,
821 "mcp23xxx-pinctrl.%d", raw_chip_address); 821 "mcp23xxx-pinctrl.%d", raw_chip_address);
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 3b4ca52d2456..fb76fb2e9ea5 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -396,7 +396,7 @@ static int ocelot_pin_function_idx(struct ocelot_pinctrl *info,
396 return -1; 396 return -1;
397} 397}
398 398
399#define REG(r, info, p) ((r) * (info)->stride + (4 * ((p) / 32))) 399#define REG_ALT(msb, info, p) (OCELOT_GPIO_ALT0 * (info)->stride + 4 * ((msb) + ((info)->stride * ((p) / 32))))
400 400
401static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev, 401static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
402 unsigned int selector, unsigned int group) 402 unsigned int selector, unsigned int group)
@@ -412,19 +412,21 @@ static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
412 412
413 /* 413 /*
414 * f is encoded on two bits. 414 * f is encoded on two bits.
415 * bit 0 of f goes in BIT(pin) of ALT0, bit 1 of f goes in BIT(pin) of 415 * bit 0 of f goes in BIT(pin) of ALT[0], bit 1 of f goes in BIT(pin) of
416 * ALT1 416 * ALT[1]
417 * This is racy because both registers can't be updated at the same time 417 * This is racy because both registers can't be updated at the same time
418 * but it doesn't matter much for now. 418 * but it doesn't matter much for now.
419 */ 419 */
420 regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT0, info, pin->pin), 420 regmap_update_bits(info->map, REG_ALT(0, info, pin->pin),
421 BIT(p), f << p); 421 BIT(p), f << p);
422 regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT1, info, pin->pin), 422 regmap_update_bits(info->map, REG_ALT(1, info, pin->pin),
423 BIT(p), f << (p - 1)); 423 BIT(p), f << (p - 1));
424 424
425 return 0; 425 return 0;
426} 426}
427 427
428#define REG(r, info, p) ((r) * (info)->stride + (4 * ((p) / 32)))
429
428static int ocelot_gpio_set_direction(struct pinctrl_dev *pctldev, 430static int ocelot_gpio_set_direction(struct pinctrl_dev *pctldev,
429 struct pinctrl_gpio_range *range, 431 struct pinctrl_gpio_range *range,
430 unsigned int pin, bool input) 432 unsigned int pin, bool input)
@@ -432,7 +434,7 @@ static int ocelot_gpio_set_direction(struct pinctrl_dev *pctldev,
432 struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); 434 struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
433 unsigned int p = pin % 32; 435 unsigned int p = pin % 32;
434 436
435 regmap_update_bits(info->map, REG(OCELOT_GPIO_OE, info, p), BIT(p), 437 regmap_update_bits(info->map, REG(OCELOT_GPIO_OE, info, pin), BIT(p),
436 input ? 0 : BIT(p)); 438 input ? 0 : BIT(p));
437 439
438 return 0; 440 return 0;
@@ -445,9 +447,9 @@ static int ocelot_gpio_request_enable(struct pinctrl_dev *pctldev,
445 struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); 447 struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
446 unsigned int p = offset % 32; 448 unsigned int p = offset % 32;
447 449
448 regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT0, info, offset), 450 regmap_update_bits(info->map, REG_ALT(0, info, offset),
449 BIT(p), 0); 451 BIT(p), 0);
450 regmap_update_bits(info->map, REG(OCELOT_GPIO_ALT1, info, offset), 452 regmap_update_bits(info->map, REG_ALT(1, info, offset),
451 BIT(p), 0); 453 BIT(p), 0);
452 454
453 return 0; 455 return 0;
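
The new REG_ALT() macro addresses the alternate-function bit words differently from the generic REG() macro once the stride (words per register) exceeds one. The program below only evaluates REG_ALT()'s arithmetic for a hypothetical stride-2 layout; the 0x18 value chosen for OCELOT_GPIO_ALT0 is assumed purely for illustration:

#include <stdio.h>

/* Assumed placeholder so the macro's arithmetic can be evaluated. */
#define OCELOT_GPIO_ALT0 0x18

struct info_model { unsigned int stride; };

/* Same expression as the REG_ALT() macro in the hunk above. */
static unsigned int reg_alt(unsigned int msb, const struct info_model *info,
                            unsigned int p)
{
        return OCELOT_GPIO_ALT0 * info->stride +
               4 * (msb + info->stride * (p / 32));
}

int main(void)
{
        const struct info_model info = { .stride = 2 }; /* e.g. a 64-pin part */
        const unsigned int pins[] = { 0, 31, 32, 63 };

        for (unsigned int i = 0; i < sizeof(pins) / sizeof(pins[0]); i++)
                printf("pin %2u: REG_ALT(0) = %#x, REG_ALT(1) = %#x\n",
                       pins[i], reg_alt(0, &info, pins[i]),
                       reg_alt(1, &info, pins[i]));
        return 0;
}

With stride 2 the ALT words come out in per-group pairs: 0x30/0x34 for pins 0-31 and 0x38/0x3c for pins 32-63 in this made-up layout.
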
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index ecee4b3ff073..377b07b2feeb 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -763,6 +763,7 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
763 struct pvscsi_adapter *adapter = shost_priv(host); 763 struct pvscsi_adapter *adapter = shost_priv(host);
764 struct pvscsi_ctx *ctx; 764 struct pvscsi_ctx *ctx;
765 unsigned long flags; 765 unsigned long flags;
766 unsigned char op;
766 767
767 spin_lock_irqsave(&adapter->hw_lock, flags); 768 spin_lock_irqsave(&adapter->hw_lock, flags);
768 769
@@ -775,13 +776,14 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
775 } 776 }
776 777
777 cmd->scsi_done = done; 778 cmd->scsi_done = done;
779 op = cmd->cmnd[0];
778 780
779 dev_dbg(&cmd->device->sdev_gendev, 781 dev_dbg(&cmd->device->sdev_gendev,
780 "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]); 782 "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op);
781 783
782 spin_unlock_irqrestore(&adapter->hw_lock, flags); 784 spin_unlock_irqrestore(&adapter->hw_lock, flags);
783 785
784 pvscsi_kick_io(adapter, cmd->cmnd[0]); 786 pvscsi_kick_io(adapter, op);
785 787
786 return 0; 788 return 0;
787} 789}
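
The vmw_pvscsi change copies cmd->cmnd[0] while the command is still known to be alive, because once the adapter lock is dropped the completion path can finish and recycle the command before pvscsi_kick_io() runs. A tiny sketch of that copy-before-hand-off rule, with an invented struct cmd and hand_off() in place of the SCSI midlayer:

#include <stdio.h>
#include <stdlib.h>

struct cmd { unsigned char cmnd[16]; };

/* After this call the command belongs to someone else and may already be
 * freed, so the caller must not touch *c again. */
static void hand_off(struct cmd *c)
{
        free(c);
}

int main(void)
{
        struct cmd *c = calloc(1, sizeof(*c));
        unsigned char op;

        if (!c)
                return 1;

        c->cmnd[0] = 0x2a;      /* a WRITE(10), say */
        op = c->cmnd[0];        /* cache it while we still own the command */
        hand_off(c);            /* c must not be dereferenced from here on */

        printf("kicking I/O for op %#x\n", op);  /* uses the cached copy */
        return 0;
}
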
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 524ecdc2a9bb..2ec355003524 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
22obj-$(CONFIG_SOC_SAMSUNG) += samsung/ 22obj-$(CONFIG_SOC_SAMSUNG) += samsung/
23obj-y += sunxi/ 23obj-y += sunxi/
24obj-$(CONFIG_ARCH_TEGRA) += tegra/ 24obj-$(CONFIG_ARCH_TEGRA) += tegra/
25obj-$(CONFIG_SOC_TI) += ti/ 25obj-y += ti/
26obj-$(CONFIG_ARCH_U8500) += ux500/ 26obj-$(CONFIG_ARCH_U8500) += ux500/
27obj-$(CONFIG_PLAT_VERSATILE) += versatile/ 27obj-$(CONFIG_PLAT_VERSATILE) += versatile/
28obj-y += xilinx/ 28obj-y += xilinx/
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index ea0859f7b185..d7d50d48d05d 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -75,10 +75,10 @@ config TI_SCI_PM_DOMAINS
75 called ti_sci_pm_domains. Note this is needed early in boot before 75 called ti_sci_pm_domains. Note this is needed early in boot before
76 rootfs may be available. 76 rootfs may be available.
77 77
78endif # SOC_TI
79
78config TI_SCI_INTA_MSI_DOMAIN 80config TI_SCI_INTA_MSI_DOMAIN
79 bool 81 bool
80 select GENERIC_MSI_IRQ_DOMAIN 82 select GENERIC_MSI_IRQ_DOMAIN
81 help 83 help
82 Driver to enable Interrupt Aggregator specific MSI Domain. 84 Driver to enable Interrupt Aggregator specific MSI Domain.
83
84endif # SOC_TI
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index b6e4862cc242..51ddca2033e0 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -81,6 +81,12 @@ out:
81 return CHAP_DIGEST_UNKNOWN; 81 return CHAP_DIGEST_UNKNOWN;
82} 82}
83 83
84static void chap_close(struct iscsi_conn *conn)
85{
86 kfree(conn->auth_protocol);
87 conn->auth_protocol = NULL;
88}
89
84static struct iscsi_chap *chap_server_open( 90static struct iscsi_chap *chap_server_open(
85 struct iscsi_conn *conn, 91 struct iscsi_conn *conn,
86 struct iscsi_node_auth *auth, 92 struct iscsi_node_auth *auth,
@@ -118,7 +124,7 @@ static struct iscsi_chap *chap_server_open(
118 case CHAP_DIGEST_UNKNOWN: 124 case CHAP_DIGEST_UNKNOWN:
119 default: 125 default:
120 pr_err("Unsupported CHAP_A value\n"); 126 pr_err("Unsupported CHAP_A value\n");
121 kfree(conn->auth_protocol); 127 chap_close(conn);
122 return NULL; 128 return NULL;
123 } 129 }
124 130
@@ -133,19 +139,13 @@ static struct iscsi_chap *chap_server_open(
133 * Generate Challenge. 139 * Generate Challenge.
134 */ 140 */
135 if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) { 141 if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) {
136 kfree(conn->auth_protocol); 142 chap_close(conn);
137 return NULL; 143 return NULL;
138 } 144 }
139 145
140 return chap; 146 return chap;
141} 147}
142 148
143static void chap_close(struct iscsi_conn *conn)
144{
145 kfree(conn->auth_protocol);
146 conn->auth_protocol = NULL;
147}
148
149static int chap_server_compute_md5( 149static int chap_server_compute_md5(
150 struct iscsi_conn *conn, 150 struct iscsi_conn *conn,
151 struct iscsi_node_auth *auth, 151 struct iscsi_node_auth *auth,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index f4a075303e9a..6949ea8bc387 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -502,7 +502,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
502 502
503 /* Always in 512 byte units for Linux/Block */ 503 /* Always in 512 byte units for Linux/Block */
504 block_lba += sg->length >> SECTOR_SHIFT; 504 block_lba += sg->length >> SECTOR_SHIFT;
505 sectors -= 1; 505 sectors -= sg->length >> SECTOR_SHIFT;
506 } 506 }
507 507
508 iblock_submit_bios(&list); 508 iblock_submit_bios(&list);
diff --git a/fs/Kconfig b/fs/Kconfig
index f1046cf6ad85..bfb1c6095c7a 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -11,7 +11,6 @@ config DCACHE_WORD_ACCESS
11 11
12config VALIDATE_FS_PARSER 12config VALIDATE_FS_PARSER
13 bool "Validate filesystem parameter description" 13 bool "Validate filesystem parameter description"
14 default y
15 help 14 help
16 Enable this to perform validation of the parameter description for a 15 Enable this to perform validation of the parameter description for a
17 filesystem when it is registered. 16 filesystem when it is registered.
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index d441bef72163..915010464572 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -275,9 +275,9 @@ static void afs_break_one_callback(struct afs_server *server,
275 struct afs_super_info *as = AFS_FS_S(cbi->sb); 275 struct afs_super_info *as = AFS_FS_S(cbi->sb);
276 struct afs_volume *volume = as->volume; 276 struct afs_volume *volume = as->volume;
277 277
278 write_lock(&volume->cb_break_lock); 278 write_lock(&volume->cb_v_break_lock);
279 volume->cb_v_break++; 279 volume->cb_v_break++;
280 write_unlock(&volume->cb_break_lock); 280 write_unlock(&volume->cb_v_break_lock);
281 } else { 281 } else {
282 data.volume = NULL; 282 data.volume = NULL;
283 data.fid = *fid; 283 data.fid = *fid;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index b42d9d09669c..18a50d4febcf 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -56,6 +56,16 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
56} 56}
57 57
58/* 58/*
 59 * Set the file size and block count. Estimate the number of 512-byte blocks
 60 * used, rounded up to the nearest 1K for consistency with other AFS clients.
61 */
62static void afs_set_i_size(struct afs_vnode *vnode, u64 size)
63{
64 i_size_write(&vnode->vfs_inode, size);
65 vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
66}
67
68/*
59 * Initialise an inode from the vnode status. 69 * Initialise an inode from the vnode status.
60 */ 70 */
61static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key, 71static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key,
@@ -124,12 +134,7 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key,
124 return afs_protocol_error(NULL, -EBADMSG, afs_eproto_file_type); 134 return afs_protocol_error(NULL, -EBADMSG, afs_eproto_file_type);
125 } 135 }
126 136
127 /* 137 afs_set_i_size(vnode, status->size);
128 * Estimate 512 bytes blocks used, rounded up to nearest 1K
129 * for consistency with other AFS clients.
130 */
131 inode->i_blocks = ((i_size_read(inode) + 1023) >> 10) << 1;
132 i_size_write(&vnode->vfs_inode, status->size);
133 138
134 vnode->invalid_before = status->data_version; 139 vnode->invalid_before = status->data_version;
135 inode_set_iversion_raw(&vnode->vfs_inode, status->data_version); 140 inode_set_iversion_raw(&vnode->vfs_inode, status->data_version);
@@ -207,11 +212,13 @@ static void afs_apply_status(struct afs_fs_cursor *fc,
207 212
208 if (expected_version && 213 if (expected_version &&
209 *expected_version != status->data_version) { 214 *expected_version != status->data_version) {
210 kdebug("vnode modified %llx on {%llx:%llu} [exp %llx] %s", 215 if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags))
211 (unsigned long long) status->data_version, 216 pr_warn("kAFS: vnode modified {%llx:%llu} %llx->%llx %s\n",
212 vnode->fid.vid, vnode->fid.vnode, 217 vnode->fid.vid, vnode->fid.vnode,
213 (unsigned long long) *expected_version, 218 (unsigned long long)*expected_version,
214 fc->type ? fc->type->name : "???"); 219 (unsigned long long)status->data_version,
220 fc->type ? fc->type->name : "???");
221
215 vnode->invalid_before = status->data_version; 222 vnode->invalid_before = status->data_version;
216 if (vnode->status.type == AFS_FTYPE_DIR) { 223 if (vnode->status.type == AFS_FTYPE_DIR) {
217 if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags)) 224 if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
@@ -230,7 +237,7 @@ static void afs_apply_status(struct afs_fs_cursor *fc,
230 237
231 if (data_changed) { 238 if (data_changed) {
232 inode_set_iversion_raw(&vnode->vfs_inode, status->data_version); 239 inode_set_iversion_raw(&vnode->vfs_inode, status->data_version);
233 i_size_write(&vnode->vfs_inode, status->size); 240 afs_set_i_size(vnode, status->size);
234 } 241 }
235} 242}
236 243
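
afs_set_i_size() derives i_blocks from the file size by rounding up to the next 1 KiB and then expressing the result in 512-byte units, which is what the shift right by 10 followed by the shift left by 1 does. A quick standalone check of that arithmetic for a handful of sizes:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as afs_set_i_size(): 512-byte blocks, rounded up to 1K. */
static uint64_t afs_blocks(uint64_t size)
{
        return ((size + 1023) >> 10) << 1;
}

int main(void)
{
        const uint64_t sizes[] = { 0, 1, 1023, 1024, 1025, 4096 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("size %5" PRIu64 " -> %" PRIu64 " blocks of 512 bytes\n",
                       sizes[i], afs_blocks(sizes[i]));
        return 0;
}
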
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 8a67bf741880..7ee63526c6a2 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -109,10 +109,8 @@ struct afs_call {
109 struct rxrpc_call *rxcall; /* RxRPC call handle */ 109 struct rxrpc_call *rxcall; /* RxRPC call handle */
110 struct key *key; /* security for this call */ 110 struct key *key; /* security for this call */
111 struct afs_net *net; /* The network namespace */ 111 struct afs_net *net; /* The network namespace */
112 union { 112 struct afs_server *server; /* The fileserver record if fs op (pins ref) */
113 struct afs_server *server; 113 struct afs_vlserver *vlserver; /* The vlserver record if vl op */
114 struct afs_vlserver *vlserver;
115 };
116 struct afs_cb_interest *cbi; /* Callback interest for server used */ 114 struct afs_cb_interest *cbi; /* Callback interest for server used */
117 struct afs_vnode *lvnode; /* vnode being locked */ 115 struct afs_vnode *lvnode; /* vnode being locked */
118 void *request; /* request data (first part) */ 116 void *request; /* request data (first part) */
@@ -616,7 +614,7 @@ struct afs_volume {
616 unsigned int servers_seq; /* Incremented each time ->servers changes */ 614 unsigned int servers_seq; /* Incremented each time ->servers changes */
617 615
618 unsigned cb_v_break; /* Break-everything counter. */ 616 unsigned cb_v_break; /* Break-everything counter. */
619 rwlock_t cb_break_lock; 617 rwlock_t cb_v_break_lock;
620 618
621 afs_voltype_t type; /* type of volume */ 619 afs_voltype_t type; /* type of volume */
622 short error; 620 short error;
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 08fdb3951c49..1a414300b654 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -43,6 +43,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
43 atomic_set(&volume->usage, 1); 43 atomic_set(&volume->usage, 1);
44 INIT_LIST_HEAD(&volume->proc_link); 44 INIT_LIST_HEAD(&volume->proc_link);
45 rwlock_init(&volume->servers_lock); 45 rwlock_init(&volume->servers_lock);
46 rwlock_init(&volume->cb_v_break_lock);
46 memcpy(volume->name, vldb->name, vldb->name_len + 1); 47 memcpy(volume->name, vldb->name, vldb->name_len + 1);
47 48
48 slist = afs_alloc_server_list(params->cell, params->key, vldb, type_mask); 49 slist = afs_alloc_server_list(params->cell, params->key, vldb, type_mask);
diff --git a/fs/aio.c b/fs/aio.c
index 3490d1fa0e16..c1e581dd32f5 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -2095,6 +2095,7 @@ SYSCALL_DEFINE6(io_pgetevents,
2095 struct __aio_sigset ksig = { NULL, }; 2095 struct __aio_sigset ksig = { NULL, };
2096 sigset_t ksigmask, sigsaved; 2096 sigset_t ksigmask, sigsaved;
2097 struct timespec64 ts; 2097 struct timespec64 ts;
2098 bool interrupted;
2098 int ret; 2099 int ret;
2099 2100
2100 if (timeout && unlikely(get_timespec64(&ts, timeout))) 2101 if (timeout && unlikely(get_timespec64(&ts, timeout)))
@@ -2108,8 +2109,10 @@ SYSCALL_DEFINE6(io_pgetevents,
2108 return ret; 2109 return ret;
2109 2110
2110 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL); 2111 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2111 restore_user_sigmask(ksig.sigmask, &sigsaved); 2112
2112 if (signal_pending(current) && !ret) 2113 interrupted = signal_pending(current);
2114 restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted);
2115 if (interrupted && !ret)
2113 ret = -ERESTARTNOHAND; 2116 ret = -ERESTARTNOHAND;
2114 2117
2115 return ret; 2118 return ret;
@@ -2128,6 +2131,7 @@ SYSCALL_DEFINE6(io_pgetevents_time32,
2128 struct __aio_sigset ksig = { NULL, }; 2131 struct __aio_sigset ksig = { NULL, };
2129 sigset_t ksigmask, sigsaved; 2132 sigset_t ksigmask, sigsaved;
2130 struct timespec64 ts; 2133 struct timespec64 ts;
2134 bool interrupted;
2131 int ret; 2135 int ret;
2132 2136
2133 if (timeout && unlikely(get_old_timespec32(&ts, timeout))) 2137 if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
@@ -2142,8 +2146,10 @@ SYSCALL_DEFINE6(io_pgetevents_time32,
2142 return ret; 2146 return ret;
2143 2147
2144 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL); 2148 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2145 restore_user_sigmask(ksig.sigmask, &sigsaved); 2149
2146 if (signal_pending(current) && !ret) 2150 interrupted = signal_pending(current);
2151 restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted);
2152 if (interrupted && !ret)
2147 ret = -ERESTARTNOHAND; 2153 ret = -ERESTARTNOHAND;
2148 2154
2149 return ret; 2155 return ret;
@@ -2193,6 +2199,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
2193 struct __compat_aio_sigset ksig = { NULL, }; 2199 struct __compat_aio_sigset ksig = { NULL, };
2194 sigset_t ksigmask, sigsaved; 2200 sigset_t ksigmask, sigsaved;
2195 struct timespec64 t; 2201 struct timespec64 t;
2202 bool interrupted;
2196 int ret; 2203 int ret;
2197 2204
2198 if (timeout && get_old_timespec32(&t, timeout)) 2205 if (timeout && get_old_timespec32(&t, timeout))
@@ -2206,8 +2213,10 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
2206 return ret; 2213 return ret;
2207 2214
2208 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL); 2215 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2209 restore_user_sigmask(ksig.sigmask, &sigsaved); 2216
2210 if (signal_pending(current) && !ret) 2217 interrupted = signal_pending(current);
2218 restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted);
2219 if (interrupted && !ret)
2211 ret = -ERESTARTNOHAND; 2220 ret = -ERESTARTNOHAND;
2212 2221
2213 return ret; 2222 return ret;
@@ -2226,6 +2235,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
2226 struct __compat_aio_sigset ksig = { NULL, }; 2235 struct __compat_aio_sigset ksig = { NULL, };
2227 sigset_t ksigmask, sigsaved; 2236 sigset_t ksigmask, sigsaved;
2228 struct timespec64 t; 2237 struct timespec64 t;
2238 bool interrupted;
2229 int ret; 2239 int ret;
2230 2240
2231 if (timeout && get_timespec64(&t, timeout)) 2241 if (timeout && get_timespec64(&t, timeout))
@@ -2239,8 +2249,10 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
2239 return ret; 2249 return ret;
2240 2250
2241 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL); 2251 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2242 restore_user_sigmask(ksig.sigmask, &sigsaved); 2252
2243 if (signal_pending(current) && !ret) 2253 interrupted = signal_pending(current);
2254 restore_user_sigmask(ksig.sigmask, &sigsaved, interrupted);
2255 if (interrupted && !ret)
2244 ret = -ERESTARTNOHAND; 2256 ret = -ERESTARTNOHAND;
2245 2257
2246 return ret; 2258 return ret;
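
All four io_pgetevents variants now capture whether a signal was pending before the caller's signal mask is put back, and feed that flag into restore_user_sigmask() instead of re-testing afterwards; the eventpoll, select and io_uring hunks further down make the same change. A userspace analogue of that check-before-restore ordering, built from sigprocmask()/sigpending() since restore_user_sigmask() itself has no userspace counterpart:

#include <signal.h>
#include <stdbool.h>
#include <stdio.h>

static void noop(int sig) { (void)sig; }

int main(void)
{
        sigset_t tmp, saved, pending;
        bool interrupted;

        signal(SIGUSR1, noop);                  /* harmless handler for the demo */

        sigemptyset(&tmp);
        sigaddset(&tmp, SIGUSR1);
        sigprocmask(SIG_BLOCK, &tmp, &saved);   /* install a temporary mask */

        raise(SIGUSR1);                         /* a signal arrives meanwhile */

        sigpending(&pending);                   /* note it *before* restoring */
        interrupted = sigismember(&pending, SIGUSR1);

        sigprocmask(SIG_SETMASK, &saved, NULL); /* only now restore the old mask */

        printf("interrupted while waiting: %s\n", interrupted ? "yes" : "no");
        return 0;
}
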
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 82a48e830018..e4b59e76afb0 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -856,9 +856,14 @@ err:
856 856
857static int load_flat_shared_library(int id, struct lib_info *libs) 857static int load_flat_shared_library(int id, struct lib_info *libs)
858{ 858{
859 /*
860 * This is a fake bprm struct; only the members "buf", "file" and
861 * "filename" are actually used.
862 */
859 struct linux_binprm bprm; 863 struct linux_binprm bprm;
860 int res; 864 int res;
861 char buf[16]; 865 char buf[16];
866 loff_t pos = 0;
862 867
863 memset(&bprm, 0, sizeof(bprm)); 868 memset(&bprm, 0, sizeof(bprm));
864 869
@@ -872,25 +877,11 @@ static int load_flat_shared_library(int id, struct lib_info *libs)
872 if (IS_ERR(bprm.file)) 877 if (IS_ERR(bprm.file))
873 return res; 878 return res;
874 879
875 bprm.cred = prepare_exec_creds(); 880 res = kernel_read(bprm.file, bprm.buf, BINPRM_BUF_SIZE, &pos);
876 res = -ENOMEM;
877 if (!bprm.cred)
878 goto out;
879
880 /* We don't really care about recalculating credentials at this point
881 * as we're past the point of no return and are dealing with shared
882 * libraries.
883 */
884 bprm.called_set_creds = 1;
885 881
886 res = prepare_binprm(&bprm); 882 if (res >= 0)
887
888 if (!res)
889 res = load_flat_file(&bprm, libs, id, NULL); 883 res = load_flat_file(&bprm, libs, id, NULL);
890 884
891 abort_creds(bprm.cred);
892
893out:
894 allow_write_access(bprm.file); 885 allow_write_access(bprm.file);
895 fput(bprm.file); 886 fput(bprm.file);
896 887
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 6af2d0d4a87a..c8a9b89b922d 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2121,9 +2121,10 @@ retry:
2121 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { 2121 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
2122 dout("build_path path+%d: %p SNAPDIR\n", 2122 dout("build_path path+%d: %p SNAPDIR\n",
2123 pos, temp); 2123 pos, temp);
2124 } else if (stop_on_nosnap && inode && 2124 } else if (stop_on_nosnap && inode && dentry != temp &&
2125 ceph_snap(inode) == CEPH_NOSNAP) { 2125 ceph_snap(inode) == CEPH_NOSNAP) {
2126 spin_unlock(&temp->d_lock); 2126 spin_unlock(&temp->d_lock);
2127 pos++; /* get rid of any prepended '/' */
2127 break; 2128 break;
2128 } else { 2129 } else {
2129 pos -= temp->d_name.len; 2130 pos -= temp->d_name.len;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 3fdc6a41b304..9fd56b0acd7e 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -2372,6 +2372,41 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
2372 kfree(dfs_rsp); 2372 kfree(dfs_rsp);
2373 return rc; 2373 return rc;
2374} 2374}
2375
2376static int
2377parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
2378 u32 plen, char **target_path,
2379 struct cifs_sb_info *cifs_sb)
2380{
2381 unsigned int sub_len;
2382 unsigned int sub_offset;
2383
2384 /* We only handle Symbolic Link : MS-FSCC 2.1.2.4 */
2385 if (le32_to_cpu(symlink_buf->ReparseTag) != IO_REPARSE_TAG_SYMLINK) {
2386 cifs_dbg(VFS, "srv returned invalid symlink buffer\n");
2387 return -EIO;
2388 }
2389
2390 sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
2391 sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
2392 if (sub_offset + 20 > plen ||
2393 sub_offset + sub_len + 20 > plen) {
2394 cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
2395 return -EIO;
2396 }
2397
2398 *target_path = cifs_strndup_from_utf16(
2399 symlink_buf->PathBuffer + sub_offset,
2400 sub_len, true, cifs_sb->local_nls);
2401 if (!(*target_path))
2402 return -ENOMEM;
2403
2404 convert_delimiter(*target_path, '/');
2405 cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);
2406
2407 return 0;
2408}
2409
2375#define SMB2_SYMLINK_STRUCT_SIZE \ 2410#define SMB2_SYMLINK_STRUCT_SIZE \
2376 (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp)) 2411 (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
2377 2412
@@ -2401,11 +2436,13 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
2401 struct kvec close_iov[1]; 2436 struct kvec close_iov[1];
2402 struct smb2_create_rsp *create_rsp; 2437 struct smb2_create_rsp *create_rsp;
2403 struct smb2_ioctl_rsp *ioctl_rsp; 2438 struct smb2_ioctl_rsp *ioctl_rsp;
2404 char *ioctl_buf; 2439 struct reparse_data_buffer *reparse_buf;
2405 u32 plen; 2440 u32 plen;
2406 2441
2407 cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path); 2442 cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
2408 2443
2444 *target_path = NULL;
2445
2409 if (smb3_encryption_required(tcon)) 2446 if (smb3_encryption_required(tcon))
2410 flags |= CIFS_TRANSFORM_REQ; 2447 flags |= CIFS_TRANSFORM_REQ;
2411 2448
@@ -2483,17 +2520,36 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
2483 if ((rc == 0) && (is_reparse_point)) { 2520 if ((rc == 0) && (is_reparse_point)) {
2484 /* See MS-FSCC 2.3.23 */ 2521 /* See MS-FSCC 2.3.23 */
2485 2522
2486 ioctl_buf = (char *)ioctl_rsp + le32_to_cpu(ioctl_rsp->OutputOffset); 2523 reparse_buf = (struct reparse_data_buffer *)
2524 ((char *)ioctl_rsp +
2525 le32_to_cpu(ioctl_rsp->OutputOffset));
2487 plen = le32_to_cpu(ioctl_rsp->OutputCount); 2526 plen = le32_to_cpu(ioctl_rsp->OutputCount);
2488 2527
2489 if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) > 2528 if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
2490 rsp_iov[1].iov_len) { 2529 rsp_iov[1].iov_len) {
2491 cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", plen); 2530 cifs_dbg(VFS, "srv returned invalid ioctl len: %d\n",
2531 plen);
2532 rc = -EIO;
2533 goto querty_exit;
2534 }
2535
2536 if (plen < 8) {
2537 cifs_dbg(VFS, "reparse buffer is too small. Must be "
2538 "at least 8 bytes but was %d\n", plen);
2539 rc = -EIO;
2540 goto querty_exit;
2541 }
2542
2543 if (plen < le16_to_cpu(reparse_buf->ReparseDataLength) + 8) {
2544 cifs_dbg(VFS, "srv returned invalid reparse buf "
2545 "length: %d\n", plen);
2492 rc = -EIO; 2546 rc = -EIO;
2493 goto querty_exit; 2547 goto querty_exit;
2494 } 2548 }
2495 2549
2496 /* Do stuff with ioctl_buf/plen */ 2550 rc = parse_reparse_symlink(
2551 (struct reparse_symlink_data_buffer *)reparse_buf,
2552 plen, target_path, cifs_sb);
2497 goto querty_exit; 2553 goto querty_exit;
2498 } 2554 }
2499 2555
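
parse_reparse_symlink() treats the server's reply as untrusted input: the substitute-name offset and length are validated against the returned payload length (the 20 bytes being the fixed fields that precede PathBuffer) before anything is copied out. A simplified standalone version of that bounds check; extract_name() and its flat buffer layout are illustrative, not the SMB2 structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Header size before the variable-length name data in this toy layout,
 * mirroring the 20-byte figure used by the hunk above. */
#define FIXED_HDR 20

static int extract_name(const uint8_t *buf, uint32_t plen,
                        uint16_t off, uint16_t len,
                        char *out, size_t outsz)
{
        if (plen < FIXED_HDR ||
            (uint32_t)off + FIXED_HDR > plen ||
            (uint32_t)off + len + FIXED_HDR > plen)
                return -1;                      /* malformed buffer */

        if (len >= outsz)
                len = outsz - 1;
        memcpy(out, buf + FIXED_HDR + off, len);
        out[len] = '\0';
        return 0;
}

int main(void)
{
        uint8_t buf[64] = { 0 };
        char name[32];

        memcpy(buf + FIXED_HDR, "target", 6);
        printf("well-formed:     %d (%s)\n",
               extract_name(buf, sizeof(buf), 0, 6, name, sizeof(name)), name);
        printf("length past end: %d\n",
               extract_name(buf, 24, 0, 40, name, sizeof(name)));
        return 0;
}
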
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index c7d5813bebd8..858353d20c39 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -914,7 +914,19 @@ struct reparse_mount_point_data_buffer {
914 __u8 PathBuffer[0]; /* Variable Length */ 914 __u8 PathBuffer[0]; /* Variable Length */
915} __packed; 915} __packed;
916 916
917/* See MS-FSCC 2.1.2.4 and cifspdu.h for struct reparse_symlink_data */ 917#define SYMLINK_FLAG_RELATIVE 0x00000001
918
919struct reparse_symlink_data_buffer {
920 __le32 ReparseTag;
921 __le16 ReparseDataLength;
922 __u16 Reserved;
923 __le16 SubstituteNameOffset;
924 __le16 SubstituteNameLength;
925 __le16 PrintNameOffset;
926 __le16 PrintNameLength;
927 __le32 Flags;
928 __u8 PathBuffer[0]; /* Variable Length */
929} __packed;
918 930
919/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */ 931/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */
920 932
diff --git a/fs/dax.c b/fs/dax.c
index 2e48c7ebb973..d2c90bf1969a 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -720,12 +720,11 @@ static void *dax_insert_entry(struct xa_state *xas,
720 720
721 xas_reset(xas); 721 xas_reset(xas);
722 xas_lock_irq(xas); 722 xas_lock_irq(xas);
723 if (dax_entry_size(entry) != dax_entry_size(new_entry)) { 723 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
724 void *old;
725
724 dax_disassociate_entry(entry, mapping, false); 726 dax_disassociate_entry(entry, mapping, false);
725 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address); 727 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
726 }
727
728 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
729 /* 728 /*
730 * Only swap our new entry into the page cache if the current 729 * Only swap our new entry into the page cache if the current
731 * entry is a zero page or an empty entry. If a normal PTE or 730 * entry is a zero page or an empty entry. If a normal PTE or
@@ -734,7 +733,7 @@ static void *dax_insert_entry(struct xa_state *xas,
734 * existing entry is a PMD, we will just leave the PMD in the 733 * existing entry is a PMD, we will just leave the PMD in the
735 * tree and dirty it if necessary. 734 * tree and dirty it if necessary.
736 */ 735 */
737 void *old = dax_lock_entry(xas, new_entry); 736 old = dax_lock_entry(xas, new_entry);
738 WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) | 737 WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
739 DAX_LOCKED)); 738 DAX_LOCKED));
740 entry = new_entry; 739 entry = new_entry;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index c6f513100cc9..4c74c768ae43 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -2325,7 +2325,7 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
2325 2325
2326 error = do_epoll_wait(epfd, events, maxevents, timeout); 2326 error = do_epoll_wait(epfd, events, maxevents, timeout);
2327 2327
2328 restore_user_sigmask(sigmask, &sigsaved); 2328 restore_user_sigmask(sigmask, &sigsaved, error == -EINTR);
2329 2329
2330 return error; 2330 return error;
2331} 2331}
@@ -2350,7 +2350,7 @@ COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,
2350 2350
2351 err = do_epoll_wait(epfd, events, maxevents, timeout); 2351 err = do_epoll_wait(epfd, events, maxevents, timeout);
2352 2352
2353 restore_user_sigmask(sigmask, &sigsaved); 2353 restore_user_sigmask(sigmask, &sigsaved, err == -EINTR);
2354 2354
2355 return err; 2355 return err;
2356} 2356}
diff --git a/fs/inode.c b/fs/inode.c
index df6542ec3b88..2bf21e2c90fc 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -362,7 +362,7 @@ EXPORT_SYMBOL(inc_nlink);
362 362
363static void __address_space_init_once(struct address_space *mapping) 363static void __address_space_init_once(struct address_space *mapping)
364{ 364{
365 xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ); 365 xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
366 init_rwsem(&mapping->i_mmap_rwsem); 366 init_rwsem(&mapping->i_mmap_rwsem);
367 INIT_LIST_HEAD(&mapping->private_list); 367 INIT_LIST_HEAD(&mapping->private_list);
368 spin_lock_init(&mapping->private_lock); 368 spin_lock_init(&mapping->private_lock);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 86a2bd721900..4ef62a45045d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -579,6 +579,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
579 state->cur_req++; 579 state->cur_req++;
580 } 580 }
581 581
582 req->file = NULL;
582 req->ctx = ctx; 583 req->ctx = ctx;
583 req->flags = 0; 584 req->flags = 0;
584 /* one is dropped after submission, the other at completion */ 585 /* one is dropped after submission, the other at completion */
@@ -1801,10 +1802,8 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
1801 req->sequence = ctx->cached_sq_head - 1; 1802 req->sequence = ctx->cached_sq_head - 1;
1802 } 1803 }
1803 1804
1804 if (!io_op_needs_file(s->sqe)) { 1805 if (!io_op_needs_file(s->sqe))
1805 req->file = NULL;
1806 return 0; 1806 return 0;
1807 }
1808 1807
1809 if (flags & IOSQE_FIXED_FILE) { 1808 if (flags & IOSQE_FIXED_FILE) {
1810 if (unlikely(!ctx->user_files || 1809 if (unlikely(!ctx->user_files ||
@@ -2201,11 +2200,12 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
2201 } 2200 }
2202 2201
2203 ret = wait_event_interruptible(ctx->wait, io_cqring_events(ring) >= min_events); 2202 ret = wait_event_interruptible(ctx->wait, io_cqring_events(ring) >= min_events);
2204 if (ret == -ERESTARTSYS)
2205 ret = -EINTR;
2206 2203
2207 if (sig) 2204 if (sig)
2208 restore_user_sigmask(sig, &sigsaved); 2205 restore_user_sigmask(sig, &sigsaved, ret == -ERESTARTSYS);
2206
2207 if (ret == -ERESTARTSYS)
2208 ret = -EINTR;
2209 2209
2210 return READ_ONCE(ring->r.head) == READ_ONCE(ring->r.tail) ? ret : 0; 2210 return READ_ONCE(ring->r.head) == READ_ONCE(ring->r.tail) ? ret : 0;
2211} 2211}
diff --git a/fs/namespace.c b/fs/namespace.c
index 7660c2749c96..6fbc9126367a 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2596,11 +2596,12 @@ static int do_move_mount(struct path *old_path, struct path *new_path)
2596 if (!check_mnt(p)) 2596 if (!check_mnt(p))
2597 goto out; 2597 goto out;
2598 2598
2599 /* The thing moved should be either ours or completely unattached. */ 2599 /* The thing moved must be mounted... */
2600 if (attached && !check_mnt(old)) 2600 if (!is_mounted(&old->mnt))
2601 goto out; 2601 goto out;
2602 2602
2603 if (!attached && !(ns && is_anon_ns(ns))) 2603 /* ... and either ours or the root of anon namespace */
2604 if (!(attached ? check_mnt(old) : is_anon_ns(ns)))
2604 goto out; 2605 goto out;
2605 2606
2606 if (old->mnt.mnt_flags & MNT_LOCKED) 2607 if (old->mnt.mnt_flags & MNT_LOCKED)
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index a809989807d6..19f856f45689 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -18,7 +18,7 @@
18 18
19#define NFSDBG_FACILITY NFSDBG_PNFS_LD 19#define NFSDBG_FACILITY NFSDBG_PNFS_LD
20 20
21static unsigned int dataserver_timeo = NFS_DEF_TCP_RETRANS; 21static unsigned int dataserver_timeo = NFS_DEF_TCP_TIMEO;
22static unsigned int dataserver_retrans; 22static unsigned int dataserver_retrans;
23 23
24static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg); 24static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 618e66078ee5..1a0cdeb3b875 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1563,7 +1563,7 @@ static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
1563 * Never use more than a third of the remaining memory, 1563 * Never use more than a third of the remaining memory,
1564 * unless it's the only way to give this client a slot: 1564 * unless it's the only way to give this client a slot:
1565 */ 1565 */
1566 avail = clamp_t(int, avail, slotsize, total_avail/3); 1566 avail = clamp_t(unsigned long, avail, slotsize, total_avail/3);
1567 num = min_t(int, num, avail / slotsize); 1567 num = min_t(int, num, avail / slotsize);
1568 nfsd_drc_mem_used += num * slotsize; 1568 nfsd_drc_mem_used += num * slotsize;
1569 spin_unlock(&nfsd_drc_lock); 1569 spin_unlock(&nfsd_drc_lock);
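
The nfsd4_get_drc_mem change matters because total_avail/3 can exceed INT_MAX on machines with a lot of free memory; clamping in int truncates the upper bound, while clamping in unsigned long keeps it intact. A standalone demonstration with invented numbers (clamp_type below is a local stand-in for the kernel's clamp_t):

#include <limits.h>
#include <stdio.h>

/* Local stand-in for clamp_t(): clamp val to [lo, hi] after casting to type. */
#define clamp_type(type, val, lo, hi) \
        ((type)(val) < (type)(lo) ? (type)(lo) : \
         (type)(val) > (type)(hi) ? (type)(hi) : (type)(val))

int main(void)
{
        unsigned long total_avail = 8UL << 30;  /* pretend 8 GiB free (64-bit long) */
        unsigned long slotsize = 2048;
        unsigned long avail = 1UL << 20;

        /* Casting the oversized bound to int is what goes wrong; on the usual
         * two's-complement ABIs it wraps negative. */
        int bad = clamp_type(int, avail, slotsize, total_avail / 3);
        unsigned long good = clamp_type(unsigned long, avail, slotsize,
                                        total_avail / 3);

        printf("total_avail/3 = %lu (INT_MAX = %d)\n", total_avail / 3, INT_MAX);
        printf("clamp as int           -> %d\n", bad);
        printf("clamp as unsigned long -> %lu\n", good);
        return 0;
}
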
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 2edbb657f859..55180501b915 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -462,7 +462,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
462 * a program is not able to use ptrace(2) in that case. It is 462 * a program is not able to use ptrace(2) in that case. It is
463 * safe because the task has stopped executing permanently. 463 * safe because the task has stopped executing permanently.
464 */ 464 */
465 if (permitted && (task->flags & PF_DUMPCORE)) { 465 if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE))) {
466 if (try_get_task_stack(task)) { 466 if (try_get_task_stack(task)) {
467 eip = KSTK_EIP(task); 467 eip = KSTK_EIP(task);
468 esp = KSTK_ESP(task); 468 esp = KSTK_ESP(task);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9c8ca6cd3ce4..255f6754c70d 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3077,8 +3077,7 @@ static const struct file_operations proc_tgid_base_operations = {
3077 3077
3078struct pid *tgid_pidfd_to_pid(const struct file *file) 3078struct pid *tgid_pidfd_to_pid(const struct file *file)
3079{ 3079{
3080 if (!d_is_dir(file->f_path.dentry) || 3080 if (file->f_op != &proc_tgid_base_operations)
3081 (file->f_op != &proc_tgid_base_operations))
3082 return ERR_PTR(-EBADF); 3081 return ERR_PTR(-EBADF);
3083 3082
3084 return proc_pid(file_inode(file)); 3083 return proc_pid(file_inode(file));
diff --git a/fs/select.c b/fs/select.c
index 6cbc9ff56ba0..a4d8f6e8b63c 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -758,10 +758,9 @@ static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
758 return ret; 758 return ret;
759 759
760 ret = core_sys_select(n, inp, outp, exp, to); 760 ret = core_sys_select(n, inp, outp, exp, to);
761 restore_user_sigmask(sigmask, &sigsaved, ret == -ERESTARTNOHAND);
761 ret = poll_select_copy_remaining(&end_time, tsp, type, ret); 762 ret = poll_select_copy_remaining(&end_time, tsp, type, ret);
762 763
763 restore_user_sigmask(sigmask, &sigsaved);
764
765 return ret; 764 return ret;
766} 765}
767 766
@@ -1106,8 +1105,7 @@ SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
1106 1105
1107 ret = do_sys_poll(ufds, nfds, to); 1106 ret = do_sys_poll(ufds, nfds, to);
1108 1107
1109 restore_user_sigmask(sigmask, &sigsaved); 1108 restore_user_sigmask(sigmask, &sigsaved, ret == -EINTR);
1110
1111 /* We can restart this syscall, usually */ 1109 /* We can restart this syscall, usually */
1112 if (ret == -EINTR) 1110 if (ret == -EINTR)
1113 ret = -ERESTARTNOHAND; 1111 ret = -ERESTARTNOHAND;
@@ -1142,8 +1140,7 @@ SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, unsigned int, nfds,
1142 1140
1143 ret = do_sys_poll(ufds, nfds, to); 1141 ret = do_sys_poll(ufds, nfds, to);
1144 1142
1145 restore_user_sigmask(sigmask, &sigsaved); 1143 restore_user_sigmask(sigmask, &sigsaved, ret == -EINTR);
1146
1147 /* We can restart this syscall, usually */ 1144 /* We can restart this syscall, usually */
1148 if (ret == -EINTR) 1145 if (ret == -EINTR)
1149 ret = -ERESTARTNOHAND; 1146 ret = -ERESTARTNOHAND;
@@ -1350,10 +1347,9 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp,
1350 return ret; 1347 return ret;
1351 1348
1352 ret = compat_core_sys_select(n, inp, outp, exp, to); 1349 ret = compat_core_sys_select(n, inp, outp, exp, to);
1350 restore_user_sigmask(sigmask, &sigsaved, ret == -ERESTARTNOHAND);
1353 ret = poll_select_copy_remaining(&end_time, tsp, type, ret); 1351 ret = poll_select_copy_remaining(&end_time, tsp, type, ret);
1354 1352
1355 restore_user_sigmask(sigmask, &sigsaved);
1356
1357 return ret; 1353 return ret;
1358} 1354}
1359 1355
@@ -1425,8 +1421,7 @@ COMPAT_SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds,
1425 1421
1426 ret = do_sys_poll(ufds, nfds, to); 1422 ret = do_sys_poll(ufds, nfds, to);
1427 1423
1428 restore_user_sigmask(sigmask, &sigsaved); 1424 restore_user_sigmask(sigmask, &sigsaved, ret == -EINTR);
1429
1430 /* We can restart this syscall, usually */ 1425 /* We can restart this syscall, usually */
1431 if (ret == -EINTR) 1426 if (ret == -EINTR)
1432 ret = -ERESTARTNOHAND; 1427 ret = -ERESTARTNOHAND;
@@ -1461,8 +1456,7 @@ COMPAT_SYSCALL_DEFINE5(ppoll_time64, struct pollfd __user *, ufds,
1461 1456
1462 ret = do_sys_poll(ufds, nfds, to); 1457 ret = do_sys_poll(ufds, nfds, to);
1463 1458
1464 restore_user_sigmask(sigmask, &sigsaved); 1459 restore_user_sigmask(sigmask, &sigsaved, ret == -EINTR);
1465
1466 /* We can restart this syscall, usually */ 1460 /* We can restart this syscall, usually */
1467 if (ret == -EINTR) 1461 if (ret == -EINTR)
1468 ret = -ERESTARTNOHAND; 1462 ret = -ERESTARTNOHAND;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ae0b8b5f69e6..ccbdbd62f0d8 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -40,6 +40,16 @@ enum userfaultfd_state {
40/* 40/*
41 * Start with fault_pending_wqh and fault_wqh so they're more likely 41 * Start with fault_pending_wqh and fault_wqh so they're more likely
42 * to be in the same cacheline. 42 * to be in the same cacheline.
43 *
44 * Locking order:
45 * fd_wqh.lock
46 * fault_pending_wqh.lock
47 * fault_wqh.lock
48 * event_wqh.lock
49 *
50 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
51 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
52 * also taken in IRQ context.
43 */ 53 */
44struct userfaultfd_ctx { 54struct userfaultfd_ctx {
45 /* waitqueue head for the pending (i.e. not read) userfaults */ 55 /* waitqueue head for the pending (i.e. not read) userfaults */
@@ -458,7 +468,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
458 blocking_state = return_to_userland ? TASK_INTERRUPTIBLE : 468 blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
459 TASK_KILLABLE; 469 TASK_KILLABLE;
460 470
461 spin_lock(&ctx->fault_pending_wqh.lock); 471 spin_lock_irq(&ctx->fault_pending_wqh.lock);
462 /* 472 /*
463 * After the __add_wait_queue the uwq is visible to userland 473 * After the __add_wait_queue the uwq is visible to userland
464 * through poll/read(). 474 * through poll/read().
@@ -470,7 +480,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
470 * __add_wait_queue. 480 * __add_wait_queue.
471 */ 481 */
472 set_current_state(blocking_state); 482 set_current_state(blocking_state);
473 spin_unlock(&ctx->fault_pending_wqh.lock); 483 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
474 484
475 if (!is_vm_hugetlb_page(vmf->vma)) 485 if (!is_vm_hugetlb_page(vmf->vma))
476 must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags, 486 must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
@@ -552,13 +562,13 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
552 * kernel stack can be released after the list_del_init. 562 * kernel stack can be released after the list_del_init.
553 */ 563 */
554 if (!list_empty_careful(&uwq.wq.entry)) { 564 if (!list_empty_careful(&uwq.wq.entry)) {
555 spin_lock(&ctx->fault_pending_wqh.lock); 565 spin_lock_irq(&ctx->fault_pending_wqh.lock);
556 /* 566 /*
557 * No need of list_del_init(), the uwq on the stack 567 * No need of list_del_init(), the uwq on the stack
558 * will be freed shortly anyway. 568 * will be freed shortly anyway.
559 */ 569 */
560 list_del(&uwq.wq.entry); 570 list_del(&uwq.wq.entry);
561 spin_unlock(&ctx->fault_pending_wqh.lock); 571 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
562 } 572 }
563 573
564 /* 574 /*
@@ -583,7 +593,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
583 init_waitqueue_entry(&ewq->wq, current); 593 init_waitqueue_entry(&ewq->wq, current);
584 release_new_ctx = NULL; 594 release_new_ctx = NULL;
585 595
586 spin_lock(&ctx->event_wqh.lock); 596 spin_lock_irq(&ctx->event_wqh.lock);
587 /* 597 /*
588 * After the __add_wait_queue the uwq is visible to userland 598 * After the __add_wait_queue the uwq is visible to userland
589 * through poll/read(). 599 * through poll/read().
@@ -613,15 +623,15 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
613 break; 623 break;
614 } 624 }
615 625
616 spin_unlock(&ctx->event_wqh.lock); 626 spin_unlock_irq(&ctx->event_wqh.lock);
617 627
618 wake_up_poll(&ctx->fd_wqh, EPOLLIN); 628 wake_up_poll(&ctx->fd_wqh, EPOLLIN);
619 schedule(); 629 schedule();
620 630
621 spin_lock(&ctx->event_wqh.lock); 631 spin_lock_irq(&ctx->event_wqh.lock);
622 } 632 }
623 __set_current_state(TASK_RUNNING); 633 __set_current_state(TASK_RUNNING);
624 spin_unlock(&ctx->event_wqh.lock); 634 spin_unlock_irq(&ctx->event_wqh.lock);
625 635
626 if (release_new_ctx) { 636 if (release_new_ctx) {
627 struct vm_area_struct *vma; 637 struct vm_area_struct *vma;
@@ -918,10 +928,10 @@ wakeup:
918 * the last page faults that may have been already waiting on 928 * the last page faults that may have been already waiting on
919 * the fault_*wqh. 929 * the fault_*wqh.
920 */ 930 */
921 spin_lock(&ctx->fault_pending_wqh.lock); 931 spin_lock_irq(&ctx->fault_pending_wqh.lock);
922 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range); 932 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
923 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range); 933 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
924 spin_unlock(&ctx->fault_pending_wqh.lock); 934 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
925 935
926 /* Flush pending events that may still wait on event_wqh */ 936 /* Flush pending events that may still wait on event_wqh */
927 wake_up_all(&ctx->event_wqh); 937 wake_up_all(&ctx->event_wqh);
@@ -1134,7 +1144,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
1134 1144
1135 if (!ret && msg->event == UFFD_EVENT_FORK) { 1145 if (!ret && msg->event == UFFD_EVENT_FORK) {
1136 ret = resolve_userfault_fork(ctx, fork_nctx, msg); 1146 ret = resolve_userfault_fork(ctx, fork_nctx, msg);
1137 spin_lock(&ctx->event_wqh.lock); 1147 spin_lock_irq(&ctx->event_wqh.lock);
1138 if (!list_empty(&fork_event)) { 1148 if (!list_empty(&fork_event)) {
1139 /* 1149 /*
1140 * The fork thread didn't abort, so we can 1150 * The fork thread didn't abort, so we can
@@ -1180,7 +1190,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
1180 if (ret) 1190 if (ret)
1181 userfaultfd_ctx_put(fork_nctx); 1191 userfaultfd_ctx_put(fork_nctx);
1182 } 1192 }
1183 spin_unlock(&ctx->event_wqh.lock); 1193 spin_unlock_irq(&ctx->event_wqh.lock);
1184 } 1194 }
1185 1195
1186 return ret; 1196 return ret;
@@ -1219,14 +1229,14 @@ static ssize_t userfaultfd_read(struct file *file, char __user *buf,
1219static void __wake_userfault(struct userfaultfd_ctx *ctx, 1229static void __wake_userfault(struct userfaultfd_ctx *ctx,
1220 struct userfaultfd_wake_range *range) 1230 struct userfaultfd_wake_range *range)
1221{ 1231{
1222 spin_lock(&ctx->fault_pending_wqh.lock); 1232 spin_lock_irq(&ctx->fault_pending_wqh.lock);
1223 /* wake all in the range and autoremove */ 1233 /* wake all in the range and autoremove */
1224 if (waitqueue_active(&ctx->fault_pending_wqh)) 1234 if (waitqueue_active(&ctx->fault_pending_wqh))
1225 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 1235 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
1226 range); 1236 range);
1227 if (waitqueue_active(&ctx->fault_wqh)) 1237 if (waitqueue_active(&ctx->fault_wqh))
1228 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range); 1238 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
1229 spin_unlock(&ctx->fault_pending_wqh.lock); 1239 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
1230} 1240}
1231 1241
1232static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, 1242static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
@@ -1881,7 +1891,7 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
1881 wait_queue_entry_t *wq; 1891 wait_queue_entry_t *wq;
1882 unsigned long pending = 0, total = 0; 1892 unsigned long pending = 0, total = 0;
1883 1893
1884 spin_lock(&ctx->fault_pending_wqh.lock); 1894 spin_lock_irq(&ctx->fault_pending_wqh.lock);
1885 list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) { 1895 list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
1886 pending++; 1896 pending++;
1887 total++; 1897 total++;
@@ -1889,7 +1899,7 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
1889 list_for_each_entry(wq, &ctx->fault_wqh.head, entry) { 1899 list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
1890 total++; 1900 total++;
1891 } 1901 }
1892 spin_unlock(&ctx->fault_pending_wqh.lock); 1902 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
1893 1903
1894 /* 1904 /*
1895 * If more protocols will be added, there will be all shown 1905 * If more protocols will be added, there will be all shown
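Note on the fs/userfaultfd.c hunks above: every waitqueue spinlock is converted to its IRQ-disabling variant because, per the new locking-order comment, fd_wqh.lock can be taken from aio_poll() in IRQ context. The following is a minimal sketch of the deadlock that spin_lock_irq() avoids when a lock is shared between process and IRQ context; example_lock and the two helpers are illustrative names, not code from this patch.

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock shared with an IRQ path */

	static void example_process_context(void)
	{
		/*
		 * A plain spin_lock() here could deadlock: if the IRQ-side
		 * path below runs on this CPU while the lock is held, it
		 * spins forever.  Disabling local interrupts while holding
		 * the lock prevents that.
		 */
		spin_lock_irq(&example_lock);
		/* ... update data also touched from IRQ context ... */
		spin_unlock_irq(&example_lock);
	}

	static void example_irq_context(void)	/* e.g. a wakeup driven from aio_poll() */
	{
		spin_lock(&example_lock);	/* interrupts are already disabled here */
		/* ... */
		spin_unlock(&example_lock);
	}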
diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h
index 82c9e0c020b2..e10470ed7c4f 100644
--- a/include/dt-bindings/clock/g12a-clkc.h
+++ b/include/dt-bindings/clock/g12a-clkc.h
@@ -130,7 +130,7 @@
130#define CLKID_MALI_1_SEL 172 130#define CLKID_MALI_1_SEL 172
131#define CLKID_MALI_1 174 131#define CLKID_MALI_1 174
132#define CLKID_MALI 175 132#define CLKID_MALI 175
133#define CLKID_MPLL_5OM 177 133#define CLKID_MPLL_50M 177
134#define CLKID_CPU_CLK 187 134#define CLKID_CPU_CLK 187
135#define CLKID_PCIE_PLL 201 135#define CLKID_PCIE_PLL 201
136#define CLKID_VDEC_1 204 136#define CLKID_VDEC_1 204
diff --git a/include/dt-bindings/clock/sifive-fu540-prci.h b/include/dt-bindings/clock/sifive-fu540-prci.h
index 6a0b70a37d78..3b21d0522c91 100644
--- a/include/dt-bindings/clock/sifive-fu540-prci.h
+++ b/include/dt-bindings/clock/sifive-fu540-prci.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
2/* 2/*
3 * Copyright (C) 2018-2019 SiFive, Inc. 3 * Copyright (C) 2018-2019 SiFive, Inc.
4 * Wesley Terpstra 4 * Wesley Terpstra
diff --git a/include/linux/device.h b/include/linux/device.h
index 848fc71c6ba6..4a295e324ac5 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -704,7 +704,8 @@ extern unsigned long devm_get_free_pages(struct device *dev,
704 gfp_t gfp_mask, unsigned int order); 704 gfp_t gfp_mask, unsigned int order);
705extern void devm_free_pages(struct device *dev, unsigned long addr); 705extern void devm_free_pages(struct device *dev, unsigned long addr);
706 706
707void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res); 707void __iomem *devm_ioremap_resource(struct device *dev,
708 const struct resource *res);
708 709
709void __iomem *devm_of_iomap(struct device *dev, 710void __iomem *devm_of_iomap(struct device *dev,
710 struct device_node *node, int index, 711 struct device_node *node, int index,
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h
index 16255c2ca2f4..0d6b4bc191c5 100644
--- a/include/linux/intel-ish-client-if.h
+++ b/include/linux/intel-ish-client-if.h
@@ -103,6 +103,7 @@ void ishtp_put_device(struct ishtp_cl_device *cl_dev);
103void ishtp_get_device(struct ishtp_cl_device *cl_dev); 103void ishtp_get_device(struct ishtp_cl_device *cl_dev);
104void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data); 104void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data);
105void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device); 105void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device);
106struct ishtp_cl_device *ishtp_dev_to_cl_device(struct device *dev);
106int ishtp_register_event_cb(struct ishtp_cl_device *device, 107int ishtp_register_event_cb(struct ishtp_cl_device *device,
107 void (*read_cb)(struct ishtp_cl_device *)); 108 void (*read_cb)(struct ishtp_cl_device *));
108struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev, 109struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 74b1ee9027f5..0c9bc231107f 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -93,7 +93,8 @@
93#define DIV_ROUND_DOWN_ULL(ll, d) \ 93#define DIV_ROUND_DOWN_ULL(ll, d) \
94 ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) 94 ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
95 95
96#define DIV_ROUND_UP_ULL(ll, d) DIV_ROUND_DOWN_ULL((ll) + (d) - 1, (d)) 96#define DIV_ROUND_UP_ULL(ll, d) \
97 DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d))
97 98
98#if BITS_PER_LONG == 32 99#if BITS_PER_LONG == 32
99# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d) 100# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
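The DIV_ROUND_UP_ULL change above forces the (ll) + (d) - 1 addition to be done in 64-bit arithmetic; with the old form the sum wraps when ll is a 32-bit expression close to UINT_MAX. The userspace snippet below is a simplified stand-in (plain division instead of the kernel's do_div()) that only illustrates the truncation the added cast avoids.

	#include <stdio.h>

	/* simplified: the kernel version uses do_div() instead of plain '/' */
	#define DIV_ROUND_DOWN_ULL(ll, d)   ((unsigned long long)(ll) / (d))
	#define DIV_ROUND_UP_ULL_OLD(ll, d) DIV_ROUND_DOWN_ULL((ll) + (d) - 1, (d))
	#define DIV_ROUND_UP_ULL_NEW(ll, d) \
		DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d))

	int main(void)
	{
		unsigned int ll = 0xfffffff0u;	/* 32-bit operand near UINT_MAX */
		unsigned int d  = 0x100u;

		/* old form: the addition wraps in 32 bits and the result is 0 */
		printf("old: %llu\n", DIV_ROUND_UP_ULL_OLD(ll, d));
		/* new form: 0x1000000ef / 0x100 = 16777216, as expected */
		printf("new: %llu\n", DIV_ROUND_UP_ULL_NEW(ll, d));
		return 0;
	}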
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index b3d360b0ee3d..9f57cdfcc93d 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -373,6 +373,8 @@ struct flash_info;
373 * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR 373 * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
374 * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is 374 * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is
375 * @quad_enable: [FLASH-SPECIFIC] enables SPI NOR quad mode 375 * @quad_enable: [FLASH-SPECIFIC] enables SPI NOR quad mode
376 * @clear_sr_bp: [FLASH-SPECIFIC] clears the Block Protection Bits from
377 * the SPI NOR Status Register.
376 * completely locked 378 * completely locked
377 * @priv: the private data 379 * @priv: the private data
378 */ 380 */
@@ -410,6 +412,7 @@ struct spi_nor {
410 int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); 412 int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
411 int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); 413 int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
412 int (*quad_enable)(struct spi_nor *nor); 414 int (*quad_enable)(struct spi_nor *nor);
415 int (*clear_sr_bp)(struct spi_nor *nor);
413 416
414 void *priv; 417 void *priv;
415}; 418};
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 9ec3544baee2..fe0b29bf2df7 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -333,19 +333,6 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
333 mapping_gfp_mask(mapping)); 333 mapping_gfp_mask(mapping));
334} 334}
335 335
336static inline struct page *find_subpage(struct page *page, pgoff_t offset)
337{
338 unsigned long mask;
339
340 if (PageHuge(page))
341 return page;
342
343 VM_BUG_ON_PAGE(PageTail(page), page);
344
345 mask = (1UL << compound_order(page)) - 1;
346 return page + (offset & mask);
347}
348
349struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); 336struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
350struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset); 337struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
351unsigned find_get_entries(struct address_space *mapping, pgoff_t start, 338unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2ddae518dce6..16e38c286d46 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -241,6 +241,7 @@ struct perf_event;
241#define PERF_PMU_CAP_NO_INTERRUPT 0x01 241#define PERF_PMU_CAP_NO_INTERRUPT 0x01
242#define PERF_PMU_CAP_NO_NMI 0x02 242#define PERF_PMU_CAP_NO_NMI 0x02
243#define PERF_PMU_CAP_AUX_NO_SG 0x04 243#define PERF_PMU_CAP_AUX_NO_SG 0x04
244#define PERF_PMU_CAP_EXTENDED_REGS 0x08
244#define PERF_PMU_CAP_EXCLUSIVE 0x10 245#define PERF_PMU_CAP_EXCLUSIVE 0x10
245#define PERF_PMU_CAP_ITRACE 0x20 246#define PERF_PMU_CAP_ITRACE 0x20
246#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 247#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40
diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h
index 476747456bca..2d12e97d5e7b 100644
--- a/include/linux/perf_regs.h
+++ b/include/linux/perf_regs.h
@@ -11,6 +11,11 @@ struct perf_regs {
11 11
12#ifdef CONFIG_HAVE_PERF_REGS 12#ifdef CONFIG_HAVE_PERF_REGS
13#include <asm/perf_regs.h> 13#include <asm/perf_regs.h>
14
15#ifndef PERF_REG_EXTENDED_MASK
16#define PERF_REG_EXTENDED_MASK 0
17#endif
18
14u64 perf_reg_value(struct pt_regs *regs, int idx); 19u64 perf_reg_value(struct pt_regs *regs, int idx);
15int perf_reg_validate(u64 mask); 20int perf_reg_validate(u64 mask);
16u64 perf_reg_abi(struct task_struct *task); 21u64 perf_reg_abi(struct task_struct *task);
@@ -18,6 +23,9 @@ void perf_get_regs_user(struct perf_regs *regs_user,
18 struct pt_regs *regs, 23 struct pt_regs *regs,
19 struct pt_regs *regs_user_copy); 24 struct pt_regs *regs_user_copy);
20#else 25#else
26
27#define PERF_REG_EXTENDED_MASK 0
28
21static inline u64 perf_reg_value(struct pt_regs *regs, int idx) 29static inline u64 perf_reg_value(struct pt_regs *regs, int idx)
22{ 30{
23 return 0; 31 return 0;
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index 7bb77850c65a..3c202a11a79e 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -68,7 +68,7 @@ static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
68 68
69static inline void *pfn_t_to_virt(pfn_t pfn) 69static inline void *pfn_t_to_virt(pfn_t pfn)
70{ 70{
71 if (pfn_t_has_page(pfn)) 71 if (pfn_t_has_page(pfn) && !is_device_private_page(pfn_t_to_page(pfn)))
72 return __va(pfn_t_to_phys(pfn)); 72 return __va(pfn_t_to_phys(pfn));
73 return NULL; 73 return NULL;
74} 74}
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 9702016734b1..78c2bb376954 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -276,7 +276,7 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
276extern int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set, 276extern int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set,
277 sigset_t *oldset, size_t sigsetsize); 277 sigset_t *oldset, size_t sigsetsize);
278extern void restore_user_sigmask(const void __user *usigmask, 278extern void restore_user_sigmask(const void __user *usigmask,
279 sigset_t *sigsaved); 279 sigset_t *sigsaved, bool interrupted);
280extern void set_current_blocked(sigset_t *); 280extern void set_current_blocked(sigset_t *);
281extern void __set_current_blocked(const sigset_t *); 281extern void __set_current_blocked(const sigset_t *);
282extern int show_unhandled_signals; 282extern int show_unhandled_signals;
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 8594001e8be8..f0d262ad7b78 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -209,8 +209,9 @@ extern int suspend_valid_only_mem(suspend_state_t state);
209 209
210extern unsigned int pm_suspend_global_flags; 210extern unsigned int pm_suspend_global_flags;
211 211
212#define PM_SUSPEND_FLAG_FW_SUSPEND (1 << 0) 212#define PM_SUSPEND_FLAG_FW_SUSPEND BIT(0)
213#define PM_SUSPEND_FLAG_FW_RESUME (1 << 1) 213#define PM_SUSPEND_FLAG_FW_RESUME BIT(1)
214#define PM_SUSPEND_FLAG_NO_PLATFORM BIT(2)
214 215
215static inline void pm_suspend_clear_flags(void) 216static inline void pm_suspend_clear_flags(void)
216{ 217{
@@ -227,6 +228,11 @@ static inline void pm_set_resume_via_firmware(void)
227 pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_RESUME; 228 pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_RESUME;
228} 229}
229 230
231static inline void pm_set_suspend_no_platform(void)
232{
233 pm_suspend_global_flags |= PM_SUSPEND_FLAG_NO_PLATFORM;
234}
235
230/** 236/**
231 * pm_suspend_via_firmware - Check if platform firmware will suspend the system. 237 * pm_suspend_via_firmware - Check if platform firmware will suspend the system.
232 * 238 *
@@ -268,6 +274,22 @@ static inline bool pm_resume_via_firmware(void)
268 return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_RESUME); 274 return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_RESUME);
269} 275}
270 276
277/**
278 * pm_suspend_no_platform - Check if platform may change device power states.
279 *
280 * To be called during system-wide power management transitions to sleep states
281 * or during the subsequent system-wide transitions back to the working state.
282 *
283 * Return 'true' if the power states of devices remain under full control of the
284 * kernel throughout the system-wide suspend and resume cycle in progress (that
285 * is, if a device is put into a certain power state during suspend, it can be
286 * expected to remain in that state during resume).
287 */
288static inline bool pm_suspend_no_platform(void)
289{
290 return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_NO_PLATFORM);
291}
292
271/* Suspend-to-idle state machnine. */ 293/* Suspend-to-idle state machnine. */
272enum s2idle_states { 294enum s2idle_states {
273 S2IDLE_STATE_NONE, /* Not suspended/suspending. */ 295 S2IDLE_STATE_NONE, /* Not suspended/suspending. */
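The suspend.h additions above let drivers ask whether platform firmware may change device power states behind the kernel's back during the system-wide transition in progress. A minimal sketch of how a driver could consult the new helper follows; the function names and the decision taken are illustrative, not part of this patch.

	static int example_driver_resume(struct device *dev)
	{
		/*
		 * For suspend-to-idle the core now sets
		 * PM_SUSPEND_FLAG_NO_PLATFORM, so the state programmed at
		 * suspend time is known to have been preserved and a full
		 * reinitialization can be skipped.
		 */
		if (pm_suspend_no_platform())
			return 0;

		/* Firmware may have powered the device down; restore it fully. */
		return example_hw_reinit(dev);	/* placeholder helper */
	}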
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 0e01e6129145..5921599b6dc4 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -265,6 +265,7 @@ enum xa_lock_type {
265#define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U) 265#define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U)
266#define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U) 266#define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U)
267#define XA_FLAGS_ALLOC_WRAPPED ((__force gfp_t)16U) 267#define XA_FLAGS_ALLOC_WRAPPED ((__force gfp_t)16U)
268#define XA_FLAGS_ACCOUNT ((__force gfp_t)32U)
268#define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \ 269#define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
269 (__force unsigned)(mark))) 270 (__force unsigned)(mark)))
270 271
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 4790beaa86e0..ee7405e759ba 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -262,8 +262,8 @@ static inline bool ip6_sk_ignore_df(const struct sock *sk)
262 inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT; 262 inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
263} 263}
264 264
265static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, 265static inline const struct in6_addr *rt6_nexthop(const struct rt6_info *rt,
266 struct in6_addr *daddr) 266 const struct in6_addr *daddr)
267{ 267{
268 if (rt->rt6i_flags & RTF_GATEWAY) 268 if (rt->rt6i_flags & RTF_GATEWAY)
269 return &rt->rt6i_gateway; 269 return &rt->rt6i_gateway;
diff --git a/include/net/route.h b/include/net/route.h
index 065b47754f05..55ff71ffb796 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -221,6 +221,7 @@ void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
221struct rtable *rt_dst_alloc(struct net_device *dev, 221struct rtable *rt_dst_alloc(struct net_device *dev,
222 unsigned int flags, u16 type, 222 unsigned int flags, u16 type,
223 bool nopolicy, bool noxfrm, bool will_cache); 223 bool nopolicy, bool noxfrm, bool will_cache);
224struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt);
224 225
225struct in_ifaddr; 226struct in_ifaddr;
226void fib_add_ifaddr(struct in_ifaddr *); 227void fib_add_ifaddr(struct in_ifaddr *);
diff --git a/include/net/tls.h b/include/net/tls.h
index 4a55ce6a303f..53d96bca220d 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -373,21 +373,6 @@ static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
373 return !!ctx->partially_sent_record; 373 return !!ctx->partially_sent_record;
374} 374}
375 375
376static inline int tls_complete_pending_work(struct sock *sk,
377 struct tls_context *ctx,
378 int flags, long *timeo)
379{
380 int rc = 0;
381
382 if (unlikely(sk->sk_write_pending))
383 rc = wait_on_pending_writer(sk, timeo);
384
385 if (!rc && tls_is_partially_sent_record(ctx))
386 rc = tls_push_partial_record(sk, ctx, flags);
387
388 return rc;
389}
390
391static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx) 376static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
392{ 377{
393 return tls_ctx->pending_open_record_frags; 378 return tls_ctx->pending_open_record_frags;
diff --git a/init/initramfs.c b/init/initramfs.c
index 178130fd61c2..c47dad0884f7 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -617,7 +617,7 @@ static inline void clean_rootfs(void)
617#endif /* CONFIG_BLK_DEV_RAM */ 617#endif /* CONFIG_BLK_DEV_RAM */
618 618
619#ifdef CONFIG_BLK_DEV_RAM 619#ifdef CONFIG_BLK_DEV_RAM
620static void populate_initrd_image(char *err) 620static void __init populate_initrd_image(char *err)
621{ 621{
622 ssize_t written; 622 ssize_t written;
623 int fd; 623 int fd;
@@ -637,7 +637,7 @@ static void populate_initrd_image(char *err)
637 ksys_close(fd); 637 ksys_close(fd);
638} 638}
639#else 639#else
640static void populate_initrd_image(char *err) 640static void __init populate_initrd_image(char *err)
641{ 641{
642 printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err); 642 printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
643} 643}
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 077fde6fb953..ef1c565edc5d 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1964,6 +1964,9 @@ static ssize_t write_cpuhp_fail(struct device *dev,
1964 if (ret) 1964 if (ret)
1965 return ret; 1965 return ret;
1966 1966
1967 if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
1968 return -EINVAL;
1969
1967 /* 1970 /*
1968 * Cannot fail STARTING/DYING callbacks. 1971 * Cannot fail STARTING/DYING callbacks.
1969 */ 1972 */
@@ -2339,6 +2342,9 @@ static int __init mitigations_parse_cmdline(char *arg)
2339 cpu_mitigations = CPU_MITIGATIONS_AUTO; 2342 cpu_mitigations = CPU_MITIGATIONS_AUTO;
2340 else if (!strcmp(arg, "auto,nosmt")) 2343 else if (!strcmp(arg, "auto,nosmt"))
2341 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT; 2344 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
2345 else
2346 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2347 arg);
2342 2348
2343 return 0; 2349 return 0;
2344} 2350}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 23efe6792abc..ab52cf510378 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5007,6 +5007,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
5007 if (perf_event_check_period(event, value)) 5007 if (perf_event_check_period(event, value))
5008 return -EINVAL; 5008 return -EINVAL;
5009 5009
5010 if (!event->attr.freq && (value & (1ULL << 63)))
5011 return -EINVAL;
5012
5010 event_function_call(event, __perf_event_period, &value); 5013 event_function_call(event, __perf_event_period, &value);
5011 5014
5012 return 0; 5015 return 0;
@@ -5925,7 +5928,7 @@ static void perf_sample_regs_user(struct perf_regs *regs_user,
5925 if (user_mode(regs)) { 5928 if (user_mode(regs)) {
5926 regs_user->abi = perf_reg_abi(current); 5929 regs_user->abi = perf_reg_abi(current);
5927 regs_user->regs = regs; 5930 regs_user->regs = regs;
5928 } else if (current->mm) { 5931 } else if (!(current->flags & PF_KTHREAD)) {
5929 perf_get_regs_user(regs_user, regs, regs_user_copy); 5932 perf_get_regs_user(regs_user, regs, regs_user_copy);
5930 } else { 5933 } else {
5931 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; 5934 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
@@ -10041,6 +10044,12 @@ void perf_pmu_unregister(struct pmu *pmu)
10041} 10044}
10042EXPORT_SYMBOL_GPL(perf_pmu_unregister); 10045EXPORT_SYMBOL_GPL(perf_pmu_unregister);
10043 10046
10047static inline bool has_extended_regs(struct perf_event *event)
10048{
10049 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) ||
10050 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK);
10051}
10052
10044static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) 10053static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
10045{ 10054{
10046 struct perf_event_context *ctx = NULL; 10055 struct perf_event_context *ctx = NULL;
@@ -10072,12 +10081,16 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
10072 perf_event_ctx_unlock(event->group_leader, ctx); 10081 perf_event_ctx_unlock(event->group_leader, ctx);
10073 10082
10074 if (!ret) { 10083 if (!ret) {
10084 if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) &&
10085 has_extended_regs(event))
10086 ret = -EOPNOTSUPP;
10087
10075 if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE && 10088 if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE &&
10076 event_has_any_exclude_flag(event)) { 10089 event_has_any_exclude_flag(event))
10077 if (event->destroy)
10078 event->destroy(event);
10079 ret = -EINVAL; 10090 ret = -EINVAL;
10080 } 10091
10092 if (ret && event->destroy)
10093 event->destroy(event);
10081 } 10094 }
10082 10095
10083 if (ret) 10096 if (ret)
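The perf core change above ties extended-register sampling to an explicit PMU capability: perf_try_init_event() now fails with -EOPNOTSUPP when attr.sample_regs_user or attr.sample_regs_intr carries bits from PERF_REG_EXTENDED_MASK and the PMU has not opted in, and event->destroy() now runs for any late failure rather than only the exclude-flag case. A sketch of the driver-side opt-in; the PMU name and the partial initializer are illustrative only.

	static struct pmu example_pmu = {
		/* hypothetical PMU: advertise support for the extra sampled registers */
		.capabilities	= PERF_PMU_CAP_EXTENDED_REGS,
		/* .event_init, .add, .del, .start, .stop, .read, ... */
	};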
diff --git a/kernel/fork.c b/kernel/fork.c
index 75675b9bf6df..fe83343da24b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -248,7 +248,11 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
248 struct page *page = alloc_pages_node(node, THREADINFO_GFP, 248 struct page *page = alloc_pages_node(node, THREADINFO_GFP,
249 THREAD_SIZE_ORDER); 249 THREAD_SIZE_ORDER);
250 250
251 return page ? page_address(page) : NULL; 251 if (likely(page)) {
252 tsk->stack = page_address(page);
253 return tsk->stack;
254 }
255 return NULL;
252#endif 256#endif
253} 257}
254 258
@@ -1712,31 +1716,6 @@ const struct file_operations pidfd_fops = {
1712#endif 1716#endif
1713}; 1717};
1714 1718
1715/**
1716 * pidfd_create() - Create a new pid file descriptor.
1717 *
1718 * @pid: struct pid that the pidfd will reference
1719 *
1720 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
1721 *
1722 * Note, that this function can only be called after the fd table has
1723 * been unshared to avoid leaking the pidfd to the new process.
1724 *
1725 * Return: On success, a cloexec pidfd is returned.
1726 * On error, a negative errno number will be returned.
1727 */
1728static int pidfd_create(struct pid *pid)
1729{
1730 int fd;
1731
1732 fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
1733 O_RDWR | O_CLOEXEC);
1734 if (fd < 0)
1735 put_pid(pid);
1736
1737 return fd;
1738}
1739
1740static void __delayed_free_task(struct rcu_head *rhp) 1719static void __delayed_free_task(struct rcu_head *rhp)
1741{ 1720{
1742 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); 1721 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
@@ -1774,6 +1753,7 @@ static __latent_entropy struct task_struct *copy_process(
1774 int pidfd = -1, retval; 1753 int pidfd = -1, retval;
1775 struct task_struct *p; 1754 struct task_struct *p;
1776 struct multiprocess_signals delayed; 1755 struct multiprocess_signals delayed;
1756 struct file *pidfile = NULL;
1777 1757
1778 /* 1758 /*
1779 * Don't allow sharing the root directory with processes in a different 1759 * Don't allow sharing the root directory with processes in a different
@@ -1822,8 +1802,6 @@ static __latent_entropy struct task_struct *copy_process(
1822 } 1802 }
1823 1803
1824 if (clone_flags & CLONE_PIDFD) { 1804 if (clone_flags & CLONE_PIDFD) {
1825 int reserved;
1826
1827 /* 1805 /*
1828 * - CLONE_PARENT_SETTID is useless for pidfds and also 1806 * - CLONE_PARENT_SETTID is useless for pidfds and also
1829 * parent_tidptr is used to return pidfds. 1807 * parent_tidptr is used to return pidfds.
@@ -1834,16 +1812,6 @@ static __latent_entropy struct task_struct *copy_process(
1834 if (clone_flags & 1812 if (clone_flags &
1835 (CLONE_DETACHED | CLONE_PARENT_SETTID | CLONE_THREAD)) 1813 (CLONE_DETACHED | CLONE_PARENT_SETTID | CLONE_THREAD))
1836 return ERR_PTR(-EINVAL); 1814 return ERR_PTR(-EINVAL);
1837
1838 /*
1839 * Verify that parent_tidptr is sane so we can potentially
1840 * reuse it later.
1841 */
1842 if (get_user(reserved, parent_tidptr))
1843 return ERR_PTR(-EFAULT);
1844
1845 if (reserved != 0)
1846 return ERR_PTR(-EINVAL);
1847 } 1815 }
1848 1816
1849 /* 1817 /*
@@ -2058,11 +2026,21 @@ static __latent_entropy struct task_struct *copy_process(
2058 * if the fd table isn't shared). 2026 * if the fd table isn't shared).
2059 */ 2027 */
2060 if (clone_flags & CLONE_PIDFD) { 2028 if (clone_flags & CLONE_PIDFD) {
2061 retval = pidfd_create(pid); 2029 retval = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
2062 if (retval < 0) 2030 if (retval < 0)
2063 goto bad_fork_free_pid; 2031 goto bad_fork_free_pid;
2064 2032
2065 pidfd = retval; 2033 pidfd = retval;
2034
2035 pidfile = anon_inode_getfile("[pidfd]", &pidfd_fops, pid,
2036 O_RDWR | O_CLOEXEC);
2037 if (IS_ERR(pidfile)) {
2038 put_unused_fd(pidfd);
2039 retval = PTR_ERR(pidfile);
2040 goto bad_fork_free_pid;
2041 }
2042 get_pid(pid); /* held by pidfile now */
2043
2066 retval = put_user(pidfd, parent_tidptr); 2044 retval = put_user(pidfd, parent_tidptr);
2067 if (retval) 2045 if (retval)
2068 goto bad_fork_put_pidfd; 2046 goto bad_fork_put_pidfd;
@@ -2180,6 +2158,9 @@ static __latent_entropy struct task_struct *copy_process(
2180 goto bad_fork_cancel_cgroup; 2158 goto bad_fork_cancel_cgroup;
2181 } 2159 }
2182 2160
2161 /* past the last point of failure */
2162 if (pidfile)
2163 fd_install(pidfd, pidfile);
2183 2164
2184 init_task_pid_links(p); 2165 init_task_pid_links(p);
2185 if (likely(p->pid)) { 2166 if (likely(p->pid)) {
@@ -2246,8 +2227,10 @@ bad_fork_cancel_cgroup:
2246bad_fork_cgroup_threadgroup_change_end: 2227bad_fork_cgroup_threadgroup_change_end:
2247 cgroup_threadgroup_change_end(current); 2228 cgroup_threadgroup_change_end(current);
2248bad_fork_put_pidfd: 2229bad_fork_put_pidfd:
2249 if (clone_flags & CLONE_PIDFD) 2230 if (clone_flags & CLONE_PIDFD) {
2250 ksys_close(pidfd); 2231 fput(pidfile);
2232 put_unused_fd(pidfd);
2233 }
2251bad_fork_free_pid: 2234bad_fork_free_pid:
2252 if (pid != &init_struct_pid) 2235 if (pid != &init_struct_pid)
2253 free_pid(pid); 2236 free_pid(pid);
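The copy_process() rework above replaces the early pidfd_create() with the standard two-step descriptor pattern: reserve a descriptor number, create the struct file separately, and only publish both with fd_install() once the fork can no longer fail, so an error path never has to close a descriptor user space might already see. A condensed sketch of that shape; the helper name, example_fops and data are placeholders, not code from this patch.

	static int example_install_anon_fd(const struct file_operations *example_fops,
					   void *data)
	{
		struct file *file;
		int fd;

		fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);	/* reserve a number only */
		if (fd < 0)
			return fd;

		file = anon_inode_getfile("[example]", example_fops, data,
					  O_RDWR | O_CLOEXEC);
		if (IS_ERR(file)) {
			put_unused_fd(fd);	/* nothing visible to user space yet */
			return PTR_ERR(file);
		}

		/*
		 * Work that may still fail goes here; its error path does
		 * fput(file) + put_unused_fd(fd), exactly as the reworked
		 * bad_fork_put_pidfd label above does.
		 */

		fd_install(fd, file);		/* publish only past the last failure point */
		return fd;
	}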
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 9505101ed2bc..096211299c07 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -493,6 +493,9 @@ int suspend_devices_and_enter(suspend_state_t state)
493 493
494 pm_suspend_target_state = state; 494 pm_suspend_target_state = state;
495 495
496 if (state == PM_SUSPEND_TO_IDLE)
497 pm_set_suspend_no_platform();
498
496 error = platform_suspend_begin(state); 499 error = platform_suspend_begin(state);
497 if (error) 500 if (error)
498 goto Close; 501 goto Close;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 8456b6e2205f..705887f63288 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -79,9 +79,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
79 */ 79 */
80static void ptrace_link(struct task_struct *child, struct task_struct *new_parent) 80static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
81{ 81{
82 rcu_read_lock(); 82 __ptrace_link(child, new_parent, current_cred());
83 __ptrace_link(child, new_parent, __task_cred(new_parent));
84 rcu_read_unlock();
85} 83}
86 84
87/** 85/**
diff --git a/kernel/signal.c b/kernel/signal.c
index d622eac9d169..edf8915ddd54 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2912,7 +2912,8 @@ EXPORT_SYMBOL(set_compat_user_sigmask);
2912 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and 2912 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2913 * epoll_pwait where a new sigmask is passed in from userland for the syscalls. 2913 * epoll_pwait where a new sigmask is passed in from userland for the syscalls.
2914 */ 2914 */
2915void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved) 2915void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved,
2916 bool interrupted)
2916{ 2917{
2917 2918
2918 if (!usigmask) 2919 if (!usigmask)
@@ -2922,7 +2923,7 @@ void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved)
2922 * Restoring sigmask here can lead to delivering signals that the above 2923 * Restoring sigmask here can lead to delivering signals that the above
2923 * syscalls are intended to block because of the sigmask passed in. 2924 * syscalls are intended to block because of the sigmask passed in.
2924 */ 2925 */
2925 if (signal_pending(current)) { 2926 if (interrupted) {
2926 current->saved_sigmask = *sigsaved; 2927 current->saved_sigmask = *sigsaved;
2927 set_restore_sigmask(); 2928 set_restore_sigmask();
2928 return; 2929 return;
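With the extra "interrupted" argument, the caller now states explicitly whether the syscall was interrupted instead of restore_user_sigmask() inferring it from signal_pending(), which could swallow a signal that arrived after the wait had already completed. The ppoll() hunk at the top of this diff shows the real calling pattern; below is a condensed sketch under the same signatures, where do_example_wait() is a placeholder for the syscall body.

	static long example_pwait(const sigset_t __user *usigmask, size_t sigsetsize)
	{
		sigset_t ksigmask, sigsaved;
		long ret;

		ret = set_user_sigmask(usigmask, &ksigmask, &sigsaved, sigsetsize);
		if (ret)
			return ret;

		ret = do_example_wait();	/* placeholder; may return -EINTR */

		/*
		 * Pass the interruption status explicitly: only an interrupted
		 * wait should defer the mask restore to signal delivery.
		 */
		restore_user_sigmask(usigmask, &sigsaved, ret == -EINTR);

		return ret;
	}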
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 38277af44f5c..576c41644e77 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -34,7 +34,6 @@
34#include <linux/hash.h> 34#include <linux/hash.h>
35#include <linux/rcupdate.h> 35#include <linux/rcupdate.h>
36#include <linux/kprobes.h> 36#include <linux/kprobes.h>
37#include <linux/memory.h>
38 37
39#include <trace/events/sched.h> 38#include <trace/events/sched.h>
40 39
@@ -2611,12 +2610,10 @@ static void ftrace_run_update_code(int command)
2611{ 2610{
2612 int ret; 2611 int ret;
2613 2612
2614 mutex_lock(&text_mutex);
2615
2616 ret = ftrace_arch_code_modify_prepare(); 2613 ret = ftrace_arch_code_modify_prepare();
2617 FTRACE_WARN_ON(ret); 2614 FTRACE_WARN_ON(ret);
2618 if (ret) 2615 if (ret)
2619 goto out_unlock; 2616 return;
2620 2617
2621 /* 2618 /*
2622 * By default we use stop_machine() to modify the code. 2619 * By default we use stop_machine() to modify the code.
@@ -2628,9 +2625,6 @@ static void ftrace_run_update_code(int command)
2628 2625
2629 ret = ftrace_arch_code_modify_post_process(); 2626 ret = ftrace_arch_code_modify_post_process();
2630 FTRACE_WARN_ON(ret); 2627 FTRACE_WARN_ON(ret);
2631
2632out_unlock:
2633 mutex_unlock(&text_mutex);
2634} 2628}
2635 2629
2636static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, 2630static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
@@ -5784,7 +5778,6 @@ void ftrace_module_enable(struct module *mod)
5784 struct ftrace_page *pg; 5778 struct ftrace_page *pg;
5785 5779
5786 mutex_lock(&ftrace_lock); 5780 mutex_lock(&ftrace_lock);
5787 mutex_lock(&text_mutex);
5788 5781
5789 if (ftrace_disabled) 5782 if (ftrace_disabled)
5790 goto out_unlock; 5783 goto out_unlock;
@@ -5846,7 +5839,6 @@ void ftrace_module_enable(struct module *mod)
5846 ftrace_arch_code_modify_post_process(); 5839 ftrace_arch_code_modify_post_process();
5847 5840
5848 out_unlock: 5841 out_unlock:
5849 mutex_unlock(&text_mutex);
5850 mutex_unlock(&ftrace_lock); 5842 mutex_unlock(&ftrace_lock);
5851 5843
5852 process_cached_mods(mod->name); 5844 process_cached_mods(mod->name);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 83e08b78dbee..c3aabb576fe5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6719,11 +6719,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6719 break; 6719 break;
6720 } 6720 }
6721#endif 6721#endif
6722 if (!tr->allocated_snapshot) { 6722 if (tr->allocated_snapshot)
6723 ret = resize_buffer_duplicate_size(&tr->max_buffer,
6724 &tr->trace_buffer, iter->cpu_file);
6725 else
6723 ret = tracing_alloc_snapshot_instance(tr); 6726 ret = tracing_alloc_snapshot_instance(tr);
6724 if (ret < 0) 6727 if (ret < 0)
6725 break; 6728 break;
6726 }
6727 local_irq_disable(); 6729 local_irq_disable();
6728 /* Now, we're going to swap */ 6730 /* Now, we're going to swap */
6729 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 6731 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
@@ -7126,12 +7128,24 @@ static ssize_t tracing_err_log_write(struct file *file,
7126 return count; 7128 return count;
7127} 7129}
7128 7130
7131static int tracing_err_log_release(struct inode *inode, struct file *file)
7132{
7133 struct trace_array *tr = inode->i_private;
7134
7135 trace_array_put(tr);
7136
7137 if (file->f_mode & FMODE_READ)
7138 seq_release(inode, file);
7139
7140 return 0;
7141}
7142
7129static const struct file_operations tracing_err_log_fops = { 7143static const struct file_operations tracing_err_log_fops = {
7130 .open = tracing_err_log_open, 7144 .open = tracing_err_log_open,
7131 .write = tracing_err_log_write, 7145 .write = tracing_err_log_write,
7132 .read = seq_read, 7146 .read = seq_read,
7133 .llseek = seq_lseek, 7147 .llseek = seq_lseek,
7134 .release = tracing_release_generic_tr, 7148 .release = tracing_err_log_release,
7135}; 7149};
7136 7150
7137static int tracing_buffers_open(struct inode *inode, struct file *filp) 7151static int tracing_buffers_open(struct inode *inode, struct file *filp)
diff --git a/lib/devres.c b/lib/devres.c
index 69bed2f38306..6a0e9bd6524a 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -131,7 +131,8 @@ EXPORT_SYMBOL(devm_iounmap);
131 * if (IS_ERR(base)) 131 * if (IS_ERR(base))
132 * return PTR_ERR(base); 132 * return PTR_ERR(base);
133 */ 133 */
134void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) 134void __iomem *devm_ioremap_resource(struct device *dev,
135 const struct resource *res)
135{ 136{
136 resource_size_t size; 137 resource_size_t size;
137 void __iomem *dest_ptr; 138 void __iomem *dest_ptr;
diff --git a/lib/idr.c b/lib/idr.c
index c34e256d2f01..66a374892482 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -228,11 +228,21 @@ void *idr_get_next(struct idr *idr, int *nextid)
228{ 228{
229 struct radix_tree_iter iter; 229 struct radix_tree_iter iter;
230 void __rcu **slot; 230 void __rcu **slot;
231 void *entry = NULL;
231 unsigned long base = idr->idr_base; 232 unsigned long base = idr->idr_base;
232 unsigned long id = *nextid; 233 unsigned long id = *nextid;
233 234
234 id = (id < base) ? 0 : id - base; 235 id = (id < base) ? 0 : id - base;
235 slot = radix_tree_iter_find(&idr->idr_rt, &iter, id); 236 radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
237 entry = rcu_dereference_raw(*slot);
238 if (!entry)
239 continue;
240 if (!xa_is_internal(entry))
241 break;
242 if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry))
243 break;
244 slot = radix_tree_iter_retry(&iter);
245 }
236 if (!slot) 246 if (!slot)
237 return NULL; 247 return NULL;
238 id = iter.index + base; 248 id = iter.index + base;
@@ -241,7 +251,7 @@ void *idr_get_next(struct idr *idr, int *nextid)
241 return NULL; 251 return NULL;
242 252
243 *nextid = id; 253 *nextid = id;
244 return rcu_dereference_raw(*slot); 254 return entry;
245} 255}
246EXPORT_SYMBOL(idr_get_next); 256EXPORT_SYMBOL(idr_get_next);
247 257
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index 82b19e4f1189..2fd7a46d55ec 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -24,6 +24,7 @@
24int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) 24int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
25{ 25{
26 mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL; 26 mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL;
27 struct karatsuba_ctx karactx = {};
27 mpi_ptr_t xp_marker = NULL; 28 mpi_ptr_t xp_marker = NULL;
28 mpi_ptr_t tspace = NULL; 29 mpi_ptr_t tspace = NULL;
29 mpi_ptr_t rp, ep, mp, bp; 30 mpi_ptr_t rp, ep, mp, bp;
@@ -150,13 +151,11 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
150 int c; 151 int c;
151 mpi_limb_t e; 152 mpi_limb_t e;
152 mpi_limb_t carry_limb; 153 mpi_limb_t carry_limb;
153 struct karatsuba_ctx karactx;
154 154
155 xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1)); 155 xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1));
156 if (!xp) 156 if (!xp)
157 goto enomem; 157 goto enomem;
158 158
159 memset(&karactx, 0, sizeof karactx);
160 negative_result = (ep[0] & 1) && base->sign; 159 negative_result = (ep[0] & 1) && base->sign;
161 160
162 i = esize - 1; 161 i = esize - 1;
@@ -281,8 +280,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
281 if (mod_shift_cnt) 280 if (mod_shift_cnt)
282 mpihelp_rshift(rp, rp, rsize, mod_shift_cnt); 281 mpihelp_rshift(rp, rp, rsize, mod_shift_cnt);
283 MPN_NORMALIZE(rp, rsize); 282 MPN_NORMALIZE(rp, rsize);
284
285 mpihelp_release_karatsuba_ctx(&karactx);
286 } 283 }
287 284
288 if (negative_result && rsize) { 285 if (negative_result && rsize) {
@@ -299,6 +296,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
299leave: 296leave:
300 rc = 0; 297 rc = 0;
301enomem: 298enomem:
299 mpihelp_release_karatsuba_ctx(&karactx);
302 if (assign_rp) 300 if (assign_rp)
303 mpi_assign_limb_space(res, rp, size); 301 mpi_assign_limb_space(res, rp, size);
304 if (mp_marker) 302 if (mp_marker)
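The mpi_powm() change above fixes a leak: the karatsuba context used to be declared and released inside the inner block, so the early "goto enomem" paths skipped mpihelp_release_karatsuba_ctx(). Moving the zero-initialized context to function scope and releasing it once after the common label is the usual single-exit cleanup idiom; a generic sketch of that shape follows, with all example_* names and EXAMPLE_BUF_SIZE as placeholders.

	static int example_compute(void)
	{
		struct example_ctx ctx = {};	/* safe to release even if never used */
		void *buf;
		int rc = -ENOMEM;

		buf = kmalloc(EXAMPLE_BUF_SIZE, GFP_KERNEL);	/* placeholder size */
		if (!buf)
			goto out;

		/* ... work with several possible early 'goto out' exits ... */

		rc = 0;
	out:
		example_release_ctx(&ctx);	/* placeholder release; runs on every path */
		kfree(buf);
		return rc;
	}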
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 5d4bad8bd96a..9d631a7b6a70 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -38,6 +38,12 @@ static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
38 return xa_store(xa, index, xa_mk_index(index), gfp); 38 return xa_store(xa, index, xa_mk_index(index), gfp);
39} 39}
40 40
41static void xa_insert_index(struct xarray *xa, unsigned long index)
42{
43 XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
44 GFP_KERNEL) != 0);
45}
46
41static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) 47static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
42{ 48{
43 u32 id; 49 u32 id;
@@ -338,6 +344,37 @@ static noinline void check_xa_shrink(struct xarray *xa)
338 } 344 }
339} 345}
340 346
347static noinline void check_insert(struct xarray *xa)
348{
349 unsigned long i;
350
351 for (i = 0; i < 1024; i++) {
352 xa_insert_index(xa, i);
353 XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
354 XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
355 xa_erase_index(xa, i);
356 }
357
358 for (i = 10; i < BITS_PER_LONG; i++) {
359 xa_insert_index(xa, 1UL << i);
360 XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
361 XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
362 xa_erase_index(xa, 1UL << i);
363
364 xa_insert_index(xa, (1UL << i) - 1);
365 XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
366 XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
367 xa_erase_index(xa, (1UL << i) - 1);
368 }
369
370 xa_insert_index(xa, ~0UL);
371 XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
372 XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
373 xa_erase_index(xa, ~0UL);
374
375 XA_BUG_ON(xa, !xa_empty(xa));
376}
377
341static noinline void check_cmpxchg(struct xarray *xa) 378static noinline void check_cmpxchg(struct xarray *xa)
342{ 379{
343 void *FIVE = xa_mk_value(5); 380 void *FIVE = xa_mk_value(5);
@@ -1527,6 +1564,7 @@ static int xarray_checks(void)
1527 check_xa_mark(&array); 1564 check_xa_mark(&array);
1528 check_xa_shrink(&array); 1565 check_xa_shrink(&array);
1529 check_xas_erase(&array); 1566 check_xas_erase(&array);
1567 check_insert(&array);
1530 check_cmpxchg(&array); 1568 check_cmpxchg(&array);
1531 check_reserve(&array); 1569 check_reserve(&array);
1532 check_reserve(&xa0); 1570 check_reserve(&xa0);
diff --git a/lib/xarray.c b/lib/xarray.c
index 6be3acbb861f..446b956c9188 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -298,6 +298,8 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp)
298 xas_destroy(xas); 298 xas_destroy(xas);
299 return false; 299 return false;
300 } 300 }
301 if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
302 gfp |= __GFP_ACCOUNT;
301 xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); 303 xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
302 if (!xas->xa_alloc) 304 if (!xas->xa_alloc)
303 return false; 305 return false;
@@ -325,6 +327,8 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
325 xas_destroy(xas); 327 xas_destroy(xas);
326 return false; 328 return false;
327 } 329 }
330 if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
331 gfp |= __GFP_ACCOUNT;
328 if (gfpflags_allow_blocking(gfp)) { 332 if (gfpflags_allow_blocking(gfp)) {
329 xas_unlock_type(xas, lock_type); 333 xas_unlock_type(xas, lock_type);
330 xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); 334 xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
@@ -358,8 +362,12 @@ static void *xas_alloc(struct xa_state *xas, unsigned int shift)
358 if (node) { 362 if (node) {
359 xas->xa_alloc = NULL; 363 xas->xa_alloc = NULL;
360 } else { 364 } else {
361 node = kmem_cache_alloc(radix_tree_node_cachep, 365 gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;
362 GFP_NOWAIT | __GFP_NOWARN); 366
367 if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
368 gfp |= __GFP_ACCOUNT;
369
370 node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
363 if (!node) { 371 if (!node) {
364 xas_set_err(xas, -ENOMEM); 372 xas_set_err(xas, -ENOMEM);
365 return NULL; 373 return NULL;
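The new XA_FLAGS_ACCOUNT flag, wired up in the three xas_*alloc paths above, makes the XArray add __GFP_ACCOUNT to its internal node allocations so the nodes are charged to the caller's memory cgroup. Callers opt in when declaring the array; a minimal usage sketch, where the array name and stored object are illustrative.

	/* illustrative: charge internal nodes of this array to the current memcg */
	static DEFINE_XARRAY_FLAGS(example_accounted_xa, XA_FLAGS_ACCOUNT);

	static int example_store(unsigned long index, void *object)
	{
		/* node allocations triggered here now use GFP_KERNEL | __GFP_ACCOUNT */
		return xa_err(xa_store(&example_accounted_xa, index, object, GFP_KERNEL));
	}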
diff --git a/mm/filemap.c b/mm/filemap.c
index df2006ba0cfa..6dd9a2274c80 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -281,11 +281,11 @@ EXPORT_SYMBOL(delete_from_page_cache);
281 * @pvec: pagevec with pages to delete 281 * @pvec: pagevec with pages to delete
282 * 282 *
283 * The function walks over mapping->i_pages and removes pages passed in @pvec 283 * The function walks over mapping->i_pages and removes pages passed in @pvec
284 * from the mapping. The function expects @pvec to be sorted by page index 284 * from the mapping. The function expects @pvec to be sorted by page index.
285 * and is optimised for it to be dense.
286 * It tolerates holes in @pvec (mapping entries at those indices are not 285 * It tolerates holes in @pvec (mapping entries at those indices are not
287 * modified). The function expects only THP head pages to be present in the 286 * modified). The function expects only THP head pages to be present in the
288 * @pvec. 287 * @pvec and takes care to delete all corresponding tail pages from the
288 * mapping as well.
289 * 289 *
290 * The function expects the i_pages lock to be held. 290 * The function expects the i_pages lock to be held.
291 */ 291 */
@@ -294,44 +294,40 @@ static void page_cache_delete_batch(struct address_space *mapping,
294{ 294{
295 XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index); 295 XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
296 int total_pages = 0; 296 int total_pages = 0;
297 int i = 0; 297 int i = 0, tail_pages = 0;
298 struct page *page; 298 struct page *page;
299 299
300 mapping_set_update(&xas, mapping); 300 mapping_set_update(&xas, mapping);
301 xas_for_each(&xas, page, ULONG_MAX) { 301 xas_for_each(&xas, page, ULONG_MAX) {
302 if (i >= pagevec_count(pvec)) 302 if (i >= pagevec_count(pvec) && !tail_pages)
303 break; 303 break;
304
305 /* A swap/dax/shadow entry got inserted? Skip it. */
306 if (xa_is_value(page)) 304 if (xa_is_value(page))
307 continue; 305 continue;
308 /* 306 if (!tail_pages) {
309 * A page got inserted in our range? Skip it. We have our 307 /*
310 * pages locked so they are protected from being removed. 308 * Some page got inserted in our range? Skip it. We
311 * If we see a page whose index is higher than ours, it 309 * have our pages locked so they are protected from
312 * means our page has been removed, which shouldn't be 310 * being removed.
313 * possible because we're holding the PageLock. 311 */
314 */ 312 if (page != pvec->pages[i]) {
315 if (page != pvec->pages[i]) { 313 VM_BUG_ON_PAGE(page->index >
316 VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index, 314 pvec->pages[i]->index, page);
317 page); 315 continue;
318 continue; 316 }
319 } 317 WARN_ON_ONCE(!PageLocked(page));
320 318 if (PageTransHuge(page) && !PageHuge(page))
321 WARN_ON_ONCE(!PageLocked(page)); 319 tail_pages = HPAGE_PMD_NR - 1;
322
323 if (page->index == xas.xa_index)
324 page->mapping = NULL; 320 page->mapping = NULL;
325 /* Leave page->index set: truncation lookup relies on it */ 321 /*
326 322 * Leave page->index set: truncation lookup relies
327 /* 323 * upon it
328 * Move to the next page in the vector if this is a regular 324 */
329 * page or the index is of the last sub-page of this compound
330 * page.
331 */
332 if (page->index + (1UL << compound_order(page)) - 1 ==
333 xas.xa_index)
334 i++; 325 i++;
326 } else {
327 VM_BUG_ON_PAGE(page->index + HPAGE_PMD_NR - tail_pages
328 != pvec->pages[i]->index, page);
329 tail_pages--;
330 }
335 xas_store(&xas, NULL); 331 xas_store(&xas, NULL);
336 total_pages++; 332 total_pages++;
337 } 333 }
@@ -1498,7 +1494,7 @@ EXPORT_SYMBOL(page_cache_prev_miss);
1498struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) 1494struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
1499{ 1495{
1500 XA_STATE(xas, &mapping->i_pages, offset); 1496 XA_STATE(xas, &mapping->i_pages, offset);
1501 struct page *page; 1497 struct page *head, *page;
1502 1498
1503 rcu_read_lock(); 1499 rcu_read_lock();
1504repeat: 1500repeat:
@@ -1513,19 +1509,25 @@ repeat:
1513 if (!page || xa_is_value(page)) 1509 if (!page || xa_is_value(page))
1514 goto out; 1510 goto out;
1515 1511
1516 if (!page_cache_get_speculative(page)) 1512 head = compound_head(page);
1513 if (!page_cache_get_speculative(head))
1514 goto repeat;
1515
1516 /* The page was split under us? */
1517 if (compound_head(page) != head) {
1518 put_page(head);
1517 goto repeat; 1519 goto repeat;
1520 }
1518 1521
1519 /* 1522 /*
1520 * Has the page moved or been split? 1523 * Has the page moved?
1521 * This is part of the lockless pagecache protocol. See 1524 * This is part of the lockless pagecache protocol. See
1522 * include/linux/pagemap.h for details. 1525 * include/linux/pagemap.h for details.
1523 */ 1526 */
1524 if (unlikely(page != xas_reload(&xas))) { 1527 if (unlikely(page != xas_reload(&xas))) {
1525 put_page(page); 1528 put_page(head);
1526 goto repeat; 1529 goto repeat;
1527 } 1530 }
1528 page = find_subpage(page, offset);
1529out: 1531out:
1530 rcu_read_unlock(); 1532 rcu_read_unlock();
1531 1533
@@ -1707,6 +1709,7 @@ unsigned find_get_entries(struct address_space *mapping,
1707 1709
1708 rcu_read_lock(); 1710 rcu_read_lock();
1709 xas_for_each(&xas, page, ULONG_MAX) { 1711 xas_for_each(&xas, page, ULONG_MAX) {
1712 struct page *head;
1710 if (xas_retry(&xas, page)) 1713 if (xas_retry(&xas, page))
1711 continue; 1714 continue;
1712 /* 1715 /*
@@ -1717,13 +1720,17 @@ unsigned find_get_entries(struct address_space *mapping,
1717 if (xa_is_value(page)) 1720 if (xa_is_value(page))
1718 goto export; 1721 goto export;
1719 1722
1720 if (!page_cache_get_speculative(page)) 1723 head = compound_head(page);
1724 if (!page_cache_get_speculative(head))
1721 goto retry; 1725 goto retry;
1722 1726
1723 /* Has the page moved or been split? */ 1727 /* The page was split under us? */
1728 if (compound_head(page) != head)
1729 goto put_page;
1730
1731 /* Has the page moved? */
1724 if (unlikely(page != xas_reload(&xas))) 1732 if (unlikely(page != xas_reload(&xas)))
1725 goto put_page; 1733 goto put_page;
1726 page = find_subpage(page, xas.xa_index);
1727 1734
1728export: 1735export:
1729 indices[ret] = xas.xa_index; 1736 indices[ret] = xas.xa_index;
@@ -1732,7 +1739,7 @@ export:
1732 break; 1739 break;
1733 continue; 1740 continue;
1734put_page: 1741put_page:
1735 put_page(page); 1742 put_page(head);
1736retry: 1743retry:
1737 xas_reset(&xas); 1744 xas_reset(&xas);
1738 } 1745 }
@@ -1774,27 +1781,33 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
1774 1781
1775 rcu_read_lock(); 1782 rcu_read_lock();
1776 xas_for_each(&xas, page, end) { 1783 xas_for_each(&xas, page, end) {
1784 struct page *head;
1777 if (xas_retry(&xas, page)) 1785 if (xas_retry(&xas, page))
1778 continue; 1786 continue;
1779 /* Skip over shadow, swap and DAX entries */ 1787 /* Skip over shadow, swap and DAX entries */
1780 if (xa_is_value(page)) 1788 if (xa_is_value(page))
1781 continue; 1789 continue;
1782 1790
1783 if (!page_cache_get_speculative(page)) 1791 head = compound_head(page);
1792 if (!page_cache_get_speculative(head))
1784 goto retry; 1793 goto retry;
1785 1794
1786 /* Has the page moved or been split? */ 1795 /* The page was split under us? */
1796 if (compound_head(page) != head)
1797 goto put_page;
1798
1799 /* Has the page moved? */
1787 if (unlikely(page != xas_reload(&xas))) 1800 if (unlikely(page != xas_reload(&xas)))
1788 goto put_page; 1801 goto put_page;
1789 1802
1790 pages[ret] = find_subpage(page, xas.xa_index); 1803 pages[ret] = page;
1791 if (++ret == nr_pages) { 1804 if (++ret == nr_pages) {
1792 *start = xas.xa_index + 1; 1805 *start = xas.xa_index + 1;
1793 goto out; 1806 goto out;
1794 } 1807 }
1795 continue; 1808 continue;
1796put_page: 1809put_page:
1797 put_page(page); 1810 put_page(head);
1798retry: 1811retry:
1799 xas_reset(&xas); 1812 xas_reset(&xas);
1800 } 1813 }
@@ -1839,6 +1852,7 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
1839 1852
1840 rcu_read_lock(); 1853 rcu_read_lock();
1841 for (page = xas_load(&xas); page; page = xas_next(&xas)) { 1854 for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1855 struct page *head;
1842 if (xas_retry(&xas, page)) 1856 if (xas_retry(&xas, page))
1843 continue; 1857 continue;
1844 /* 1858 /*
@@ -1848,19 +1862,24 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
1848 if (xa_is_value(page)) 1862 if (xa_is_value(page))
1849 break; 1863 break;
1850 1864
1851 if (!page_cache_get_speculative(page)) 1865 head = compound_head(page);
1866 if (!page_cache_get_speculative(head))
1852 goto retry; 1867 goto retry;
1853 1868
1854 /* Has the page moved or been split? */ 1869 /* The page was split under us? */
1870 if (compound_head(page) != head)
1871 goto put_page;
1872
1873 /* Has the page moved? */
1855 if (unlikely(page != xas_reload(&xas))) 1874 if (unlikely(page != xas_reload(&xas)))
1856 goto put_page; 1875 goto put_page;
1857 1876
1858 pages[ret] = find_subpage(page, xas.xa_index); 1877 pages[ret] = page;
1859 if (++ret == nr_pages) 1878 if (++ret == nr_pages)
1860 break; 1879 break;
1861 continue; 1880 continue;
1862put_page: 1881put_page:
1863 put_page(page); 1882 put_page(head);
1864retry: 1883retry:
1865 xas_reset(&xas); 1884 xas_reset(&xas);
1866 } 1885 }
@@ -1896,6 +1915,7 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
1896 1915
1897 rcu_read_lock(); 1916 rcu_read_lock();
1898 xas_for_each_marked(&xas, page, end, tag) { 1917 xas_for_each_marked(&xas, page, end, tag) {
1918 struct page *head;
1899 if (xas_retry(&xas, page)) 1919 if (xas_retry(&xas, page))
1900 continue; 1920 continue;
1901 /* 1921 /*
@@ -1906,21 +1926,26 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
1906 if (xa_is_value(page)) 1926 if (xa_is_value(page))
1907 continue; 1927 continue;
1908 1928
1909 if (!page_cache_get_speculative(page)) 1929 head = compound_head(page);
1930 if (!page_cache_get_speculative(head))
1910 goto retry; 1931 goto retry;
1911 1932
1912 /* Has the page moved or been split? */ 1933 /* The page was split under us? */
1934 if (compound_head(page) != head)
1935 goto put_page;
1936
1937 /* Has the page moved? */
1913 if (unlikely(page != xas_reload(&xas))) 1938 if (unlikely(page != xas_reload(&xas)))
1914 goto put_page; 1939 goto put_page;
1915 1940
1916 pages[ret] = find_subpage(page, xas.xa_index); 1941 pages[ret] = page;
1917 if (++ret == nr_pages) { 1942 if (++ret == nr_pages) {
1918 *index = xas.xa_index + 1; 1943 *index = xas.xa_index + 1;
1919 goto out; 1944 goto out;
1920 } 1945 }
1921 continue; 1946 continue;
1922put_page: 1947put_page:
1923 put_page(page); 1948 put_page(head);
1924retry: 1949retry:
1925 xas_reset(&xas); 1950 xas_reset(&xas);
1926 } 1951 }
@@ -2603,7 +2628,7 @@ void filemap_map_pages(struct vm_fault *vmf,
2603 pgoff_t last_pgoff = start_pgoff; 2628 pgoff_t last_pgoff = start_pgoff;
2604 unsigned long max_idx; 2629 unsigned long max_idx;
2605 XA_STATE(xas, &mapping->i_pages, start_pgoff); 2630 XA_STATE(xas, &mapping->i_pages, start_pgoff);
2606 struct page *page; 2631 struct page *head, *page;
2607 2632
2608 rcu_read_lock(); 2633 rcu_read_lock();
2609 xas_for_each(&xas, page, end_pgoff) { 2634 xas_for_each(&xas, page, end_pgoff) {
@@ -2612,19 +2637,24 @@ void filemap_map_pages(struct vm_fault *vmf,
2612 if (xa_is_value(page)) 2637 if (xa_is_value(page))
2613 goto next; 2638 goto next;
2614 2639
2640 head = compound_head(page);
2641
2615 /* 2642 /*
2616 * Check for a locked page first, as a speculative 2643 * Check for a locked page first, as a speculative
2617 * reference may adversely influence page migration. 2644 * reference may adversely influence page migration.
2618 */ 2645 */
2619 if (PageLocked(page)) 2646 if (PageLocked(head))
2620 goto next; 2647 goto next;
2621 if (!page_cache_get_speculative(page)) 2648 if (!page_cache_get_speculative(head))
2622 goto next; 2649 goto next;
2623 2650
2624 /* Has the page moved or been split? */ 2651 /* The page was split under us? */
2652 if (compound_head(page) != head)
2653 goto skip;
2654
2655 /* Has the page moved? */
2625 if (unlikely(page != xas_reload(&xas))) 2656 if (unlikely(page != xas_reload(&xas)))
2626 goto skip; 2657 goto skip;
2627 page = find_subpage(page, xas.xa_index);
2628 2658
2629 if (!PageUptodate(page) || 2659 if (!PageUptodate(page) ||
2630 PageReadahead(page) || 2660 PageReadahead(page) ||
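The filemap hunks above change the page-cache lookup protocol: the speculative reference is now taken on the compound head, and both the split check (compound_head(page) != head) and the xas_reload() recheck must pass before the page is handed back; on any failure the reference is dropped from the head, not the subpage. A minimal user-space sketch of that ordering, using a toy page structure and C11 atomics rather than the real struct page/XArray API (every name below is illustrative, not a kernel symbol):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
    struct toy_page *head;   /* compound head; points at itself for a head page */
    atomic_int refcount;
};

/* Take a reference on the head only if it is still live (refcount > 0). */
static bool get_speculative(struct toy_page *head)
{
    int ref = atomic_load(&head->refcount);

    while (ref > 0) {
        if (atomic_compare_exchange_weak(&head->refcount, &ref, ref + 1))
            return true;
    }
    return false;
}

static void put_page_ref(struct toy_page *head)
{
    atomic_fetch_sub(&head->refcount, 1);
}

/*
 * Lookup protocol mirrored from the diff: pin the head, then confirm the
 * subpage still belongs to that head (no split), then confirm the slot
 * still points at the same subpage (no move); otherwise drop the pin.
 */
static struct toy_page *lookup(struct toy_page **slot)
{
    struct toy_page *page = *slot;
    struct toy_page *head = page->head;

    if (!get_speculative(head))
        return NULL;
    if (page->head != head || *slot != page) {  /* split or moved under us */
        put_page_ref(head);
        return NULL;
    }
    return page;
}

int main(void)
{
    struct toy_page head = { .head = &head, .refcount = 1 };
    struct toy_page tail = { .head = &head, .refcount = 0 };
    struct toy_page *slot = &tail;

    printf("lookup %s\n", lookup(&slot) ? "succeeded" : "failed");
    return 0;
}

The property the diff enforces is the order of operations: pin first, then revalidate both the head relationship and the slot before trusting the entry.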
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bb8b617e34ed..885642c82aaa 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2496,9 +2496,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
2496 if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) 2496 if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
2497 shmem_uncharge(head->mapping->host, 1); 2497 shmem_uncharge(head->mapping->host, 1);
2498 put_page(head + i); 2498 put_page(head + i);
2499 } else if (!PageAnon(page)) {
2500 __xa_store(&head->mapping->i_pages, head[i].index,
2501 head + i, 0);
2502 } 2499 }
2503 } 2500 }
2504 2501
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ac843d32b019..ede7e7f5d1ab 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1510,16 +1510,29 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1510 1510
1511/* 1511/*
1512 * Dissolve a given free hugepage into free buddy pages. This function does 1512 * Dissolve a given free hugepage into free buddy pages. This function does
1513 * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the 1513 * nothing for in-use hugepages and non-hugepages.
1514 * dissolution fails because a give page is not a free hugepage, or because 1514 * This function returns values like below:
1515 * free hugepages are fully reserved. 1515 *
1516 * -EBUSY: failed to dissolved free hugepages or the hugepage is in-use
1517 * (allocated or reserved.)
1518 * 0: successfully dissolved free hugepages or the page is not a
1519 * hugepage (considered as already dissolved)
1516 */ 1520 */
1517int dissolve_free_huge_page(struct page *page) 1521int dissolve_free_huge_page(struct page *page)
1518{ 1522{
1519 int rc = -EBUSY; 1523 int rc = -EBUSY;
1520 1524
1525 /* Not to disrupt normal path by vainly holding hugetlb_lock */
1526 if (!PageHuge(page))
1527 return 0;
1528
1521 spin_lock(&hugetlb_lock); 1529 spin_lock(&hugetlb_lock);
1522 if (PageHuge(page) && !page_count(page)) { 1530 if (!PageHuge(page)) {
1531 rc = 0;
1532 goto out;
1533 }
1534
1535 if (!page_count(page)) {
1523 struct page *head = compound_head(page); 1536 struct page *head = compound_head(page);
1524 struct hstate *h = page_hstate(head); 1537 struct hstate *h = page_hstate(head);
1525 int nid = page_to_nid(head); 1538 int nid = page_to_nid(head);
@@ -1564,11 +1577,9 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1564 1577
1565 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) { 1578 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1566 page = pfn_to_page(pfn); 1579 page = pfn_to_page(pfn);
1567 if (PageHuge(page) && !page_count(page)) { 1580 rc = dissolve_free_huge_page(page);
1568 rc = dissolve_free_huge_page(page); 1581 if (rc)
1569 if (rc) 1582 break;
1570 break;
1571 }
1572 } 1583 }
1573 1584
1574 return rc; 1585 return rc;
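After the hugetlb change above, dissolve_free_huge_page() returns 0 both on success and when the page is not (or no longer) a hugepage, and -EBUSY only when a hugepage is genuinely in use or reserved, so the range loop can call it unconditionally and stop on the first real failure. A hedged stand-alone model of that return contract (toy types, not the kernel API):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
    bool is_huge;
    int refcount;   /* in use (or reserved) when non-zero */
};

/* 0: dissolved or already not a hugepage; -EBUSY: in use, cannot dissolve. */
static int dissolve_free_toy_page(struct toy_page *page)
{
    if (!page->is_huge)
        return 0;
    if (page->refcount)
        return -EBUSY;
    page->is_huge = false;  /* hand the page back to the buddy allocator */
    return 0;
}

static int dissolve_free_toy_pages(struct toy_page *pages, int nr)
{
    int rc = 0;

    for (int i = 0; i < nr; i++) {
        rc = dissolve_free_toy_page(&pages[i]);
        if (rc)
            break;  /* stop on the first page that is really busy */
    }
    return rc;
}

int main(void)
{
    struct toy_page range[] = {
        { .is_huge = false },                 /* ordinary page: 0 */
        { .is_huge = true,  .refcount = 0 },  /* free hugepage: 0 */
        { .is_huge = true,  .refcount = 1 },  /* in use: -EBUSY */
    };

    printf("rc = %d\n", dissolve_free_toy_pages(range, 3));
    return 0;
}

The same contract is what lets the memory-failure hunk further below drop its own PageHuge() pre-check before calling dissolve_free_huge_page().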
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 0f7419938008..eaaa21b23215 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1378,7 +1378,7 @@ static void collapse_shmem(struct mm_struct *mm,
1378 result = SCAN_FAIL; 1378 result = SCAN_FAIL;
1379 goto xa_locked; 1379 goto xa_locked;
1380 } 1380 }
1381 xas_store(&xas, new_page); 1381 xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
1382 nr_none++; 1382 nr_none++;
1383 continue; 1383 continue;
1384 } 1384 }
@@ -1454,7 +1454,7 @@ static void collapse_shmem(struct mm_struct *mm,
1454 list_add_tail(&page->lru, &pagelist); 1454 list_add_tail(&page->lru, &pagelist);
1455 1455
1456 /* Finally, replace with the new page. */ 1456 /* Finally, replace with the new page. */
1457 xas_store(&xas, new_page); 1457 xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
1458 continue; 1458 continue;
1459out_unlock: 1459out_unlock:
1460 unlock_page(page); 1460 unlock_page(page);
diff --git a/mm/memfd.c b/mm/memfd.c
index 2647c898990c..650e65a46b9c 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -39,7 +39,6 @@ static void memfd_tag_pins(struct xa_state *xas)
39 xas_for_each(xas, page, ULONG_MAX) { 39 xas_for_each(xas, page, ULONG_MAX) {
40 if (xa_is_value(page)) 40 if (xa_is_value(page))
41 continue; 41 continue;
42 page = find_subpage(page, xas->xa_index);
43 if (page_count(page) - page_mapcount(page) > 1) 42 if (page_count(page) - page_mapcount(page) > 1)
44 xas_set_mark(xas, MEMFD_TAG_PINNED); 43 xas_set_mark(xas, MEMFD_TAG_PINNED);
45 44
@@ -89,7 +88,6 @@ static int memfd_wait_for_pins(struct address_space *mapping)
89 bool clear = true; 88 bool clear = true;
90 if (xa_is_value(page)) 89 if (xa_is_value(page))
91 continue; 90 continue;
92 page = find_subpage(page, xas.xa_index);
93 if (page_count(page) - page_mapcount(page) != 1) { 91 if (page_count(page) - page_mapcount(page) != 1) {
94 /* 92 /*
95 * On the last scan, we clean up all those tags 93 * On the last scan, we clean up all those tags
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 8da0334b9ca0..d9cc6606f409 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1730,6 +1730,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
1730 if (!ret) { 1730 if (!ret) {
1731 if (set_hwpoison_free_buddy_page(page)) 1731 if (set_hwpoison_free_buddy_page(page))
1732 num_poisoned_pages_inc(); 1732 num_poisoned_pages_inc();
1733 else
1734 ret = -EBUSY;
1733 } 1735 }
1734 } 1736 }
1735 return ret; 1737 return ret;
@@ -1854,11 +1856,8 @@ static int soft_offline_in_use_page(struct page *page, int flags)
1854 1856
1855static int soft_offline_free_page(struct page *page) 1857static int soft_offline_free_page(struct page *page)
1856{ 1858{
1857 int rc = 0; 1859 int rc = dissolve_free_huge_page(page);
1858 struct page *head = compound_head(page);
1859 1860
1860 if (PageHuge(head))
1861 rc = dissolve_free_huge_page(page);
1862 if (!rc) { 1861 if (!rc) {
1863 if (set_hwpoison_free_buddy_page(page)) 1862 if (set_hwpoison_free_buddy_page(page))
1864 num_poisoned_pages_inc(); 1863 num_poisoned_pages_inc();
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 01600d80ae01..fdcb73536319 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -306,7 +306,7 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
306 else { 306 else {
307 nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed, 307 nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
308 *nodes); 308 *nodes);
309 pol->w.cpuset_mems_allowed = tmp; 309 pol->w.cpuset_mems_allowed = *nodes;
310 } 310 }
311 311
312 if (nodes_empty(tmp)) 312 if (nodes_empty(tmp))
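The one-line mempolicy fix above records the new cpuset mask (*nodes) as w.cpuset_mems_allowed instead of the remapped policy nodes, so the next rebind remaps relative to the mask that was actually in force. A small bitmask model of that bookkeeping, with plain uint32_t masks standing in for nodemask_t and a hand-rolled stand-in for nodes_remap() (all names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Map each bit of "nodes" by its rank within "from" onto the bit of "to"
 * that has the same rank (a tiny stand-in for nodes_remap()). */
static uint32_t remap(uint32_t nodes, uint32_t from, uint32_t to)
{
    uint32_t out = 0;
    int rank_from = 0;

    for (int bit = 0; bit < 32; bit++) {
        if (!(from & (1u << bit)))
            continue;
        if (nodes & (1u << bit)) {
            int seen = 0;
            for (int t = 0; t < 32; t++) {
                if ((to & (1u << t)) && seen++ == rank_from) {
                    out |= 1u << t;
                    break;
                }
            }
        }
        rank_from++;
    }
    return out;
}

struct toy_policy {
    uint32_t nodes;                /* models pol->v.nodes */
    uint32_t cpuset_mems_allowed;  /* models pol->w.cpuset_mems_allowed */
};

static void rebind(struct toy_policy *pol, uint32_t new_allowed)
{
    pol->nodes = remap(pol->nodes, pol->cpuset_mems_allowed, new_allowed);
    /* The fix: remember the cpuset mask we just rebound against,
     * not the remapped policy nodes. */
    pol->cpuset_mems_allowed = new_allowed;
}

int main(void)
{
    struct toy_policy pol = { .nodes = 0x1, .cpuset_mems_allowed = 0x3 };

    rebind(&pol, 0xc);  /* cpuset moves from nodes {0,1} to {2,3} */
    rebind(&pol, 0x3);  /* and back again */
    printf("nodes=%#x allowed=%#x\n", pol.nodes, pol.cpuset_mems_allowed);
    return 0;
}

With the corrected bookkeeping the round trip lands back on node 0; recording the remapped nodes instead would make later rebinds drift.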
diff --git a/mm/migrate.c b/mm/migrate.c
index f2ecc2855a12..e9594bc0d406 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -463,7 +463,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
463 463
464 for (i = 1; i < HPAGE_PMD_NR; i++) { 464 for (i = 1; i < HPAGE_PMD_NR; i++) {
465 xas_next(&xas); 465 xas_next(&xas);
466 xas_store(&xas, newpage); 466 xas_store(&xas, newpage + i);
467 } 467 }
468 } 468 }
469 469
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 5a58778c91d4..f719b64741d6 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -987,8 +987,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
987/* 987/*
988 * Determines whether the kernel must panic because of the panic_on_oom sysctl. 988 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
989 */ 989 */
990static void check_panic_on_oom(struct oom_control *oc, 990static void check_panic_on_oom(struct oom_control *oc)
991 enum oom_constraint constraint)
992{ 991{
993 if (likely(!sysctl_panic_on_oom)) 992 if (likely(!sysctl_panic_on_oom))
994 return; 993 return;
@@ -998,7 +997,7 @@ static void check_panic_on_oom(struct oom_control *oc,
998 * does not panic for cpuset, mempolicy, or memcg allocation 997 * does not panic for cpuset, mempolicy, or memcg allocation
999 * failures. 998 * failures.
1000 */ 999 */
1001 if (constraint != CONSTRAINT_NONE) 1000 if (oc->constraint != CONSTRAINT_NONE)
1002 return; 1001 return;
1003 } 1002 }
1004 /* Do not panic for oom kills triggered by sysrq */ 1003 /* Do not panic for oom kills triggered by sysrq */
@@ -1035,7 +1034,6 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
1035bool out_of_memory(struct oom_control *oc) 1034bool out_of_memory(struct oom_control *oc)
1036{ 1035{
1037 unsigned long freed = 0; 1036 unsigned long freed = 0;
1038 enum oom_constraint constraint = CONSTRAINT_NONE;
1039 1037
1040 if (oom_killer_disabled) 1038 if (oom_killer_disabled)
1041 return false; 1039 return false;
@@ -1071,10 +1069,10 @@ bool out_of_memory(struct oom_control *oc)
1071 * Check if there were limitations on the allocation (only relevant for 1069 * Check if there were limitations on the allocation (only relevant for
1072 * NUMA and memcg) that may require different handling. 1070 * NUMA and memcg) that may require different handling.
1073 */ 1071 */
1074 constraint = constrained_alloc(oc); 1072 oc->constraint = constrained_alloc(oc);
1075 if (constraint != CONSTRAINT_MEMORY_POLICY) 1073 if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
1076 oc->nodemask = NULL; 1074 oc->nodemask = NULL;
1077 check_panic_on_oom(oc, constraint); 1075 check_panic_on_oom(oc);
1078 1076
1079 if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task && 1077 if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
1080 current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) && 1078 current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d66bc8abe0af..8e3bc949ebcc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1826,7 +1826,8 @@ deferred_grow_zone(struct zone *zone, unsigned int order)
1826 first_deferred_pfn)) { 1826 first_deferred_pfn)) {
1827 pgdat->first_deferred_pfn = ULONG_MAX; 1827 pgdat->first_deferred_pfn = ULONG_MAX;
1828 pgdat_resize_unlock(pgdat, &flags); 1828 pgdat_resize_unlock(pgdat, &flags);
1829 return true; 1829 /* Retry only once. */
1830 return first_deferred_pfn != ULONG_MAX;
1830 } 1831 }
1831 1832
1832 /* 1833 /*
diff --git a/mm/page_idle.c b/mm/page_idle.c
index 0b39ec0c945c..295512465065 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -136,7 +136,7 @@ static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
136 136
137 end_pfn = pfn + count * BITS_PER_BYTE; 137 end_pfn = pfn + count * BITS_PER_BYTE;
138 if (end_pfn > max_pfn) 138 if (end_pfn > max_pfn)
139 end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS); 139 end_pfn = max_pfn;
140 140
141 for (; pfn < end_pfn; pfn++) { 141 for (; pfn < end_pfn; pfn++) {
142 bit = pfn % BITMAP_CHUNK_BITS; 142 bit = pfn % BITMAP_CHUNK_BITS;
@@ -181,7 +181,7 @@ static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
181 181
182 end_pfn = pfn + count * BITS_PER_BYTE; 182 end_pfn = pfn + count * BITS_PER_BYTE;
183 if (end_pfn > max_pfn) 183 if (end_pfn > max_pfn)
184 end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS); 184 end_pfn = max_pfn;
185 185
186 for (; pfn < end_pfn; pfn++) { 186 for (; pfn < end_pfn; pfn++) {
187 bit = pfn % BITMAP_CHUNK_BITS; 187 bit = pfn % BITMAP_CHUNK_BITS;
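The page_idle hunks clamp end_pfn to max_pfn instead of rounding it up to the next bitmap chunk, which previously let the loop walk pfns past the end of memory. A two-printf arithmetic sketch of the difference, with illustrative values only:

#include <stdio.h>

#define BITMAP_CHUNK_BITS 64
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
    unsigned long max_pfn = 1000, pfn = 960, count = 16;
    unsigned long end_pfn = pfn + count * 8;  /* 1088, already past max_pfn */

    printf("old clamp: %lu (overruns by %lu pfns)\n",
           ALIGN(max_pfn, BITMAP_CHUNK_BITS),
           ALIGN(max_pfn, BITMAP_CHUNK_BITS) - max_pfn);
    printf("new clamp: %lu\n",
           end_pfn > max_pfn ? max_pfn : end_pfn);
    return 0;
}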
diff --git a/mm/page_io.c b/mm/page_io.c
index 2e8019d0e048..a39aac2f8c8d 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -29,10 +29,9 @@
29static struct bio *get_swap_bio(gfp_t gfp_flags, 29static struct bio *get_swap_bio(gfp_t gfp_flags,
30 struct page *page, bio_end_io_t end_io) 30 struct page *page, bio_end_io_t end_io)
31{ 31{
32 int i, nr = hpage_nr_pages(page);
33 struct bio *bio; 32 struct bio *bio;
34 33
35 bio = bio_alloc(gfp_flags, nr); 34 bio = bio_alloc(gfp_flags, 1);
36 if (bio) { 35 if (bio) {
37 struct block_device *bdev; 36 struct block_device *bdev;
38 37
@@ -41,9 +40,7 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
41 bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; 40 bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
42 bio->bi_end_io = end_io; 41 bio->bi_end_io = end_io;
43 42
44 for (i = 0; i < nr; i++) 43 bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0);
45 bio_add_page(bio, page + i, PAGE_SIZE, 0);
46 VM_BUG_ON(bio->bi_iter.bi_size != PAGE_SIZE * nr);
47 } 44 }
48 return bio; 45 return bio;
49} 46}
@@ -140,8 +137,10 @@ out:
140 unlock_page(page); 137 unlock_page(page);
141 WRITE_ONCE(bio->bi_private, NULL); 138 WRITE_ONCE(bio->bi_private, NULL);
142 bio_put(bio); 139 bio_put(bio);
143 blk_wake_io_task(waiter); 140 if (waiter) {
144 put_task_struct(waiter); 141 blk_wake_io_task(waiter);
142 put_task_struct(waiter);
143 }
145} 144}
146 145
147int generic_swapfile_activate(struct swap_info_struct *sis, 146int generic_swapfile_activate(struct swap_info_struct *sis,
@@ -398,11 +397,12 @@ int swap_readpage(struct page *page, bool synchronous)
398 * Keep this task valid during swap readpage because the oom killer may 397 * Keep this task valid during swap readpage because the oom killer may
399 * attempt to access it in the page fault retry time check. 398 * attempt to access it in the page fault retry time check.
400 */ 399 */
401 get_task_struct(current);
402 bio->bi_private = current;
403 bio_set_op_attrs(bio, REQ_OP_READ, 0); 400 bio_set_op_attrs(bio, REQ_OP_READ, 0);
404 if (synchronous) 401 if (synchronous) {
405 bio->bi_opf |= REQ_HIPRI; 402 bio->bi_opf |= REQ_HIPRI;
403 get_task_struct(current);
404 bio->bi_private = current;
405 }
406 count_vm_event(PSWPIN); 406 count_vm_event(PSWPIN);
407 bio_get(bio); 407 bio_get(bio);
408 qc = submit_bio(bio); 408 qc = submit_bio(bio);
diff --git a/mm/shmem.c b/mm/shmem.c
index 1bb3b8dc8bb2..f4dce9c8670d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -614,7 +614,7 @@ static int shmem_add_to_page_cache(struct page *page,
614 if (xas_error(&xas)) 614 if (xas_error(&xas))
615 goto unlock; 615 goto unlock;
616next: 616next:
617 xas_store(&xas, page); 617 xas_store(&xas, page + i);
618 if (++i < nr) { 618 if (++i < nr) {
619 xas_next(&xas); 619 xas_next(&xas);
620 goto next; 620 goto next;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index eb714165afd2..85245fdec8d9 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -132,7 +132,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
132 for (i = 0; i < nr; i++) { 132 for (i = 0; i < nr; i++) {
133 VM_BUG_ON_PAGE(xas.xa_index != idx + i, page); 133 VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
134 set_page_private(page + i, entry.val + i); 134 set_page_private(page + i, entry.val + i);
135 xas_store(&xas, page); 135 xas_store(&xas, page + i);
136 xas_next(&xas); 136 xas_next(&xas);
137 } 137 }
138 address_space->nrpages += nr; 138 address_space->nrpages += nr;
@@ -167,7 +167,7 @@ void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
167 167
168 for (i = 0; i < nr; i++) { 168 for (i = 0; i < nr; i++) {
169 void *entry = xas_store(&xas, NULL); 169 void *entry = xas_store(&xas, NULL);
170 VM_BUG_ON_PAGE(entry != page, entry); 170 VM_BUG_ON_PAGE(entry != page + i, entry);
171 set_page_private(page + i, 0); 171 set_page_private(page + i, 0);
172 xas_next(&xas); 172 xas_next(&xas);
173 } 173 }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4c9e150e5ad3..0f76cca32a1c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -913,7 +913,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
913 unsigned long nva_start_addr, unsigned long size, 913 unsigned long nva_start_addr, unsigned long size,
914 enum fit_type type) 914 enum fit_type type)
915{ 915{
916 struct vmap_area *lva; 916 struct vmap_area *lva = NULL;
917 917
918 if (type == FL_FIT_TYPE) { 918 if (type == FL_FIT_TYPE) {
919 /* 919 /*
@@ -972,7 +972,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
972 if (type != FL_FIT_TYPE) { 972 if (type != FL_FIT_TYPE) {
973 augment_tree_propagate_from(va); 973 augment_tree_propagate_from(va);
974 974
975 if (type == NE_FIT_TYPE) 975 if (lva) /* type == NE_FIT_TYPE */
976 insert_vmap_area_augment(lva, &va->rb_node, 976 insert_vmap_area_augment(lva, &va->rb_node,
977 &free_vmap_area_root, &free_vmap_area_list); 977 &free_vmap_area_root, &free_vmap_area_list);
978 } 978 }
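The vmalloc hunks initialize lva to NULL and key the final insertion on the pointer itself rather than re-testing the fit type, making it explicit that only the NE_FIT_TYPE path ever allocates it. The same defensive pattern in miniature (illustrative, not the kernel code):

#include <stdio.h>
#include <stdlib.h>

enum fit_type { FL_FIT_TYPE, LE_FIT_TYPE, RE_FIT_TYPE, NE_FIT_TYPE };

static int adjust(enum fit_type type)
{
    int *lva = NULL;  /* only the NE case allocates this */

    if (type == NE_FIT_TYPE) {
        lva = malloc(sizeof(*lva));
        if (!lva)
            return -1;
        *lva = 42;
    }

    if (lva)  /* type == NE_FIT_TYPE */
        printf("inserting split-off area %d\n", *lva);

    free(lva);
    return 0;
}

int main(void)
{
    adjust(LE_FIT_TYPE);
    return adjust(NE_FIT_TYPE);
}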
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7889f583ced9..910e02c793ff 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3644,19 +3644,18 @@ out:
3644} 3644}
3645 3645
3646/* 3646/*
3647 * pgdat->kswapd_classzone_idx is the highest zone index that a recent 3647 * The pgdat->kswapd_classzone_idx is used to pass the highest zone index to be
3648 * allocation request woke kswapd for. When kswapd has not woken recently, 3648 * reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is not
3649 * the value is MAX_NR_ZONES which is not a valid index. This compares a 3649 * a valid index then either kswapd runs for first time or kswapd couldn't sleep
3650 * given classzone and returns it or the highest classzone index kswapd 3650 * after previous reclaim attempt (node is still unbalanced). In that case
3651 * was recently woke for. 3651 * return the zone index of the previous kswapd reclaim cycle.
3652 */ 3652 */
3653static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat, 3653static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
3654 enum zone_type classzone_idx) 3654 enum zone_type prev_classzone_idx)
3655{ 3655{
3656 if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES) 3656 if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
3657 return classzone_idx; 3657 return prev_classzone_idx;
3658 3658 return pgdat->kswapd_classzone_idx;
3659 return max(pgdat->kswapd_classzone_idx, classzone_idx);
3660} 3659}
3661 3660
3662static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, 3661static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
@@ -3797,7 +3796,7 @@ kswapd_try_sleep:
3797 3796
3798 /* Read the new order and classzone_idx */ 3797 /* Read the new order and classzone_idx */
3799 alloc_order = reclaim_order = pgdat->kswapd_order; 3798 alloc_order = reclaim_order = pgdat->kswapd_order;
3800 classzone_idx = kswapd_classzone_idx(pgdat, 0); 3799 classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
3801 pgdat->kswapd_order = 0; 3800 pgdat->kswapd_order = 0;
3802 pgdat->kswapd_classzone_idx = MAX_NR_ZONES; 3801 pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
3803 3802
@@ -3851,8 +3850,12 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
3851 if (!cpuset_zone_allowed(zone, gfp_flags)) 3850 if (!cpuset_zone_allowed(zone, gfp_flags))
3852 return; 3851 return;
3853 pgdat = zone->zone_pgdat; 3852 pgdat = zone->zone_pgdat;
3854 pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, 3853
3855 classzone_idx); 3854 if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
3855 pgdat->kswapd_classzone_idx = classzone_idx;
3856 else
3857 pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
3858 classzone_idx);
3856 pgdat->kswapd_order = max(pgdat->kswapd_order, order); 3859 pgdat->kswapd_order = max(pgdat->kswapd_order, order);
3857 if (!waitqueue_active(&pgdat->kswapd_wait)) 3860 if (!waitqueue_active(&pgdat->kswapd_wait))
3858 return; 3861 return;
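The vmscan hunks make MAX_NR_ZONES an explicit "no wakeup pending" sentinel: kswapd_classzone_idx() returns the caller's previous index when the field is unset, and wakeup_kswapd() only applies max() when a request is already recorded. A stand-alone model of that sentinel handling (toy constants and function names; only the bookkeeping is modelled, not the reclaim itself):

#include <stdio.h>

#define MAX_NR_ZONES 4  /* doubles as the "no request pending" sentinel */

/* Models kswapd_classzone_idx() after the change. */
static int effective_classzone_idx(int pending_idx, int prev_classzone_idx)
{
    if (pending_idx == MAX_NR_ZONES)
        return prev_classzone_idx;
    return pending_idx;
}

/* Models the wakeup_kswapd() hunk: set on first request, widen afterwards. */
static int record_wakeup(int pending_idx, int classzone_idx)
{
    if (pending_idx == MAX_NR_ZONES)
        return classzone_idx;               /* first request */
    return pending_idx > classzone_idx ?    /* widen, never shrink */
           pending_idx : classzone_idx;
}

int main(void)
{
    int pending = MAX_NR_ZONES;          /* kswapd idle, nothing pending */

    pending = record_wakeup(pending, 1); /* allocator wakes kswapd for zone 1 */
    pending = record_wakeup(pending, 3); /* second waker wants zone 3 */
    printf("reclaim up to zone %d\n", effective_classzone_idx(pending, 0));
    return 0;
}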
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 19d27bee285e..1555b0c6f7ec 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -160,10 +160,10 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
160 struct in6_addr *daddr, 160 struct in6_addr *daddr,
161 struct sk_buff *skb) 161 struct sk_buff *skb)
162{ 162{
163 struct lowpan_peer *peer;
164 struct in6_addr *nexthop;
165 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); 163 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
166 int count = atomic_read(&dev->peer_count); 164 int count = atomic_read(&dev->peer_count);
165 const struct in6_addr *nexthop;
166 struct lowpan_peer *peer;
167 167
168 BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt); 168 BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);
169 169
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 9f77432dbe38..5406d7cd46ad 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1353,7 +1353,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1353 * actually encrypted before enforcing a key size. 1353 * actually encrypted before enforcing a key size.
1354 */ 1354 */
1355 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || 1355 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1356 hcon->enc_key_size > HCI_MIN_ENC_KEY_SIZE); 1356 hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
1357} 1357}
1358 1358
1359static void l2cap_do_start(struct l2cap_chan *chan) 1359static void l2cap_do_start(struct l2cap_chan *chan)
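The l2cap change turns a strict '>' into '>=', so a link whose encryption key is exactly HCI_MIN_ENC_KEY_SIZE bytes is accepted rather than rejected. A one-function sketch of the boundary (the 7-byte constant mirrors the kernel's minimum; everything else is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define HCI_MIN_ENC_KEY_SIZE 7  /* minimum accepted key size, in bytes */

static bool check_enc_key_size(bool encrypted, int enc_key_size)
{
    /* '>=' (not '>'): a key of exactly the minimum size is acceptable */
    return !encrypted || enc_key_size >= HCI_MIN_ENC_KEY_SIZE;
}

int main(void)
{
    printf("7-byte key: %s\n", check_enc_key_size(true, 7) ? "ok" : "rejected");
    printf("6-byte key: %s\n", check_enc_key_size(true, 6) ? "ok" : "rejected");
    return 0;
}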
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 16f9159234a2..8c2ec35b6512 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -318,6 +318,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk
318static int ip_mc_finish_output(struct net *net, struct sock *sk, 318static int ip_mc_finish_output(struct net *net, struct sock *sk,
319 struct sk_buff *skb) 319 struct sk_buff *skb)
320{ 320{
321 struct rtable *new_rt;
321 int ret; 322 int ret;
322 323
323 ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb); 324 ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
@@ -326,6 +327,17 @@ static int ip_mc_finish_output(struct net *net, struct sock *sk,
326 return ret; 327 return ret;
327 } 328 }
328 329
330 /* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
331 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
332 * see ipv4_pktinfo_prepare().
333 */
334 new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
335 if (new_rt) {
336 new_rt->rt_iif = 0;
337 skb_dst_drop(skb);
338 skb_dst_set(skb, &new_rt->dst);
339 }
340
329 return dev_loopback_xmit(net, sk, skb); 341 return dev_loopback_xmit(net, sk, skb);
330} 342}
331 343
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 0b8e06ca75d6..40a6abbc9cf6 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -197,7 +197,7 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
197 } 197 }
198 sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol, 198 sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol,
199 iph->saddr, iph->daddr, 199 iph->saddr, iph->daddr,
200 skb->dev->ifindex, sdif); 200 dif, sdif);
201 } 201 }
202out: 202out:
203 read_unlock(&raw_v4_hashinfo.lock); 203 read_unlock(&raw_v4_hashinfo.lock);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6cb7cff22db9..8ea0735a6754 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1647,6 +1647,39 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
1647} 1647}
1648EXPORT_SYMBOL(rt_dst_alloc); 1648EXPORT_SYMBOL(rt_dst_alloc);
1649 1649
1650struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
1651{
1652 struct rtable *new_rt;
1653
1654 new_rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1655 rt->dst.flags);
1656
1657 if (new_rt) {
1658 new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1659 new_rt->rt_flags = rt->rt_flags;
1660 new_rt->rt_type = rt->rt_type;
1661 new_rt->rt_is_input = rt->rt_is_input;
1662 new_rt->rt_iif = rt->rt_iif;
1663 new_rt->rt_pmtu = rt->rt_pmtu;
1664 new_rt->rt_mtu_locked = rt->rt_mtu_locked;
1665 new_rt->rt_gw_family = rt->rt_gw_family;
1666 if (rt->rt_gw_family == AF_INET)
1667 new_rt->rt_gw4 = rt->rt_gw4;
1668 else if (rt->rt_gw_family == AF_INET6)
1669 new_rt->rt_gw6 = rt->rt_gw6;
1670 INIT_LIST_HEAD(&new_rt->rt_uncached);
1671
1672 new_rt->dst.flags |= DST_HOST;
1673 new_rt->dst.input = rt->dst.input;
1674 new_rt->dst.output = rt->dst.output;
1675 new_rt->dst.error = rt->dst.error;
1676 new_rt->dst.lastuse = jiffies;
1677 new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
1678 }
1679 return new_rt;
1680}
1681EXPORT_SYMBOL(rt_dst_clone);
1682
1650/* called in rcu_read_lock() section */ 1683/* called in rcu_read_lock() section */
1651int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, 1684int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1652 u8 tos, struct net_device *dev, 1685 u8 tos, struct net_device *dev,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 834475717110..21efcd02f337 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -59,8 +59,8 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
59{ 59{
60 struct dst_entry *dst = skb_dst(skb); 60 struct dst_entry *dst = skb_dst(skb);
61 struct net_device *dev = dst->dev; 61 struct net_device *dev = dst->dev;
62 const struct in6_addr *nexthop;
62 struct neighbour *neigh; 63 struct neighbour *neigh;
63 struct in6_addr *nexthop;
64 int ret; 64 int ret;
65 65
66 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { 66 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 11ad62effd56..97a843cf164c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -218,7 +218,8 @@ static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
218{ 218{
219 const struct rt6_info *rt = container_of(dst, struct rt6_info, dst); 219 const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);
220 220
221 return ip6_neigh_lookup(&rt->rt6i_gateway, dst->dev, skb, daddr); 221 return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
222 dst->dev, skb, daddr);
222} 223}
223 224
224static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr) 225static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
@@ -5281,7 +5282,7 @@ static struct ctl_table ipv6_route_table_template[] = {
5281 .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down, 5282 .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
5282 .maxlen = sizeof(int), 5283 .maxlen = sizeof(int),
5283 .mode = 0644, 5284 .mode = 0644,
5284 .proc_handler = proc_dointvec, 5285 .proc_handler = proc_dointvec_minmax,
5285 .extra1 = &zero, 5286 .extra1 = &zero,
5286 .extra2 = &one, 5287 .extra2 = &one,
5287 }, 5288 },
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index 241317473114..cdfc33517e85 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -439,9 +439,9 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
439 struct nf_flowtable *flow_table = priv; 439 struct nf_flowtable *flow_table = priv;
440 struct flow_offload_tuple tuple = {}; 440 struct flow_offload_tuple tuple = {};
441 enum flow_offload_tuple_dir dir; 441 enum flow_offload_tuple_dir dir;
442 const struct in6_addr *nexthop;
442 struct flow_offload *flow; 443 struct flow_offload *flow;
443 struct net_device *outdev; 444 struct net_device *outdev;
444 struct in6_addr *nexthop;
445 struct ipv6hdr *ip6h; 445 struct ipv6hdr *ip6h;
446 struct rt6_info *rt; 446 struct rt6_info *rt;
447 447
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index a29d66da7394..5f78df080573 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2401,6 +2401,9 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
2401 2401
2402 ts = __packet_set_timestamp(po, ph, skb); 2402 ts = __packet_set_timestamp(po, ph, skb);
2403 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts); 2403 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2404
2405 if (!packet_read_pending(&po->tx_ring))
2406 complete(&po->skb_completion);
2404 } 2407 }
2405 2408
2406 sock_wfree(skb); 2409 sock_wfree(skb);
@@ -2585,7 +2588,7 @@ static int tpacket_parse_header(struct packet_sock *po, void *frame,
2585 2588
2586static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) 2589static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2587{ 2590{
2588 struct sk_buff *skb; 2591 struct sk_buff *skb = NULL;
2589 struct net_device *dev; 2592 struct net_device *dev;
2590 struct virtio_net_hdr *vnet_hdr = NULL; 2593 struct virtio_net_hdr *vnet_hdr = NULL;
2591 struct sockcm_cookie sockc; 2594 struct sockcm_cookie sockc;
@@ -2600,6 +2603,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2600 int len_sum = 0; 2603 int len_sum = 0;
2601 int status = TP_STATUS_AVAILABLE; 2604 int status = TP_STATUS_AVAILABLE;
2602 int hlen, tlen, copylen = 0; 2605 int hlen, tlen, copylen = 0;
2606 long timeo = 0;
2603 2607
2604 mutex_lock(&po->pg_vec_lock); 2608 mutex_lock(&po->pg_vec_lock);
2605 2609
@@ -2646,12 +2650,21 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2646 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr) 2650 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2647 size_max = dev->mtu + reserve + VLAN_HLEN; 2651 size_max = dev->mtu + reserve + VLAN_HLEN;
2648 2652
2653 reinit_completion(&po->skb_completion);
2654
2649 do { 2655 do {
2650 ph = packet_current_frame(po, &po->tx_ring, 2656 ph = packet_current_frame(po, &po->tx_ring,
2651 TP_STATUS_SEND_REQUEST); 2657 TP_STATUS_SEND_REQUEST);
2652 if (unlikely(ph == NULL)) { 2658 if (unlikely(ph == NULL)) {
2653 if (need_wait && need_resched()) 2659 if (need_wait && skb) {
2654 schedule(); 2660 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2661 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2662 if (timeo <= 0) {
2663 err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2664 goto out_put;
2665 }
2666 }
2667 /* check for additional frames */
2655 continue; 2668 continue;
2656 } 2669 }
2657 2670
@@ -3207,6 +3220,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
3207 sock_init_data(sock, sk); 3220 sock_init_data(sock, sk);
3208 3221
3209 po = pkt_sk(sk); 3222 po = pkt_sk(sk);
3223 init_completion(&po->skb_completion);
3210 sk->sk_family = PF_PACKET; 3224 sk->sk_family = PF_PACKET;
3211 po->num = proto; 3225 po->num = proto;
3212 po->xmit = dev_queue_xmit; 3226 po->xmit = dev_queue_xmit;
@@ -4314,7 +4328,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4314 req3->tp_sizeof_priv || 4328 req3->tp_sizeof_priv ||
4315 req3->tp_feature_req_word) { 4329 req3->tp_feature_req_word) {
4316 err = -EINVAL; 4330 err = -EINVAL;
4317 goto out; 4331 goto out_free_pg_vec;
4318 } 4332 }
4319 } 4333 }
4320 break; 4334 break;
@@ -4378,6 +4392,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4378 prb_shutdown_retire_blk_timer(po, rb_queue); 4392 prb_shutdown_retire_blk_timer(po, rb_queue);
4379 } 4393 }
4380 4394
4395out_free_pg_vec:
4381 if (pg_vec) 4396 if (pg_vec)
4382 free_pg_vec(pg_vec, order, req->tp_block_nr); 4397 free_pg_vec(pg_vec, order, req->tp_block_nr);
4383out: 4398out:
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 3bb7c5fb3bff..c70a2794456f 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -128,6 +128,7 @@ struct packet_sock {
128 unsigned int tp_hdrlen; 128 unsigned int tp_hdrlen;
129 unsigned int tp_reserve; 129 unsigned int tp_reserve;
130 unsigned int tp_tstamp; 130 unsigned int tp_tstamp;
131 struct completion skb_completion;
131 struct net_device __rcu *cached_dev; 132 struct net_device __rcu *cached_dev;
132 int (*xmit)(struct sk_buff *skb); 133 int (*xmit)(struct sk_buff *skb);
133 struct packet_type prot_hook ____cacheline_aligned_in_smp; 134 struct packet_type prot_hook ____cacheline_aligned_in_smp;
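The af_packet hunks replace the need_resched()/schedule() busy loop in tpacket_snd() with a completion that tpacket_destruct_skb() signals once no TX-ring frames are pending, bounded by the socket send timeout. A user-space sketch of the same wait-with-timeout shape, using a pthread condition variable in place of the kernel completion (all names and delays are illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static int pending = 1;  /* frames still owned by the "driver" */

/* Plays the role of tpacket_destruct_skb(): the last pending frame completes. */
static void *tx_completion(void *arg)
{
    (void)arg;
    usleep(100 * 1000);
    pthread_mutex_lock(&lock);
    pending = 0;
    pthread_cond_signal(&done);
    pthread_mutex_unlock(&lock);
    return NULL;
}

/* Plays the role of the sender: wait for room, but give up after timeo_sec. */
static int wait_for_room(int timeo_sec)
{
    struct timespec deadline;
    int rc = 0;

    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += timeo_sec;

    pthread_mutex_lock(&lock);
    while (pending && rc == 0)
        rc = pthread_cond_timedwait(&done, &lock, &deadline);
    pthread_mutex_unlock(&lock);

    return rc == ETIMEDOUT ? -ETIMEDOUT : 0;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, tx_completion, NULL);
    printf("wait_for_room: %d\n", wait_for_room(2));
    pthread_join(t, NULL);
    return 0;
}

As in the diff, the sender sleeps until either the completion fires or the timeout expires, instead of repeatedly rescheduling itself.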
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index e16a3d37d2bc..732e109c3055 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -549,12 +549,17 @@ static struct notifier_block cbs_device_notifier = {
549 549
550static int __init cbs_module_init(void) 550static int __init cbs_module_init(void)
551{ 551{
552 int err = register_netdevice_notifier(&cbs_device_notifier); 552 int err;
553 553
554 err = register_netdevice_notifier(&cbs_device_notifier);
554 if (err) 555 if (err)
555 return err; 556 return err;
556 557
557 return register_qdisc(&cbs_qdisc_ops); 558 err = register_qdisc(&cbs_qdisc_ops);
559 if (err)
560 unregister_netdevice_notifier(&cbs_device_notifier);
561
562 return err;
558} 563}
559 564
560static void __exit cbs_module_exit(void) 565static void __exit cbs_module_exit(void)
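The sch_cbs fix is the classic two-step module-init pattern: if the second registration fails, the first one must be rolled back before returning the error, otherwise a stale netdevice notifier is left registered. A minimal sketch of that unwind order (stub registration functions, not the real qdisc API; the stubs deliberately fail at step two):

#include <stdio.h>

static int register_notifier(void)    { puts("notifier registered");     return 0; }
static void unregister_notifier(void) { puts("notifier unregistered");   }
static int register_qdisc(void)       { puts("qdisc registration fails"); return -1; }

static int module_init_sketch(void)
{
    int err;

    err = register_notifier();
    if (err)
        return err;

    err = register_qdisc();
    if (err)
        unregister_notifier();  /* undo step 1 when step 2 fails */

    return err;
}

int main(void)
{
    return module_init_sketch() ? 1 : 0;
}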
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index e358437ba29b..69cebb2c998b 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -118,10 +118,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
118 /* Initialize the bind addr area */ 118 /* Initialize the bind addr area */
119 sctp_bind_addr_init(&ep->base.bind_addr, 0); 119 sctp_bind_addr_init(&ep->base.bind_addr, 0);
120 120
121 /* Remember who we are attached to. */
122 ep->base.sk = sk;
123 sock_hold(ep->base.sk);
124
125 /* Create the lists of associations. */ 121 /* Create the lists of associations. */
126 INIT_LIST_HEAD(&ep->asocs); 122 INIT_LIST_HEAD(&ep->asocs);
127 123
@@ -154,6 +150,10 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
154 ep->prsctp_enable = net->sctp.prsctp_enable; 150 ep->prsctp_enable = net->sctp.prsctp_enable;
155 ep->reconf_enable = net->sctp.reconf_enable; 151 ep->reconf_enable = net->sctp.reconf_enable;
156 152
153 /* Remember who we are attached to. */
154 ep->base.sk = sk;
155 sock_hold(ep->base.sk);
156
157 return ep; 157 return ep;
158 158
159nomem_shkey: 159nomem_shkey:
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 0c874e996f85..7621ec2f539c 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -2029,7 +2029,7 @@ static int __init smc_init(void)
2029 2029
2030 rc = smc_pnet_init(); 2030 rc = smc_pnet_init();
2031 if (rc) 2031 if (rc)
2032 return rc; 2032 goto out_pernet_subsys;
2033 2033
2034 rc = smc_llc_init(); 2034 rc = smc_llc_init();
2035 if (rc) { 2035 if (rc) {
@@ -2080,6 +2080,9 @@ out_proto:
2080 proto_unregister(&smc_proto); 2080 proto_unregister(&smc_proto);
2081out_pnet: 2081out_pnet:
2082 smc_pnet_exit(); 2082 smc_pnet_exit();
2083out_pernet_subsys:
2084 unregister_pernet_subsys(&smc_net_ops);
2085
2083 return rc; 2086 return rc;
2084} 2087}
2085 2088
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 2d2850adc2a3..4ca50ddf8d16 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -652,7 +652,10 @@ create:
652 rc = smc_lgr_create(smc, ini); 652 rc = smc_lgr_create(smc, ini);
653 if (rc) 653 if (rc)
654 goto out; 654 goto out;
655 lgr = conn->lgr;
656 write_lock_bh(&lgr->conns_lock);
655 smc_lgr_register_conn(conn); /* add smc conn to lgr */ 657 smc_lgr_register_conn(conn); /* add smc conn to lgr */
658 write_unlock_bh(&lgr->conns_lock);
656 } 659 }
657 conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE; 660 conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
658 conn->local_tx_ctrl.len = SMC_WR_TX_SIZE; 661 conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 027a3b07d329..0004535c0188 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -211,9 +211,14 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id,
211 /* Save client advertised inbound read limit for use later in accept. */ 211 /* Save client advertised inbound read limit for use later in accept. */
212 newxprt->sc_ord = param->initiator_depth; 212 newxprt->sc_ord = param->initiator_depth;
213 213
214 /* Set the local and remote addresses in the transport */
215 sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr; 214 sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
216 svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa)); 215 svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
216 /* The remote port is arbitrary and not under the control of the
217 * client ULP. Set it to a fixed value so that the DRC continues
218 * to be effective after a reconnect.
219 */
220 rpc_set_port((struct sockaddr *)&newxprt->sc_xprt.xpt_remote, 0);
221
217 sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr; 222 sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
218 svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa)); 223 svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));
219 224
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index c69951ed2ebc..36652352a38c 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -950,6 +950,8 @@ static int xs_local_send_request(struct rpc_rqst *req)
950 struct sock_xprt *transport = 950 struct sock_xprt *transport =
951 container_of(xprt, struct sock_xprt, xprt); 951 container_of(xprt, struct sock_xprt, xprt);
952 struct xdr_buf *xdr = &req->rq_snd_buf; 952 struct xdr_buf *xdr = &req->rq_snd_buf;
953 rpc_fraghdr rm = xs_stream_record_marker(xdr);
954 unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
953 int status; 955 int status;
954 int sent = 0; 956 int sent = 0;
955 957
@@ -964,9 +966,7 @@ static int xs_local_send_request(struct rpc_rqst *req)
964 966
965 req->rq_xtime = ktime_get(); 967 req->rq_xtime = ktime_get();
966 status = xs_sendpages(transport->sock, NULL, 0, xdr, 968 status = xs_sendpages(transport->sock, NULL, 0, xdr,
967 transport->xmit.offset, 969 transport->xmit.offset, rm, &sent);
968 xs_stream_record_marker(xdr),
969 &sent);
970 dprintk("RPC: %s(%u) = %d\n", 970 dprintk("RPC: %s(%u) = %d\n",
971 __func__, xdr->len - transport->xmit.offset, status); 971 __func__, xdr->len - transport->xmit.offset, status);
972 972
@@ -976,7 +976,7 @@ static int xs_local_send_request(struct rpc_rqst *req)
976 if (likely(sent > 0) || status == 0) { 976 if (likely(sent > 0) || status == 0) {
977 transport->xmit.offset += sent; 977 transport->xmit.offset += sent;
978 req->rq_bytes_sent = transport->xmit.offset; 978 req->rq_bytes_sent = transport->xmit.offset;
979 if (likely(req->rq_bytes_sent >= req->rq_slen)) { 979 if (likely(req->rq_bytes_sent >= msglen)) {
980 req->rq_xmit_bytes_sent += transport->xmit.offset; 980 req->rq_xmit_bytes_sent += transport->xmit.offset;
981 transport->xmit.offset = 0; 981 transport->xmit.offset = 0;
982 return 0; 982 return 0;
@@ -1097,6 +1097,8 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
1097 struct rpc_xprt *xprt = req->rq_xprt; 1097 struct rpc_xprt *xprt = req->rq_xprt;
1098 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1098 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1099 struct xdr_buf *xdr = &req->rq_snd_buf; 1099 struct xdr_buf *xdr = &req->rq_snd_buf;
1100 rpc_fraghdr rm = xs_stream_record_marker(xdr);
1101 unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
1100 bool vm_wait = false; 1102 bool vm_wait = false;
1101 int status; 1103 int status;
1102 int sent; 1104 int sent;
@@ -1122,9 +1124,7 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
1122 while (1) { 1124 while (1) {
1123 sent = 0; 1125 sent = 0;
1124 status = xs_sendpages(transport->sock, NULL, 0, xdr, 1126 status = xs_sendpages(transport->sock, NULL, 0, xdr,
1125 transport->xmit.offset, 1127 transport->xmit.offset, rm, &sent);
1126 xs_stream_record_marker(xdr),
1127 &sent);
1128 1128
1129 dprintk("RPC: xs_tcp_send_request(%u) = %d\n", 1129 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
1130 xdr->len - transport->xmit.offset, status); 1130 xdr->len - transport->xmit.offset, status);
@@ -1133,7 +1133,7 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
1133 * reset the count of bytes sent. */ 1133 * reset the count of bytes sent. */
1134 transport->xmit.offset += sent; 1134 transport->xmit.offset += sent;
1135 req->rq_bytes_sent = transport->xmit.offset; 1135 req->rq_bytes_sent = transport->xmit.offset;
1136 if (likely(req->rq_bytes_sent >= req->rq_slen)) { 1136 if (likely(req->rq_bytes_sent >= msglen)) {
1137 req->rq_xmit_bytes_sent += transport->xmit.offset; 1137 req->rq_xmit_bytes_sent += transport->xmit.offset;
1138 transport->xmit.offset = 0; 1138 transport->xmit.offset = 0;
1139 return 0; 1139 return 0;
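The xprtsock hunks count the 4-byte stream record marker into the "message fully sent" test, so a request is only treated as complete once the marker plus the RPC payload have gone out. The comparison in isolation, with illustrative byte counts (rpc_fraghdr is a 32-bit word; the real accounting in xs_sendpages() is more involved):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t rpc_fraghdr;  /* the TCP/AF_LOCAL record marker */

int main(void)
{
    unsigned int rq_slen = 100;  /* RPC payload bytes */
    rpc_fraghdr rm = 1;          /* non-zero: stream transport, marker in use */
    unsigned int msglen = rm ? rq_slen + sizeof(rm) : rq_slen;
    unsigned int bytes_sent = 100;  /* payload accounted, marker still queued */

    printf("old test (>= rq_slen): %s\n",
           bytes_sent >= rq_slen ? "declared done too early" : "keep sending");
    printf("new test (>= msglen): %s\n",
           bytes_sent >= msglen ? "done" : "keep sending");
    return 0;
}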
diff --git a/net/tipc/core.c b/net/tipc/core.c
index ed536c05252a..c8370722f0bb 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -134,7 +134,7 @@ static int __init tipc_init(void)
134 if (err) 134 if (err)
135 goto out_sysctl; 135 goto out_sysctl;
136 136
137 err = register_pernet_subsys(&tipc_net_ops); 137 err = register_pernet_device(&tipc_net_ops);
138 if (err) 138 if (err)
139 goto out_pernet; 139 goto out_pernet;
140 140
@@ -142,7 +142,7 @@ static int __init tipc_init(void)
142 if (err) 142 if (err)
143 goto out_socket; 143 goto out_socket;
144 144
145 err = register_pernet_subsys(&tipc_topsrv_net_ops); 145 err = register_pernet_device(&tipc_topsrv_net_ops);
146 if (err) 146 if (err)
147 goto out_pernet_topsrv; 147 goto out_pernet_topsrv;
148 148
@@ -153,11 +153,11 @@ static int __init tipc_init(void)
153 pr_info("Started in single node mode\n"); 153 pr_info("Started in single node mode\n");
154 return 0; 154 return 0;
155out_bearer: 155out_bearer:
156 unregister_pernet_subsys(&tipc_topsrv_net_ops); 156 unregister_pernet_device(&tipc_topsrv_net_ops);
157out_pernet_topsrv: 157out_pernet_topsrv:
158 tipc_socket_stop(); 158 tipc_socket_stop();
159out_socket: 159out_socket:
160 unregister_pernet_subsys(&tipc_net_ops); 160 unregister_pernet_device(&tipc_net_ops);
161out_pernet: 161out_pernet:
162 tipc_unregister_sysctl(); 162 tipc_unregister_sysctl();
163out_sysctl: 163out_sysctl:
@@ -172,9 +172,9 @@ out_netlink:
172static void __exit tipc_exit(void) 172static void __exit tipc_exit(void)
173{ 173{
174 tipc_bearer_cleanup(); 174 tipc_bearer_cleanup();
175 unregister_pernet_subsys(&tipc_topsrv_net_ops); 175 unregister_pernet_device(&tipc_topsrv_net_ops);
176 tipc_socket_stop(); 176 tipc_socket_stop();
177 unregister_pernet_subsys(&tipc_net_ops); 177 unregister_pernet_device(&tipc_net_ops);
178 tipc_netlink_stop(); 178 tipc_netlink_stop();
179 tipc_netlink_compat_stop(); 179 tipc_netlink_compat_stop();
180 tipc_unregister_sysctl(); 180 tipc_unregister_sysctl();
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index c6a04c09d075..cf155061c472 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -445,7 +445,11 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
445 if (!bearer) 445 if (!bearer)
446 return -EMSGSIZE; 446 return -EMSGSIZE;
447 447
448 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME); 448 len = TLV_GET_DATA_LEN(msg->req);
449 if (len <= 0)
450 return -EINVAL;
451
452 len = min_t(int, len, TIPC_MAX_BEARER_NAME);
449 if (!string_is_valid(name, len)) 453 if (!string_is_valid(name, len))
450 return -EINVAL; 454 return -EINVAL;
451 455
@@ -539,7 +543,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
539 543
540 name = (char *)TLV_DATA(msg->req); 544 name = (char *)TLV_DATA(msg->req);
541 545
542 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME); 546 len = TLV_GET_DATA_LEN(msg->req);
547 if (len <= 0)
548 return -EINVAL;
549
550 len = min_t(int, len, TIPC_MAX_BEARER_NAME);
543 if (!string_is_valid(name, len)) 551 if (!string_is_valid(name, len))
544 return -EINVAL; 552 return -EINVAL;
545 553
@@ -817,7 +825,11 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
817 if (!link) 825 if (!link)
818 return -EMSGSIZE; 826 return -EMSGSIZE;
819 827
820 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME); 828 len = TLV_GET_DATA_LEN(msg->req);
829 if (len <= 0)
830 return -EINVAL;
831
832 len = min_t(int, len, TIPC_MAX_BEARER_NAME);
821 if (!string_is_valid(name, len)) 833 if (!string_is_valid(name, len))
822 return -EINVAL; 834 return -EINVAL;
823 835
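The netlink_compat hunks validate the TLV payload length before clamping it: a zero or negative TLV_GET_DATA_LEN() result is rejected with -EINVAL instead of being fed to the string check. A stand-alone sketch of that order of checks (toy parsing function, not the TIPC wire format; the 32-byte limit mirrors the bearer-name bound used in the diff):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define TIPC_MAX_BEARER_NAME 32

/* Is the string NUL-terminated within the first "len" bytes? */
static bool string_is_valid(const char *s, int len)
{
    return memchr(s, '\0', len) != NULL;
}

static int parse_bearer_name(const char *name, int data_len)
{
    int len;

    /* New order: reject bogus lengths first, then clamp. */
    if (data_len <= 0)
        return -EINVAL;

    len = data_len < TIPC_MAX_BEARER_NAME ? data_len : TIPC_MAX_BEARER_NAME;
    if (!string_is_valid(name, len))
        return -EINVAL;

    printf("bearer \"%s\"\n", name);
    return 0;
}

int main(void)
{
    parse_bearer_name("eth:data0", 10);  /* ok */
    parse_bearer_name("eth:data0", -6);  /* rejected up front */
    return 0;
}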
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index fc81ae18cc44..e2b69e805d46 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -279,7 +279,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
279 goto skip_tx_cleanup; 279 goto skip_tx_cleanup;
280 } 280 }
281 281
282 if (!tls_complete_pending_work(sk, ctx, 0, &timeo)) 282 if (unlikely(sk->sk_write_pending) &&
283 !wait_on_pending_writer(sk, &timeo))
283 tls_handle_open_record(sk, 0); 284 tls_handle_open_record(sk, 0);
284 285
285 /* We need these for tls_sw_fallback handling of other packets */ 286 /* We need these for tls_sw_fallback handling of other packets */
diff --git a/samples/pidfd/pidfd-metadata.c b/samples/pidfd/pidfd-metadata.c
index 14b454448429..c459155daf9a 100644
--- a/samples/pidfd/pidfd-metadata.c
+++ b/samples/pidfd/pidfd-metadata.c
@@ -83,7 +83,7 @@ static int pidfd_metadata_fd(pid_t pid, int pidfd)
83 83
84int main(int argc, char *argv[]) 84int main(int argc, char *argv[])
85{ 85{
86 int pidfd = 0, ret = EXIT_FAILURE; 86 int pidfd = -1, ret = EXIT_FAILURE;
87 char buf[4096] = { 0 }; 87 char buf[4096] = { 0 };
88 pid_t pid; 88 pid_t pid;
89 int procfd, statusfd; 89 int procfd, statusfd;
@@ -91,7 +91,11 @@ int main(int argc, char *argv[])
91 91
92 pid = pidfd_clone(CLONE_PIDFD, &pidfd); 92 pid = pidfd_clone(CLONE_PIDFD, &pidfd);
93 if (pid < 0) 93 if (pid < 0)
94 exit(ret); 94 err(ret, "CLONE_PIDFD");
95 if (pidfd == -1) {
96 warnx("CLONE_PIDFD is not supported by the kernel");
97 goto out;
98 }
95 99
96 procfd = pidfd_metadata_fd(pid, pidfd); 100 procfd = pidfd_metadata_fd(pid, pidfd);
97 close(pidfd); 101 close(pidfd);
diff --git a/sound/core/seq/oss/seq_oss_ioctl.c b/sound/core/seq/oss/seq_oss_ioctl.c
index 96ad01fb668c..ccf682689ec9 100644
--- a/sound/core/seq/oss/seq_oss_ioctl.c
+++ b/sound/core/seq/oss/seq_oss_ioctl.c
@@ -49,7 +49,7 @@ static int snd_seq_oss_oob_user(struct seq_oss_devinfo *dp, void __user *arg)
49 if (copy_from_user(ev, arg, 8)) 49 if (copy_from_user(ev, arg, 8))
50 return -EFAULT; 50 return -EFAULT;
51 memset(&tmpev, 0, sizeof(tmpev)); 51 memset(&tmpev, 0, sizeof(tmpev));
52 snd_seq_oss_fill_addr(dp, &tmpev, dp->addr.port, dp->addr.client); 52 snd_seq_oss_fill_addr(dp, &tmpev, dp->addr.client, dp->addr.port);
53 tmpev.time.tick = 0; 53 tmpev.time.tick = 0;
54 if (! snd_seq_oss_process_event(dp, (union evrec *)ev, &tmpev)) { 54 if (! snd_seq_oss_process_event(dp, (union evrec *)ev, &tmpev)) {
55 snd_seq_oss_dispatch(dp, &tmpev, 0, 0); 55 snd_seq_oss_dispatch(dp, &tmpev, 0, 0);
diff --git a/sound/core/seq/oss/seq_oss_rw.c b/sound/core/seq/oss/seq_oss_rw.c
index 79ef430e56e1..537d5f423e20 100644
--- a/sound/core/seq/oss/seq_oss_rw.c
+++ b/sound/core/seq/oss/seq_oss_rw.c
@@ -161,7 +161,7 @@ insert_queue(struct seq_oss_devinfo *dp, union evrec *rec, struct file *opt)
161 memset(&event, 0, sizeof(event)); 161 memset(&event, 0, sizeof(event));
162 /* set dummy -- to be sure */ 162 /* set dummy -- to be sure */
163 event.type = SNDRV_SEQ_EVENT_NOTEOFF; 163 event.type = SNDRV_SEQ_EVENT_NOTEOFF;
164 snd_seq_oss_fill_addr(dp, &event, dp->addr.port, dp->addr.client); 164 snd_seq_oss_fill_addr(dp, &event, dp->addr.client, dp->addr.port);
165 165
166 if (snd_seq_oss_process_event(dp, rec, &event)) 166 if (snd_seq_oss_process_event(dp, rec, &event))
167 return 0; /* invalid event - no need to insert queue */ 167 return 0; /* invalid event - no need to insert queue */
diff --git a/sound/firewire/amdtp-am824.c b/sound/firewire/amdtp-am824.c
index cc6eb30f03a2..71168728940a 100644
--- a/sound/firewire/amdtp-am824.c
+++ b/sound/firewire/amdtp-am824.c
@@ -320,7 +320,7 @@ static void read_midi_messages(struct amdtp_stream *s,
320 u8 *b; 320 u8 *b;
321 321
322 for (f = 0; f < frames; f++) { 322 for (f = 0; f < frames; f++) {
323 port = (s->data_block_counter + f) % 8; 323 port = (8 - s->tx_first_dbc + s->data_block_counter + f) % 8;
324 b = (u8 *)&buffer[p->midi_position]; 324 b = (u8 *)&buffer[p->midi_position];
325 325
326 len = b[0] - 0x80; 326 len = b[0] - 0x80;
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index 6907dbefd08c..3842f9d34b7c 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -400,27 +400,33 @@ static void setup_fg_nodes(struct hdac_device *codec)
400int snd_hdac_refresh_widgets(struct hdac_device *codec, bool sysfs) 400int snd_hdac_refresh_widgets(struct hdac_device *codec, bool sysfs)
401{ 401{
402 hda_nid_t start_nid; 402 hda_nid_t start_nid;
403 int nums, err; 403 int nums, err = 0;
404 404
405 /*
406 * Serialize against multiple threads trying to update the sysfs
407 * widgets array.
408 */
409 mutex_lock(&codec->widget_lock);
405 nums = snd_hdac_get_sub_nodes(codec, codec->afg, &start_nid); 410 nums = snd_hdac_get_sub_nodes(codec, codec->afg, &start_nid);
406 if (!start_nid || nums <= 0 || nums >= 0xff) { 411 if (!start_nid || nums <= 0 || nums >= 0xff) {
407 dev_err(&codec->dev, "cannot read sub nodes for FG 0x%02x\n", 412 dev_err(&codec->dev, "cannot read sub nodes for FG 0x%02x\n",
408 codec->afg); 413 codec->afg);
409 return -EINVAL; 414 err = -EINVAL;
415 goto unlock;
410 } 416 }
411 417
412 if (sysfs) { 418 if (sysfs) {
413 mutex_lock(&codec->widget_lock);
414 err = hda_widget_sysfs_reinit(codec, start_nid, nums); 419 err = hda_widget_sysfs_reinit(codec, start_nid, nums);
415 mutex_unlock(&codec->widget_lock);
416 if (err < 0) 420 if (err < 0)
417 return err; 421 goto unlock;
418 } 422 }
419 423
420 codec->num_nodes = nums; 424 codec->num_nodes = nums;
421 codec->start_nid = start_nid; 425 codec->start_nid = start_nid;
422 codec->end_nid = start_nid + nums; 426 codec->end_nid = start_nid + nums;
423 return 0; 427unlock:
428 mutex_unlock(&codec->widget_lock);
429 return err;
424} 430}
425EXPORT_SYMBOL_GPL(snd_hdac_refresh_widgets); 431EXPORT_SYMBOL_GPL(snd_hdac_refresh_widgets);
426 432
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 5b3c26991f26..6f3a35949cdd 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2448,9 +2448,10 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2448 SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), 2448 SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
2449 SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), 2449 SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
2450 SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), 2450 SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
2451 SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS), 2451 SND_PCI_QUIRK(0x1558, 0x96e1, "Clevo P960[ER][CDFN]-K", ALC1220_FIXUP_CLEVO_P950),
2452 SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS), 2452 SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950),
2453 SND_PCI_QUIRK(0x1558, 0x65d1, "Tuxedo Book XC1509", ALC1220_FIXUP_CLEVO_PB51ED_PINS), 2453 SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
2454 SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
2454 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), 2455 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
2455 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), 2456 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
2456 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), 2457 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
@@ -7074,6 +7075,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7074 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 7075 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
7075 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 7076 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
7076 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 7077 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
7078 SND_PCI_QUIRK(0x17aa, 0x3111, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
7077 SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 7079 SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
7078 SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 7080 SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
7079 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 7081 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index 21127e4958b2..2c03e0f6bf72 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -556,6 +556,11 @@ int line6_init_pcm(struct usb_line6 *line6,
556 line6pcm->max_packet_size_out = 556 line6pcm->max_packet_size_out =
557 usb_maxpacket(line6->usbdev, 557 usb_maxpacket(line6->usbdev,
558 usb_sndisocpipe(line6->usbdev, ep_write), 1); 558 usb_sndisocpipe(line6->usbdev, ep_write), 1);
559 if (!line6pcm->max_packet_size_in || !line6pcm->max_packet_size_out) {
560 dev_err(line6pcm->line6->ifcdev,
561 "cannot get proper max packet size\n");
562 return -EINVAL;
563 }
559 564
560 spin_lock_init(&line6pcm->out.lock); 565 spin_lock_init(&line6pcm->out.lock);
561 spin_lock_init(&line6pcm->in.lock); 566 spin_lock_init(&line6pcm->in.lock);
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 1f6011f36bb0..199fa157a411 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -741,7 +741,7 @@ static int snd_ni_control_init_val(struct usb_mixer_interface *mixer,
741 return err; 741 return err;
742 } 742 }
743 743
744 kctl->private_value |= (value << 24); 744 kctl->private_value |= ((unsigned int)value << 24);
745 return 0; 745 return 0;
746} 746}
747 747
@@ -902,7 +902,7 @@ static int snd_ftu_eff_switch_init(struct usb_mixer_interface *mixer,
902 if (err < 0) 902 if (err < 0)
903 return err; 903 return err;
904 904
905 kctl->private_value |= value[0] << 24; 905 kctl->private_value |= (unsigned int)value[0] << 24;
906 return 0; 906 return 0;
907} 907}
908 908
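The mixer_quirks hunks cast the control value to unsigned int before shifting it into bits 24..31 of private_value, avoiding a signed shift into the sign bit (undefined behaviour) and the sign extension that a negative int result would cause when OR'ed into an unsigned long. A short, well-defined demonstration with an illustrative value (the sign-extended case is modelled with an explicit constant rather than by executing the undefined shift):

#include <stdio.h>

int main(void)
{
    unsigned long private_value = 0;
    int value = 0xFF;  /* e.g. a control value with the top bit set */

    /* Correct: widen to unsigned before shifting into bits 24..31. */
    private_value |= (unsigned int)value << 24;
    printf("with cast:     %#lx\n", private_value);

    /* Without the cast, a negative 32-bit result sign-extends into the
     * upper half of the unsigned long; -16777216 is 0xff000000 as an int. */
    long sign_extended = -16777216;
    private_value = 0;
    private_value |= sign_extended;
    printf("sign-extended: %#lx\n", private_value);
    return 0;
}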
diff --git a/tools/arch/x86/include/uapi/asm/perf_regs.h b/tools/arch/x86/include/uapi/asm/perf_regs.h
index ac67bbea10ca..7c9d2bb3833b 100644
--- a/tools/arch/x86/include/uapi/asm/perf_regs.h
+++ b/tools/arch/x86/include/uapi/asm/perf_regs.h
@@ -52,4 +52,7 @@ enum perf_event_x86_regs {
52 /* These include both GPRs and XMMX registers */ 52 /* These include both GPRs and XMMX registers */
53 PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2, 53 PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2,
54}; 54};
55
56#define PERF_REG_EXTENDED_MASK (~((1ULL << PERF_REG_X86_XMM0) - 1))
57
55#endif /* _ASM_X86_PERF_REGS_H */ 58#endif /* _ASM_X86_PERF_REGS_H */
diff --git a/tools/perf/arch/x86/include/perf_regs.h b/tools/perf/arch/x86/include/perf_regs.h
index b7cd91a9014f..b7321337d100 100644
--- a/tools/perf/arch/x86/include/perf_regs.h
+++ b/tools/perf/arch/x86/include/perf_regs.h
@@ -9,7 +9,6 @@
9void perf_regs_load(u64 *regs); 9void perf_regs_load(u64 *regs);
10 10
11#define PERF_REGS_MAX PERF_REG_X86_XMM_MAX 11#define PERF_REGS_MAX PERF_REG_X86_XMM_MAX
12#define PERF_XMM_REGS_MASK (~((1ULL << PERF_REG_X86_XMM0) - 1))
13#ifndef HAVE_ARCH_X86_64_SUPPORT 12#ifndef HAVE_ARCH_X86_64_SUPPORT
14#define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1) 13#define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1)
15#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32 14#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32
diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c
index 7886ca5263e3..3666c0076df9 100644
--- a/tools/perf/arch/x86/util/perf_regs.c
+++ b/tools/perf/arch/x86/util/perf_regs.c
@@ -277,7 +277,7 @@ uint64_t arch__intr_reg_mask(void)
277 .type = PERF_TYPE_HARDWARE, 277 .type = PERF_TYPE_HARDWARE,
278 .config = PERF_COUNT_HW_CPU_CYCLES, 278 .config = PERF_COUNT_HW_CPU_CYCLES,
279 .sample_type = PERF_SAMPLE_REGS_INTR, 279 .sample_type = PERF_SAMPLE_REGS_INTR,
280 .sample_regs_intr = PERF_XMM_REGS_MASK, 280 .sample_regs_intr = PERF_REG_EXTENDED_MASK,
281 .precise_ip = 1, 281 .precise_ip = 1,
282 .disabled = 1, 282 .disabled = 1,
283 .exclude_kernel = 1, 283 .exclude_kernel = 1,
@@ -293,7 +293,7 @@ uint64_t arch__intr_reg_mask(void)
293 fd = sys_perf_event_open(&attr, 0, -1, -1, 0); 293 fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
294 if (fd != -1) { 294 if (fd != -1) {
295 close(fd); 295 close(fd);
296 return (PERF_XMM_REGS_MASK | PERF_REGS_MASK); 296 return (PERF_REG_EXTENDED_MASK | PERF_REGS_MASK);
297 } 297 }
298 298
299 return PERF_REGS_MASK; 299 return PERF_REGS_MASK;
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index 698c08f851b8..8995092d541e 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -279,6 +279,51 @@ static void idr_align_test(struct idr *idr)
 	}
 }
 
+DEFINE_IDR(find_idr);
+
+static void *idr_throbber(void *arg)
+{
+	time_t start = time(NULL);
+	int id = *(int *)arg;
+
+	rcu_register_thread();
+	do {
+		idr_alloc(&find_idr, xa_mk_value(id), id, id + 1, GFP_KERNEL);
+		idr_remove(&find_idr, id);
+	} while (time(NULL) < start + 10);
+	rcu_unregister_thread();
+
+	return NULL;
+}
+
+void idr_find_test_1(int anchor_id, int throbber_id)
+{
+	pthread_t throbber;
+	time_t start = time(NULL);
+
+	pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
+
+	BUG_ON(idr_alloc(&find_idr, xa_mk_value(anchor_id), anchor_id,
+				anchor_id + 1, GFP_KERNEL) != anchor_id);
+
+	do {
+		int id = 0;
+		void *entry = idr_get_next(&find_idr, &id);
+		BUG_ON(entry != xa_mk_value(id));
+	} while (time(NULL) < start + 11);
+
+	pthread_join(throbber, NULL);
+
+	idr_remove(&find_idr, anchor_id);
+	BUG_ON(!idr_is_empty(&find_idr));
+}
+
+void idr_find_test(void)
+{
+	idr_find_test_1(100000, 0);
+	idr_find_test_1(0, 100000);
+}
+
 void idr_checks(void)
 {
 	unsigned long i;
@@ -360,6 +405,7 @@ void idr_checks(void)
 	idr_u32_test(1);
 	idr_u32_test(0);
 	idr_align_test(&idr);
+	idr_find_test();
 }
 
 #define module_init(x)
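The new idr_find_test() above loops over idr_get_next() while a second thread repeatedly inserts and removes another ID, checking that the walk always returns an entry matching the index it is reported under. For context, a hedged sketch of the in-kernel iteration pattern that idr_get_next() backs (the IDR name and the loop body are illustrative, not from the patch):

#include <linux/idr.h>
#include <linux/printk.h>

static DEFINE_IDR(example_idr);

static void example_walk(void)
{
	void *entry;
	int id;

	/* idr_for_each_entry() is built on repeated idr_get_next() calls;
	 * the regression test above checks that concurrent insert/remove
	 * in another thread cannot make the walk return a stale entry. */
	idr_for_each_entry(&example_idr, entry, id)
		pr_info("id %d -> %p\n", id, entry);
}
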
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index b38260e29775..241919ef1eac 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -146,6 +146,7 @@ int main(int argc, char *argv[])
 	kvm_vm_restart(vm, O_RDWR);
 	vm_vcpu_add(vm, VCPU_ID, 0, 0);
 	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+	vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
 	vcpu_load_state(vm, VCPU_ID, state);
 	run = vcpu_state(vm, VCPU_ID);
 	free(state);
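The added vcpu_ioctl() re-enables the enlightened VMCS capability on the freshly re-created vCPU before its saved state is loaded, presumably so the nested state can be restored with eVMCS active. As a rough sketch only, this is roughly what KVM_ENABLE_CAP looks like at the raw ioctl level; enable_evmcs_cap is defined earlier in the test, outside this hunk, so the exact structure here is an assumption:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Assumed shape of the capability request; KVM writes the supported
 * eVMCS version back through the pointer passed in args[0]. */
static int enable_evmcs(int vcpu_fd)
{
	uint16_t evmcs_ver;
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
		.args[0] = (unsigned long)&evmcs_ver,
	};

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}
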
diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore
index ba919308fe30..d503b8764a8e 100644
--- a/tools/testing/selftests/powerpc/mm/.gitignore
+++ b/tools/testing/selftests/powerpc/mm/.gitignore
@@ -3,4 +3,5 @@ subpage_prot
 tempfile
 prot_sao
 segv_errors
-wild_bctr
\ No newline at end of file
+wild_bctr
+large_vm_fork_separation
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index 43d68420e363..f1fbc15800c4 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -2,7 +2,8 @@
 noarg:
 	$(MAKE) -C ../
 
-TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr
+TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \
+		  large_vm_fork_separation
 TEST_GEN_FILES := tempfile
 
 top_srcdir = ../../../../..
@@ -13,6 +14,7 @@ $(TEST_GEN_PROGS): ../harness.c
 $(OUTPUT)/prot_sao: ../utils.c
 
 $(OUTPUT)/wild_bctr: CFLAGS += -m64
+$(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64
 
 $(OUTPUT)/tempfile:
 	dd if=/dev/zero of=$@ bs=64k count=1
diff --git a/tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c b/tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c
new file mode 100644
index 000000000000..2363a7f3ab0d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2019, Michael Ellerman, IBM Corp.
+//
+// Test that allocating memory beyond the memory limit and then forking is
+// handled correctly, ie. the child is able to access the mappings beyond the
+// memory limit and the child's writes are not visible to the parent.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+
+#ifndef MAP_FIXED_NOREPLACE
+#define MAP_FIXED_NOREPLACE	MAP_FIXED	// "Should be safe" above 512TB
+#endif
+
+
+static int test(void)
+{
+	int p2c[2], c2p[2], rc, status, c, *p;
+	unsigned long page_size;
+	pid_t pid;
+
+	page_size = sysconf(_SC_PAGESIZE);
+	SKIP_IF(page_size != 65536);
+
+	// Create a mapping at 512TB to allocate an extended_id
+	p = mmap((void *)(512ul << 40), page_size, PROT_READ | PROT_WRITE,
+		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
+	if (p == MAP_FAILED) {
+		perror("mmap");
+		printf("Error: couldn't mmap(), confirm kernel has 4TB support?\n");
+		return 1;
+	}
+
+	printf("parent writing %p = 1\n", p);
+	*p = 1;
+
+	FAIL_IF(pipe(p2c) == -1 || pipe(c2p) == -1);
+
+	pid = fork();
+	if (pid == 0) {
+		FAIL_IF(read(p2c[0], &c, 1) != 1);
+
+		pid = getpid();
+		printf("child writing %p = %d\n", p, pid);
+		*p = pid;
+
+		FAIL_IF(write(c2p[1], &c, 1) != 1);
+		FAIL_IF(read(p2c[0], &c, 1) != 1);
+		exit(0);
+	}
+
+	c = 0;
+	FAIL_IF(write(p2c[1], &c, 1) != 1);
+	FAIL_IF(read(c2p[0], &c, 1) != 1);
+
+	// Prevent compiler optimisation
+	barrier();
+
+	rc = 0;
+	printf("parent reading %p = %d\n", p, *p);
+	if (*p != 1) {
+		printf("Error: BUG! parent saw child's write! *p = %d\n", *p);
+		rc = 1;
+	}
+
+	FAIL_IF(write(p2c[1], &c, 1) != 1);
+	FAIL_IF(waitpid(pid, &status, 0) == -1);
+	FAIL_IF(!WIFEXITED(status) || WEXITSTATUS(status));
+
+	if (rc == 0)
+		printf("success: test completed OK\n");
+
+	return rc;
+}
+
+int main(void)
+{
+	return test_harness(test, "large_vm_fork_separation");
+}
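The new test maps a page at 512TB with MAP_FIXED_NOREPLACE (falling back to plain MAP_FIXED on older headers), writes to it, then forks and checks that the child's write never becomes visible to the parent. A small hedged sketch of why the test prefers MAP_FIXED_NOREPLACE; the fallback constant below is the generic Linux value and is only illustrative, and the helper is not part of the patch:

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE 0x100000	/* generic Linux value, for older libc headers */
#endif

/* Unlike MAP_FIXED, MAP_FIXED_NOREPLACE fails with EEXIST instead of
 * silently replacing whatever is already mapped at the hint address. */
static void *map_exactly_at(void *hint, size_t len)
{
	void *p = mmap(hint, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);

	if (p == MAP_FAILED && errno == EEXIST)
		fprintf(stderr, "address %p is already in use\n", hint);

	return p;
}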