-rw-r--r--  CREDITS | 8
-rw-r--r--  Documentation/admin-guide/cgroup-v2.rst | 9
-rw-r--r--  Documentation/vm/hmm.rst | 8
-rw-r--r--  MAINTAINERS | 24
-rw-r--r--  arch/arm/boot/compressed/decompress.c | 1
-rw-r--r--  arch/mips/kvm/mips.c | 3
-rw-r--r--  arch/parisc/configs/c8000_defconfig | 1
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 2
-rw-r--r--  arch/powerpc/kernel/kexec_elf_64.c | 6
-rw-r--r--  arch/powerpc/kvm/book3s.c | 1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 36
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 48
-rw-r--r--  arch/powerpc/kvm/book3s_rtas.c | 14
-rw-r--r--  arch/powerpc/kvm/book3s_xive.c | 55
-rw-r--r--  arch/powerpc/kvm/book3s_xive.h | 1
-rw-r--r--  arch/powerpc/kvm/book3s_xive_native.c | 100
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 3
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 6
-rw-r--r--  arch/powerpc/perf/power8-pmu.c | 3
-rw-r--r--  arch/powerpc/perf/power9-pmu.c | 3
-rw-r--r--  arch/powerpc/platforms/powernv/opal-imc.c | 4
-rw-r--r--  arch/s390/crypto/aes_s390.c | 156
-rw-r--r--  arch/s390/crypto/des_s390.c | 7
-rw-r--r--  arch/s390/include/asm/ap.h | 4
-rw-r--r--  arch/s390/include/asm/cpacf.h | 4
-rw-r--r--  arch/s390/include/asm/pci_clp.h | 25
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 1
-rw-r--r--  arch/s390/mm/fault.c | 4
-rw-r--r--  arch/s390/pci/pci.c | 5
-rw-r--r--  arch/s390/pci/pci_clp.c | 6
-rw-r--r--  arch/x86/kernel/ima_arch.c | 5
-rw-r--r--  arch/x86/kvm/x86.c | 3
-rw-r--r--  block/blk-core.c | 81
-rw-r--r--  block/blk-mq-cpumap.c | 10
-rw-r--r--  block/blk-mq-pci.c | 2
-rw-r--r--  block/blk-mq-rdma.c | 4
-rw-r--r--  block/blk-mq-virtio.c | 4
-rw-r--r--  block/blk-mq.c | 5
-rw-r--r--  block/blk-rq-qos.c | 7
-rw-r--r--  block/blk-sysfs.c | 47
-rw-r--r--  block/blk-throttle.c | 2
-rw-r--r--  block/blk.h | 1
-rw-r--r--  block/bsg-lib.c | 1
-rw-r--r--  block/genhd.c | 4
-rw-r--r--  block/partitions/ldm.c | 2
-rw-r--r--  drivers/acpi/device_pm.c | 4
-rw-r--r--  drivers/acpi/sleep.c | 39
-rw-r--r--  drivers/block/loop.c | 18
-rw-r--r--  drivers/i2c/busses/i2c-mlxcpld.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-synquacer.c | 2
-rw-r--r--  drivers/i2c/i2c-dev.c | 1
-rw-r--r--  drivers/iommu/intel-iommu.c | 3
-rw-r--r--  drivers/leds/led-core.c | 5
-rw-r--r--  drivers/leds/trigger/ledtrig-timer.c | 5
-rw-r--r--  drivers/pci/pci-acpi.c | 3
-rw-r--r--  drivers/pci/pci-driver.c | 17
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 26
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 3
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 17
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 1
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 9
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 55
-rw-r--r--  drivers/s390/scsi/zfcp_unit.c | 8
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 4
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 6
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_phy.c | 3
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 2
-rw-r--r--  drivers/thermal/qcom/tsens-common.c | 14
-rw-r--r--  drivers/thermal/qcom/tsens-v0_1.c | 1
-rw-r--r--  drivers/thermal/qcom/tsens-v2.c | 1
-rw-r--r--  drivers/thermal/qcom/tsens.c | 5
-rw-r--r--  drivers/thermal/qcom/tsens.h | 1
-rw-r--r--  drivers/xen/pvcalls-front.c | 4
-rw-r--r--  drivers/xen/xenbus/xenbus.h | 3
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_frontend.c | 18
-rw-r--r--  drivers/xen/xenbus/xenbus_xs.c | 7
-rw-r--r--  fs/block_dev.c | 25
-rw-r--r--  fs/cifs/dfs_cache.c | 4
-rw-r--r--  fs/cifs/file.c | 4
-rw-r--r--  fs/cifs/smb2pdu.c | 9
-rw-r--r--  fs/io_uring.c | 2
-rw-r--r--  fs/lockd/xdr.c | 4
-rw-r--r--  fs/lockd/xdr4.c | 4
-rw-r--r--  fs/ocfs2/filecheck.c | 1
-rw-r--r--  include/linux/cgroup-defs.h | 5
-rw-r--r--  include/linux/generic-radix-tree.h | 2
-rw-r--r--  include/linux/list_lru.h | 1
-rw-r--r--  include/linux/memcontrol.h | 10
-rw-r--r--  include/linux/pci.h | 1
-rw-r--r--  include/linux/suspend.h | 2
-rw-r--r--  kernel/cgroup/cgroup.c | 16
-rw-r--r--  kernel/fork.c | 2
-rw-r--r--  kernel/power/hibernate.c | 4
-rw-r--r--  kernel/signal.c | 2
-rw-r--r--  kernel/sys.c | 62
-rw-r--r--  lib/sort.c | 15
-rw-r--r--  mm/compaction.c | 2
-rw-r--r--  mm/gup.c | 15
-rw-r--r--  mm/kasan/common.c | 2
-rw-r--r--  mm/list_lru.c | 8
-rw-r--r--  mm/util.c | 4
-rw-r--r--  mm/vmalloc.c | 2
-rw-r--r--  mm/z3fold.c | 11
-rw-r--r--  scripts/gcc-plugins/gcc-common.h | 4
-rw-r--r--  scripts/gdb/linux/constants.py.in | 3
-rwxr-xr-x  scripts/spdxcheck.py | 7
-rw-r--r--  security/integrity/evm/evm_crypto.c | 3
-rw-r--r--  security/integrity/ima/ima_policy.c | 28
-rw-r--r--  virt/kvm/arm/arm.c | 3
-rw-r--r--  virt/kvm/kvm_main.c | 4
111 files changed, 813 insertions(+), 473 deletions(-)
diff --git a/CREDITS b/CREDITS
index 8e0342620a06..681335f42491 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3364,6 +3364,14 @@ S: Braunschweiger Strasse 79
 S: 31134 Hildesheim
 S: Germany
 
+N: Martin Schwidefsky
+D: Martin was the most significant contributor to the initial s390
+D: port of the Linux Kernel and later the maintainer of the s390
+D: architecture backend for almost two decades.
+D: He passed away in 2019, and will be greatly missed.
+S: Germany
+W: https://lwn.net/Articles/789028/
+
 N: Marcel Selhorst
 E: tpmdd@selhorst.net
 D: TPM driver
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 88e746074252..cf88c1f98270 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -177,6 +177,15 @@ cgroup v2 currently supports the following mount options.
	ignored on non-init namespace mounts. Please refer to the
	Delegation section for details.
 
+  memory_localevents
+
+	Only populate memory.events with data for the current cgroup,
+	and not any subtrees. This is legacy behaviour, the default
+	behaviour without this option is to include subtree counts.
+	This option is system wide and can only be set on mount or
+	modified through remount from the init namespace. The mount
+	option is ignored on non-init namespace mounts.
+
 
 Organizing Processes and Threads
 --------------------------------
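The memory_localevents paragraph added above documents a system-wide cgroup2 mount option. As a hedged illustration only (this is not part of the patch), requesting that option from C via mount(2) could look like the following sketch; the mount point path is hypothetical, as init systems normally mount cgroup2 at /sys/fs/cgroup.

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/*
		 * Hypothetical mount point. The data string carries the
		 * mount option described in the patch above; cgroup2
		 * parses it like any other filesystem mount option.
		 */
		if (mount("none", "/mnt/cgroup2", "cgroup2", 0,
			  "memory_localevents") != 0) {
			perror("mount cgroup2");
			return 1;
		}
		return 0;
	}

Because the option is system wide, a remount from the init namespace is the only way to toggle it afterwards.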
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index ec1efa32af3c..7cdf7282e022 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -288,15 +288,17 @@ For instance if the device flags for device entries are:
     WRITE (1 << 62)
 
 Now let say that device driver wants to fault with at least read a range then
-it does set:
-    range->default_flags = (1 << 63)
+it does set::
+
+    range->default_flags = (1 << 63);
     range->pfn_flags_mask = 0;
 
 and calls hmm_range_fault() as described above. This will fill fault all page
 in the range with at least read permission.
 
 Now let say driver wants to do the same except for one page in the range for
-which its want to have write. Now driver set:
+which its want to have write. Now driver set::
+
     range->default_flags = (1 << 63);
     range->pfn_flags_mask = (1 << 62);
     range->pfns[index_of_write] = (1 << 62);
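Pulling the two corrected snippets together, the pattern the documentation describes could be wrapped as in the following hedged C sketch. Only range->default_flags, range->pfn_flags_mask, range->pfns[] and hmm_range_fault() come from the text above; the EXAMPLE_* macro names are hypothetical, and the hmm_range_fault() call signature is simplified here (it varies across kernel versions).

	/* Hedged sketch of the hmm.rst pattern above; kernel-internal API. */
	#include <linux/hmm.h>

	/* Bit values follow the document's example flag encoding. */
	#define EXAMPLE_READ_FLAG  (1ULL << 63)
	#define EXAMPLE_WRITE_FLAG (1ULL << 62)

	static long fault_range_one_writable(struct hmm_range *range,
					     unsigned long index_of_write)
	{
		/* Fault every page with at least read permission... */
		range->default_flags = EXAMPLE_READ_FLAG;
		/* ...but honor a per-pfn WRITE request for one page. */
		range->pfn_flags_mask = EXAMPLE_WRITE_FLAG;
		range->pfns[index_of_write] = EXAMPLE_WRITE_FLAG;

		/* Simplified call; exact signature is version-dependent. */
		return hmm_range_fault(range);
	}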
diff --git a/MAINTAINERS b/MAINTAINERS
index 429c6c624861..a6954776a37e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -696,6 +696,7 @@ F: drivers/input/mouse/alps.*
 ALTERA I2C CONTROLLER DRIVER
 M:	Thor Thayer <thor.thayer@linux.intel.com>
 S:	Maintained
+F:	Documentation/devicetree/bindings/i2c/i2c-altera.txt
 F:	drivers/i2c/busses/i2c-altera.c
 
 ALTERA MAILBOX DRIVER
@@ -1174,6 +1175,7 @@ S: Maintained
 F:	Documentation/devicetree/bindings/arm/arm-boards
 F:	Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt
 F:	Documentation/devicetree/bindings/clock/arm-integrator.txt
+F:	Documentation/devicetree/bindings/i2c/i2c-versatile.txt
 F:	Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.txt
 F:	Documentation/devicetree/bindings/mtd/arm-versatile.txt
 F:	arch/arm/mach-integrator/
@@ -1781,6 +1783,7 @@ ARM/LPC18XX ARCHITECTURE
 M:	Vladimir Zapolskiy <vz@mleia.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
+F:	Documentation/devicetree/bindings/i2c/i2c-lpc2k.txt
 F:	arch/arm/boot/dts/lpc43*
 F:	drivers/i2c/busses/i2c-lpc2k.c
 F:	drivers/memory/pl172.c
@@ -1794,6 +1797,7 @@ M: Sylvain Lemieux <slemieux.tyco@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:	git git://github.com/vzapolskiy/linux-lpc32xx.git
 S:	Maintained
+F:	Documentation/devicetree/bindings/i2c/i2c-pnx.txt
 F:	arch/arm/boot/dts/lpc32*
 F:	arch/arm/mach-lpc32xx/
 F:	drivers/i2c/busses/i2c-pnx.c
@@ -1918,6 +1922,8 @@ ARM/NOMADIK/U300/Ux500 ARCHITECTURES
 M:	Linus Walleij <linus.walleij@linaro.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
+F:	Documentation/devicetree/bindings/i2c/i2c-nomadik.txt
+F:	Documentation/devicetree/bindings/i2c/i2c-stu300.txt
 F:	arch/arm/mach-nomadik/
 F:	arch/arm/mach-u300/
 F:	arch/arm/mach-ux500/
@@ -2140,6 +2146,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-rockchip@lists.infradead.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip.git
 S:	Maintained
+F:	Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
 F:	arch/arm/boot/dts/rk3*
 F:	arch/arm/boot/dts/rv1108*
 F:	arch/arm/mach-rockchip/
@@ -2275,6 +2282,7 @@ M: Patrice Chotard <patrice.chotard@st.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.stlinux.com
 S:	Maintained
+F:	Documentation/devicetree/bindings/i2c/i2c-st.txt
 F:	arch/arm/mach-sti/
 F:	arch/arm/boot/dts/sti*
 F:	drivers/char/hw_random/st-rng.c
@@ -2466,6 +2474,7 @@ ARM/VT8500 ARM ARCHITECTURE
 M:	Tony Prisk <linux@prisktech.co.nz>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
+F:	Documentation/devicetree/bindings/i2c/i2c-wmt.txt
 F:	arch/arm/mach-vt8500/
 F:	drivers/clocksource/timer-vt8500.c
 F:	drivers/i2c/busses/i2c-wmt.c
@@ -2531,6 +2540,8 @@ F: drivers/cpuidle/cpuidle-zynq.c
 F:	drivers/block/xsysace.c
 N:	zynq
 N:	xilinx
+F:	Documentation/devicetree/bindings/i2c/i2c-cadence.txt
+F:	Documentation/devicetree/bindings/i2c/i2c-xiic.txt
 F:	drivers/clocksource/timer-cadence-ttc.c
 F:	drivers/i2c/busses/i2c-cadence.c
 F:	drivers/mmc/host/sdhci-of-arasan.c
@@ -3049,8 +3060,9 @@ S: Maintained
 F:	arch/riscv/net/
 
 BPF JIT for S390
-M:	Martin Schwidefsky <schwidefsky@de.ibm.com>
 M:	Heiko Carstens <heiko.carstens@de.ibm.com>
+M:	Vasily Gorbik <gor@linux.ibm.com>
+M:	Christian Borntraeger <borntraeger@de.ibm.com>
 L:	netdev@vger.kernel.org
 L:	bpf@vger.kernel.org
 S:	Maintained
@@ -7341,6 +7353,7 @@ I2C MV64XXX MARVELL AND ALLWINNER DRIVER
 M:	Gregory CLEMENT <gregory.clement@bootlin.com>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
+F:	Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
 F:	drivers/i2c/busses/i2c-mv64xxx.c
 
 I2C OVER PARALLEL PORT
@@ -11724,6 +11737,7 @@ M: Peter Korsgaard <peter@korsgaard.com>
 M:	Andrew Lunn <andrew@lunn.ch>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
+F:	Documentation/devicetree/bindings/i2c/i2c-ocores.txt
 F:	Documentation/i2c/busses/i2c-ocores
 F:	drivers/i2c/busses/i2c-ocores.c
 F:	include/linux/platform_data/i2c-ocores.h
@@ -13363,6 +13377,7 @@ F: drivers/clk/renesas/
 RENESAS EMEV2 I2C DRIVER
 M:	Wolfram Sang <wsa+renesas@sang-engineering.com>
 S:	Supported
+F:	Documentation/devicetree/bindings/i2c/i2c-emev2.txt
 F:	drivers/i2c/busses/i2c-emev2.c
 
 RENESAS ETHERNET DRIVERS
@@ -13384,6 +13399,8 @@ F: drivers/iio/adc/rcar-gyroadc.c
 RENESAS R-CAR I2C DRIVERS
 M:	Wolfram Sang <wsa+renesas@sang-engineering.com>
 S:	Supported
+F:	Documentation/devicetree/bindings/i2c/i2c-rcar.txt
+F:	Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt
 F:	drivers/i2c/busses/i2c-rcar.c
 F:	drivers/i2c/busses/i2c-sh_mobile.c
 
@@ -13614,8 +13631,9 @@ S: Maintained
 F:	drivers/video/fbdev/savage/
 
 S390
-M:	Martin Schwidefsky <schwidefsky@de.ibm.com>
 M:	Heiko Carstens <heiko.carstens@de.ibm.com>
+M:	Vasily Gorbik <gor@linux.ibm.com>
+M:	Christian Borntraeger <borntraeger@de.ibm.com>
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git
@@ -15672,6 +15690,7 @@ R: Bartosz Golaszewski <bgolaszewski@baylibre.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
 S:	Supported
+F:	Documentation/devicetree/bindings/i2c/i2c-davinci.txt
 F:	arch/arm/mach-davinci/
 F:	drivers/i2c/busses/i2c-davinci.c
 F:	arch/arm/boot/dts/da850*
@@ -17375,6 +17394,7 @@ M: Jan Glauber <jglauber@cavium.com>
 L:	linux-i2c@vger.kernel.org
 W:	http://www.cavium.com
 S:	Supported
+F:	Documentation/devicetree/bindings/i2c/i2c-xlp9xx.txt
 F:	drivers/i2c/busses/i2c-xlp9xx.c
 
 XRA1403 GPIO EXPANDER
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
index c16c1829a5e4..aa075d8372ea 100644
--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -32,6 +32,7 @@
 extern char * strstr(const char * s1, const char *s2);
 extern size_t strlen(const char *s);
 extern int memcmp(const void *cs, const void *ct, size_t count);
+extern char * strchrnul(const char *, int);
 
 #ifdef CONFIG_KERNEL_GZIP
 #include "../../../../lib/decompress_inflate.c"
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 6d0517ac18e5..0369f26ab96d 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1122,6 +1122,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
+	case KVM_CAP_MAX_VCPU_ID:
+		r = KVM_MAX_VCPU_ID;
+		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index 088ab948a5ca..900b00084953 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -225,7 +225,6 @@ CONFIG_UNUSED_SYMBOLS=y
 CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_SLAB=y
-CONFIG_DEBUG_SLAB_LEAK=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_DEBUG_STACKOVERFLOW=y
 CONFIG_PANIC_ON_OOPS=y
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 013c76a0a03e..d10df677c452 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -309,6 +309,7 @@ struct kvm_arch {
 #ifdef CONFIG_PPC_BOOK3S_64
	struct list_head spapr_tce_tables;
	struct list_head rtas_tokens;
+	struct mutex rtas_token_lock;
	DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 #endif
 #ifdef CONFIG_KVM_MPIC
@@ -325,6 +326,7 @@ struct kvm_arch {
 #endif
	struct kvmppc_ops *kvm_ops;
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	struct mutex mmu_setup_lock;	/* nests inside vcpu mutexes */
	u64 l1_ptcr;
	int max_nested_lpid;
	struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS];
diff --git a/arch/powerpc/kernel/kexec_elf_64.c b/arch/powerpc/kernel/kexec_elf_64.c
index ba4f18a43ee8..52a29fc73730 100644
--- a/arch/powerpc/kernel/kexec_elf_64.c
+++ b/arch/powerpc/kernel/kexec_elf_64.c
@@ -547,6 +547,7 @@ static int elf_exec_load(struct kimage *image, struct elfhdr *ehdr,
		kbuf.memsz = phdr->p_memsz;
		kbuf.buf_align = phdr->p_align;
		kbuf.buf_min = phdr->p_paddr + base;
+		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			goto out;
@@ -581,7 +582,8 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ppc64_rma_size };
	struct kexec_buf pbuf = { .image = image, .buf_min = 0,
-				  .buf_max = ppc64_rma_size, .top_down = true };
+				  .buf_max = ppc64_rma_size, .top_down = true,
+				  .mem = KEXEC_BUF_MEM_UNKNOWN };
 
	ret = build_elf_exec_info(kernel_buf, kernel_len, &ehdr, &elf_info);
	if (ret)
@@ -606,6 +608,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
		kbuf.bufsz = kbuf.memsz = initrd_len;
		kbuf.buf_align = PAGE_SIZE;
		kbuf.top_down = false;
+		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			goto out;
@@ -638,6 +641,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
	kbuf.bufsz = kbuf.memsz = fdt_size;
	kbuf.buf_align = PAGE_SIZE;
	kbuf.top_down = true;
+	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	ret = kexec_add_buffer(&kbuf);
	if (ret)
		goto out;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 61a212d0daf0..ac5664845aca 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -902,6 +902,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 #ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+	mutex_init(&kvm->arch.rtas_token_lock);
 #endif
 
	return kvm->arch.kvm_ops->init_vm(kvm);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index ab3d484c5e2e..51971311e6c9 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -63,7 +63,7 @@ struct kvm_resize_hpt {
	struct work_struct work;
	u32 order;
 
-	/* These fields protected by kvm->lock */
+	/* These fields protected by kvm->arch.mmu_setup_lock */
 
	/* Possible values and their usage:
	 * <0 an error occurred during allocation,
@@ -73,7 +73,7 @@ struct kvm_resize_hpt {
	int error;
 
	/* Private to the work thread, until error != -EBUSY,
-	 * then protected by kvm->lock.
+	 * then protected by kvm->arch.mmu_setup_lock.
	 */
	struct kvm_hpt_info hpt;
 };
@@ -139,7 +139,7 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
	long err = -EBUSY;
	struct kvm_hpt_info info;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
	if (kvm->arch.mmu_ready) {
		kvm->arch.mmu_ready = 0;
		/* order mmu_ready vs. vcpus_running */
@@ -183,7 +183,7 @@ out:
	/* Ensure that each vcpu will flush its TLB on next entry. */
	cpumask_setall(&kvm->arch.need_tlb_flush);
 
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return err;
 }
 
@@ -1447,7 +1447,7 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
 
 static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
 {
-	if (WARN_ON(!mutex_is_locked(&kvm->lock)))
+	if (WARN_ON(!mutex_is_locked(&kvm->arch.mmu_setup_lock)))
		return;
 
	if (!resize)
@@ -1474,14 +1474,14 @@ static void resize_hpt_prepare_work(struct work_struct *work)
	if (WARN_ON(resize->error != -EBUSY))
		return;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 
	/* Request is still current? */
	if (kvm->arch.resize_hpt == resize) {
		/* We may request large allocations here:
-		 * do not sleep with kvm->lock held for a while.
+		 * do not sleep with kvm->arch.mmu_setup_lock held for a while.
		 */
-		mutex_unlock(&kvm->lock);
+		mutex_unlock(&kvm->arch.mmu_setup_lock);
 
		resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
				 resize->order);
@@ -1494,9 +1494,9 @@ static void resize_hpt_prepare_work(struct work_struct *work)
		if (WARN_ON(err == -EBUSY))
			err = -EINPROGRESS;
 
-		mutex_lock(&kvm->lock);
+		mutex_lock(&kvm->arch.mmu_setup_lock);
		/* It is possible that kvm->arch.resize_hpt != resize
-		 * after we grab kvm->lock again.
+		 * after we grab kvm->arch.mmu_setup_lock again.
		 */
	}
 
@@ -1505,7 +1505,7 @@ static void resize_hpt_prepare_work(struct work_struct *work)
	if (kvm->arch.resize_hpt != resize)
		resize_hpt_release(kvm, resize);
 
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
 }
 
 long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
@@ -1522,7 +1522,7 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
	if (shift && ((shift < 18) || (shift > 46)))
		return -EINVAL;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 
	resize = kvm->arch.resize_hpt;
 
@@ -1565,7 +1565,7 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
		ret = 100; /* estimated time in ms */
 
 out:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return ret;
 }
 
@@ -1588,7 +1588,7 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
	if (shift && ((shift < 18) || (shift > 46)))
		return -EINVAL;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 
	resize = kvm->arch.resize_hpt;
 
@@ -1625,7 +1625,7 @@ out:
	smp_mb();
 out_no_hpt:
	resize_hpt_release(kvm, resize);
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return ret;
 }
 
@@ -1868,7 +1868,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
		return -EINVAL;
 
	/* lock out vcpus from running while we're doing this */
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
	mmu_ready = kvm->arch.mmu_ready;
	if (mmu_ready) {
		kvm->arch.mmu_ready = 0;	/* temporarily */
@@ -1876,7 +1876,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.mmu_ready = 1;
-			mutex_unlock(&kvm->lock);
+			mutex_unlock(&kvm->arch.mmu_setup_lock);
			return -EBUSY;
		}
	}
@@ -1963,7 +1963,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
	/* Order HPTE updates vs. mmu_ready */
	smp_wmb();
	kvm->arch.mmu_ready = mmu_ready;
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
 
	if (err)
		return err;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index d5fc624e0655..5e840113eda4 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -446,12 +446,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 
 static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
 {
-	struct kvm_vcpu *ret;
-
-	mutex_lock(&kvm->lock);
-	ret = kvm_get_vcpu_by_id(kvm, id);
-	mutex_unlock(&kvm->lock);
-	return ret;
+	return kvm_get_vcpu_by_id(kvm, id);
 }
 
 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
@@ -1583,7 +1578,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;
 
-	mutex_lock(&kvm->lock);
	spin_lock(&vc->lock);
	/*
	 * If ILE (interrupt little-endian) has changed, update the
@@ -1623,7 +1617,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
		mask &= 0xFFFFFFFF;
	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
	spin_unlock(&vc->lock);
-	mutex_unlock(&kvm->lock);
 }
 
 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
@@ -2338,11 +2331,17 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
			pr_devel("KVM: collision on id %u", id);
			vcore = NULL;
		} else if (!vcore) {
+			/*
+			 * Take mmu_setup_lock for mutual exclusion
+			 * with kvmppc_update_lpcr().
+			 */
			err = -ENOMEM;
			vcore = kvmppc_vcore_create(kvm,
					id & ~(kvm->arch.smt_mode - 1));
+			mutex_lock(&kvm->arch.mmu_setup_lock);
			kvm->arch.vcores[core] = vcore;
			kvm->arch.online_vcores++;
+			mutex_unlock(&kvm->arch.mmu_setup_lock);
		}
	}
	mutex_unlock(&kvm->lock);
@@ -3663,6 +3662,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
	vc->in_guest = 0;
 
	mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb());
+	mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
 
	kvmhv_load_host_pmu();
 
@@ -3859,7 +3859,7 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
	int r = 0;
	struct kvm *kvm = vcpu->kvm;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
	if (!kvm->arch.mmu_ready) {
		if (!kvm_is_radix(kvm))
			r = kvmppc_hv_setup_htab_rma(vcpu);
@@ -3869,7 +3869,7 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
			kvm->arch.mmu_ready = 1;
		}
	}
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return r;
 }
 
@@ -4091,16 +4091,20 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
		kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
	}
 
-	trace_hardirqs_on();
	guest_enter_irqoff();
 
	srcu_idx = srcu_read_lock(&kvm->srcu);
 
	this_cpu_disable_ftrace();
 
+	/* Tell lockdep that we're about to enable interrupts */
+	trace_hardirqs_on();
+
	trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr);
	vcpu->arch.trap = trap;
 
+	trace_hardirqs_off();
+
	this_cpu_enable_ftrace();
 
	srcu_read_unlock(&kvm->srcu, srcu_idx);
@@ -4110,7 +4114,6 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
		isync();
	}
 
-	trace_hardirqs_off();
	set_irq_happened(trap);
 
	kvmppc_set_host_core(pcpu);
@@ -4478,7 +4481,8 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
 
 /*
  * Update LPCR values in kvm->arch and in vcores.
- * Caller must hold kvm->lock.
+ * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
+ * of kvm->arch.lpcr update).
  */
 void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
 {
@@ -4530,7 +4534,7 @@ void kvmppc_setup_partition_table(struct kvm *kvm)
 
 /*
  * Set up HPT (hashed page table) and RMA (real-mode area).
- * Must be called with kvm->lock held.
+ * Must be called with kvm->arch.mmu_setup_lock held.
  */
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 {
@@ -4618,7 +4622,10 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
	goto out_srcu;
 }
 
-/* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
+/*
+ * Must be called with kvm->arch.mmu_setup_lock held and
+ * mmu_ready = 0 and no vcpus running.
+ */
 int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
 {
	if (nesting_enabled(kvm))
@@ -4635,7 +4642,10 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
	return 0;
 }
 
-/* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
+/*
+ * Must be called with kvm->arch.mmu_setup_lock held and
+ * mmu_ready = 0 and no vcpus running.
+ */
 int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
 {
	int err;
@@ -4740,6 +4750,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
	char buf[32];
	int ret;
 
+	mutex_init(&kvm->arch.mmu_setup_lock);
+
	/* Allocate the guest's logical partition ID */
 
	lpid = kvmppc_alloc_lpid();
@@ -5265,7 +5277,7 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
	if (kvmhv_on_pseries() && !radix)
		return -EINVAL;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
	if (radix != kvm_is_radix(kvm)) {
		if (kvm->arch.mmu_ready) {
			kvm->arch.mmu_ready = 0;
@@ -5293,7 +5305,7 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
	err = 0;
 
  out_unlock:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return err;
 }
 
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index 4e178c4c1ea5..b7ae3dfbf00e 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -146,7 +146,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name)
 {
	struct rtas_token_definition *d, *tmp;
 
-	lockdep_assert_held(&kvm->lock);
+	lockdep_assert_held(&kvm->arch.rtas_token_lock);
 
	list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
		if (rtas_name_matches(d->handler->name, name)) {
@@ -167,7 +167,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
	bool found;
	int i;
 
-	lockdep_assert_held(&kvm->lock);
+	lockdep_assert_held(&kvm->arch.rtas_token_lock);
 
	list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
		if (d->token == token)
@@ -206,14 +206,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.rtas_token_lock);
 
	if (args.token)
		rc = rtas_token_define(kvm, args.name, args.token);
	else
		rc = rtas_token_undefine(kvm, args.name);
 
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.rtas_token_lock);
 
	return rc;
 }
@@ -245,7 +245,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
	orig_rets = args.rets;
	args.rets = &args.args[be32_to_cpu(args.nargs)];
 
-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
 
	rc = -ENOENT;
	list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
@@ -256,7 +256,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
		}
	}
 
-	mutex_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);
 
	if (rc == 0) {
		args.rets = orig_rets;
@@ -282,8 +282,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm)
 {
	struct rtas_token_definition *d, *tmp;
 
-	lockdep_assert_held(&kvm->lock);
-
	list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
		list_del(&d->list);
		kfree(d);
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 4953957333b7..922fd62bcd2a 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -271,14 +271,14 @@ static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
	return rc;
 }
 
-/* Called with kvm_lock held */
+/* Called with xive->lock held */
 static int xive_check_provisioning(struct kvm *kvm, u8 prio)
 {
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvm_vcpu *vcpu;
	int i, rc;
 
-	lockdep_assert_held(&kvm->lock);
+	lockdep_assert_held(&xive->lock);
 
	/* Already provisioned ? */
	if (xive->qmap & (1 << prio))
@@ -621,9 +621,12 @@ int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
		 irq, server, priority);
 
	/* First, check provisioning of queues */
-	if (priority != MASKED)
+	if (priority != MASKED) {
+		mutex_lock(&xive->lock);
		rc = xive_check_provisioning(xive->kvm,
			      xive_prio_from_guest(priority));
+		mutex_unlock(&xive->lock);
+	}
	if (rc) {
		pr_devel(" provisioning failure %d !\n", rc);
		return rc;
@@ -1199,7 +1202,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
		return -ENOMEM;
 
	/* We need to synchronize with queue provisioning */
-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&xive->lock);
	vcpu->arch.xive_vcpu = xc;
	xc->xive = xive;
	xc->vcpu = vcpu;
@@ -1283,7 +1286,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
	xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
 
 bail:
-	mutex_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&xive->lock);
	if (r) {
		kvmppc_xive_cleanup_vcpu(vcpu);
		return r;
@@ -1527,13 +1530,12 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
 struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
	struct kvmppc_xive *xive, int irq)
 {
-	struct kvm *kvm = xive->kvm;
	struct kvmppc_xive_src_block *sb;
	int i, bid;
 
	bid = irq >> KVMPPC_XICS_ICS_SHIFT;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&xive->lock);
 
	/* block already exists - somebody else got here first */
	if (xive->src_blocks[bid])
@@ -1560,7 +1562,7 @@ struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
		xive->max_sbid = bid;
 
 out:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&xive->lock);
	return xive->src_blocks[bid];
 }
 
@@ -1670,9 +1672,9 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
	/* If we have a priority target the interrupt */
	if (act_prio != MASKED) {
		/* First, check provisioning of queues */
-		mutex_lock(&xive->kvm->lock);
+		mutex_lock(&xive->lock);
		rc = xive_check_provisioning(xive->kvm, act_prio);
-		mutex_unlock(&xive->kvm->lock);
+		mutex_unlock(&xive->lock);
 
		/* Target interrupt */
		if (rc == 0)
@@ -1826,7 +1828,6 @@ static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
 {
	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(hw_num, 0, MASKED, 0);
-	xive_cleanup_irq_data(xd);
 }
 
 void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
@@ -1840,9 +1841,10 @@ void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
			continue;
 
		kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
+		xive_cleanup_irq_data(&state->ipi_data);
		xive_native_free_irq(state->ipi_number);
 
-		/* Pass-through, cleanup too */
+		/* Pass-through, cleanup too but keep IRQ hw data */
		if (state->pt_number)
			kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
 
@@ -1859,21 +1861,10 @@ static void kvmppc_xive_release(struct kvm_device *dev)
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	int i;
-	int was_ready;
 
	pr_devel("Releasing xive device\n");
 
-	debugfs_remove(xive->dentry);
-
	/*
-	 * Clearing mmu_ready temporarily while holding kvm->lock
-	 * is a way of ensuring that no vcpus can enter the guest
-	 * until we drop kvm->lock. Doing kick_all_cpus_sync()
-	 * ensures that any vcpu executing inside the guest has
-	 * exited the guest. Once kick_all_cpus_sync() has finished,
-	 * we know that no vcpu can be executing the XIVE push or
-	 * pull code, or executing a XICS hcall.
-	 *
	 * Since this is the device release function, we know that
	 * userspace does not have any open fd referring to the
	 * device. Therefore there can not be any of the device
@@ -1881,9 +1872,8 @@ static void kvmppc_xive_release(struct kvm_device *dev)
	 * and similarly, the connect_vcpu and set/clr_mapped
	 * functions also cannot be being executed.
	 */
-	was_ready = kvm->arch.mmu_ready;
-	kvm->arch.mmu_ready = 0;
-	kick_all_cpus_sync();
+
+	debugfs_remove(xive->dentry);
 
	/*
	 * We should clean up the vCPU interrupt presenters first.
@@ -1892,12 +1882,22 @@ static void kvmppc_xive_release(struct kvm_device *dev)
		/*
		 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
		 * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently.
+		 * Holding the vcpu->mutex also means that the vcpu cannot
+		 * be executing the KVM_RUN ioctl, and therefore it cannot
+		 * be executing the XIVE push or pull code or accessing
+		 * the XIVE MMIO regions.
		 */
		mutex_lock(&vcpu->mutex);
		kvmppc_xive_cleanup_vcpu(vcpu);
		mutex_unlock(&vcpu->mutex);
	}
 
+	/*
+	 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
+	 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
+	 * against xive code getting called during vcpu execution or
+	 * set/get one_reg operations.
+	 */
	kvm->arch.xive = NULL;
 
	/* Mask and free interrupts */
@@ -1911,8 +1911,6 @@ static void kvmppc_xive_release(struct kvm_device *dev)
	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);
 
-	kvm->arch.mmu_ready = was_ready;
-
	/*
	 * A reference of the kvmppc_xive pointer is now kept under
	 * the xive_devices struct of the machine for reuse. It is
@@ -1967,6 +1965,7 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
	dev->private = xive;
	xive->dev = dev;
	xive->kvm = kvm;
+	mutex_init(&xive->lock);
 
	/* Already there ? */
	if (kvm->arch.xive)
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
index 426146332984..862c2c9650ae 100644
--- a/arch/powerpc/kvm/book3s_xive.h
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -141,6 +141,7 @@ struct kvmppc_xive {
	struct kvmppc_xive_ops *ops;
	struct address_space *mapping;
	struct mutex mapping_lock;
+	struct mutex lock;
 };
 
 #define KVMPPC_XIVE_Q_COUNT 8
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index 6a8e698c4b6e..5596c8ec221a 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -109,12 +109,12 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
109 return -EPERM; 109 return -EPERM;
110 if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) 110 if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
111 return -EBUSY; 111 return -EBUSY;
112 if (server_num >= KVM_MAX_VCPUS) { 112 if (server_num >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
113 pr_devel("Out of bounds !\n"); 113 pr_devel("Out of bounds !\n");
114 return -EINVAL; 114 return -EINVAL;
115 } 115 }
116 116
117 mutex_lock(&vcpu->kvm->lock); 117 mutex_lock(&xive->lock);
118 118
119 if (kvmppc_xive_find_server(vcpu->kvm, server_num)) { 119 if (kvmppc_xive_find_server(vcpu->kvm, server_num)) {
120 pr_devel("Duplicate !\n"); 120 pr_devel("Duplicate !\n");
@@ -159,7 +159,7 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
159 159
160 /* TODO: reset all queues to a clean state ? */ 160 /* TODO: reset all queues to a clean state ? */
161bail: 161bail:
162 mutex_unlock(&vcpu->kvm->lock); 162 mutex_unlock(&xive->lock);
163 if (rc) 163 if (rc)
164 kvmppc_xive_native_cleanup_vcpu(vcpu); 164 kvmppc_xive_native_cleanup_vcpu(vcpu);
165 165
@@ -172,6 +172,7 @@ bail:
172static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq) 172static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq)
173{ 173{
174 struct kvmppc_xive *xive = kvm->arch.xive; 174 struct kvmppc_xive *xive = kvm->arch.xive;
175 pgoff_t esb_pgoff = KVM_XIVE_ESB_PAGE_OFFSET + irq * 2;
175 176
176 if (irq >= KVMPPC_XIVE_NR_IRQS) 177 if (irq >= KVMPPC_XIVE_NR_IRQS)
177 return -EINVAL; 178 return -EINVAL;
@@ -185,7 +186,7 @@ static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq)
185 mutex_lock(&xive->mapping_lock); 186 mutex_lock(&xive->mapping_lock);
186 if (xive->mapping) 187 if (xive->mapping)
187 unmap_mapping_range(xive->mapping, 188 unmap_mapping_range(xive->mapping,
188 irq * (2ull << PAGE_SHIFT), 189 esb_pgoff << PAGE_SHIFT,
189 2ull << PAGE_SHIFT, 1); 190 2ull << PAGE_SHIFT, 1);
190 mutex_unlock(&xive->mapping_lock); 191 mutex_unlock(&xive->mapping_lock);
191 return 0; 192 return 0;
@@ -535,6 +536,7 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
535 struct xive_q *q; 536 struct xive_q *q;
536 gfn_t gfn; 537 gfn_t gfn;
537 unsigned long page_size; 538 unsigned long page_size;
539 int srcu_idx;
538 540
539 /* 541 /*
540 * Demangle priority/server tuple from the EQ identifier 542 * Demangle priority/server tuple from the EQ identifier
@@ -565,24 +567,6 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
565 __func__, server, priority, kvm_eq.flags, 567 __func__, server, priority, kvm_eq.flags,
566 kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex); 568 kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);
567 569
568 /*
569 * sPAPR specifies a "Unconditional Notify (n) flag" for the
570 * H_INT_SET_QUEUE_CONFIG hcall which forces notification
571 * without using the coalescing mechanisms provided by the
572 * XIVE END ESBs. This is required on KVM as notification
573 * using the END ESBs is not supported.
574 */
575 if (kvm_eq.flags != KVM_XIVE_EQ_ALWAYS_NOTIFY) {
576 pr_err("invalid flags %d\n", kvm_eq.flags);
577 return -EINVAL;
578 }
579
580 rc = xive_native_validate_queue_size(kvm_eq.qshift);
581 if (rc) {
582 pr_err("invalid queue size %d\n", kvm_eq.qshift);
583 return rc;
584 }
585
586 /* reset queue and disable queueing */ 570 /* reset queue and disable queueing */
587 if (!kvm_eq.qshift) { 571 if (!kvm_eq.qshift) {
588 q->guest_qaddr = 0; 572 q->guest_qaddr = 0;
@@ -604,26 +588,48 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
604 return 0; 588 return 0;
605 } 589 }
606 590
591 /*
592 * sPAPR specifies a "Unconditional Notify (n) flag" for the
593 * H_INT_SET_QUEUE_CONFIG hcall which forces notification
594 * without using the coalescing mechanisms provided by the
595 * XIVE END ESBs. This is required on KVM as notification
596 * using the END ESBs is not supported.
597 */
598 if (kvm_eq.flags != KVM_XIVE_EQ_ALWAYS_NOTIFY) {
599 pr_err("invalid flags %d\n", kvm_eq.flags);
600 return -EINVAL;
601 }
602
603 rc = xive_native_validate_queue_size(kvm_eq.qshift);
604 if (rc) {
605 pr_err("invalid queue size %d\n", kvm_eq.qshift);
606 return rc;
607 }
608
607 if (kvm_eq.qaddr & ((1ull << kvm_eq.qshift) - 1)) { 609 if (kvm_eq.qaddr & ((1ull << kvm_eq.qshift) - 1)) {
608 pr_err("queue page is not aligned %llx/%llx\n", kvm_eq.qaddr, 610 pr_err("queue page is not aligned %llx/%llx\n", kvm_eq.qaddr,
609 1ull << kvm_eq.qshift); 611 1ull << kvm_eq.qshift);
610 return -EINVAL; 612 return -EINVAL;
611 } 613 }
612 614
615 srcu_idx = srcu_read_lock(&kvm->srcu);
613 gfn = gpa_to_gfn(kvm_eq.qaddr); 616 gfn = gpa_to_gfn(kvm_eq.qaddr);
614 page = gfn_to_page(kvm, gfn); 617 page = gfn_to_page(kvm, gfn);
615 if (is_error_page(page)) { 618 if (is_error_page(page)) {
619 srcu_read_unlock(&kvm->srcu, srcu_idx);
616 pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr); 620 pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
617 return -EINVAL; 621 return -EINVAL;
618 } 622 }
619 623
620 page_size = kvm_host_page_size(kvm, gfn); 624 page_size = kvm_host_page_size(kvm, gfn);
621 if (1ull << kvm_eq.qshift > page_size) { 625 if (1ull << kvm_eq.qshift > page_size) {
626 srcu_read_unlock(&kvm->srcu, srcu_idx);
622 pr_warn("Incompatible host page size %lx!\n", page_size); 627 pr_warn("Incompatible host page size %lx!\n", page_size);
623 return -EINVAL; 628 return -EINVAL;
624 } 629 }
625 630
626 qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK); 631 qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);
632 srcu_read_unlock(&kvm->srcu, srcu_idx);
627 633
628 /* 634 /*
629 * Backup the queue page guest address to the mark EQ page 635 * Backup the queue page guest address to the mark EQ page
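The srcu_read_lock()/srcu_read_unlock() pairs added above bracket every path that resolves a guest address through the memslots, including each early error return. A standalone sketch of the same bracket discipline, using a pthread rwlock to model the SRCU read side (the lookup itself is a stand-in, not the KVM API):

#include <pthread.h>
#include <stdio.h>

/* A plain rwlock models the kvm->srcu read side for illustration. */
static pthread_rwlock_t slots_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long slots_base = 0x1000;       /* fake memslot table */

static int lookup_queue_page(unsigned long qaddr, unsigned long *hva)
{
        int ret = 0;

        pthread_rwlock_rdlock(&slots_lock);     /* srcu_read_lock() */
        if (qaddr < slots_base)                 /* gfn_to_page() failure */
                ret = -1;
        else
                *hva = qaddr - slots_base;      /* page_to_virt() + offset */
        pthread_rwlock_unlock(&slots_lock);     /* unlock on every path */
        return ret;
}

int main(void)
{
        unsigned long hva;

        if (!lookup_queue_page(0x2000, &hva))
                printf("hva = 0x%lx\n", hva);
        return 0;
}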
@@ -772,7 +778,7 @@ static int kvmppc_xive_reset(struct kvmppc_xive *xive)
772 778
773 pr_devel("%s\n", __func__); 779 pr_devel("%s\n", __func__);
774 780
775 mutex_lock(&kvm->lock); 781 mutex_lock(&xive->lock);
776 782
777 kvm_for_each_vcpu(i, vcpu, kvm) { 783 kvm_for_each_vcpu(i, vcpu, kvm) {
778 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; 784 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
@@ -810,7 +816,7 @@ static int kvmppc_xive_reset(struct kvmppc_xive *xive)
810 } 816 }
811 } 817 }
812 818
813 mutex_unlock(&kvm->lock); 819 mutex_unlock(&xive->lock);
814 820
815 return 0; 821 return 0;
816} 822}
@@ -854,6 +860,7 @@ static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu)
854{ 860{
855 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; 861 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
856 unsigned int prio; 862 unsigned int prio;
863 int srcu_idx;
857 864
858 if (!xc) 865 if (!xc)
859 return -ENOENT; 866 return -ENOENT;
@@ -865,7 +872,9 @@ static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu)
865 continue; 872 continue;
866 873
867 /* Mark EQ page dirty for migration */ 874 /* Mark EQ page dirty for migration */
875 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
868 mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr)); 876 mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr));
877 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
869 } 878 }
870 return 0; 879 return 0;
871} 880}
@@ -878,7 +887,7 @@ static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
878 887
879 pr_devel("%s\n", __func__); 888 pr_devel("%s\n", __func__);
880 889
881 mutex_lock(&kvm->lock); 890 mutex_lock(&xive->lock);
882 for (i = 0; i <= xive->max_sbid; i++) { 891 for (i = 0; i <= xive->max_sbid; i++) {
883 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; 892 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
884 893
@@ -892,7 +901,7 @@ static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
892 kvm_for_each_vcpu(i, vcpu, kvm) { 901 kvm_for_each_vcpu(i, vcpu, kvm) {
893 kvmppc_xive_native_vcpu_eq_sync(vcpu); 902 kvmppc_xive_native_vcpu_eq_sync(vcpu);
894 } 903 }
895 mutex_unlock(&kvm->lock); 904 mutex_unlock(&xive->lock);
896 905
897 return 0; 906 return 0;
898} 907}
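Both hunks replace the VM-wide kvm->lock with a mutex private to the XIVE device, so reset and EQ-sync serialize against each other without blocking unrelated VM ioctls. A pthread sketch of the pattern (the device struct here is hypothetical):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical device with its own lock, as with xive->lock. */
struct xive_dev {
        pthread_mutex_t lock;
        int nr_resets;
};

static int xive_dev_reset(struct xive_dev *x)
{
        pthread_mutex_lock(&x->lock);   /* was: mutex_lock(&kvm->lock) */
        x->nr_resets++;                 /* touch device-only state */
        pthread_mutex_unlock(&x->lock);
        return 0;
}

int main(void)
{
        struct xive_dev x = { PTHREAD_MUTEX_INITIALIZER, 0 };

        xive_dev_reset(&x);
        printf("resets: %d\n", x.nr_resets);
        return 0;
}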
@@ -965,7 +974,7 @@ static int kvmppc_xive_native_has_attr(struct kvm_device *dev,
965} 974}
966 975
967/* 976/*
968 * Called when device fd is closed 977 * Called when device fd is closed. kvm->lock is held.
969 */ 978 */
970static void kvmppc_xive_native_release(struct kvm_device *dev) 979static void kvmppc_xive_native_release(struct kvm_device *dev)
971{ 980{
@@ -973,21 +982,18 @@ static void kvmppc_xive_native_release(struct kvm_device *dev)
973 struct kvm *kvm = xive->kvm; 982 struct kvm *kvm = xive->kvm;
974 struct kvm_vcpu *vcpu; 983 struct kvm_vcpu *vcpu;
975 int i; 984 int i;
976 int was_ready;
977
978 debugfs_remove(xive->dentry);
979 985
980 pr_devel("Releasing xive native device\n"); 986 pr_devel("Releasing xive native device\n");
981 987
982 /* 988 /*
983 * Clearing mmu_ready temporarily while holding kvm->lock 989 * Clear the KVM device file address_space which is used to
984 * is a way of ensuring that no vcpus can enter the guest 990 * unmap the ESB pages when a device is passed-through.
985 * until we drop kvm->lock. Doing kick_all_cpus_sync() 991 */
986 * ensures that any vcpu executing inside the guest has 992 mutex_lock(&xive->mapping_lock);
987 * exited the guest. Once kick_all_cpus_sync() has finished, 993 xive->mapping = NULL;
988 * we know that no vcpu can be executing the XIVE push or 994 mutex_unlock(&xive->mapping_lock);
989 * pull code or accessing the XIVE MMIO regions. 995
990 * 996 /*
991 * Since this is the device release function, we know that 997 * Since this is the device release function, we know that
992 * userspace does not have any open fd or mmap referring to 998 * userspace does not have any open fd or mmap referring to
993 * the device. Therefore there can not be any of the 999 * the device. Therefore there can not be any of the
@@ -996,9 +1002,8 @@ static void kvmppc_xive_native_release(struct kvm_device *dev)
996 * connect_vcpu and set/clr_mapped functions also cannot 1002 * connect_vcpu and set/clr_mapped functions also cannot
997 * be being executed. 1003 * be being executed.
998 */ 1004 */
999 was_ready = kvm->arch.mmu_ready; 1005
1000 kvm->arch.mmu_ready = 0; 1006 debugfs_remove(xive->dentry);
1001 kick_all_cpus_sync();
1002 1007
1003 /* 1008 /*
1004 * We should clean up the vCPU interrupt presenters first. 1009 * We should clean up the vCPU interrupt presenters first.
@@ -1007,12 +1012,22 @@ static void kvmppc_xive_native_release(struct kvm_device *dev)
1007 /* 1012 /*
1008 * Take vcpu->mutex to ensure that no one_reg get/set ioctl 1013 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
1009 * (i.e. kvmppc_xive_native_[gs]et_vp) can be being done. 1014 * (i.e. kvmppc_xive_native_[gs]et_vp) can be being done.
1015 * Holding the vcpu->mutex also means that the vcpu cannot
1016 * be executing the KVM_RUN ioctl, and therefore it cannot
1017 * be executing the XIVE push or pull code or accessing
1018 * the XIVE MMIO regions.
1010 */ 1019 */
1011 mutex_lock(&vcpu->mutex); 1020 mutex_lock(&vcpu->mutex);
1012 kvmppc_xive_native_cleanup_vcpu(vcpu); 1021 kvmppc_xive_native_cleanup_vcpu(vcpu);
1013 mutex_unlock(&vcpu->mutex); 1022 mutex_unlock(&vcpu->mutex);
1014 } 1023 }
1015 1024
1025 /*
1026 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
1027 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
1028 * against xive code getting called during vcpu execution or
1029 * set/get one_reg operations.
1030 */
1016 kvm->arch.xive = NULL; 1031 kvm->arch.xive = NULL;
1017 1032
1018 for (i = 0; i <= xive->max_sbid; i++) { 1033 for (i = 0; i <= xive->max_sbid; i++) {
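The comment added above carries the whole simplification: holding each vcpu->mutex excludes a concurrent KVM_RUN, which is what lets the release path drop the old mmu_ready/kick_all_cpus_sync() dance. A standalone pthread model of quiescing per-vcpu state behind the same mutex the runner would take (not kernel code):

#include <pthread.h>
#include <stdio.h>

#define NR_VCPUS 2

/* Per-vcpu mutex: the runner (KVM_RUN) holds it, so taking it here
 * guarantees the vcpu is out of the guest while state is torn down. */
struct vcpu {
        pthread_mutex_t mutex;
        int xive_state;
};

static struct vcpu vcpus[NR_VCPUS] = {
        { PTHREAD_MUTEX_INITIALIZER, 1 },
        { PTHREAD_MUTEX_INITIALIZER, 1 },
};

static void cleanup_vcpu(struct vcpu *v)
{
        pthread_mutex_lock(&v->mutex);
        v->xive_state = 0;              /* safe: vcpu cannot be running */
        pthread_mutex_unlock(&v->mutex);
}

int main(void)
{
        for (int i = 0; i < NR_VCPUS; i++)
                cleanup_vcpu(&vcpus[i]);
        puts("all vcpus quiesced");
        return 0;
}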
@@ -1025,8 +1040,6 @@ static void kvmppc_xive_native_release(struct kvm_device *dev)
1025 if (xive->vp_base != XIVE_INVALID_VP) 1040 if (xive->vp_base != XIVE_INVALID_VP)
1026 xive_native_free_vp_block(xive->vp_base); 1041 xive_native_free_vp_block(xive->vp_base);
1027 1042
1028 kvm->arch.mmu_ready = was_ready;
1029
1030 /* 1043 /*
1031 * A reference of the kvmppc_xive pointer is now kept under 1044 * A reference of the kvmppc_xive pointer is now kept under
1032 * the xive_devices struct of the machine for reuse. It is 1045 * the xive_devices struct of the machine for reuse. It is
@@ -1060,6 +1073,7 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
1060 xive->kvm = kvm; 1073 xive->kvm = kvm;
1061 kvm->arch.xive = xive; 1074 kvm->arch.xive = xive;
1062 mutex_init(&xive->mapping_lock); 1075 mutex_init(&xive->mapping_lock);
1076 mutex_init(&xive->lock);
1063 1077
1064 /* 1078 /*
1065 * Allocate a bunch of VPs. KVM_MAX_VCPUS is a large value for 1079 * Allocate a bunch of VPs. KVM_MAX_VCPUS is a large value for
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 3393b166817a..aa3a678711be 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -657,6 +657,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
657 case KVM_CAP_MAX_VCPUS: 657 case KVM_CAP_MAX_VCPUS:
658 r = KVM_MAX_VCPUS; 658 r = KVM_MAX_VCPUS;
659 break; 659 break;
660 case KVM_CAP_MAX_VCPU_ID:
661 r = KVM_MAX_VCPU_ID;
662 break;
660#ifdef CONFIG_PPC_BOOK3S_64 663#ifdef CONFIG_PPC_BOOK3S_64
661 case KVM_CAP_PPC_GET_SMMU_INFO: 664 case KVM_CAP_PPC_GET_SMMU_INFO:
662 r = 1; 665 r = 1;
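With KVM_CAP_MAX_VCPU_ID now reported by powerpc (and by s390 and x86 in the hunks below), userspace can query the valid vcpu id range instead of assuming it equals the vcpu count. A sketch of the query from userspace; error handling kept minimal:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        int max_id;

        if (kvm < 0) {
                perror("open /dev/kvm");
                return 1;
        }
        /* Older kernels return 0 for unknown caps: fall back to the
         * vcpu count, which was the previous implicit limit. */
        max_id = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPU_ID);
        if (max_id <= 0)
                max_id = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
        printf("max vcpu id: %d\n", max_id);
        return 0;
}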
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 7e129f62cd67..ca92e01d0bd1 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1846,6 +1846,7 @@ static int power_pmu_event_init(struct perf_event *event)
1846 int n; 1846 int n;
1847 int err; 1847 int err;
1848 struct cpu_hw_events *cpuhw; 1848 struct cpu_hw_events *cpuhw;
1849 u64 bhrb_filter;
1849 1850
1850 if (!ppmu) 1851 if (!ppmu)
1851 return -ENOENT; 1852 return -ENOENT;
@@ -1951,13 +1952,14 @@ static int power_pmu_event_init(struct perf_event *event)
1951 err = power_check_constraints(cpuhw, events, cflags, n + 1); 1952 err = power_check_constraints(cpuhw, events, cflags, n + 1);
1952 1953
1953 if (has_branch_stack(event)) { 1954 if (has_branch_stack(event)) {
1954 cpuhw->bhrb_filter = ppmu->bhrb_filter_map( 1955 bhrb_filter = ppmu->bhrb_filter_map(
1955 event->attr.branch_sample_type); 1956 event->attr.branch_sample_type);
1956 1957
1957 if (cpuhw->bhrb_filter == -1) { 1958 if (bhrb_filter == -1) {
1958 put_cpu_var(cpu_hw_events); 1959 put_cpu_var(cpu_hw_events);
1959 return -EOPNOTSUPP; 1960 return -EOPNOTSUPP;
1960 } 1961 }
1962 cpuhw->bhrb_filter = bhrb_filter;
1961 } 1963 }
1962 1964
1963 put_cpu_var(cpu_hw_events); 1965 put_cpu_var(cpu_hw_events);
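The fix validates the BHRB filter into a local and only publishes it to cpuhw->bhrb_filter on success, so a rejected event can no longer leave a stale -1 in shared per-CPU state. A standalone sketch of the validate-then-commit pattern (the filter values are made up):

#include <stdio.h>

struct cpu_hw_events {
        long bhrb_filter;               /* shared per-CPU state */
};

/* Hypothetical mapping: only sample type 1 is supported here. */
static long bhrb_filter_map(unsigned long sample_type)
{
        return sample_type == 1 ? 0x40000000L : -1;
}

/* Validate into a local first; publish only on success. */
static int event_init(struct cpu_hw_events *cpuhw, unsigned long sample_type)
{
        long bhrb_filter = bhrb_filter_map(sample_type);

        if (bhrb_filter == -1)
                return -1;              /* -EOPNOTSUPP, cpuhw untouched */
        cpuhw->bhrb_filter = bhrb_filter;
        return 0;
}

int main(void)
{
        struct cpu_hw_events cpuhw = { 0 };

        event_init(&cpuhw, 2);          /* rejected: no stale -1 left behind */
        event_init(&cpuhw, 1);
        printf("filter: 0x%lx\n", cpuhw.bhrb_filter);
        return 0;
}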
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index b47e9fb5e899..3a5fcc20ff31 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -25,6 +25,7 @@ enum {
25#define POWER8_MMCRA_IFM1 0x0000000040000000UL 25#define POWER8_MMCRA_IFM1 0x0000000040000000UL
26#define POWER8_MMCRA_IFM2 0x0000000080000000UL 26#define POWER8_MMCRA_IFM2 0x0000000080000000UL
27#define POWER8_MMCRA_IFM3 0x00000000C0000000UL 27#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
28#define POWER8_MMCRA_BHRB_MASK 0x00000000C0000000UL
28 29
29/* 30/*
30 * Raw event encoding for PowerISA v2.07 (Power8): 31 * Raw event encoding for PowerISA v2.07 (Power8):
@@ -239,6 +240,8 @@ static u64 power8_bhrb_filter_map(u64 branch_sample_type)
239 240
240static void power8_config_bhrb(u64 pmu_bhrb_filter) 241static void power8_config_bhrb(u64 pmu_bhrb_filter)
241{ 242{
243 pmu_bhrb_filter &= POWER8_MMCRA_BHRB_MASK;
244
242 /* Enable BHRB filter in PMU */ 245 /* Enable BHRB filter in PMU */
243 mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); 246 mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
244} 247}
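POWER8_MMCRA_BHRB_MASK clamps the filter to the IFM bit positions before it is OR-ed into MMCRA, so a bogus filter value cannot flip unrelated register bits; the POWER9 hunk below adds the same guard. A sketch of the clamp with an ordinary variable standing in for the SPR:

#include <stdint.h>
#include <stdio.h>

#define MMCRA_BHRB_MASK 0x00000000C0000000ULL   /* the IFM bit positions */

static uint64_t mmcra;                  /* ordinary variable, not the SPR */

/* Drop everything outside the BHRB bits before OR-ing into the register. */
static void config_bhrb(uint64_t pmu_bhrb_filter)
{
        pmu_bhrb_filter &= MMCRA_BHRB_MASK;
        mmcra |= pmu_bhrb_filter;       /* mtspr(SPRN_MMCRA, ...) */
}

int main(void)
{
        config_bhrb(0xFF00000040000000ULL);     /* stray high bits dropped */
        printf("MMCRA = 0x%016llx\n", (unsigned long long)mmcra);
        return 0;
}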
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 738ed26c538d..08c3ef796198 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -88,6 +88,7 @@ enum {
88#define POWER9_MMCRA_IFM1 0x0000000040000000UL 88#define POWER9_MMCRA_IFM1 0x0000000040000000UL
89#define POWER9_MMCRA_IFM2 0x0000000080000000UL 89#define POWER9_MMCRA_IFM2 0x0000000080000000UL
90#define POWER9_MMCRA_IFM3 0x00000000C0000000UL 90#define POWER9_MMCRA_IFM3 0x00000000C0000000UL
91#define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL
91 92
92/* Nasty Power9 specific hack */ 93/* Nasty Power9 specific hack */
93#define PVR_POWER9_CUMULUS 0x00002000 94#define PVR_POWER9_CUMULUS 0x00002000
@@ -296,6 +297,8 @@ static u64 power9_bhrb_filter_map(u64 branch_sample_type)
296 297
297static void power9_config_bhrb(u64 pmu_bhrb_filter) 298static void power9_config_bhrb(u64 pmu_bhrb_filter)
298{ 299{
300 pmu_bhrb_filter &= POWER9_MMCRA_BHRB_MASK;
301
299 /* Enable BHRB filter in PMU */ 302 /* Enable BHRB filter in PMU */
300 mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); 303 mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
301} 304}
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index 1b6932890a73..186109bdd41b 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -157,6 +157,10 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
157 struct imc_pmu *pmu_ptr; 157 struct imc_pmu *pmu_ptr;
158 u32 offset; 158 u32 offset;
159 159
160 /* Return for unknown domain */
161 if (domain < 0)
162 return -EINVAL;
163
160 /* memory for pmu */ 164 /* memory for pmu */
161 pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL); 165 pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
162 if (!pmu_ptr) 166 if (!pmu_ptr)
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index dd456725189f..d00f84add5f4 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -27,14 +27,14 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/cpufeature.h> 28#include <linux/cpufeature.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/spinlock.h> 30#include <linux/mutex.h>
31#include <linux/fips.h> 31#include <linux/fips.h>
32#include <linux/string.h> 32#include <linux/string.h>
33#include <crypto/xts.h> 33#include <crypto/xts.h>
34#include <asm/cpacf.h> 34#include <asm/cpacf.h>
35 35
36static u8 *ctrblk; 36static u8 *ctrblk;
37static DEFINE_SPINLOCK(ctrblk_lock); 37static DEFINE_MUTEX(ctrblk_lock);
38 38
39static cpacf_mask_t km_functions, kmc_functions, kmctr_functions, 39static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
40 kma_functions; 40 kma_functions;
@@ -698,7 +698,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
698 unsigned int n, nbytes; 698 unsigned int n, nbytes;
699 int ret, locked; 699 int ret, locked;
700 700
701 locked = spin_trylock(&ctrblk_lock); 701 locked = mutex_trylock(&ctrblk_lock);
702 702
703 ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE); 703 ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
704 while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { 704 while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
@@ -716,7 +716,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
716 ret = blkcipher_walk_done(desc, walk, nbytes - n); 716 ret = blkcipher_walk_done(desc, walk, nbytes - n);
717 } 717 }
718 if (locked) 718 if (locked)
719 spin_unlock(&ctrblk_lock); 719 mutex_unlock(&ctrblk_lock);
720 /* 720 /*
721 * final block may be < AES_BLOCK_SIZE, copy only nbytes 721 * final block may be < AES_BLOCK_SIZE, copy only nbytes
722 */ 722 */
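ctrblk_lock can become a mutex because the code only ever trylocks it: on contention it falls back to a single on-stack counter block instead of waiting, and since the surrounding cipher walk may sleep, holding a spinlock across it would be unsafe (des_s390.c below gets the same conversion). A pthread sketch of the trylock-with-fallback shape:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BLOCK 16

static pthread_mutex_t ctrblk_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char ctrblk[8 * BLOCK];         /* shared multi-block buffer */

/* Take the shared buffer opportunistically; on contention use a single
 * on-stack block instead of waiting, so the caller never blocks here. */
static void ctr_crypt_step(const unsigned char *iv)
{
        unsigned char local[BLOCK];
        int locked = (pthread_mutex_trylock(&ctrblk_lock) == 0);
        unsigned char *buf = locked ? ctrblk : local;

        memcpy(buf, iv, BLOCK);                 /* ...counter blocks here... */

        if (locked)
                pthread_mutex_unlock(&ctrblk_lock);
}

int main(void)
{
        unsigned char iv[BLOCK] = { 0 };

        ctr_crypt_step(iv);
        puts("done");
        return 0;
}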
@@ -826,19 +826,45 @@ static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
826 return 0; 826 return 0;
827} 827}
828 828
829static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg, 829static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
830 unsigned int len) 830 unsigned int len)
831{ 831{
832 memset(gw, 0, sizeof(*gw)); 832 memset(gw, 0, sizeof(*gw));
833 gw->walk_bytes_remain = len; 833 gw->walk_bytes_remain = len;
834 scatterwalk_start(&gw->walk, sg); 834 scatterwalk_start(&gw->walk, sg);
835} 835}
836 836
837static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) 837static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
838{
839 struct scatterlist *nextsg;
840
841 gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
842 while (!gw->walk_bytes) {
843 nextsg = sg_next(gw->walk.sg);
844 if (!nextsg)
845 return 0;
846 scatterwalk_start(&gw->walk, nextsg);
847 gw->walk_bytes = scatterwalk_clamp(&gw->walk,
848 gw->walk_bytes_remain);
849 }
850 gw->walk_ptr = scatterwalk_map(&gw->walk);
851 return gw->walk_bytes;
852}
853
854static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
855 unsigned int nbytes)
856{
857 gw->walk_bytes_remain -= nbytes;
858 scatterwalk_unmap(&gw->walk);
859 scatterwalk_advance(&gw->walk, nbytes);
860 scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
861 gw->walk_ptr = NULL;
862}
863
864static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
838{ 865{
839 int n; 866 int n;
840 867
841 /* minbytesneeded <= AES_BLOCK_SIZE */
842 if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) { 868 if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
843 gw->ptr = gw->buf; 869 gw->ptr = gw->buf;
844 gw->nbytes = gw->buf_bytes; 870 gw->nbytes = gw->buf_bytes;
@@ -851,13 +877,11 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
851 goto out; 877 goto out;
852 } 878 }
853 879
854 gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain); 880 if (!_gcm_sg_clamp_and_map(gw)) {
855 if (!gw->walk_bytes) { 881 gw->ptr = NULL;
856 scatterwalk_start(&gw->walk, sg_next(gw->walk.sg)); 882 gw->nbytes = 0;
857 gw->walk_bytes = scatterwalk_clamp(&gw->walk, 883 goto out;
858 gw->walk_bytes_remain);
859 } 884 }
860 gw->walk_ptr = scatterwalk_map(&gw->walk);
861 885
862 if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) { 886 if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
863 gw->ptr = gw->walk_ptr; 887 gw->ptr = gw->walk_ptr;
@@ -869,51 +893,90 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
869 n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes); 893 n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
870 memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n); 894 memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
871 gw->buf_bytes += n; 895 gw->buf_bytes += n;
872 gw->walk_bytes_remain -= n; 896 _gcm_sg_unmap_and_advance(gw, n);
873 scatterwalk_unmap(&gw->walk);
874 scatterwalk_advance(&gw->walk, n);
875 scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
876
877 if (gw->buf_bytes >= minbytesneeded) { 897 if (gw->buf_bytes >= minbytesneeded) {
878 gw->ptr = gw->buf; 898 gw->ptr = gw->buf;
879 gw->nbytes = gw->buf_bytes; 899 gw->nbytes = gw->buf_bytes;
880 goto out; 900 goto out;
881 } 901 }
882 902 if (!_gcm_sg_clamp_and_map(gw)) {
883 gw->walk_bytes = scatterwalk_clamp(&gw->walk, 903 gw->ptr = NULL;
884 gw->walk_bytes_remain); 904 gw->nbytes = 0;
885 if (!gw->walk_bytes) { 905 goto out;
886 scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
887 gw->walk_bytes = scatterwalk_clamp(&gw->walk,
888 gw->walk_bytes_remain);
889 } 906 }
890 gw->walk_ptr = scatterwalk_map(&gw->walk);
891 } 907 }
892 908
893out: 909out:
894 return gw->nbytes; 910 return gw->nbytes;
895} 911}
896 912
897static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone) 913static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
898{ 914{
899 int n; 915 if (gw->walk_bytes_remain == 0) {
916 gw->ptr = NULL;
917 gw->nbytes = 0;
918 goto out;
919 }
900 920
921 if (!_gcm_sg_clamp_and_map(gw)) {
922 gw->ptr = NULL;
923 gw->nbytes = 0;
924 goto out;
925 }
926
927 if (gw->walk_bytes >= minbytesneeded) {
928 gw->ptr = gw->walk_ptr;
929 gw->nbytes = gw->walk_bytes;
930 goto out;
931 }
932
933 scatterwalk_unmap(&gw->walk);
934 gw->walk_ptr = NULL;
935
936 gw->ptr = gw->buf;
937 gw->nbytes = sizeof(gw->buf);
938
939out:
940 return gw->nbytes;
941}
942
943static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
944{
901 if (gw->ptr == NULL) 945 if (gw->ptr == NULL)
902 return; 946 return 0;
903 947
904 if (gw->ptr == gw->buf) { 948 if (gw->ptr == gw->buf) {
905 n = gw->buf_bytes - bytesdone; 949 int n = gw->buf_bytes - bytesdone;
906 if (n > 0) { 950 if (n > 0) {
907 memmove(gw->buf, gw->buf + bytesdone, n); 951 memmove(gw->buf, gw->buf + bytesdone, n);
908 gw->buf_bytes -= n; 952 gw->buf_bytes = n;
909 } else 953 } else
910 gw->buf_bytes = 0; 954 gw->buf_bytes = 0;
911 } else { 955 } else
912 gw->walk_bytes_remain -= bytesdone; 956 _gcm_sg_unmap_and_advance(gw, bytesdone);
913 scatterwalk_unmap(&gw->walk); 957
914 scatterwalk_advance(&gw->walk, bytesdone); 958 return bytesdone;
915 scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain); 959}
916 } 960
961static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
962{
963 int i, n;
964
965 if (gw->ptr == NULL)
966 return 0;
967
968 if (gw->ptr == gw->buf) {
969 for (i = 0; i < bytesdone; i += n) {
970 if (!_gcm_sg_clamp_and_map(gw))
971 return i;
972 n = min(gw->walk_bytes, bytesdone - i);
973 memcpy(gw->walk_ptr, gw->buf + i, n);
974 _gcm_sg_unmap_and_advance(gw, n);
975 }
976 } else
977 _gcm_sg_unmap_and_advance(gw, bytesdone);
978
979 return bytesdone;
917} 980}
918 981
919static int gcm_aes_crypt(struct aead_request *req, unsigned int flags) 982static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
@@ -926,7 +989,7 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
926 unsigned int pclen = req->cryptlen; 989 unsigned int pclen = req->cryptlen;
927 int ret = 0; 990 int ret = 0;
928 991
929 unsigned int len, in_bytes, out_bytes, 992 unsigned int n, len, in_bytes, out_bytes,
930 min_bytes, bytes, aad_bytes, pc_bytes; 993 min_bytes, bytes, aad_bytes, pc_bytes;
931 struct gcm_sg_walk gw_in, gw_out; 994 struct gcm_sg_walk gw_in, gw_out;
932 u8 tag[GHASH_DIGEST_SIZE]; 995 u8 tag[GHASH_DIGEST_SIZE];
@@ -963,14 +1026,14 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
963 *(u32 *)(param.j0 + ivsize) = 1; 1026 *(u32 *)(param.j0 + ivsize) = 1;
964 memcpy(param.k, ctx->key, ctx->key_len); 1027 memcpy(param.k, ctx->key, ctx->key_len);
965 1028
966 gcm_sg_walk_start(&gw_in, req->src, len); 1029 gcm_walk_start(&gw_in, req->src, len);
967 gcm_sg_walk_start(&gw_out, req->dst, len); 1030 gcm_walk_start(&gw_out, req->dst, len);
968 1031
969 do { 1032 do {
970 min_bytes = min_t(unsigned int, 1033 min_bytes = min_t(unsigned int,
971 aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE); 1034 aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
972 in_bytes = gcm_sg_walk_go(&gw_in, min_bytes); 1035 in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
973 out_bytes = gcm_sg_walk_go(&gw_out, min_bytes); 1036 out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
974 bytes = min(in_bytes, out_bytes); 1037 bytes = min(in_bytes, out_bytes);
975 1038
976 if (aadlen + pclen <= bytes) { 1039 if (aadlen + pclen <= bytes) {
@@ -997,8 +1060,11 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
997 gw_in.ptr + aad_bytes, pc_bytes, 1060 gw_in.ptr + aad_bytes, pc_bytes,
998 gw_in.ptr, aad_bytes); 1061 gw_in.ptr, aad_bytes);
999 1062
1000 gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes); 1063 n = aad_bytes + pc_bytes;
1001 gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes); 1064 if (gcm_in_walk_done(&gw_in, n) != n)
1065 return -ENOMEM;
1066 if (gcm_out_walk_done(&gw_out, n) != n)
1067 return -ENOMEM;
1002 aadlen -= aad_bytes; 1068 aadlen -= aad_bytes;
1003 pclen -= pc_bytes; 1069 pclen -= pc_bytes;
1004 } while (aadlen + pclen > 0); 1070 } while (aadlen + pclen > 0);
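The refactored walk splits input and output handling and funnels every segment crossing through _gcm_sg_clamp_and_map(), which also skips zero-length scatterlist entries instead of tripping over them, while gcm_out_walk_done() copies buffered output back segment by segment. A simplified standalone model of the clamp/advance loop over an array of segments (plain buffers stand in for mapped scatterlist pages):

#include <stdio.h>
#include <stddef.h>

struct seg { const char *buf; size_t len; };

struct walk {
        const struct seg *sg;
        size_t nsegs, idx, off, remain;
};

/* Clamp to the current segment and skip empty ones, as in
 * _gcm_sg_clamp_and_map(): returns 0 only when the list runs out. */
static size_t clamp_and_map(struct walk *w, const char **ptr)
{
        while (w->idx < w->nsegs && w->off == w->sg[w->idx].len) {
                w->idx++;
                w->off = 0;
        }
        if (w->idx == w->nsegs || !w->remain)
                return 0;
        *ptr = w->sg[w->idx].buf + w->off;
        size_t n = w->sg[w->idx].len - w->off;
        return n < w->remain ? n : w->remain;
}

static void advance(struct walk *w, size_t n) /* _gcm_sg_unmap_and_advance() */
{
        w->off += n;
        w->remain -= n;
}

int main(void)
{
        struct seg sg[] = { { "abc", 3 }, { "", 0 }, { "defg", 4 } };
        struct walk w = { sg, 3, 0, 0, 7 };
        const char *p;
        size_t n;

        while ((n = clamp_and_map(&w, &p)) > 0) {
                printf("chunk: %.*s\n", (int)n, p);
                advance(&w, n);
        }
        return 0;
}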
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 1f9ab24dc048..374b42fc7637 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -14,6 +14,7 @@
14#include <linux/cpufeature.h> 14#include <linux/cpufeature.h>
15#include <linux/crypto.h> 15#include <linux/crypto.h>
16#include <linux/fips.h> 16#include <linux/fips.h>
17#include <linux/mutex.h>
17#include <crypto/algapi.h> 18#include <crypto/algapi.h>
18#include <crypto/des.h> 19#include <crypto/des.h>
19#include <asm/cpacf.h> 20#include <asm/cpacf.h>
@@ -21,7 +22,7 @@
21#define DES3_KEY_SIZE (3 * DES_KEY_SIZE) 22#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
22 23
23static u8 *ctrblk; 24static u8 *ctrblk;
24static DEFINE_SPINLOCK(ctrblk_lock); 25static DEFINE_MUTEX(ctrblk_lock);
25 26
26static cpacf_mask_t km_functions, kmc_functions, kmctr_functions; 27static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
27 28
@@ -374,7 +375,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
374 unsigned int n, nbytes; 375 unsigned int n, nbytes;
375 int ret, locked; 376 int ret, locked;
376 377
377 locked = spin_trylock(&ctrblk_lock); 378 locked = mutex_trylock(&ctrblk_lock);
378 379
379 ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE); 380 ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
380 while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) { 381 while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
@@ -391,7 +392,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
391 ret = blkcipher_walk_done(desc, walk, nbytes - n); 392 ret = blkcipher_walk_done(desc, walk, nbytes - n);
392 } 393 }
393 if (locked) 394 if (locked)
394 spin_unlock(&ctrblk_lock); 395 mutex_unlock(&ctrblk_lock);
395 /* final block may be < DES_BLOCK_SIZE, copy only nbytes */ 396 /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
396 if (nbytes) { 397 if (nbytes) {
397 cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr, 398 cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index e94a0a28b5eb..aea32dda3d14 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -160,8 +160,8 @@ struct ap_config_info {
160 unsigned char Nd; /* max # of Domains - 1 */ 160 unsigned char Nd; /* max # of Domains - 1 */
161 unsigned char _reserved3[10]; 161 unsigned char _reserved3[10];
162 unsigned int apm[8]; /* AP ID mask */ 162 unsigned int apm[8]; /* AP ID mask */
163 unsigned int aqm[8]; /* AP queue mask */ 163 unsigned int aqm[8]; /* AP (usage) queue mask */
164 unsigned int adm[8]; /* AP domain mask */ 164 unsigned int adm[8]; /* AP (control) domain mask */
165 unsigned char _reserved4[16]; 165 unsigned char _reserved4[16];
166} __aligned(8); 166} __aligned(8);
167 167
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index 27696755daa9..e3d53eb6bcf5 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -178,7 +178,7 @@ static inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
178 : "cc"); 178 : "cc");
179} 179}
180 180
181static inline int __cpacf_check_opcode(unsigned int opcode) 181static __always_inline int __cpacf_check_opcode(unsigned int opcode)
182{ 182{
183 switch (opcode) { 183 switch (opcode) {
184 case CPACF_KMAC: 184 case CPACF_KMAC:
@@ -218,7 +218,7 @@ static inline int cpacf_test_func(cpacf_mask_t *mask, unsigned int func)
218 return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0; 218 return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0;
219} 219}
220 220
221static inline int cpacf_query_func(unsigned int opcode, unsigned int func) 221static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int func)
222{ 222{
223 cpacf_mask_t mask; 223 cpacf_mask_t mask;
224 224
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index 3ec52a05d500..50359172cc48 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -70,6 +70,17 @@ struct clp_rsp_list_pci {
70 struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES]; 70 struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
71} __packed; 71} __packed;
72 72
73struct mio_info {
74 u32 valid : 6;
75 u32 : 26;
76 u32 : 32;
77 struct {
78 u64 wb;
79 u64 wt;
80 } addr[PCI_BAR_COUNT];
81 u32 reserved[6];
82} __packed;
83
73/* Query PCI function request */ 84/* Query PCI function request */
74struct clp_req_query_pci { 85struct clp_req_query_pci {
75 struct clp_req_hdr hdr; 86 struct clp_req_hdr hdr;
@@ -100,14 +111,7 @@ struct clp_rsp_query_pci {
100 u32 uid; /* user defined id */ 111 u32 uid; /* user defined id */
101 u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */ 112 u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
102 u32 reserved2[16]; 113 u32 reserved2[16];
103 u32 mio_valid : 6; 114 struct mio_info mio;
104 u32 : 26;
105 u32 : 32;
106 struct {
107 u64 wb;
108 u64 wt;
109 } addr[PCI_BAR_COUNT];
110 u32 reserved3[6];
111} __packed; 115} __packed;
112 116
113/* Query PCI function group request */ 117/* Query PCI function group request */
@@ -155,8 +159,9 @@ struct clp_req_set_pci {
155struct clp_rsp_set_pci { 159struct clp_rsp_set_pci {
156 struct clp_rsp_hdr hdr; 160 struct clp_rsp_hdr hdr;
157 u32 fh; /* function handle */ 161 u32 fh; /* function handle */
158 u32 reserved3; 162 u32 reserved1;
159 u64 reserved4; 163 u64 reserved2;
164 struct mio_info mio;
160} __packed; 165} __packed;
161 166
162/* Combined request/response block structures used by clp insn */ 167/* Combined request/response block structures used by clp insn */
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e5e8eb29e68e..28ebd647784c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -539,6 +539,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
539 break; 539 break;
540 case KVM_CAP_NR_VCPUS: 540 case KVM_CAP_NR_VCPUS:
541 case KVM_CAP_MAX_VCPUS: 541 case KVM_CAP_MAX_VCPUS:
542 case KVM_CAP_MAX_VCPU_ID:
542 r = KVM_S390_BSCA_CPU_SLOTS; 543 r = KVM_S390_BSCA_CPU_SLOTS;
543 if (!kvm_s390_use_sca_entries()) 544 if (!kvm_s390_use_sca_entries())
544 r = KVM_MAX_VCPUS; 545 r = KVM_MAX_VCPUS;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index c220399ae196..91ce03fd0c84 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -85,7 +85,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
85 * Find out which address space caused the exception. 85 * Find out which address space caused the exception.
86 * Access register mode is impossible, ignore space == 3. 86 * Access register mode is impossible, ignore space == 3.
87 */ 87 */
88static inline enum fault_type get_fault_type(struct pt_regs *regs) 88static enum fault_type get_fault_type(struct pt_regs *regs)
89{ 89{
90 unsigned long trans_exc_code; 90 unsigned long trans_exc_code;
91 91
@@ -211,6 +211,8 @@ static void dump_fault_info(struct pt_regs *regs)
211 asce = S390_lowcore.kernel_asce; 211 asce = S390_lowcore.kernel_asce;
212 pr_cont("kernel "); 212 pr_cont("kernel ");
213 break; 213 break;
214 default:
215 unreachable();
214 } 216 }
215 pr_cont("ASCE.\n"); 217 pr_cont("ASCE.\n");
216 dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK); 218 dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 0ebb7c405a25..86ca7f88fb22 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -528,7 +528,10 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev,
528 if (zdev->bars[i].val & 4) 528 if (zdev->bars[i].val & 4)
529 flags |= IORESOURCE_MEM_64; 529 flags |= IORESOURCE_MEM_64;
530 530
531 addr = ZPCI_ADDR(entry); 531 if (static_branch_likely(&have_mio))
532 addr = (unsigned long) zdev->bars[i].mio_wb;
533 else
534 addr = ZPCI_ADDR(entry);
532 size = 1UL << zdev->bars[i].size; 535 size = 1UL << zdev->bars[i].size;
533 536
534 res = __alloc_res(zdev, addr, size, flags); 537 res = __alloc_res(zdev, addr, size, flags);
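When the have_mio static branch is enabled, the BAR resource is based on the firmware-supplied write-back MIO address from the mio_info block introduced in pci_clp.h above, rather than the synthetic ZPCI_ADDR() cookie. A sketch with a plain flag standing in for the static key (the ZPCI_ADDR encoding here is invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* A plain flag models the have_mio static key. */
static int have_mio;

struct bar {
        uint64_t mio_wb;        /* firmware-provided write-back address */
};

/* Invented encoding for illustration only. */
#define ZPCI_ADDR(entry) (0x8000000000000000ULL | ((uint64_t)(entry) << 48))

static uint64_t bar_start(const struct bar *b, int entry)
{
        return have_mio ? b->mio_wb : ZPCI_ADDR(entry);
}

int main(void)
{
        struct bar b = { .mio_wb = 0x10000000ULL };

        printf("legacy: 0x%llx\n", (unsigned long long)bar_start(&b, 1));
        have_mio = 1;
        printf("mio:    0x%llx\n", (unsigned long long)bar_start(&b, 1));
        return 0;
}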
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 3a36b07a5571..d03631dba7c2 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -165,11 +165,11 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
165 } 165 }
166 zdev->mio_capable = response->mio_addr_avail; 166 zdev->mio_capable = response->mio_addr_avail;
167 for (i = 0; i < PCI_BAR_COUNT; i++) { 167 for (i = 0; i < PCI_BAR_COUNT; i++) {
168 if (!(response->mio_valid & (1 << (PCI_BAR_COUNT - i - 1)))) 168 if (!(response->mio.valid & (1 << (PCI_BAR_COUNT - i - 1))))
169 continue; 169 continue;
170 170
171 zdev->bars[i].mio_wb = (void __iomem *) response->addr[i].wb; 171 zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb;
172 zdev->bars[i].mio_wt = (void __iomem *) response->addr[i].wt; 172 zdev->bars[i].mio_wt = (void __iomem *) response->mio.addr[i].wt;
173 } 173 }
174 return 0; 174 return 0;
175} 175}
diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c
index 85de790583f9..64b973f0e985 100644
--- a/arch/x86/kernel/ima_arch.c
+++ b/arch/x86/kernel/ima_arch.c
@@ -18,6 +18,11 @@ static enum efi_secureboot_mode get_sb_mode(void)
18 18
19 size = sizeof(secboot); 19 size = sizeof(secboot);
20 20
21 if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
22 pr_info("ima: secureboot mode unknown, no efi\n");
23 return efi_secureboot_mode_unknown;
24 }
25
21 /* Get variable contents into buffer */ 26 /* Get variable contents into buffer */
22 status = efi.get_variable(efi_SecureBoot_name, &efi_variable_guid, 27 status = efi.get_variable(efi_SecureBoot_name, &efi_variable_guid,
23 NULL, &size, &secboot); 28 NULL, &size, &secboot);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index acb179f78fdc..83aefd759846 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3122,6 +3122,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
3122 case KVM_CAP_MAX_VCPUS: 3122 case KVM_CAP_MAX_VCPUS:
3123 r = KVM_MAX_VCPUS; 3123 r = KVM_MAX_VCPUS;
3124 break; 3124 break;
3125 case KVM_CAP_MAX_VCPU_ID:
3126 r = KVM_MAX_VCPU_ID;
3127 break;
3125 case KVM_CAP_PV_MMU: /* obsolete */ 3128 case KVM_CAP_PV_MMU: /* obsolete */
3126 r = 0; 3129 r = 0;
3127 break; 3130 break;
diff --git a/block/blk-core.c b/block/blk-core.c
index 1bf83a0df0f6..ee1b35fe8572 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -282,35 +282,6 @@ void blk_set_queue_dying(struct request_queue *q)
282} 282}
283EXPORT_SYMBOL_GPL(blk_set_queue_dying); 283EXPORT_SYMBOL_GPL(blk_set_queue_dying);
284 284
285/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
286void blk_exit_queue(struct request_queue *q)
287{
288 /*
289 * Since the I/O scheduler exit code may access cgroup information,
290 * perform I/O scheduler exit before disassociating from the block
291 * cgroup controller.
292 */
293 if (q->elevator) {
294 ioc_clear_queue(q);
295 elevator_exit(q, q->elevator);
296 q->elevator = NULL;
297 }
298
299 /*
300 * Remove all references to @q from the block cgroup controller before
301 * restoring @q->queue_lock to avoid that restoring this pointer causes
302 * e.g. blkcg_print_blkgs() to crash.
303 */
304 blkcg_exit_queue(q);
305
306 /*
307 * Since the cgroup code may dereference the @q->backing_dev_info
308 * pointer, only decrease its reference count after having removed the
309 * association with the block cgroup controller.
310 */
311 bdi_put(q->backing_dev_info);
312}
313
314/** 285/**
315 * blk_cleanup_queue - shutdown a request queue 286 * blk_cleanup_queue - shutdown a request queue
316 * @q: request queue to shutdown 287 * @q: request queue to shutdown
@@ -346,14 +317,6 @@ void blk_cleanup_queue(struct request_queue *q)
346 del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer); 317 del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
347 blk_sync_queue(q); 318 blk_sync_queue(q);
348 319
349 /*
350 * I/O scheduler exit is only safe after the sysfs scheduler attribute
351 * has been removed.
352 */
353 WARN_ON_ONCE(q->kobj.state_in_sysfs);
354
355 blk_exit_queue(q);
356
357 if (queue_is_mq(q)) 320 if (queue_is_mq(q))
358 blk_mq_exit_queue(q); 321 blk_mq_exit_queue(q);
359 322
@@ -994,22 +957,8 @@ blk_qc_t generic_make_request(struct bio *bio)
994 * yet. 957 * yet.
995 */ 958 */
996 struct bio_list bio_list_on_stack[2]; 959 struct bio_list bio_list_on_stack[2];
997 blk_mq_req_flags_t flags = 0;
998 struct request_queue *q = bio->bi_disk->queue;
999 blk_qc_t ret = BLK_QC_T_NONE; 960 blk_qc_t ret = BLK_QC_T_NONE;
1000 961
1001 if (bio->bi_opf & REQ_NOWAIT)
1002 flags = BLK_MQ_REQ_NOWAIT;
1003 if (bio_flagged(bio, BIO_QUEUE_ENTERED))
1004 blk_queue_enter_live(q);
1005 else if (blk_queue_enter(q, flags) < 0) {
1006 if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
1007 bio_wouldblock_error(bio);
1008 else
1009 bio_io_error(bio);
1010 return ret;
1011 }
1012
1013 if (!generic_make_request_checks(bio)) 962 if (!generic_make_request_checks(bio))
1014 goto out; 963 goto out;
1015 964
@@ -1046,22 +995,11 @@ blk_qc_t generic_make_request(struct bio *bio)
1046 bio_list_init(&bio_list_on_stack[0]); 995 bio_list_init(&bio_list_on_stack[0]);
1047 current->bio_list = bio_list_on_stack; 996 current->bio_list = bio_list_on_stack;
1048 do { 997 do {
1049 bool enter_succeeded = true; 998 struct request_queue *q = bio->bi_disk->queue;
1050 999 blk_mq_req_flags_t flags = bio->bi_opf & REQ_NOWAIT ?
1051 if (unlikely(q != bio->bi_disk->queue)) { 1000 BLK_MQ_REQ_NOWAIT : 0;
1052 if (q)
1053 blk_queue_exit(q);
1054 q = bio->bi_disk->queue;
1055 flags = 0;
1056 if (bio->bi_opf & REQ_NOWAIT)
1057 flags = BLK_MQ_REQ_NOWAIT;
1058 if (blk_queue_enter(q, flags) < 0) {
1059 enter_succeeded = false;
1060 q = NULL;
1061 }
1062 }
1063 1001
1064 if (enter_succeeded) { 1002 if (likely(blk_queue_enter(q, flags) == 0)) {
1065 struct bio_list lower, same; 1003 struct bio_list lower, same;
1066 1004
1067 /* Create a fresh bio_list for all subordinate requests */ 1005 /* Create a fresh bio_list for all subordinate requests */
@@ -1069,6 +1007,8 @@ blk_qc_t generic_make_request(struct bio *bio)
1069 bio_list_init(&bio_list_on_stack[0]); 1007 bio_list_init(&bio_list_on_stack[0]);
1070 ret = q->make_request_fn(q, bio); 1008 ret = q->make_request_fn(q, bio);
1071 1009
1010 blk_queue_exit(q);
1011
1072 /* sort new bios into those for a lower level 1012 /* sort new bios into those for a lower level
1073 * and those for the same level 1013 * and those for the same level
1074 */ 1014 */
@@ -1095,8 +1035,6 @@ blk_qc_t generic_make_request(struct bio *bio)
1095 current->bio_list = NULL; /* deactivate */ 1035 current->bio_list = NULL; /* deactivate */
1096 1036
1097out: 1037out:
1098 if (q)
1099 blk_queue_exit(q);
1100 return ret; 1038 return ret;
1101} 1039}
1102EXPORT_SYMBOL(generic_make_request); 1040EXPORT_SYMBOL(generic_make_request);
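After this rework every bio dispatched from the loop pins its own queue with blk_queue_enter() and drops the reference as soon as ->make_request_fn() returns, instead of one reference being carried across the whole recursion and fixed up when the queue changes. A toy model of the enter/exit bracket around each dispatch (a counter stands in for the percpu usage reference):

#include <stdio.h>

/* A counter stands in for the queue's percpu usage reference. */
struct queue {
        int dying;
        int usage;
};

static int queue_enter(struct queue *q)
{
        if (q->dying)
                return -1;              /* blk_queue_enter() failure */
        q->usage++;
        return 0;
}

static void queue_exit(struct queue *q)
{
        q->usage--;
}

static void dispatch(struct queue *q, int bio)
{
        if (queue_enter(q) == 0) {
                printf("bio %d dispatched (usage %d)\n", bio, q->usage);
                queue_exit(q);          /* dropped right after dispatch */
        } else {
                printf("bio %d errored: queue dying\n", bio);
        }
}

int main(void)
{
        struct queue q = { 0, 0 };

        dispatch(&q, 1);
        q.dying = 1;
        dispatch(&q, 2);
        return 0;
}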
@@ -1200,7 +1138,9 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
1200 struct request *rq) 1138 struct request *rq)
1201{ 1139{
1202 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) { 1140 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
1203 printk(KERN_ERR "%s: over max size limit.\n", __func__); 1141 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
1142 __func__, blk_rq_sectors(rq),
1143 blk_queue_get_max_sectors(q, req_op(rq)));
1204 return -EIO; 1144 return -EIO;
1205 } 1145 }
1206 1146
@@ -1212,7 +1152,8 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
1212 */ 1152 */
1213 blk_recalc_rq_segments(rq); 1153 blk_recalc_rq_segments(rq);
1214 if (rq->nr_phys_segments > queue_max_segments(q)) { 1154 if (rq->nr_phys_segments > queue_max_segments(q)) {
1215 printk(KERN_ERR "%s: over max segments limit.\n", __func__); 1155 printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
1156 __func__, rq->nr_phys_segments, queue_max_segments(q));
1216 return -EIO; 1157 return -EIO;
1217 } 1158 }
1218 1159
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 48bebf00a5f3..f945621a0e8f 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -42,8 +42,8 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
42 /* 42 /*
43 * First do sequential mapping between CPUs and queues. 43 * First do sequential mapping between CPUs and queues.
44 * In case we still have CPUs to map, and we have some number of 44 * In case we still have CPUs to map, and we have some number of
45 * threads per cores then map sibling threads to the same queue for 45 * threads per cores then map sibling threads to the same queue
46 * performace optimizations. 46 * for performance optimizations.
47 */ 47 */
48 if (cpu < nr_queues) { 48 if (cpu < nr_queues) {
49 map[cpu] = cpu_to_queue_index(qmap, nr_queues, cpu); 49 map[cpu] = cpu_to_queue_index(qmap, nr_queues, cpu);
@@ -60,7 +60,11 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
60} 60}
61EXPORT_SYMBOL_GPL(blk_mq_map_queues); 61EXPORT_SYMBOL_GPL(blk_mq_map_queues);
62 62
63/* 63/**
64 * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
65 * @qmap: CPU to hardware queue map.
66 * @index: hardware queue index.
67 *
64 * We have no quick way of doing reverse lookups. This is only used at 68 * We have no quick way of doing reverse lookups. This is only used at
65 * queue init time, so runtime isn't important. 69 * queue init time, so runtime isn't important.
66 */ 70 */
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index ad4545a2a98b..b595a94c4d16 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -13,7 +13,7 @@
13 13
14/** 14/**
15 * blk_mq_pci_map_queues - provide a default queue mapping for PCI device 15 * blk_mq_pci_map_queues - provide a default queue mapping for PCI device
16 * @set: tagset to provide the mapping for 16 * @qmap: CPU to hardware queue map.
17 * @pdev: PCI device associated with @set. 17 * @pdev: PCI device associated with @set.
18 * @offset: Offset to use for the pci irq vector 18 * @offset: Offset to use for the pci irq vector
19 * 19 *
diff --git a/block/blk-mq-rdma.c b/block/blk-mq-rdma.c
index cc921e6ba709..14f968e58b8f 100644
--- a/block/blk-mq-rdma.c
+++ b/block/blk-mq-rdma.c
@@ -8,8 +8,8 @@
8 8
9/** 9/**
10 * blk_mq_rdma_map_queues - provide a default queue mapping for rdma device 10 * blk_mq_rdma_map_queues - provide a default queue mapping for rdma device
11 * @set: tagset to provide the mapping for 11 * @map: CPU to hardware queue map.
12 * @dev: rdma device associated with @set. 12 * @dev: rdma device to provide a mapping for.
13 * @first_vec: first interrupt vectors to use for queues (usually 0) 13 * @first_vec: first interrupt vectors to use for queues (usually 0)
14 * 14 *
15 * This function assumes the rdma device @dev has at least as many available 15 * This function assumes the rdma device @dev has at least as many available
diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c
index 75a52c18a8f6..488341628256 100644
--- a/block/blk-mq-virtio.c
+++ b/block/blk-mq-virtio.c
@@ -11,8 +11,8 @@
11 11
12/** 12/**
13 * blk_mq_virtio_map_queues - provide a default queue mapping for virtio device 13 * blk_mq_virtio_map_queues - provide a default queue mapping for virtio device
14 * @set: tagset to provide the mapping for 14 * @qmap: CPU to hardware queue map.
15 * @vdev: virtio device associated with @set. 15 * @vdev: virtio device to provide a mapping for.
16 * @first_vec: first interrupt vectors to use for queues (usually 0) 16 * @first_vec: first interrupt vectors to use for queues (usually 0)
17 * 17 *
18 * This function assumes the virtio device @vdev has at least as many available 18 * This function assumes the virtio device @vdev has at least as many available
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 32b8ad3d341b..ce0f5f4ede70 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2865,7 +2865,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2865 goto err_exit; 2865 goto err_exit;
2866 2866
2867 if (blk_mq_alloc_ctxs(q)) 2867 if (blk_mq_alloc_ctxs(q))
2868 goto err_exit; 2868 goto err_poll;
2869 2869
2870 /* init q->mq_kobj and sw queues' kobjects */ 2870 /* init q->mq_kobj and sw queues' kobjects */
2871 blk_mq_sysfs_init(q); 2871 blk_mq_sysfs_init(q);
@@ -2929,6 +2929,9 @@ err_hctxs:
2929 kfree(q->queue_hw_ctx); 2929 kfree(q->queue_hw_ctx);
2930err_sys_init: 2930err_sys_init:
2931 blk_mq_sysfs_deinit(q); 2931 blk_mq_sysfs_deinit(q);
2932err_poll:
2933 blk_stat_free_callback(q->poll_cb);
2934 q->poll_cb = NULL;
2932err_exit: 2935err_exit:
2933 q->mq_ops = NULL; 2936 q->mq_ops = NULL;
2934 return ERR_PTR(-ENOMEM); 2937 return ERR_PTR(-ENOMEM);
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 3f55b56f24bc..659ccb8b693f 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -209,9 +209,10 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr,
209 209
210/** 210/**
211 * rq_qos_wait - throttle on a rqw if we need to 211 * rq_qos_wait - throttle on a rqw if we need to
212 * @private_data - caller provided specific data 212 * @rqw: rqw to throttle on
213 * @acquire_inflight_cb - inc the rqw->inflight counter if we can 213 * @private_data: caller provided specific data
214 * @cleanup_cb - the callback to cleanup in case we race with a waker 214 * @acquire_inflight_cb: inc the rqw->inflight counter if we can
215 * @cleanup_cb: the callback to cleanup in case we race with a waker
215 * 216 *
216 * This provides a uniform place for the rq_qos users to do their throttling. 217 * This provides a uniform place for the rq_qos users to do their throttling.
217 * Since you can end up with a lot of things sleeping at once, this manages the 218 * Since you can end up with a lot of things sleeping at once, this manages the
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index a16a02c52a85..75b5281cc577 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -840,6 +840,36 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
840 kmem_cache_free(blk_requestq_cachep, q); 840 kmem_cache_free(blk_requestq_cachep, q);
841} 841}
842 842
843/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
844static void blk_exit_queue(struct request_queue *q)
845{
846 /*
847 * Since the I/O scheduler exit code may access cgroup information,
848 * perform I/O scheduler exit before disassociating from the block
849 * cgroup controller.
850 */
851 if (q->elevator) {
852 ioc_clear_queue(q);
853 elevator_exit(q, q->elevator);
854 q->elevator = NULL;
855 }
856
857 /*
858 * Remove all references to @q from the block cgroup controller before
859 * restoring @q->queue_lock to avoid that restoring this pointer causes
860 * e.g. blkcg_print_blkgs() to crash.
861 */
862 blkcg_exit_queue(q);
863
864 /*
865 * Since the cgroup code may dereference the @q->backing_dev_info
866 * pointer, only decrease its reference count after having removed the
867 * association with the block cgroup controller.
868 */
869 bdi_put(q->backing_dev_info);
870}
871
872
843/** 873/**
844 * __blk_release_queue - release a request queue 874 * __blk_release_queue - release a request queue
845 * @work: pointer to the release_work member of the request queue to be released 875 * @work: pointer to the release_work member of the request queue to be released
@@ -860,23 +890,10 @@ static void __blk_release_queue(struct work_struct *work)
860 blk_stat_remove_callback(q, q->poll_cb); 890 blk_stat_remove_callback(q, q->poll_cb);
861 blk_stat_free_callback(q->poll_cb); 891 blk_stat_free_callback(q->poll_cb);
862 892
863 if (!blk_queue_dead(q)) {
864 /*
865 * Last reference was dropped without having called
866 * blk_cleanup_queue().
867 */
868 WARN_ONCE(blk_queue_init_done(q),
869 "request queue %p has been registered but blk_cleanup_queue() has not been called for that queue\n",
870 q);
871 blk_exit_queue(q);
872 }
873
874 WARN(blk_queue_root_blkg(q),
875 "request queue %p is being released but it has not yet been removed from the blkcg controller\n",
876 q);
877
878 blk_free_queue_stats(q->stats); 893 blk_free_queue_stats(q->stats);
879 894
895 blk_exit_queue(q);
896
880 blk_queue_free_zone_bitmaps(q); 897 blk_queue_free_zone_bitmaps(q);
881 898
882 if (queue_is_mq(q)) 899 if (queue_is_mq(q))
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 1b97a73d2fb1..9ea7c0ecad10 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1220,7 +1220,7 @@ static bool throtl_can_upgrade(struct throtl_data *td,
1220 struct throtl_grp *this_tg); 1220 struct throtl_grp *this_tg);
1221/** 1221/**
1222 * throtl_pending_timer_fn - timer function for service_queue->pending_timer 1222 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1223 * @arg: the throtl_service_queue being serviced 1223 * @t: the pending_timer member of the throtl_service_queue being serviced
1224 * 1224 *
1225 * This timer is armed when a child throtl_grp with active bios becomes 1225 * This timer is armed when a child throtl_grp with active bios becomes
1226 * pending and queued on the service_queue's pending_tree and expires when 1226 * pending and queued on the service_queue's pending_tree and expires when
diff --git a/block/blk.h b/block/blk.h
index e27fd1512e4b..91b3581b7c7a 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -50,7 +50,6 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
50 int node, int cmd_size, gfp_t flags); 50 int node, int cmd_size, gfp_t flags);
51void blk_free_flush_queue(struct blk_flush_queue *q); 51void blk_free_flush_queue(struct blk_flush_queue *q);
52 52
53void blk_exit_queue(struct request_queue *q);
54void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 53void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
55 struct bio *bio); 54 struct bio *bio);
56void blk_freeze_queue(struct request_queue *q); 55void blk_freeze_queue(struct request_queue *q);
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index b898a1cdf872..785dd58947f1 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -354,6 +354,7 @@ static const struct blk_mq_ops bsg_mq_ops = {
354 * @dev: device to attach bsg device to 354 * @dev: device to attach bsg device to
355 * @name: device to give bsg device 355 * @name: device to give bsg device
356 * @job_fn: bsg job handler 356 * @job_fn: bsg job handler
357 * @timeout: timeout handler function pointer
357 * @dd_job_size: size of LLD data needed for each job 358 * @dd_job_size: size of LLD data needed for each job
358 */ 359 */
359struct request_queue *bsg_setup_queue(struct device *dev, const char *name, 360struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
diff --git a/block/genhd.c b/block/genhd.c
index ad6826628e79..24654e1d83e6 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -532,8 +532,8 @@ void blk_free_devt(dev_t devt)
532 } 532 }
533} 533}
534 534
535/** 535/*
536 * We invalidate devt by assigning NULL pointer for devt in idr. 536 * We invalidate devt by assigning NULL pointer for devt in idr.
537 */ 537 */
538void blk_invalidate_devt(dev_t devt) 538void blk_invalidate_devt(dev_t devt)
539{ 539{
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index 6db573f33219..fe5d970e2e60 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -19,7 +19,7 @@
19#include "check.h" 19#include "check.h"
20#include "msdos.h" 20#include "msdos.h"
21 21
22/** 22/*
23 * ldm_debug/info/error/crit - Output an error message 23 * ldm_debug/info/error/crit - Output an error message
24 * @f: A printf format string containing the message 24 * @f: A printf format string containing the message
25 * @...: Variables to substitute into @f 25 * @...: Variables to substitute into @f
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index adc8979b02b6..e54956ae93d3 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -944,8 +944,8 @@ static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev)
944 u32 sys_target = acpi_target_system_state(); 944 u32 sys_target = acpi_target_system_state();
945 int ret, state; 945 int ret, state;
946 946
947 if (!pm_runtime_suspended(dev) || !adev || 947 if (!pm_runtime_suspended(dev) || !adev || (adev->wakeup.flags.valid &&
948 device_may_wakeup(dev) != !!adev->wakeup.prepare_count) 948 device_may_wakeup(dev) != !!adev->wakeup.prepare_count))
949 return true; 949 return true;
950 950
951 if (sys_target == ACPI_STATE_S0) 951 if (sys_target == ACPI_STATE_S0)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index e52f1238d2d6..a34deccd7317 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -1132,15 +1132,19 @@ void __init acpi_no_s4_hw_signature(void)
1132 nosigcheck = true; 1132 nosigcheck = true;
1133} 1133}
1134 1134
1135static int acpi_hibernation_begin(void) 1135static int acpi_hibernation_begin(pm_message_t stage)
1136{ 1136{
1137 int error; 1137 if (!nvs_nosave) {
1138 int error = suspend_nvs_alloc();
1139 if (error)
1140 return error;
1141 }
1138 1142
1139 error = nvs_nosave ? 0 : suspend_nvs_alloc(); 1143 if (stage.event == PM_EVENT_HIBERNATE)
1140 if (!error) 1144 pm_set_suspend_via_firmware();
1141 acpi_pm_start(ACPI_STATE_S4);
1142 1145
1143 return error; 1146 acpi_pm_start(ACPI_STATE_S4);
1147 return 0;
1144} 1148}
1145 1149
1146static int acpi_hibernation_enter(void) 1150static int acpi_hibernation_enter(void)
@@ -1200,7 +1204,7 @@ static const struct platform_hibernation_ops acpi_hibernation_ops = {
1200 * function is used if the pre-ACPI 2.0 suspend ordering has been 1204 * function is used if the pre-ACPI 2.0 suspend ordering has been
1201 * requested. 1205 * requested.
1202 */ 1206 */
1203static int acpi_hibernation_begin_old(void) 1207static int acpi_hibernation_begin_old(pm_message_t stage)
1204{ 1208{
1205 int error; 1209 int error;
1206 /* 1210 /*
@@ -1211,16 +1215,21 @@ static int acpi_hibernation_begin_old(void)
1211 acpi_sleep_tts_switch(ACPI_STATE_S4); 1215 acpi_sleep_tts_switch(ACPI_STATE_S4);
1212 1216
1213 error = acpi_sleep_prepare(ACPI_STATE_S4); 1217 error = acpi_sleep_prepare(ACPI_STATE_S4);
1218 if (error)
1219 return error;
1214 1220
1215 if (!error) { 1221 if (!nvs_nosave) {
1216 if (!nvs_nosave) 1222 error = suspend_nvs_alloc();
1217 error = suspend_nvs_alloc(); 1223 if (error)
1218 if (!error) { 1224 return error;
1219 acpi_target_sleep_state = ACPI_STATE_S4;
1220 acpi_scan_lock_acquire();
1221 }
1222 } 1225 }
1223 return error; 1226
1227 if (stage.event == PM_EVENT_HIBERNATE)
1228 pm_set_suspend_via_firmware();
1229
1230 acpi_target_sleep_state = ACPI_STATE_S4;
1231 acpi_scan_lock_acquire();
1232 return 0;
1224} 1233}
1225 1234
1226/* 1235/*
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 102d79575895..f11b7dc16e9d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -945,9 +945,20 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
945 if (!file) 945 if (!file)
946 goto out; 946 goto out;
947 947
948 /*
949 * If we don't hold exclusive handle for the device, upgrade to it
950 * here to avoid changing device under exclusive owner.
951 */
952 if (!(mode & FMODE_EXCL)) {
953 bdgrab(bdev);
954 error = blkdev_get(bdev, mode | FMODE_EXCL, loop_set_fd);
955 if (error)
956 goto out_putf;
957 }
958
948 error = mutex_lock_killable(&loop_ctl_mutex); 959 error = mutex_lock_killable(&loop_ctl_mutex);
949 if (error) 960 if (error)
950 goto out_putf; 961 goto out_bdev;
951 962
952 error = -EBUSY; 963 error = -EBUSY;
953 if (lo->lo_state != Lo_unbound) 964 if (lo->lo_state != Lo_unbound)
@@ -1012,10 +1023,15 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
1012 mutex_unlock(&loop_ctl_mutex); 1023 mutex_unlock(&loop_ctl_mutex);
1013 if (partscan) 1024 if (partscan)
1014 loop_reread_partitions(lo, bdev); 1025 loop_reread_partitions(lo, bdev);
1026 if (!(mode & FMODE_EXCL))
1027 blkdev_put(bdev, mode | FMODE_EXCL);
1015 return 0; 1028 return 0;
1016 1029
1017out_unlock: 1030out_unlock:
1018 mutex_unlock(&loop_ctl_mutex); 1031 mutex_unlock(&loop_ctl_mutex);
1032out_bdev:
1033 if (!(mode & FMODE_EXCL))
1034 blkdev_put(bdev, mode | FMODE_EXCL);
1019out_putf: 1035out_putf:
1020 fput(file); 1036 fput(file);
1021out: 1037out:
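loop_set_fd() now temporarily upgrades to an exclusive block-device reference when the caller did not open the device with FMODE_EXCL, so the backing file cannot be changed under an existing exclusive owner; the reference is dropped on both the success and error paths. A standalone model of the upgrade bracket (a simple counter replaces blkdev_get/blkdev_put):

#include <stdio.h>

#define FMODE_EXCL 0x1

static int excl_holders;

static int bdev_get_excl(void)
{
        if (excl_holders)
                return -1;      /* somebody else holds it exclusively */
        excl_holders++;
        return 0;
}

static void bdev_put_excl(void)
{
        excl_holders--;
}

/* Upgrade only when the caller is not already exclusive. */
static int set_fd(unsigned int mode)
{
        int upgraded = !(mode & FMODE_EXCL);

        if (upgraded && bdev_get_excl() < 0)
                return -1;
        printf("configure loop device (excl holders: %d)\n", excl_holders);
        if (upgraded)
                bdev_put_excl();        /* dropped on success and error */
        return 0;
}

int main(void)
{
        set_fd(0);              /* upgrades around the configuration */
        set_fd(FMODE_EXCL);     /* caller already exclusive: no upgrade */
        return 0;
}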
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index 745ed43a22d6..2fd717d8dd30 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -503,6 +503,7 @@ static int mlxcpld_i2c_probe(struct platform_device *pdev)
503 platform_set_drvdata(pdev, priv); 503 platform_set_drvdata(pdev, priv);
504 504
505 priv->dev = &pdev->dev; 505 priv->dev = &pdev->dev;
506 priv->base_addr = MLXPLAT_CPLD_LPC_I2C_BASE_ADDR;
506 507
507 /* Register with i2c layer */ 508 /* Register with i2c layer */
508 mlxcpld_i2c_adapter.timeout = usecs_to_jiffies(MLXCPLD_I2C_XFER_TO); 509 mlxcpld_i2c_adapter.timeout = usecs_to_jiffies(MLXCPLD_I2C_XFER_TO);
@@ -518,7 +519,6 @@ static int mlxcpld_i2c_probe(struct platform_device *pdev)
518 mlxcpld_i2c_adapter.nr = pdev->id; 519 mlxcpld_i2c_adapter.nr = pdev->id;
519 priv->adap = mlxcpld_i2c_adapter; 520 priv->adap = mlxcpld_i2c_adapter;
520 priv->adap.dev.parent = &pdev->dev; 521 priv->adap.dev.parent = &pdev->dev;
521 priv->base_addr = MLXPLAT_CPLD_LPC_I2C_BASE_ADDR;
522 i2c_set_adapdata(&priv->adap, priv); 522 i2c_set_adapdata(&priv->adap, priv);
523 523
524 err = i2c_add_numbered_adapter(&priv->adap); 524 err = i2c_add_numbered_adapter(&priv->adap);
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index f14d4b3fab44..f724c8e6b360 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -351,7 +351,7 @@ static int synquacer_i2c_doxfer(struct synquacer_i2c *i2c,
351 /* wait 2 clock periods to ensure the stop has been through the bus */ 351 /* wait 2 clock periods to ensure the stop has been through the bus */
352 udelay(DIV_ROUND_UP(2 * 1000, i2c->speed_khz)); 352 udelay(DIV_ROUND_UP(2 * 1000, i2c->speed_khz));
353 353
354 return 0; 354 return ret;
355} 355}
356 356
357static irqreturn_t synquacer_i2c_isr(int irq, void *dev_id) 357static irqreturn_t synquacer_i2c_isr(int irq, void *dev_id)
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index e83bf56c1d1c..2ea4585d18c5 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -275,6 +275,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
275 msgs[i].len < 1 || msgs[i].buf[0] < 1 || 275 msgs[i].len < 1 || msgs[i].buf[0] < 1 ||
276 msgs[i].len < msgs[i].buf[0] + 276 msgs[i].len < msgs[i].buf[0] +
277 I2C_SMBUS_BLOCK_MAX) { 277 I2C_SMBUS_BLOCK_MAX) {
278 i++;
278 res = -EINVAL; 279 res = -EINVAL;
279 break; 280 break;
280 } 281 }
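
The added i++ above makes the unwind loop free the buffer that was already copied for the very message that failed validation; without it, that buffer leaks. A small model of the off-by-one, assuming a simplified alloc-then-validate loop:

/* Sketch of the i2cdev_ioctl_rdwr() cleanup fix: buffers are
 * allocated one per message, so a validation failure after the
 * current allocation must bump the index before breaking, or the
 * unwind loop (which frees indices 0..i-1) misses one buffer. */
#include <stdlib.h>

static int process(int n)
{
        char *buf[8] = { 0 };
        int i, res = 0;

        for (i = 0; i < n; i++) {
                buf[i] = malloc(16);
                if (!buf[i]) {
                        res = -1;
                        break;
                }
                if (i == 2) {           /* validation fails after alloc */
                        i++;            /* include buf[2] in the unwind */
                        res = -1;
                        break;
                }
        }
        while (i-- > 0)
                free(buf[i]);
        return res;
}

int main(void)
{
        process(5);     /* leak-free even though message 2 fails */
        return 0;
}
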
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a209199f3af6..09b8ff0d856a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3034,7 +3034,8 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
3034{ 3034{
3035 struct pci_dev *pdev = NULL; 3035 struct pci_dev *pdev = NULL;
3036 struct dmar_drhd_unit *drhd; 3036 struct dmar_drhd_unit *drhd;
3037 struct intel_iommu *iommu; 3037 /* To avoid a -Wunused-but-set-variable warning. */
3038 struct intel_iommu *iommu __maybe_unused;
3038 struct device *dev; 3039 struct device *dev;
3039 int i; 3040 int i;
3040 int ret = 0; 3041 int ret = 0;
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index e9ae7f87ab90..e3da7c03da1b 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -164,11 +164,6 @@ static void led_blink_setup(struct led_classdev *led_cdev,
164 unsigned long *delay_on, 164 unsigned long *delay_on,
165 unsigned long *delay_off) 165 unsigned long *delay_off)
166{ 166{
167 /*
168 * If "set brightness to 0" is pending in workqueue, we don't
169 * want that to be reordered after blink_set()
170 */
171 flush_work(&led_cdev->set_brightness_work);
172 if (!test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags) && 167 if (!test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags) &&
173 led_cdev->blink_set && 168 led_cdev->blink_set &&
174 !led_cdev->blink_set(led_cdev, delay_on, delay_off)) 169 !led_cdev->blink_set(led_cdev, delay_on, delay_off))
diff --git a/drivers/leds/trigger/ledtrig-timer.c b/drivers/leds/trigger/ledtrig-timer.c
index ca898c1383be..427fc3c303d5 100644
--- a/drivers/leds/trigger/ledtrig-timer.c
+++ b/drivers/leds/trigger/ledtrig-timer.c
@@ -113,6 +113,11 @@ static int timer_trig_activate(struct led_classdev *led_cdev)
113 led_cdev->flags &= ~LED_INIT_DEFAULT_TRIGGER; 113 led_cdev->flags &= ~LED_INIT_DEFAULT_TRIGGER;
114 } 114 }
115 115
116 /*
117 * If "set brightness to 0" is pending in workqueue, we don't
118 * want that to be reordered after blink_set()
119 */
120 flush_work(&led_cdev->set_brightness_work);
116 led_blink_set(led_cdev, &led_cdev->blink_delay_on, 121 led_blink_set(led_cdev, &led_cdev->blink_delay_on,
117 &led_cdev->blink_delay_off); 122 &led_cdev->blink_delay_off);
118 123
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index c5e1a097d7e3..1897847ceb0c 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -733,7 +733,8 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
733 if (!adev || !acpi_device_power_manageable(adev)) 733 if (!adev || !acpi_device_power_manageable(adev))
734 return false; 734 return false;
735 735
736 if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count) 736 if (adev->wakeup.flags.valid &&
737 device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
737 return true; 738 return true;
738 739
739 if (acpi_target_system_state() == ACPI_STATE_S0) 740 if (acpi_target_system_state() == ACPI_STATE_S0)
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index cae630fe6387..5eadbc3d0969 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -734,6 +734,8 @@ static int pci_pm_suspend(struct device *dev)
734 struct pci_dev *pci_dev = to_pci_dev(dev); 734 struct pci_dev *pci_dev = to_pci_dev(dev);
735 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 735 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
736 736
737 pci_dev->skip_bus_pm = false;
738
737 if (pci_has_legacy_pm_support(pci_dev)) 739 if (pci_has_legacy_pm_support(pci_dev))
738 return pci_legacy_suspend(dev, PMSG_SUSPEND); 740 return pci_legacy_suspend(dev, PMSG_SUSPEND);
739 741
@@ -827,7 +829,20 @@ static int pci_pm_suspend_noirq(struct device *dev)
827 } 829 }
828 } 830 }
829 831
830 if (!pci_dev->state_saved) { 832 if (pci_dev->skip_bus_pm) {
833 /*
834 * The function is running for the second time in a row without
835 * going through full resume, which is possible only during
836 * suspend-to-idle in a spurious wakeup case. Moreover, the
837 * device was originally left in D0, so its power state should
838 * not be changed here and the device register values saved
839 * originally should be restored on resume again.
840 */
841 pci_dev->state_saved = true;
842 } else if (pci_dev->state_saved) {
843 if (pci_dev->current_state == PCI_D0)
844 pci_dev->skip_bus_pm = true;
845 } else {
831 pci_save_state(pci_dev); 846 pci_save_state(pci_dev);
832 if (pci_power_manageable(pci_dev)) 847 if (pci_power_manageable(pci_dev))
833 pci_prepare_to_sleep(pci_dev); 848 pci_prepare_to_sleep(pci_dev);
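
The suspend_noirq hunk above is a three-way decision, and the earlier pci_pm_suspend() hunk clears skip_bus_pm at the start of every full suspend, so the flag only survives the spurious-wakeup retry path. A userspace model of just that decision logic — not kernel API, with D0/D3 reduced to an enum:

/* Model of the pci_pm_suspend_noirq() branches in the hunk: a
 * re-suspend without full resume keeps the originally saved
 * registers and skips bus-level power management. */
#include <stdbool.h>
#include <stdio.h>

enum power_state { D0, D3 };

struct dev {
        bool skip_bus_pm;
        bool state_saved;
        enum power_state current_state;
};

static void suspend_noirq(struct dev *d)
{
        if (d->skip_bus_pm) {
                /* second suspend in a row: restore the original save */
                d->state_saved = true;
        } else if (d->state_saved) {
                if (d->current_state == D0)
                        d->skip_bus_pm = true;  /* driver left device in D0 */
        } else {
                d->state_saved = true;          /* pci_save_state() */
                d->current_state = D3;          /* pci_prepare_to_sleep() */
        }
}

int main(void)
{
        struct dev d = { .current_state = D0, .state_saved = true };

        suspend_noirq(&d);              /* first pass: marks skip_bus_pm */
        suspend_noirq(&d);              /* spurious-wakeup re-suspend */
        printf("skip_bus_pm=%d state=%s\n", d.skip_bus_pm,
               d.current_state == D0 ? "D0" : "D3");
        return 0;
}
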
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index cc30e4f07fff..b9fc502c58c2 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -254,19 +254,37 @@ static inline int ap_test_config_card_id(unsigned int id)
254} 254}
255 255
256/* 256/*
257 * ap_test_config_domain(): Test, whether an AP usage domain is configured. 257 * ap_test_config_usage_domain(): Test, whether an AP usage domain
258 * is configured.
258 * @domain AP usage domain ID 259 * @domain AP usage domain ID
259 * 260 *
260 * Returns 0 if the usage domain is not configured 261 * Returns 0 if the usage domain is not configured
261 * 1 if the usage domain is configured or 262 * 1 if the usage domain is configured or
262 * if the configuration information is not available 263 * if the configuration information is not available
263 */ 264 */
264static inline int ap_test_config_domain(unsigned int domain) 265int ap_test_config_usage_domain(unsigned int domain)
265{ 266{
266 if (!ap_configuration) /* QCI not supported */ 267 if (!ap_configuration) /* QCI not supported */
267 return domain < 16; 268 return domain < 16;
268 return ap_test_config(ap_configuration->aqm, domain); 269 return ap_test_config(ap_configuration->aqm, domain);
269} 270}
271EXPORT_SYMBOL(ap_test_config_usage_domain);
272
273/*
274 * ap_test_config_ctrl_domain(): Test, whether an AP control domain
275 * is configured.
276 * @domain AP control domain ID
277 *
278 * Returns 1 if the control domain is configured
279 * 0 in all other cases
280 */
281int ap_test_config_ctrl_domain(unsigned int domain)
282{
283 if (!ap_configuration) /* QCI not supported */
284 return 0;
285 return ap_test_config(ap_configuration->adm, domain);
286}
287EXPORT_SYMBOL(ap_test_config_ctrl_domain);
270 288
271/** 289/**
272 * ap_query_queue(): Check if an AP queue is available. 290 * ap_query_queue(): Check if an AP queue is available.
@@ -1267,7 +1285,7 @@ static void ap_select_domain(void)
1267 best_domain = -1; 1285 best_domain = -1;
1268 max_count = 0; 1286 max_count = 0;
1269 for (i = 0; i < AP_DOMAINS; i++) { 1287 for (i = 0; i < AP_DOMAINS; i++) {
1270 if (!ap_test_config_domain(i) || 1288 if (!ap_test_config_usage_domain(i) ||
1271 !test_bit_inv(i, ap_perms.aqm)) 1289 !test_bit_inv(i, ap_perms.aqm))
1272 continue; 1290 continue;
1273 count = 0; 1291 count = 0;
@@ -1442,7 +1460,7 @@ static void _ap_scan_bus_adapter(int id)
1442 (void *)(long) qid, 1460 (void *)(long) qid,
1443 __match_queue_device_with_qid); 1461 __match_queue_device_with_qid);
1444 aq = dev ? to_ap_queue(dev) : NULL; 1462 aq = dev ? to_ap_queue(dev) : NULL;
1445 if (!ap_test_config_domain(dom)) { 1463 if (!ap_test_config_usage_domain(dom)) {
1446 if (dev) { 1464 if (dev) {
1447 /* Queue device exists but has been 1465 /* Queue device exists but has been
1448 * removed from configuration. 1466 * removed from configuration.
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 15a98a673c5c..6f3cf37776ca 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -251,6 +251,9 @@ void ap_wait(enum ap_wait wait);
251void ap_request_timeout(struct timer_list *t); 251void ap_request_timeout(struct timer_list *t);
252void ap_bus_force_rescan(void); 252void ap_bus_force_rescan(void);
253 253
254int ap_test_config_usage_domain(unsigned int domain);
255int ap_test_config_ctrl_domain(unsigned int domain);
256
254void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg); 257void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
255struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); 258struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
256void ap_queue_prepare_remove(struct ap_queue *aq); 259void ap_queue_prepare_remove(struct ap_queue *aq);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 852b8c2299c1..1058b4b5cc1e 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -822,7 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
822 struct ap_message ap_msg; 822 struct ap_message ap_msg;
823 unsigned int weight, pref_weight; 823 unsigned int weight, pref_weight;
824 unsigned int func_code; 824 unsigned int func_code;
825 unsigned short *domain; 825 unsigned short *domain, tdom;
826 int qid = 0, rc = -ENODEV; 826 int qid = 0, rc = -ENODEV;
827 struct module *mod; 827 struct module *mod;
828 828
@@ -834,6 +834,17 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
834 if (rc) 834 if (rc)
835 goto out; 835 goto out;
836 836
837 /*
838 * If a valid target domain is set and this domain is NOT a usage
839 * domain but a control only domain, use the default domain as target.
840 */
841 tdom = *domain;
842 if (tdom >= 0 && tdom < AP_DOMAINS &&
843 !ap_test_config_usage_domain(tdom) &&
844 ap_test_config_ctrl_domain(tdom) &&
845 ap_domain_index >= 0)
846 tdom = ap_domain_index;
847
837 pref_zc = NULL; 848 pref_zc = NULL;
838 pref_zq = NULL; 849 pref_zq = NULL;
839 spin_lock(&zcrypt_list_lock); 850 spin_lock(&zcrypt_list_lock);
@@ -856,8 +867,8 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
856 /* check if device is online and eligible */ 867 /* check if device is online and eligible */
857 if (!zq->online || 868 if (!zq->online ||
858 !zq->ops->send_cprb || 869 !zq->ops->send_cprb ||
859 ((*domain != (unsigned short) AUTOSELECT) && 870 (tdom != (unsigned short) AUTOSELECT &&
860 (*domain != AP_QID_QUEUE(zq->queue->qid)))) 871 tdom != AP_QID_QUEUE(zq->queue->qid)))
861 continue; 872 continue;
862 /* check if device node has admission for this queue */ 873 /* check if device node has admission for this queue */
863 if (!zcrypt_check_queue(perms, 874 if (!zcrypt_check_queue(perms,
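
The retarget logic above sends a CPRB aimed at a control-only domain to the default usage domain instead, leaving every other target untouched. A small sketch of the remap as a pure function — usage_ok()/ctrl_ok() are assumed stand-ins for ap_test_config_usage_domain()/ap_test_config_ctrl_domain(), with faked bitmaps:

/* Model of the target-domain remap added to _zcrypt_send_cprb(). */
#include <stdbool.h>
#include <stdio.h>

#define AP_DOMAINS 256

static unsigned short remap_domain(unsigned short tdom,
                                   int default_domain,
                                   bool (*usage_ok)(unsigned short),
                                   bool (*ctrl_ok)(unsigned short))
{
        if (tdom < AP_DOMAINS && !usage_ok(tdom) && ctrl_ok(tdom) &&
            default_domain >= 0)
                return (unsigned short)default_domain;
        return tdom;
}

static bool usage_ok(unsigned short d) { return d < 16; }  /* assumed map */
static bool ctrl_ok(unsigned short d)  { return d < 32; }  /* assumed map */

int main(void)
{
        /* domain 20 is control-only here, so it falls back to domain 6 */
        printf("%u\n", (unsigned)remap_domain(20, 6, usage_ok, ctrl_ok));
        return 0;
}
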
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index c6acca521ffe..31e8a7240fd7 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -167,6 +167,7 @@ extern const struct attribute_group *zfcp_port_attr_groups[];
167extern struct mutex zfcp_sysfs_port_units_mutex; 167extern struct mutex zfcp_sysfs_port_units_mutex;
168extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; 168extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
169extern struct device_attribute *zfcp_sysfs_shost_attrs[]; 169extern struct device_attribute *zfcp_sysfs_shost_attrs[];
170bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port);
170 171
171/* zfcp_unit.c */ 172/* zfcp_unit.c */
172extern int zfcp_unit_add(struct zfcp_port *, u64); 173extern int zfcp_unit_add(struct zfcp_port *, u64);
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 221d0dfb8493..e9ded2befa0d 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -129,6 +129,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
129 129
130 zfcp_sdev->erp_action.port = port; 130 zfcp_sdev->erp_action.port = port;
131 131
132 mutex_lock(&zfcp_sysfs_port_units_mutex);
133 if (zfcp_sysfs_port_is_removing(port)) {
134 /* port is already gone */
135 mutex_unlock(&zfcp_sysfs_port_units_mutex);
136 put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */
137 return -ENXIO;
138 }
139 mutex_unlock(&zfcp_sysfs_port_units_mutex);
140
132 unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev)); 141 unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
133 if (unit) 142 if (unit)
134 put_device(&unit->dev); 143 put_device(&unit->dev);
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index b277be6f7611..af197e2b3e69 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -235,6 +235,53 @@ static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
235 235
236DEFINE_MUTEX(zfcp_sysfs_port_units_mutex); 236DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
237 237
238static void zfcp_sysfs_port_set_removing(struct zfcp_port *const port)
239{
240 lockdep_assert_held(&zfcp_sysfs_port_units_mutex);
241 atomic_set(&port->units, -1);
242}
243
244bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port)
245{
246 lockdep_assert_held(&zfcp_sysfs_port_units_mutex);
247 return atomic_read(&port->units) == -1;
248}
249
250static bool zfcp_sysfs_port_in_use(struct zfcp_port *const port)
251{
252 struct zfcp_adapter *const adapter = port->adapter;
253 unsigned long flags;
254 struct scsi_device *sdev;
255 bool in_use = true;
256
257 mutex_lock(&zfcp_sysfs_port_units_mutex);
258 if (atomic_read(&port->units) > 0)
259 goto unlock_port_units_mutex; /* zfcp_unit(s) under port */
260
261 spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
262 __shost_for_each_device(sdev, adapter->scsi_host) {
263 const struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
264
265 if (sdev->sdev_state == SDEV_DEL ||
266 sdev->sdev_state == SDEV_CANCEL)
267 continue;
268 if (zsdev->port != port)
269 continue;
270 /* alive scsi_device under port of interest */
271 goto unlock_host_lock;
272 }
273
274 /* port is about to be removed, so no more unit_add or slave_alloc */
275 zfcp_sysfs_port_set_removing(port);
276 in_use = false;
277
278unlock_host_lock:
279 spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
280unlock_port_units_mutex:
281 mutex_unlock(&zfcp_sysfs_port_units_mutex);
282 return in_use;
283}
284
238static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, 285static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
239 struct device_attribute *attr, 286 struct device_attribute *attr,
240 const char *buf, size_t count) 287 const char *buf, size_t count)
@@ -257,15 +304,11 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
257 else 304 else
258 retval = 0; 305 retval = 0;
259 306
260 mutex_lock(&zfcp_sysfs_port_units_mutex); 307 if (zfcp_sysfs_port_in_use(port)) {
261 if (atomic_read(&port->units) > 0) {
262 retval = -EBUSY; 308 retval = -EBUSY;
263 mutex_unlock(&zfcp_sysfs_port_units_mutex); 309 put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */
264 goto out; 310 goto out;
265 } 311 }
266 /* port is about to be removed, so no more unit_add */
267 atomic_set(&port->units, -1);
268 mutex_unlock(&zfcp_sysfs_port_units_mutex);
269 312
270 write_lock_irq(&adapter->port_list_lock); 313 write_lock_irq(&adapter->port_list_lock);
271 list_del(&port->list); 314 list_del(&port->list);
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 1bf0a0984a09..e67bf7388cae 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -124,7 +124,7 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
124 int retval = 0; 124 int retval = 0;
125 125
126 mutex_lock(&zfcp_sysfs_port_units_mutex); 126 mutex_lock(&zfcp_sysfs_port_units_mutex);
127 if (atomic_read(&port->units) == -1) { 127 if (zfcp_sysfs_port_is_removing(port)) {
128 /* port is already gone */ 128 /* port is already gone */
129 retval = -ENODEV; 129 retval = -ENODEV;
130 goto out; 130 goto out;
@@ -168,8 +168,14 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
168 write_lock_irq(&port->unit_list_lock); 168 write_lock_irq(&port->unit_list_lock);
169 list_add_tail(&unit->list, &port->unit_list); 169 list_add_tail(&unit->list, &port->unit_list);
170 write_unlock_irq(&port->unit_list_lock); 170 write_unlock_irq(&port->unit_list_lock);
171 /*
172 * lock order: shost->scan_mutex before zfcp_sysfs_port_units_mutex
173 * due to zfcp_unit_scsi_scan() => zfcp_scsi_slave_alloc()
174 */
175 mutex_unlock(&zfcp_sysfs_port_units_mutex);
171 176
172 zfcp_unit_scsi_scan(unit); 177 zfcp_unit_scsi_scan(unit);
178 return retval;
173 179
174out: 180out:
175 mutex_unlock(&zfcp_sysfs_port_units_mutex); 181 mutex_unlock(&zfcp_sysfs_port_units_mutex);
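
The zfcp hunks above close a race between port removal and unit_add()/slave_alloc() by using units == -1 as a "removing" sentinel, always read and written under zfcp_sysfs_port_units_mutex; the unit_add() change also documents the shost->scan_mutex before port_units_mutex lock order. A compact single-lock model of the sentinel, with POSIX threads standing in for kernel mutexes:

/* Model of the zfcp "port is removing" sentinel: once removal is
 * committed, units is pinned to -1 and later unit_add calls bail. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t units_mutex = PTHREAD_MUTEX_INITIALIZER;
static int units;                       /* -1 == port being removed */

static bool port_is_removing(void)      /* caller holds units_mutex */
{
        return units == -1;
}

static int unit_add(void)
{
        int ret = 0;

        pthread_mutex_lock(&units_mutex);
        if (port_is_removing())
                ret = -1;               /* -ENODEV in the kernel */
        else
                units++;
        pthread_mutex_unlock(&units_mutex);
        return ret;
}

static bool try_remove_port(void)
{
        bool removed = false;

        pthread_mutex_lock(&units_mutex);
        if (units == 0) {               /* nothing alive under the port */
                units = -1;             /* zfcp_sysfs_port_set_removing() */
                removed = true;
        }
        pthread_mutex_unlock(&units_mutex);
        return removed;
}

int main(void)
{
        unit_add();
        printf("remove with unit present: %d\n", try_remove_port()); /* 0 */
        return 0;
}
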
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 8b915d4ed98d..7d43e014bd21 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -639,6 +639,10 @@ cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
639 639
640 if (ndev->flags & IFF_LOOPBACK) { 640 if (ndev->flags & IFF_LOOPBACK) {
641 ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr); 641 ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
642 if (!ndev) {
643 err = -ENETUNREACH;
644 goto rel_neigh;
645 }
642 mtu = ndev->mtu; 646 mtu = ndev->mtu;
643 pr_info("rt dev %s, loopback -> %s, mtu %u.\n", 647 pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
644 n->dev->name, ndev->name, mtu); 648 n->dev->name, ndev->name, mtu);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index a7a4a772f501..f0066f8a1786 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1160,10 +1160,8 @@ static int __init alua_init(void)
1160 int r; 1160 int r;
1161 1161
1162 kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0); 1162 kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
1163 if (!kaluad_wq) { 1163 if (!kaluad_wq)
1164 /* Temporary failure, bypass */ 1164 return -ENOMEM;
1165 return SCSI_DH_DEV_TEMP_BUSY;
1166 }
1167 1165
1168 r = scsi_register_device_handler(&alua_dh); 1166 r = scsi_register_device_handler(&alua_dh);
1169 if (r != 0) { 1167 if (r != 0) {
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 83f2fd70ce76..9f7e2457360e 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1019,6 +1019,8 @@ static struct domain_device *sas_ex_discover_expander(
1019 list_del(&child->dev_list_node); 1019 list_del(&child->dev_list_node);
1020 spin_unlock_irq(&parent->port->dev_list_lock); 1020 spin_unlock_irq(&parent->port->dev_list_lock);
1021 sas_put_device(child); 1021 sas_put_device(child);
1022 sas_port_delete(phy->port);
1023 phy->port = NULL;
1022 return NULL; 1024 return NULL;
1023 } 1025 }
1024 list_add_tail(&child->siblings, &parent->ex_dev.children); 1026 list_add_tail(&child->siblings, &parent->ex_dev.children);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index e030e1452136..b71f5ac6c7dc 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -35,7 +35,6 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
35 struct asd_sas_event *ev = to_asd_sas_event(work); 35 struct asd_sas_event *ev = to_asd_sas_event(work);
36 struct asd_sas_phy *phy = ev->phy; 36 struct asd_sas_phy *phy = ev->phy;
37 37
38 phy->in_shutdown = 0;
39 phy->error = 0; 38 phy->error = 0;
40 sas_deform_port(phy, 1); 39 sas_deform_port(phy, 1);
41} 40}
@@ -45,7 +44,6 @@ static void sas_phye_oob_done(struct work_struct *work)
45 struct asd_sas_event *ev = to_asd_sas_event(work); 44 struct asd_sas_event *ev = to_asd_sas_event(work);
46 struct asd_sas_phy *phy = ev->phy; 45 struct asd_sas_phy *phy = ev->phy;
47 46
48 phy->in_shutdown = 0;
49 phy->error = 0; 47 phy->error = 0;
50} 48}
51 49
@@ -126,6 +124,7 @@ static void sas_phye_shutdown(struct work_struct *work)
126 ret); 124 ret);
127 } else 125 } else
128 pr_notice("phy%d is not enabled, cannot shutdown\n", phy->id); 126 pr_notice("phy%d is not enabled, cannot shutdown\n", phy->id);
127 phy->in_shutdown = 0;
129} 128}
130 129
131/* ---------- Phy class registration ---------- */ 130/* ---------- Phy class registration ---------- */
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index b17761eafca9..d6be4e8f4a8f 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -7291,7 +7291,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
7291 else 7291 else
7292 mask = DMA_BIT_MASK(32); 7292 mask = DMA_BIT_MASK(32);
7293 7293
7294 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask); 7294 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
7295 if (rc) { 7295 if (rc) {
7296 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); 7296 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
7297 goto disable_device; 7297 goto disable_device;
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
index 928e8e81ba69..528df8801254 100644
--- a/drivers/thermal/qcom/tsens-common.c
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -64,20 +64,6 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
64 } 64 }
65} 65}
66 66
67bool is_sensor_enabled(struct tsens_priv *priv, u32 hw_id)
68{
69 u32 val;
70 int ret;
71
72 if ((hw_id > (priv->num_sensors - 1)) || (hw_id < 0))
73 return -EINVAL;
74 ret = regmap_field_read(priv->rf[SENSOR_EN], &val);
75 if (ret)
76 return ret;
77
78 return val & (1 << hw_id);
79}
80
81static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s) 67static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s)
82{ 68{
83 int degc, num, den; 69 int degc, num, den;
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index a319283c223f..6f26fadf4c27 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -334,7 +334,6 @@ static const struct reg_field tsens_v0_1_regfields[MAX_REGFIELDS] = {
334 /* CTRL_OFFSET */ 334 /* CTRL_OFFSET */
335 [TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0), 335 [TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0),
336 [TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1), 336 [TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1),
337 [SENSOR_EN] = REG_FIELD(SROT_CTRL_OFF, 3, 13),
338 337
339 /* ----- TM ------ */ 338 /* ----- TM ------ */
340 /* INTERRUPT ENABLE */ 339 /* INTERRUPT ENABLE */
diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c
index 1099069f2aa3..0a4f2b8fcab6 100644
--- a/drivers/thermal/qcom/tsens-v2.c
+++ b/drivers/thermal/qcom/tsens-v2.c
@@ -44,7 +44,6 @@ static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = {
44 /* CTRL_OFF */ 44 /* CTRL_OFF */
45 [TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0), 45 [TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0),
46 [TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1), 46 [TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1),
47 [SENSOR_EN] = REG_FIELD(SROT_CTRL_OFF, 3, 18),
48 47
49 /* ----- TM ------ */ 48 /* ----- TM ------ */
50 /* INTERRUPT ENABLE */ 49 /* INTERRUPT ENABLE */
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 36b0b52db524..0627d8615c30 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -85,11 +85,6 @@ static int tsens_register(struct tsens_priv *priv)
85 struct thermal_zone_device *tzd; 85 struct thermal_zone_device *tzd;
86 86
87 for (i = 0; i < priv->num_sensors; i++) { 87 for (i = 0; i < priv->num_sensors; i++) {
88 if (!is_sensor_enabled(priv, priv->sensor[i].hw_id)) {
89 dev_err(priv->dev, "sensor %d: disabled\n",
90 priv->sensor[i].hw_id);
91 continue;
92 }
93 priv->sensor[i].priv = priv; 88 priv->sensor[i].priv = priv;
94 priv->sensor[i].id = i; 89 priv->sensor[i].id = i;
95 tzd = devm_thermal_zone_of_sensor_register(priv->dev, i, 90 tzd = devm_thermal_zone_of_sensor_register(priv->dev, i,
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index eefe3844fb4e..2fd94997245b 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -315,7 +315,6 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *pt1, u32 *pt2, u32 mo
315int init_common(struct tsens_priv *priv); 315int init_common(struct tsens_priv *priv);
316int get_temp_tsens_valid(struct tsens_priv *priv, int i, int *temp); 316int get_temp_tsens_valid(struct tsens_priv *priv, int i, int *temp);
317int get_temp_common(struct tsens_priv *priv, int i, int *temp); 317int get_temp_common(struct tsens_priv *priv, int i, int *temp);
318bool is_sensor_enabled(struct tsens_priv *priv, u32 hw_id);
319 318
320/* TSENS target */ 319/* TSENS target */
321extern const struct tsens_plat_data data_8960; 320extern const struct tsens_plat_data data_8960;
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index c75549928656..57592a6b5c9e 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -531,7 +531,6 @@ out:
531int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg, 531int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
532 size_t len) 532 size_t len)
533{ 533{
534 struct pvcalls_bedata *bedata;
535 struct sock_mapping *map; 534 struct sock_mapping *map;
536 int sent, tot_sent = 0; 535 int sent, tot_sent = 0;
537 int count = 0, flags; 536 int count = 0, flags;
@@ -543,7 +542,6 @@ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
543 map = pvcalls_enter_sock(sock); 542 map = pvcalls_enter_sock(sock);
544 if (IS_ERR(map)) 543 if (IS_ERR(map))
545 return PTR_ERR(map); 544 return PTR_ERR(map);
546 bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
547 545
548 mutex_lock(&map->active.out_mutex); 546 mutex_lock(&map->active.out_mutex);
549 if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) { 547 if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {
@@ -626,7 +624,6 @@ out:
626int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 624int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
627 int flags) 625 int flags)
628{ 626{
629 struct pvcalls_bedata *bedata;
630 int ret; 627 int ret;
631 struct sock_mapping *map; 628 struct sock_mapping *map;
632 629
@@ -636,7 +633,6 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
636 map = pvcalls_enter_sock(sock); 633 map = pvcalls_enter_sock(sock);
637 if (IS_ERR(map)) 634 if (IS_ERR(map))
638 return PTR_ERR(map); 635 return PTR_ERR(map);
639 bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
640 636
641 mutex_lock(&map->active.in_mutex); 637 mutex_lock(&map->active.in_mutex);
642 if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER)) 638 if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER))
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
index 092981171df1..d75a2385b37c 100644
--- a/drivers/xen/xenbus/xenbus.h
+++ b/drivers/xen/xenbus/xenbus.h
@@ -83,6 +83,7 @@ struct xb_req_data {
83 int num_vecs; 83 int num_vecs;
84 int err; 84 int err;
85 enum xb_req_state state; 85 enum xb_req_state state;
86 bool user_req;
86 void (*cb)(struct xb_req_data *); 87 void (*cb)(struct xb_req_data *);
87 void *par; 88 void *par;
88}; 89};
@@ -133,4 +134,6 @@ void xenbus_ring_ops_init(void);
133int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par); 134int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par);
134void xenbus_dev_queue_reply(struct xb_req_data *req); 135void xenbus_dev_queue_reply(struct xb_req_data *req);
135 136
137extern unsigned int xb_dev_generation_id;
138
136#endif 139#endif
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index faf452d0edf0..08adc590f631 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -62,6 +62,8 @@
62 62
63#include "xenbus.h" 63#include "xenbus.h"
64 64
65unsigned int xb_dev_generation_id;
66
65/* 67/*
66 * An element of a list of outstanding transactions, for which we're 68 * An element of a list of outstanding transactions, for which we're
67 * still waiting a reply. 69 * still waiting a reply.
@@ -69,6 +71,7 @@
69struct xenbus_transaction_holder { 71struct xenbus_transaction_holder {
70 struct list_head list; 72 struct list_head list;
71 struct xenbus_transaction handle; 73 struct xenbus_transaction handle;
74 unsigned int generation_id;
72}; 75};
73 76
74/* 77/*
@@ -441,6 +444,7 @@ static int xenbus_write_transaction(unsigned msg_type,
441 rc = -ENOMEM; 444 rc = -ENOMEM;
442 goto out; 445 goto out;
443 } 446 }
447 trans->generation_id = xb_dev_generation_id;
444 list_add(&trans->list, &u->transactions); 448 list_add(&trans->list, &u->transactions);
445 } else if (msg->hdr.tx_id != 0 && 449 } else if (msg->hdr.tx_id != 0 &&
446 !xenbus_get_transaction(u, msg->hdr.tx_id)) 450 !xenbus_get_transaction(u, msg->hdr.tx_id))
@@ -449,6 +453,20 @@ static int xenbus_write_transaction(unsigned msg_type,
449 !(msg->hdr.len == 2 && 453 !(msg->hdr.len == 2 &&
450 (!strcmp(msg->body, "T") || !strcmp(msg->body, "F")))) 454 (!strcmp(msg->body, "T") || !strcmp(msg->body, "F"))))
451 return xenbus_command_reply(u, XS_ERROR, "EINVAL"); 455 return xenbus_command_reply(u, XS_ERROR, "EINVAL");
456 else if (msg_type == XS_TRANSACTION_END) {
457 trans = xenbus_get_transaction(u, msg->hdr.tx_id);
458 if (trans && trans->generation_id != xb_dev_generation_id) {
459 list_del(&trans->list);
460 kfree(trans);
461 if (!strcmp(msg->body, "T"))
462 return xenbus_command_reply(u, XS_ERROR,
463 "EAGAIN");
464 else
465 return xenbus_command_reply(u,
466 XS_TRANSACTION_END,
467 "OK");
468 }
469 }
452 470
453 rc = xenbus_dev_request_and_reply(&msg->hdr, u); 471 rc = xenbus_dev_request_and_reply(&msg->hdr, u);
454 if (rc && trans) { 472 if (rc && trans) {
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 49a3874ae6bb..ddc18da61834 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -105,6 +105,7 @@ static void xs_suspend_enter(void)
105 105
106static void xs_suspend_exit(void) 106static void xs_suspend_exit(void)
107{ 107{
108 xb_dev_generation_id++;
108 spin_lock(&xs_state_lock); 109 spin_lock(&xs_state_lock);
109 xs_suspend_active--; 110 xs_suspend_active--;
110 spin_unlock(&xs_state_lock); 111 spin_unlock(&xs_state_lock);
@@ -125,7 +126,7 @@ static uint32_t xs_request_enter(struct xb_req_data *req)
125 spin_lock(&xs_state_lock); 126 spin_lock(&xs_state_lock);
126 } 127 }
127 128
128 if (req->type == XS_TRANSACTION_START) 129 if (req->type == XS_TRANSACTION_START && !req->user_req)
129 xs_state_users++; 130 xs_state_users++;
130 xs_state_users++; 131 xs_state_users++;
131 rq_id = xs_request_id++; 132 rq_id = xs_request_id++;
@@ -140,7 +141,7 @@ void xs_request_exit(struct xb_req_data *req)
140 spin_lock(&xs_state_lock); 141 spin_lock(&xs_state_lock);
141 xs_state_users--; 142 xs_state_users--;
142 if ((req->type == XS_TRANSACTION_START && req->msg.type == XS_ERROR) || 143 if ((req->type == XS_TRANSACTION_START && req->msg.type == XS_ERROR) ||
143 (req->type == XS_TRANSACTION_END && 144 (req->type == XS_TRANSACTION_END && !req->user_req &&
144 !WARN_ON_ONCE(req->msg.type == XS_ERROR && 145 !WARN_ON_ONCE(req->msg.type == XS_ERROR &&
145 !strcmp(req->body, "ENOENT")))) 146 !strcmp(req->body, "ENOENT"))))
146 xs_state_users--; 147 xs_state_users--;
@@ -286,6 +287,7 @@ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par)
286 req->num_vecs = 1; 287 req->num_vecs = 1;
287 req->cb = xenbus_dev_queue_reply; 288 req->cb = xenbus_dev_queue_reply;
288 req->par = par; 289 req->par = par;
290 req->user_req = true;
289 291
290 xs_send(req, msg); 292 xs_send(req, msg);
291 293
@@ -313,6 +315,7 @@ static void *xs_talkv(struct xenbus_transaction t,
313 req->vec = iovec; 315 req->vec = iovec;
314 req->num_vecs = num_vecs; 316 req->num_vecs = num_vecs;
315 req->cb = xs_wake_up; 317 req->cb = xs_wake_up;
318 req->user_req = false;
316 319
317 msg.req_id = 0; 320 msg.req_id = 0;
318 msg.tx_id = t.id; 321 msg.tx_id = t.id;
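
Both xenbus hunks implement one idea: bump a global generation counter on resume from suspend, so a transaction opened before the suspend is recognized as stale — a commit is answered with EAGAIN while an abort is still acknowledged. A minimal sketch of the generation check:

/* Model of the xenbus transaction generation: each transaction
 * snapshots the counter at start; xs_suspend_exit() bumps it, making
 * every pre-suspend transaction fail its eventual commit. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int generation;         /* xb_dev_generation_id */

struct transaction {
        unsigned int generation_id;     /* snapshot at XS_TRANSACTION_START */
};

static void transaction_start(struct transaction *t)
{
        t->generation_id = generation;
}

static void resume_from_suspend(void)
{
        generation++;                   /* xs_suspend_exit() */
}

/* returns true if the end may proceed, false for a stale commit (EAGAIN) */
static bool transaction_end(const struct transaction *t, bool commit)
{
        if (t->generation_id != generation)
                return !commit;         /* aborting a stale txn still "works" */
        return true;
}

int main(void)
{
        struct transaction t;

        transaction_start(&t);
        resume_from_suspend();
        printf("commit ok: %d\n", transaction_end(&t, true));   /* 0 */
        return 0;
}
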
diff --git a/fs/block_dev.c b/fs/block_dev.c
index e6886c93c89d..749f5984425d 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1406,20 +1406,27 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev,
1406 */ 1406 */
1407int revalidate_disk(struct gendisk *disk) 1407int revalidate_disk(struct gendisk *disk)
1408{ 1408{
1409 struct block_device *bdev;
1410 int ret = 0; 1409 int ret = 0;
1411 1410
1412 if (disk->fops->revalidate_disk) 1411 if (disk->fops->revalidate_disk)
1413 ret = disk->fops->revalidate_disk(disk); 1412 ret = disk->fops->revalidate_disk(disk);
1414 bdev = bdget_disk(disk, 0);
1415 if (!bdev)
1416 return ret;
1417 1413
1418 mutex_lock(&bdev->bd_mutex); 1414 /*
1419 check_disk_size_change(disk, bdev, ret == 0); 1415 * Hidden disks don't have associated bdev so there's no point in
1420 bdev->bd_invalidated = 0; 1416 * revalidating it.
1421 mutex_unlock(&bdev->bd_mutex); 1417 */
1422 bdput(bdev); 1418 if (!(disk->flags & GENHD_FL_HIDDEN)) {
1419 struct block_device *bdev = bdget_disk(disk, 0);
1420
1421 if (!bdev)
1422 return ret;
1423
1424 mutex_lock(&bdev->bd_mutex);
1425 check_disk_size_change(disk, bdev, ret == 0);
1426 bdev->bd_invalidated = 0;
1427 mutex_unlock(&bdev->bd_mutex);
1428 bdput(bdev);
1429 }
1423 return ret; 1430 return ret;
1424} 1431}
1425EXPORT_SYMBOL(revalidate_disk); 1432EXPORT_SYMBOL(revalidate_disk);
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 85dc89d3a203..e3e1c13df439 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -132,7 +132,7 @@ static inline void flush_cache_ent(struct dfs_cache_entry *ce)
132 return; 132 return;
133 133
134 hlist_del_init_rcu(&ce->ce_hlist); 134 hlist_del_init_rcu(&ce->ce_hlist);
135 kfree(ce->ce_path); 135 kfree_const(ce->ce_path);
136 free_tgts(ce); 136 free_tgts(ce);
137 dfs_cache_count--; 137 dfs_cache_count--;
138 call_rcu(&ce->ce_rcu, free_cache_entry); 138 call_rcu(&ce->ce_rcu, free_cache_entry);
@@ -422,7 +422,7 @@ alloc_cache_entry(const char *path, const struct dfs_info3_param *refs,
422 422
423 rc = copy_ref_data(refs, numrefs, ce, NULL); 423 rc = copy_ref_data(refs, numrefs, ce, NULL);
424 if (rc) { 424 if (rc) {
425 kfree(ce->ce_path); 425 kfree_const(ce->ce_path);
426 kmem_cache_free(dfs_cache_slab, ce); 426 kmem_cache_free(dfs_cache_slab, ce);
427 ce = ERR_PTR(rc); 427 ce = ERR_PTR(rc);
428 } 428 }
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ce9a5be11df5..06e27ac6d82c 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3216,7 +3216,9 @@ cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
3216 } 3216 }
3217 3217
3218 if (rc) { 3218 if (rc) {
3219 for (i = 0; i < nr_pages; i++) { 3219 unsigned int nr_page_failed = i;
3220
3221 for (i = 0; i < nr_page_failed; i++) {
3220 put_page(rdata->pages[i]); 3222 put_page(rdata->pages[i]);
3221 rdata->pages[i] = NULL; 3223 rdata->pages[i] = NULL;
3222 } 3224 }
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 710ceb875161..29b699d532ef 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1054,7 +1054,8 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
1054 * not supported error. Client should accept it. 1054 * not supported error. Client should accept it.
1055 */ 1055 */
1056 cifs_dbg(VFS, "Server does not support validate negotiate\n"); 1056 cifs_dbg(VFS, "Server does not support validate negotiate\n");
1057 return 0; 1057 rc = 0;
1058 goto out_free_inbuf;
1058 } else if (rc != 0) { 1059 } else if (rc != 0) {
1059 cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc); 1060 cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
1060 rc = -EIO; 1061 rc = -EIO;
@@ -2619,10 +2620,12 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
2619void 2620void
2620SMB2_ioctl_free(struct smb_rqst *rqst) 2621SMB2_ioctl_free(struct smb_rqst *rqst)
2621{ 2622{
2623 int i;
2622 if (rqst && rqst->rq_iov) { 2624 if (rqst && rqst->rq_iov) {
2623 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 2625 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
2624 if (rqst->rq_iov[1].iov_len) 2626 for (i = 1; i < rqst->rq_nvec; i++)
2625 kfree(rqst->rq_iov[1].iov_base); 2627 if (rqst->rq_iov[i].iov_base != smb2_padding)
2628 kfree(rqst->rq_iov[i].iov_base);
2626 } 2629 }
2627} 2630}
2628 2631
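
SMB2_ioctl_free() above now walks every iov after the header instead of freeing only rq_iov[1], skipping the shared smb2_padding buffer that several requests point at and that must never be freed. A userspace model of the skip-the-shared-sentinel pattern, with a fake kvec type:

/* Model of the SMB2_ioctl_free() loop: free each per-request buffer,
 * but never the shared static padding that is reused across requests. */
#include <stdlib.h>

static char smb2_padding[7];            /* shared, never freed */

struct kvec { void *iov_base; size_t iov_len; };

static void ioctl_free(struct kvec *iov, int nvec)
{
        int i;

        free(iov[0].iov_base);          /* request header */
        for (i = 1; i < nvec; i++)
                if (iov[i].iov_base != smb2_padding)
                        free(iov[i].iov_base);
}

int main(void)
{
        struct kvec iov[3] = {
                { malloc(64), 64 },     /* header */
                { malloc(32), 32 },     /* payload */
                { smb2_padding, sizeof(smb2_padding) },
        };

        ioctl_free(iov, 3);             /* frees 0 and 1, skips the padding */
        return 0;
}
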
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 310f8d17c53e..0fbb486a320e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2616,7 +2616,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
2616 2616
2617 ret = io_copy_iov(ctx, &iov, arg, i); 2617 ret = io_copy_iov(ctx, &iov, arg, i);
2618 if (ret) 2618 if (ret)
2619 break; 2619 goto err;
2620 2620
2621 /* 2621 /*
2622 * Don't impose further limits on the size and buffer 2622 * Don't impose further limits on the size and buffer
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index 9846f7e95282..7147e4aebecc 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -127,7 +127,7 @@ nlm_decode_lock(__be32 *p, struct nlm_lock *lock)
127 127
128 locks_init_lock(fl); 128 locks_init_lock(fl);
129 fl->fl_owner = current->files; 129 fl->fl_owner = current->files;
130 fl->fl_pid = current->tgid; 130 fl->fl_pid = (pid_t)lock->svid;
131 fl->fl_flags = FL_POSIX; 131 fl->fl_flags = FL_POSIX;
132 fl->fl_type = F_RDLCK; /* as good as anything else */ 132 fl->fl_type = F_RDLCK; /* as good as anything else */
133 start = ntohl(*p++); 133 start = ntohl(*p++);
@@ -269,7 +269,7 @@ nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
269 memset(lock, 0, sizeof(*lock)); 269 memset(lock, 0, sizeof(*lock));
270 locks_init_lock(&lock->fl); 270 locks_init_lock(&lock->fl);
271 lock->svid = ~(u32) 0; 271 lock->svid = ~(u32) 0;
272 lock->fl.fl_pid = current->tgid; 272 lock->fl.fl_pid = (pid_t)lock->svid;
273 273
274 if (!(p = nlm_decode_cookie(p, &argp->cookie)) 274 if (!(p = nlm_decode_cookie(p, &argp->cookie))
275 || !(p = xdr_decode_string_inplace(p, &lock->caller, 275 || !(p = xdr_decode_string_inplace(p, &lock->caller,
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index 70154f376695..7ed9edf9aed4 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -119,7 +119,7 @@ nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
119 119
120 locks_init_lock(fl); 120 locks_init_lock(fl);
121 fl->fl_owner = current->files; 121 fl->fl_owner = current->files;
122 fl->fl_pid = current->tgid; 122 fl->fl_pid = (pid_t)lock->svid;
123 fl->fl_flags = FL_POSIX; 123 fl->fl_flags = FL_POSIX;
124 fl->fl_type = F_RDLCK; /* as good as anything else */ 124 fl->fl_type = F_RDLCK; /* as good as anything else */
125 p = xdr_decode_hyper(p, &start); 125 p = xdr_decode_hyper(p, &start);
@@ -266,7 +266,7 @@ nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p)
266 memset(lock, 0, sizeof(*lock)); 266 memset(lock, 0, sizeof(*lock));
267 locks_init_lock(&lock->fl); 267 locks_init_lock(&lock->fl);
268 lock->svid = ~(u32) 0; 268 lock->svid = ~(u32) 0;
269 lock->fl.fl_pid = current->tgid; 269 lock->fl.fl_pid = (pid_t)lock->svid;
270 270
271 if (!(p = nlm4_decode_cookie(p, &argp->cookie)) 271 if (!(p = nlm4_decode_cookie(p, &argp->cookie))
272 || !(p = xdr_decode_string_inplace(p, &lock->caller, 272 || !(p = xdr_decode_string_inplace(p, &lock->caller,
diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c
index f65f2b2f594d..1906cc962c4d 100644
--- a/fs/ocfs2/filecheck.c
+++ b/fs/ocfs2/filecheck.c
@@ -193,6 +193,7 @@ int ocfs2_filecheck_create_sysfs(struct ocfs2_super *osb)
193 ret = kobject_init_and_add(&entry->fs_kobj, &ocfs2_ktype_filecheck, 193 ret = kobject_init_and_add(&entry->fs_kobj, &ocfs2_ktype_filecheck,
194 NULL, "filecheck"); 194 NULL, "filecheck");
195 if (ret) { 195 if (ret) {
196 kobject_put(&entry->fs_kobj);
196 kfree(fcheck); 197 kfree(fcheck);
197 return ret; 198 return ret;
198 } 199 }
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 77258d276f93..11e215d7937e 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -89,6 +89,11 @@ enum {
89 * Enable cpuset controller in v1 cgroup to use v2 behavior. 89 * Enable cpuset controller in v1 cgroup to use v2 behavior.
90 */ 90 */
91 CGRP_ROOT_CPUSET_V2_MODE = (1 << 4), 91 CGRP_ROOT_CPUSET_V2_MODE = (1 << 4),
92
93 /*
94 * Enable legacy local memory.events.
95 */
96 CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5),
92}; 97};
93 98
94/* cftype->flags */ 99/* cftype->flags */
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
index 3a91130a4fbd..02393c0c98f9 100644
--- a/include/linux/generic-radix-tree.h
+++ b/include/linux/generic-radix-tree.h
@@ -2,7 +2,7 @@
2#define _LINUX_GENERIC_RADIX_TREE_H 2#define _LINUX_GENERIC_RADIX_TREE_H
3 3
4/** 4/**
5 * DOC: Generic radix trees/sparse arrays: 5 * DOC: Generic radix trees/sparse arrays
6 * 6 *
7 * Very simple and minimalistic, supporting arbitrary size entries up to 7 * Very simple and minimalistic, supporting arbitrary size entries up to
8 * PAGE_SIZE. 8 * PAGE_SIZE.
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index aa5efd9351eb..d5ceb2839a2d 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -54,6 +54,7 @@ struct list_lru {
54#ifdef CONFIG_MEMCG_KMEM 54#ifdef CONFIG_MEMCG_KMEM
55 struct list_head list; 55 struct list_head list;
56 int shrinker_id; 56 int shrinker_id;
57 bool memcg_aware;
57#endif 58#endif
58}; 59};
59 60
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 73fe0a700911..edf9e8f32d70 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -737,8 +737,14 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
737static inline void memcg_memory_event(struct mem_cgroup *memcg, 737static inline void memcg_memory_event(struct mem_cgroup *memcg,
738 enum memcg_memory_event event) 738 enum memcg_memory_event event)
739{ 739{
740 atomic_long_inc(&memcg->memory_events[event]); 740 do {
741 cgroup_file_notify(&memcg->events_file); 741 atomic_long_inc(&memcg->memory_events[event]);
742 cgroup_file_notify(&memcg->events_file);
743
744 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
745 break;
746 } while ((memcg = parent_mem_cgroup(memcg)) &&
747 !mem_cgroup_is_root(memcg));
742} 748}
743 749
744static inline void memcg_memory_event_mm(struct mm_struct *mm, 750static inline void memcg_memory_event_mm(struct mm_struct *mm,
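
With this change memcg_memory_event() counts an event on the group and on every ancestor below the root, unless the memory_localevents mount option (added in the cgroup hunks further down) pins the count to the local group. A small parent-pointer model of the loop:

/* Model of hierarchical memcg_memory_event(): walk toward the root,
 * bumping each ancestor's counter, stopping early in local-only mode. */
#include <stdbool.h>
#include <stdio.h>

struct memcg {
        struct memcg *parent;           /* NULL above the root */
        unsigned long events;
};

static bool is_root(const struct memcg *m) { return m->parent == NULL; }

static void memory_event(struct memcg *m, bool local_events_only)
{
        do {
                m->events++;            /* + cgroup_file_notify() in kernel */
                if (local_events_only)
                        break;
        } while ((m = m->parent) && !is_root(m));
}

int main(void)
{
        struct memcg root = { 0 };
        struct memcg mid = { .parent = &root };
        struct memcg leaf = { .parent = &mid };

        memory_event(&leaf, false);
        printf("leaf=%lu mid=%lu root=%lu\n",
               leaf.events, mid.events, root.events);   /* 1 1 0 */
        return 0;
}
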
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4a5a84d7bdd4..dd436da7eccc 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -344,6 +344,7 @@ struct pci_dev {
344 D3cold, not set for devices 344 D3cold, not set for devices
345 powered on/off by the 345 powered on/off by the
346 corresponding bridge */ 346 corresponding bridge */
347 unsigned int skip_bus_pm:1; /* Internal: Skip bus-level PM */
347 unsigned int ignore_hotplug:1; /* Ignore hotplug events */ 348 unsigned int ignore_hotplug:1; /* Ignore hotplug events */
348 unsigned int hotplug_user_indicators:1; /* SlotCtl indicators 349 unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
349 controlled exclusively by 350 controlled exclusively by
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 6b3ea9ea6a9e..4a2ffd678887 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -359,7 +359,7 @@ extern void mark_free_pages(struct zone *zone);
359 * platforms which require special recovery actions in that situation. 359 * platforms which require special recovery actions in that situation.
360 */ 360 */
361struct platform_hibernation_ops { 361struct platform_hibernation_ops {
362 int (*begin)(void); 362 int (*begin)(pm_message_t stage);
363 void (*end)(void); 363 void (*end)(void);
364 int (*pre_snapshot)(void); 364 int (*pre_snapshot)(void);
365 void (*finish)(void); 365 void (*finish)(void);
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 217cec4e22c6..426a0026225c 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1810,11 +1810,13 @@ int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
1810 1810
1811enum cgroup2_param { 1811enum cgroup2_param {
1812 Opt_nsdelegate, 1812 Opt_nsdelegate,
1813 Opt_memory_localevents,
1813 nr__cgroup2_params 1814 nr__cgroup2_params
1814}; 1815};
1815 1816
1816static const struct fs_parameter_spec cgroup2_param_specs[] = { 1817static const struct fs_parameter_spec cgroup2_param_specs[] = {
1817 fsparam_flag ("nsdelegate", Opt_nsdelegate), 1818 fsparam_flag("nsdelegate", Opt_nsdelegate),
1819 fsparam_flag("memory_localevents", Opt_memory_localevents),
1818 {} 1820 {}
1819}; 1821};
1820 1822
@@ -1837,6 +1839,9 @@ static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param
1837 case Opt_nsdelegate: 1839 case Opt_nsdelegate:
1838 ctx->flags |= CGRP_ROOT_NS_DELEGATE; 1840 ctx->flags |= CGRP_ROOT_NS_DELEGATE;
1839 return 0; 1841 return 0;
1842 case Opt_memory_localevents:
1843 ctx->flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
1844 return 0;
1840 } 1845 }
1841 return -EINVAL; 1846 return -EINVAL;
1842} 1847}
@@ -1848,6 +1853,11 @@ static void apply_cgroup_root_flags(unsigned int root_flags)
1848 cgrp_dfl_root.flags |= CGRP_ROOT_NS_DELEGATE; 1853 cgrp_dfl_root.flags |= CGRP_ROOT_NS_DELEGATE;
1849 else 1854 else
1850 cgrp_dfl_root.flags &= ~CGRP_ROOT_NS_DELEGATE; 1855 cgrp_dfl_root.flags &= ~CGRP_ROOT_NS_DELEGATE;
1856
1857 if (root_flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
1858 cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
1859 else
1860 cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_LOCAL_EVENTS;
1851 } 1861 }
1852} 1862}
1853 1863
@@ -1855,6 +1865,8 @@ static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root
1855{ 1865{
1856 if (cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE) 1866 if (cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE)
1857 seq_puts(seq, ",nsdelegate"); 1867 seq_puts(seq, ",nsdelegate");
1868 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
1869 seq_puts(seq, ",memory_localevents");
1858 return 0; 1870 return 0;
1859} 1871}
1860 1872
@@ -6325,7 +6337,7 @@ static struct kobj_attribute cgroup_delegate_attr = __ATTR_RO(delegate);
6325static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr, 6337static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
6326 char *buf) 6338 char *buf)
6327{ 6339{
6328 return snprintf(buf, PAGE_SIZE, "nsdelegate\n"); 6340 return snprintf(buf, PAGE_SIZE, "nsdelegate\nmemory_localevents\n");
6329} 6341}
6330static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features); 6342static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);
6331 6343
diff --git a/kernel/fork.c b/kernel/fork.c
index b2b87d450b80..75675b9bf6df 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -123,7 +123,7 @@
123unsigned long total_forks; /* Handle normal Linux uptimes. */ 123unsigned long total_forks; /* Handle normal Linux uptimes. */
124int nr_threads; /* The idle threads do not count.. */ 124int nr_threads; /* The idle threads do not count.. */
125 125
126int max_threads; /* tunable limit on nr_threads */ 126static int max_threads; /* tunable limit on nr_threads */
127 127
128DEFINE_PER_CPU(unsigned long, process_counts) = 0; 128DEFINE_PER_CPU(unsigned long, process_counts) = 0;
129 129
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index c8c272df7154..97522630b1b6 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -129,7 +129,7 @@ static int hibernation_test(int level) { return 0; }
129static int platform_begin(int platform_mode) 129static int platform_begin(int platform_mode)
130{ 130{
131 return (platform_mode && hibernation_ops) ? 131 return (platform_mode && hibernation_ops) ?
132 hibernation_ops->begin() : 0; 132 hibernation_ops->begin(PMSG_FREEZE) : 0;
133} 133}
134 134
135/** 135/**
@@ -542,7 +542,7 @@ int hibernation_platform_enter(void)
542 * hibernation_ops->finish() before saving the image, so we should let 542 * hibernation_ops->finish() before saving the image, so we should let
543 * the firmware know that we're going to enter the sleep state after all 543 * the firmware know that we're going to enter the sleep state after all
544 */ 544 */
545 error = hibernation_ops->begin(); 545 error = hibernation_ops->begin(PMSG_HIBERNATE);
546 if (error) 546 if (error)
547 goto Close; 547 goto Close;
548 548
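
Together with the suspend.h prototype change above, these hibernate.c call sites tell ->begin() which stage invoked it, which is what lets the ACPI handler (first hunk of this section) call pm_set_suspend_via_firmware() only for PM_EVENT_HIBERNATE. A self-contained sketch with a simplified pm_message_t:

/* Model of the ->begin(pm_message_t) plumbing: the platform callback
 * branches on the stage it was entered from. */
#include <stdbool.h>
#include <stdio.h>

enum pm_event { PM_EVENT_FREEZE, PM_EVENT_HIBERNATE };
typedef struct { enum pm_event event; } pm_message_t;

static bool suspend_via_firmware;

static int acpi_hibernation_begin(pm_message_t stage)
{
        /* ... suspend_nvs_alloc(), acpi_scan_lock_acquire() ... */
        if (stage.event == PM_EVENT_HIBERNATE)
                suspend_via_firmware = true;    /* pm_set_suspend_via_firmware() */
        return 0;
}

int main(void)
{
        acpi_hibernation_begin((pm_message_t){ .event = PM_EVENT_FREEZE });
        printf("after freeze begin: %d\n", suspend_via_firmware);    /* 0 */
        acpi_hibernation_begin((pm_message_t){ .event = PM_EVENT_HIBERNATE });
        printf("after hibernate begin: %d\n", suspend_via_firmware); /* 1 */
        return 0;
}
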
diff --git a/kernel/signal.c b/kernel/signal.c
index d7b9d14ac80d..328a01e1a2f0 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2485,6 +2485,8 @@ relock:
2485 if (signal_group_exit(signal)) { 2485 if (signal_group_exit(signal)) {
2486 ksig->info.si_signo = signr = SIGKILL; 2486 ksig->info.si_signo = signr = SIGKILL;
2487 sigdelset(&current->pending.signal, SIGKILL); 2487 sigdelset(&current->pending.signal, SIGKILL);
2488 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2489 &sighand->action[SIGKILL - 1]);
2488 recalc_sigpending(); 2490 recalc_sigpending();
2489 goto fatal; 2491 goto fatal;
2490 } 2492 }
diff --git a/kernel/sys.c b/kernel/sys.c
index bdbfe8d37418..2969304c29fe 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1882,13 +1882,14 @@ exit_err:
1882} 1882}
1883 1883
1884/* 1884/*
1885 * Check arithmetic relations of passed addresses.
1886 *
1885 * WARNING: we don't require any capability here so be very careful 1887 * WARNING: we don't require any capability here so be very careful
1886 * in what is allowed for modification from userspace. 1888 * in what is allowed for modification from userspace.
1887 */ 1889 */
1888static int validate_prctl_map(struct prctl_mm_map *prctl_map) 1890static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
1889{ 1891{
1890 unsigned long mmap_max_addr = TASK_SIZE; 1892 unsigned long mmap_max_addr = TASK_SIZE;
1891 struct mm_struct *mm = current->mm;
1892 int error = -EINVAL, i; 1893 int error = -EINVAL, i;
1893 1894
1894 static const unsigned char offsets[] = { 1895 static const unsigned char offsets[] = {
@@ -1949,24 +1950,6 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map)
1949 prctl_map->start_data)) 1950 prctl_map->start_data))
1950 goto out; 1951 goto out;
1951 1952
1952 /*
1953 * Someone is trying to cheat the auxv vector.
1954 */
1955 if (prctl_map->auxv_size) {
1956 if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv))
1957 goto out;
1958 }
1959
1960 /*
1961 * Finally, make sure the caller has the rights to
1962 * change /proc/pid/exe link: only local sys admin should
1963 * be allowed to.
1964 */
1965 if (prctl_map->exe_fd != (u32)-1) {
1966 if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
1967 goto out;
1968 }
1969
1970 error = 0; 1953 error = 0;
1971out: 1954out:
1972 return error; 1955 return error;
@@ -1993,11 +1976,18 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
1993 if (copy_from_user(&prctl_map, addr, sizeof(prctl_map))) 1976 if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
1994 return -EFAULT; 1977 return -EFAULT;
1995 1978
1996 error = validate_prctl_map(&prctl_map); 1979 error = validate_prctl_map_addr(&prctl_map);
1997 if (error) 1980 if (error)
1998 return error; 1981 return error;
1999 1982
2000 if (prctl_map.auxv_size) { 1983 if (prctl_map.auxv_size) {
1984 /*
1985 * Someone is trying to cheat the auxv vector.
1986 */
1987 if (!prctl_map.auxv ||
1988 prctl_map.auxv_size > sizeof(mm->saved_auxv))
1989 return -EINVAL;
1990
2001 memset(user_auxv, 0, sizeof(user_auxv)); 1991 memset(user_auxv, 0, sizeof(user_auxv));
2002 if (copy_from_user(user_auxv, 1992 if (copy_from_user(user_auxv,
2003 (const void __user *)prctl_map.auxv, 1993 (const void __user *)prctl_map.auxv,
@@ -2010,6 +2000,14 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
2010 } 2000 }
2011 2001
2012 if (prctl_map.exe_fd != (u32)-1) { 2002 if (prctl_map.exe_fd != (u32)-1) {
2003 /*
2004 * Make sure the caller has the rights to
2005 * change /proc/pid/exe link: only local sys admin should
2006 * be allowed to.
2007 */
2008 if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
2009 return -EINVAL;
2010
2013 error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd); 2011 error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
2014 if (error) 2012 if (error)
2015 return error; 2013 return error;
@@ -2097,7 +2095,11 @@ static int prctl_set_mm(int opt, unsigned long addr,
2097 unsigned long arg4, unsigned long arg5) 2095 unsigned long arg4, unsigned long arg5)
2098{ 2096{
2099 struct mm_struct *mm = current->mm; 2097 struct mm_struct *mm = current->mm;
2100 struct prctl_mm_map prctl_map; 2098 struct prctl_mm_map prctl_map = {
2099 .auxv = NULL,
2100 .auxv_size = 0,
2101 .exe_fd = -1,
2102 };
2101 struct vm_area_struct *vma; 2103 struct vm_area_struct *vma;
2102 int error; 2104 int error;
2103 2105
@@ -2125,9 +2127,15 @@ static int prctl_set_mm(int opt, unsigned long addr,
2125 2127
2126 error = -EINVAL; 2128 error = -EINVAL;
2127 2129
2128 down_write(&mm->mmap_sem); 2130 /*
 2131	 * arg_lock protects concurrent updates of arg boundaries, we need
2132 * mmap_sem for a) concurrent sys_brk, b) finding VMA for addr
2133 * validation.
2134 */
2135 down_read(&mm->mmap_sem);
2129 vma = find_vma(mm, addr); 2136 vma = find_vma(mm, addr);
2130 2137
2138 spin_lock(&mm->arg_lock);
2131 prctl_map.start_code = mm->start_code; 2139 prctl_map.start_code = mm->start_code;
2132 prctl_map.end_code = mm->end_code; 2140 prctl_map.end_code = mm->end_code;
2133 prctl_map.start_data = mm->start_data; 2141 prctl_map.start_data = mm->start_data;
@@ -2139,9 +2147,6 @@ static int prctl_set_mm(int opt, unsigned long addr,
2139 prctl_map.arg_end = mm->arg_end; 2147 prctl_map.arg_end = mm->arg_end;
2140 prctl_map.env_start = mm->env_start; 2148 prctl_map.env_start = mm->env_start;
2141 prctl_map.env_end = mm->env_end; 2149 prctl_map.env_end = mm->env_end;
2142 prctl_map.auxv = NULL;
2143 prctl_map.auxv_size = 0;
2144 prctl_map.exe_fd = -1;
2145 2150
2146 switch (opt) { 2151 switch (opt) {
2147 case PR_SET_MM_START_CODE: 2152 case PR_SET_MM_START_CODE:
@@ -2181,7 +2186,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
2181 goto out; 2186 goto out;
2182 } 2187 }
2183 2188
2184 error = validate_prctl_map(&prctl_map); 2189 error = validate_prctl_map_addr(&prctl_map);
2185 if (error) 2190 if (error)
2186 goto out; 2191 goto out;
2187 2192
@@ -2218,7 +2223,8 @@ static int prctl_set_mm(int opt, unsigned long addr,
2218 2223
2219 error = 0; 2224 error = 0;
2220out: 2225out:
2221 up_write(&mm->mmap_sem); 2226 spin_unlock(&mm->arg_lock);
2227 up_read(&mm->mmap_sem);
2222 return error; 2228 return error;
2223} 2229}
2224 2230
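
The prctl_set_mm() hunk above swaps the write-locked mmap_sem for a read lock plus the dedicated arg_lock spinlock taken inside it around the boundary updates. A sketch of that lock pairing, with pthread primitives standing in for the kernel ones:

/* Model of the prctl_set_mm() locking change: readers of the address
 * space may run concurrently; only the arg-boundary fields need the
 * inner lock for mutual exclusion. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t arg_lock = PTHREAD_MUTEX_INITIALIZER;

static struct { unsigned long arg_start, arg_end; } mm;

static void set_arg_bounds(unsigned long start, unsigned long end)
{
        pthread_rwlock_rdlock(&mmap_sem);  /* down_read(&mm->mmap_sem) */
        pthread_mutex_lock(&arg_lock);     /* spin_lock(&mm->arg_lock) */
        mm.arg_start = start;
        mm.arg_end = end;
        pthread_mutex_unlock(&arg_lock);
        pthread_rwlock_unlock(&mmap_sem);
}

int main(void)
{
        set_arg_bounds(0x1000, 0x2000);
        printf("args: %#lx-%#lx\n", mm.arg_start, mm.arg_end);
        return 0;
}
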
diff --git a/lib/sort.c b/lib/sort.c
index 50855ea8c262..cf408aec3733 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -43,8 +43,9 @@ static bool is_aligned(const void *base, size_t size, unsigned char align)
43 43
44/** 44/**
45 * swap_words_32 - swap two elements in 32-bit chunks 45 * swap_words_32 - swap two elements in 32-bit chunks
46 * @a, @b: pointers to the elements 46 * @a: pointer to the first element to swap
47 * @size: element size (must be a multiple of 4) 47 * @b: pointer to the second element to swap
48 * @n: element size (must be a multiple of 4)
48 * 49 *
49 * Exchange the two objects in memory. This exploits base+index addressing, 50 * Exchange the two objects in memory. This exploits base+index addressing,
50 * which basically all CPUs have, to minimize loop overhead computations. 51 * which basically all CPUs have, to minimize loop overhead computations.
@@ -65,8 +66,9 @@ static void swap_words_32(void *a, void *b, size_t n)
 
 /**
  * swap_words_64 - swap two elements in 64-bit chunks
- * @a, @b: pointers to the elements
- * @size: element size (must be a multiple of 8)
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 8)
  *
  * Exchange the two objects in memory. This exploits base+index
  * addressing, which basically all CPUs have, to minimize loop overhead
@@ -100,8 +102,9 @@ static void swap_words_64(void *a, void *b, size_t n)
 
 /**
  * swap_bytes - swap two elements a byte at a time
- * @a, @b: pointers to the elements
- * @size: element size
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size
  *
  * This is the fallback if alignment doesn't allow using larger chunks.
  */
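The three hunks above bring the sort helpers in line with kernel-doc convention: one "@name:" line per parameter, with the name spelled exactly as in the prototype ("@a, @b:" on a shared line, or "@size" for a parameter actually called "n", draws warnings from the kernel-doc tooling). A hedged sketch of the expected shape, on an invented helper rather than the lib/sort.c code itself:

#include <stdio.h>
#include <stdint.h>

/**
 * swap_u32 - swap two 32-bit values (illustrative, not from lib/sort.c)
 * @a: pointer to the first value to swap
 * @b: pointer to the second value to swap
 */
static void swap_u32(uint32_t *a, uint32_t *b)
{
	uint32_t t = *a;

	*a = *b;
	*b = t;
}

int main(void)
{
	uint32_t x = 1, y = 2;

	swap_u32(&x, &y);
	printf("x=%u y=%u\n", x, y);
	return 0;
}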
diff --git a/mm/compaction.c b/mm/compaction.c
index 9febc8cc84e7..9e1b9acb116b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1399,7 +1399,7 @@ fast_isolate_freepages(struct compact_control *cc)
 			page = pfn_to_page(highest);
 			cc->free_pfn = highest;
 		} else {
-			if (cc->direct_compaction) {
+			if (cc->direct_compaction && pfn_valid(min_pfn)) {
 				page = pfn_to_page(min_pfn);
 				cc->free_pfn = min_pfn;
 			}
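The guard matters because pfn_to_page() on an invalid pfn dereferences memmap that may not exist, and min_pfn can still hold its initial "nothing found" value when the fast scan fails. A userspace analogue of check-before-convert (table size, names, and the sentinel are all illustrative):

#include <stdio.h>
#include <stddef.h>

#define NR_FRAMES 16
static int frames[NR_FRAMES];

/* Like pfn_valid(): is this index backed by a real entry? */
static int frame_valid(size_t idx)
{
	return idx < NR_FRAMES;
}

/* Like pfn_to_page(): only safe after frame_valid(). */
static int *idx_to_frame(size_t idx)
{
	return &frames[idx];
}

int main(void)
{
	size_t min_idx = (size_t)-1;	/* sentinel: scan found nothing */

	/* Mirrors the fix: validate before converting. */
	if (frame_valid(min_idx))
		printf("frame %zu at %p\n", min_idx, (void *)idx_to_frame(min_idx));
	else
		printf("no fallback frame, skipping\n");
	return 0;
}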
diff --git a/mm/gup.c b/mm/gup.c
index f173fcbaf1b2..ddde097cf9e4 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1042,10 +1042,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 		BUG_ON(ret >= nr_pages);
 	}
 
-	if (!pages)
-		/* If it's a prefault don't insist harder */
-		return ret;
-
 	if (ret > 0) {
 		nr_pages -= ret;
 		pages_done += ret;
@@ -1061,8 +1057,12 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 			pages_done = ret;
 			break;
 		}
-		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
-		pages += ret;
+		/*
+		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
+		 * For the prefault case (!pages) we only update counts.
+		 */
+		if (likely(pages))
+			pages += ret;
 		start += ret << PAGE_SHIFT;
 
 		/*
@@ -1085,7 +1085,8 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 		pages_done++;
 		if (!nr_pages)
 			break;
-		pages++;
+		if (likely(pages))
+			pages++;
 		start += PAGE_SIZE;
 	}
 	if (lock_dropped && *locked) {
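Taken together, the three gup hunks change the contract of __get_user_pages_locked(): a NULL pages array now means "prefault only" and is carried all the way through the retry loop rather than returning early, so the page counters still advance while the array-pointer arithmetic is skipped. A small sketch of that optional-output-array convention (illustrative names, not the kernel signature):

#include <stdio.h>
#include <stddef.h>

/* NULL 'out' means "prefault only": every place that advances the
 * array pointer is guarded, while the count advances either way. */
static long fill_range(int *out, size_t nr)
{
	long done = 0;

	while (nr--) {
		if (out) {	/* like: if (likely(pages)) */
			*out = 1;	/* record one "pinned page" */
			out++;		/* advance only when filling */
		}
		done++;		/* progress is tracked regardless */
	}
	return done;
}

int main(void)
{
	int pages[4];

	printf("filled: %ld\n", fill_range(pages, 4));
	printf("prefault only: %ld\n", fill_range(NULL, 4));	/* no deref */
	return 0;
}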
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 36afcf64e016..242fdc01aaa9 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -464,7 +464,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
-	u8 tag;
+	u8 tag = 0xff;
 
 	if (gfpflags_allow_blocking(flags))
 		quarantine_reduce();
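The one-liner gives "tag" a defined default: in tag-based KASAN, 0xff is the native "match-all" pointer tag, so paths through __kasan_kmalloc() that never reach the tag-assignment branch no longer consume an uninitialized value. A sketch of the defensive-default idiom (the meaning of 0xff follows the tag-based KASAN convention; everything else is illustrative):

#include <stdio.h>
#include <stdint.h>

static uint8_t assign_tag(int tagging_enabled)
{
	uint8_t tag = 0xff;	/* safe default, as in the patch */

	if (tagging_enabled)
		tag = 0x2a;	/* stand-in for a generated tag */
	return tag;		/* defined on every path */
}

int main(void)
{
	printf("tag=%#x\n", assign_tag(0));
	printf("tag=%#x\n", assign_tag(1));
	return 0;
}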
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 0bdf3152735e..e4709fdaa8e6 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -38,11 +38,7 @@ static int lru_shrinker_id(struct list_lru *lru)
 
 static inline bool list_lru_memcg_aware(struct list_lru *lru)
 {
-	/*
-	 * This needs node 0 to be always present, even
-	 * in the systems supporting sparse numa ids.
-	 */
-	return !!lru->node[0].memcg_lrus;
+	return lru->memcg_aware;
 }
 
 static inline struct list_lru_one *
@@ -452,6 +448,8 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
 {
 	int i;
 
+	lru->memcg_aware = memcg_aware;
+
 	if (!memcg_aware)
 		return 0;
 
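The pair of hunks replaces a derived predicate ("does node 0 have memcg_lrus?") with state recorded once at init time, which stays correct when node 0 is absent on systems with sparse node IDs. A compact sketch of the idiom (illustrative names):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct lru_sketch {
	bool memcg_aware;	/* set once at init, then authoritative */
	void *node0_private;	/* may legitimately stay NULL */
};

static void lru_init(struct lru_sketch *lru, bool memcg_aware)
{
	lru->memcg_aware = memcg_aware;	/* the fix: record, don't infer */
	lru->node0_private = NULL;	/* node 0 may simply not exist */
}

static bool lru_is_memcg_aware(const struct lru_sketch *lru)
{
	return lru->memcg_aware;	/* not: !!lru->node0_private */
}

int main(void)
{
	struct lru_sketch lru;

	lru_init(&lru, true);
	printf("memcg aware: %d\n", lru_is_memcg_aware(&lru));
	return 0;
}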
diff --git a/mm/util.c b/mm/util.c
index 91682a2090ee..9834c4ab7d8e 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -718,12 +718,12 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
 	if (!mm->arg_end)
 		goto out_mm;	/* Shh! No looking before we're done */
 
-	down_read(&mm->mmap_sem);
+	spin_lock(&mm->arg_lock);
 	arg_start = mm->arg_start;
 	arg_end = mm->arg_end;
 	env_start = mm->env_start;
 	env_end = mm->env_end;
-	up_read(&mm->mmap_sem);
+	spin_unlock(&mm->arg_lock);
 
 	len = arg_end - arg_start;
 
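This is the reader side of the arg_lock conversion: get_cmdline() snapshots the four boundaries under the spinlock and drops it before doing any further work, instead of holding mmap_sem across the copy. A userspace sketch of snapshot-under-lock (pthread mutex standing in for the spinlock; names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t arg_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long arg_start = 0x1000, arg_end = 0x1800;

static unsigned long cmdline_len(void)
{
	unsigned long start, end;

	pthread_mutex_lock(&arg_lock);
	start = arg_start;	/* a consistent pair ... */
	end = arg_end;		/* ... read under one hold */
	pthread_mutex_unlock(&arg_lock);

	return end - start;	/* no lock held while computing */
}

int main(void)
{
	printf("len=%lu\n", cmdline_len());
	return 0;
}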
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 233af6936c93..7350a124524b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -815,7 +815,7 @@ find_vmap_lowest_match(unsigned long size,
 		}
 
 		/*
-		 * OK. We roll back and find the fist right sub-tree,
+		 * OK. We roll back and find the first right sub-tree,
 		 * that will satisfy the search criteria. It can happen
 		 * only once due to "vstart" restriction.
 		 */
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 99be52c5ca45..985732c8b025 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -190,10 +190,11 @@ static int size_to_chunks(size_t size)
 
 static void compact_page_work(struct work_struct *w);
 
-static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool)
+static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
+							gfp_t gfp)
 {
 	struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle,
-						GFP_KERNEL);
+						gfp);
 
 	if (slots) {
 		memset(slots->slot, 0, sizeof(slots->slot));
@@ -295,10 +296,10 @@ static void z3fold_unregister_migration(struct z3fold_pool *pool)
 
 /* Initializes the z3fold header of a newly allocated z3fold page */
 static struct z3fold_header *init_z3fold_page(struct page *page,
-					struct z3fold_pool *pool)
+					struct z3fold_pool *pool, gfp_t gfp)
 {
 	struct z3fold_header *zhdr = page_address(page);
-	struct z3fold_buddy_slots *slots = alloc_slots(pool);
+	struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp);
 
 	if (!slots)
 		return NULL;
@@ -912,7 +913,7 @@ retry:
 	if (!page)
 		return -ENOMEM;
 
-	zhdr = init_z3fold_page(page, pool);
+	zhdr = init_z3fold_page(page, pool, gfp);
 	if (!zhdr) {
 		__free_page(page);
 		return -ENOMEM;
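The z3fold hunks thread the caller's gfp through init_z3fold_page() into alloc_slots() instead of hardcoding GFP_KERNEL, so a non-blocking z3fold_alloc() caller cannot be put to sleep by an internal slab allocation. A toy sketch of flag propagation (gfp_t is modeled as a plain flag word; all names are illustrative):

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int gfp_sketch_t;
#define GFP_MAY_SLEEP	0x1u

/* A real slab call would honor gfp; here we just show that the
 * helper receives the caller's flags instead of a hardcoded mode. */
static void *alloc_slots(gfp_sketch_t gfp)
{
	printf("alloc_slots(gfp=%#x)\n", gfp);
	return malloc(64);
}

static void *init_page(gfp_sketch_t gfp)
{
	return alloc_slots(gfp);	/* forwarded, not GFP_KERNEL */
}

int main(void)
{
	void *p = init_page(0);		/* atomic caller: no-sleep flags */
	free(p);
	p = init_page(GFP_MAY_SLEEP);	/* blocking caller */
	free(p);
	return 0;
}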
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index 552d5efd7cb7..17f06079a712 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -150,8 +150,12 @@ void print_gimple_expr(FILE *, gimple, int, int);
 void dump_gimple_stmt(pretty_printer *, gimple, int, int);
 #endif
 
+#ifndef __unused
 #define __unused __attribute__((__unused__))
+#endif
+#ifndef __visible
 #define __visible __attribute__((visibility("default")))
+#endif
 
 #define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node))
 #define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node))
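The #ifndef wrappers make the plugin header tolerate build environments whose own headers already provide __unused or __visible, avoiding macro-redefinition errors. A minimal sketch of the guard idiom (compiles with GCC/Clang; the attribute mirrors the hunk):

#include <stdio.h>

#ifndef __unused
#define __unused __attribute__((__unused__))
#endif

static void helper(int x __unused)
{
	/* parameter deliberately unused; the attribute silences -Wunused */
}

int main(void)
{
	helper(42);
	puts("ok");
	return 0;
}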
diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
index 1d73083da6cb..2efbec6b6b8d 100644
--- a/scripts/gdb/linux/constants.py.in
+++ b/scripts/gdb/linux/constants.py.in
@@ -40,7 +40,8 @@
 import gdb
 
 /* linux/clk-provider.h */
-LX_GDBPARSED(CLK_GET_RATE_NOCACHE)
+if IS_BUILTIN(CONFIG_COMMON_CLK):
+    LX_GDBPARSED(CLK_GET_RATE_NOCACHE)
 
 /* linux/fs.h */
 LX_VALUE(SB_RDONLY)
diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py
index 33df646618e2..6374e078a5f2 100755
--- a/scripts/spdxcheck.py
+++ b/scripts/spdxcheck.py
@@ -32,7 +32,8 @@ class SPDXdata(object):
 def read_spdxdata(repo):
 
     # The subdirectories of LICENSES in the kernel source
-    license_dirs = [ "preferred", "deprecated", "exceptions", "dual" ]
+    # Note: exceptions needs to be parsed as last directory.
+    license_dirs = [ "preferred", "dual", "deprecated", "exceptions" ]
     lictree = repo.head.commit.tree['LICENSES']
 
     spdx = SPDXdata()
@@ -58,13 +59,13 @@ def read_spdxdata(repo):
         elif l.startswith('SPDX-Licenses:'):
             for lic in l.split(':')[1].upper().strip().replace(' ', '').replace('\t', '').split(','):
                 if not lic in spdx.licenses:
-                    raise SPDXException(None, 'Exception %s missing license %s' %(ex, lic))
+                    raise SPDXException(None, 'Exception %s missing license %s' %(exception, lic))
                 spdx.exceptions[exception].append(lic)
 
         elif l.startswith("License-Text:"):
             if exception:
                 if not len(spdx.exceptions[exception]):
-                    raise SPDXException(el, 'Exception %s is missing SPDX-Licenses' %excid)
+                    raise SPDXException(el, 'Exception %s is missing SPDX-Licenses' %exception)
                 spdx.exception_files += 1
             else:
                 spdx.license_files += 1
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index e11564eb645b..82a38e801ee4 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -89,6 +89,9 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo)
 		tfm = &hmac_tfm;
 		algo = evm_hmac;
 	} else {
+		if (hash_algo >= HASH_ALGO__LAST)
+			return ERR_PTR(-EINVAL);
+
 		tfm = &evm_tfm[hash_algo];
 		algo = hash_algo_name[hash_algo];
 	}
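The added comparison bounds-checks hash_algo before it indexes evm_tfm[] and hash_algo_name[], turning an out-of-range request into -EINVAL instead of an out-of-bounds read. A userspace sketch of validate-then-index (table contents and names are illustrative):

#include <stdio.h>

#define ALGO_LAST 4
static const char *const algo_name[ALGO_LAST] = {
	"md5", "sha1", "sha256", "sha512",
};

static const char *lookup_algo(unsigned int idx)
{
	if (idx >= ALGO_LAST)	/* the added check */
		return NULL;	/* the kernel returns ERR_PTR(-EINVAL) */
	return algo_name[idx];
}

int main(void)
{
	const char *n;

	printf("%s\n", lookup_algo(2));		/* "sha256" */
	n = lookup_algo(99);			/* rejected, no OOB read */
	printf("%s\n", n ? n : "invalid");
	return 0;
}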
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index e0cc323f948f..1cc822a59054 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -498,10 +498,11 @@ static void add_rules(struct ima_rule_entry *entries, int count,
 
 		list_add_tail(&entry->list, &ima_policy_rules);
 	}
-	if (entries[i].action == APPRAISE)
+	if (entries[i].action == APPRAISE) {
 		temp_ima_appraise |= ima_appraise_flag(entries[i].func);
 		if (entries[i].func == POLICY_CHECK)
 			temp_ima_appraise |= IMA_APPRAISE_POLICY;
+	}
 	}
 }
 
@@ -1146,10 +1147,10 @@ enum {
 };
 
 static const char *const mask_tokens[] = {
-	"MAY_EXEC",
-	"MAY_WRITE",
-	"MAY_READ",
-	"MAY_APPEND"
+	"^MAY_EXEC",
+	"^MAY_WRITE",
+	"^MAY_READ",
+	"^MAY_APPEND"
 };
 
 #define __ima_hook_stringify(str)	(#str),
@@ -1209,6 +1210,7 @@ int ima_policy_show(struct seq_file *m, void *v)
 	struct ima_rule_entry *entry = v;
 	int i;
 	char tbuf[64] = {0,};
+	int offset = 0;
 
 	rcu_read_lock();
 
@@ -1232,15 +1234,17 @@ int ima_policy_show(struct seq_file *m, void *v)
 	if (entry->flags & IMA_FUNC)
 		policy_func_show(m, entry->func);
 
-	if (entry->flags & IMA_MASK) {
+	if ((entry->flags & IMA_MASK) || (entry->flags & IMA_INMASK)) {
+		if (entry->flags & IMA_MASK)
+			offset = 1;
 		if (entry->mask & MAY_EXEC)
-			seq_printf(m, pt(Opt_mask), mt(mask_exec));
+			seq_printf(m, pt(Opt_mask), mt(mask_exec) + offset);
 		if (entry->mask & MAY_WRITE)
-			seq_printf(m, pt(Opt_mask), mt(mask_write));
+			seq_printf(m, pt(Opt_mask), mt(mask_write) + offset);
 		if (entry->mask & MAY_READ)
-			seq_printf(m, pt(Opt_mask), mt(mask_read));
+			seq_printf(m, pt(Opt_mask), mt(mask_read) + offset);
 		if (entry->mask & MAY_APPEND)
-			seq_printf(m, pt(Opt_mask), mt(mask_append));
+			seq_printf(m, pt(Opt_mask), mt(mask_append) + offset);
 		seq_puts(m, " ");
 	}
 
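The mask_tokens rewrite is a small trick worth spelling out: each token is stored once with a leading '^', and the printing code selects the spelling by pointer offset, token + 1 for plain IMA_MASK rules and token + 0 when the caret itself must be shown for IMA_INMASK rules. A sketch of the same string-offset idea (illustrative, outside the kernel):

#include <stdio.h>

static const char *const mask_tokens[] = {
	"^MAY_EXEC", "^MAY_WRITE", "^MAY_READ", "^MAY_APPEND",
};

int main(void)
{
	int offset;

	/* plain mask rule: skip the '^' */
	offset = 1;
	printf("mask=%s\n", mask_tokens[2] + offset);	/* MAY_READ */

	/* "inmask" rule: keep the '^' */
	offset = 0;
	printf("mask=%s\n", mask_tokens[2] + offset);	/* ^MAY_READ */
	return 0;
}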
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 90cedebaeb94..7eeebe5e9da2 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -224,6 +224,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_MAX_VCPUS:
 		r = KVM_MAX_VCPUS;
 		break;
+	case KVM_CAP_MAX_VCPU_ID:
+		r = KVM_MAX_VCPU_ID;
+		break;
 	case KVM_CAP_MSI_DEVID:
 		if (!kvm)
 			r = -EINVAL;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 134ec0283a8a..ca54b09adf5b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1795,8 +1795,10 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 
 	if (map->page)
 		kunmap(map->page);
+#ifdef CONFIG_HAS_IOMEM
 	else
 		memunmap(map->hva);
+#endif
 
 	if (dirty) {
 		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
@@ -3149,8 +3151,6 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
 	case KVM_CAP_MULTI_ADDRESS_SPACE:
 		return KVM_ADDRESS_SPACE_NUM;
 #endif
-	case KVM_CAP_MAX_VCPU_ID:
-		return KVM_MAX_VCPU_ID;
 	case KVM_CAP_NR_MEMSLOTS:
 		return KVM_USER_MEM_SLOTS;
 	default:
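The last two hunks move KVM_CAP_MAX_VCPU_ID out of the generic check_extension switch and into the architecture handler (shown above for arm), since the meaningful maximum is per-architecture; generic code reaches the arch answer through its default path. A toy sketch of that dispatch layering (names and values are illustrative):

#include <stdio.h>

#define ARCH_MAX_VCPU_ID 512

enum cap { CAP_NR_MEMSLOTS, CAP_MAX_VCPU_ID };

/* Arch-specific answers live with the arch that defines them. */
static long arch_check_extension(enum cap c)
{
	switch (c) {
	case CAP_MAX_VCPU_ID:
		return ARCH_MAX_VCPU_ID;
	default:
		return 0;
	}
}

/* Generic code answers what is truly generic, then defers. */
static long check_extension_generic(enum cap c)
{
	switch (c) {
	case CAP_NR_MEMSLOTS:
		return 32;
	default:
		return arch_check_extension(c);
	}
}

int main(void)
{
	printf("max vcpu id: %ld\n", check_extension_generic(CAP_MAX_VCPU_ID));
	return 0;
}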