aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2016-02-23 01:36:46 -0500
committerIngo Molnar <mingo@kernel.org>2016-02-23 01:36:46 -0500
commitfb86780bf7708cd6553f592a6318f10eda766127 (patch)
treeb59737c56a407c9f82166576209ef8e8cd09ecc6
parent8f8e2aec9944dd12671182a1a26b8e1a35872a1d (diff)
parentdd71a17b1193dd4a4c35ecd0ba227aac3d110836 (diff)
Merge branch 'x86/urgent' into x86/platform, to queue up dependent patch
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt5
-rw-r--r--Documentation/devicetree/bindings/rtc/s3c-rtc.txt6
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-imx-uart.txt2
-rw-r--r--Documentation/filesystems/efivarfs.txt7
-rw-r--r--Documentation/timers/hpet.txt4
-rw-r--r--MAINTAINERS2
-rw-r--r--Makefile2
-rw-r--r--arch/arm/crypto/aes-ce-glue.c4
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/boot/Makefile4
-rw-r--r--arch/arm64/boot/install.sh14
-rw-r--r--arch/arm64/crypto/aes-glue.c4
-rw-r--r--arch/arm64/include/asm/kvm_arm.h2
-rw-r--r--arch/arm64/kernel/debug-monitors.c48
-rw-r--r--arch/arm64/kernel/image.h1
-rw-r--r--arch/arm64/kernel/stacktrace.c17
-rw-r--r--arch/arm64/kernel/traps.c11
-rw-r--r--arch/arm64/kvm/hyp-init.S12
-rw-r--r--arch/arm64/lib/strnlen.S2
-rw-r--r--arch/arm64/mm/dma-mapping.c4
-rw-r--r--arch/arm64/mm/fault.c9
-rw-r--r--arch/m68k/configs/amiga_defconfig9
-rw-r--r--arch/m68k/configs/apollo_defconfig9
-rw-r--r--arch/m68k/configs/atari_defconfig9
-rw-r--r--arch/m68k/configs/bvme6000_defconfig9
-rw-r--r--arch/m68k/configs/hp300_defconfig9
-rw-r--r--arch/m68k/configs/mac_defconfig9
-rw-r--r--arch/m68k/configs/multi_defconfig9
-rw-r--r--arch/m68k/configs/mvme147_defconfig9
-rw-r--r--arch/m68k/configs/mvme16x_defconfig9
-rw-r--r--arch/m68k/configs/q40_defconfig9
-rw-r--r--arch/m68k/configs/sun3_defconfig9
-rw-r--r--arch/m68k/configs/sun3x_defconfig9
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h1
-rw-r--r--arch/m68k/kernel/syscalltable.S1
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h4
-rw-r--r--arch/powerpc/include/asm/eeh.h1
-rw-r--r--arch/powerpc/include/asm/trace.h8
-rw-r--r--arch/powerpc/kernel/eeh_driver.c3
-rw-r--r--arch/powerpc/kernel/eeh_pe.c2
-rw-r--r--arch/powerpc/kernel/module_64.c2
-rw-r--r--arch/powerpc/mm/pgtable_64.c32
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c5
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c1
-rw-r--r--arch/powerpc/platforms/powernv/pci.c26
-rw-r--r--arch/powerpc/platforms/powernv/pci.h1
-rw-r--r--arch/s390/include/asm/livepatch.h2
-rw-r--r--arch/s390/kernel/perf_event.c8
-rw-r--r--arch/s390/kernel/stacktrace.c47
-rw-r--r--arch/s390/kernel/trace.c3
-rw-r--r--arch/s390/mm/maccess.c12
-rw-r--r--arch/s390/oprofile/backtrace.c8
-rw-r--r--arch/x86/Kconfig4
-rw-r--r--arch/x86/include/asm/livepatch.h2
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_uncore.c2
-rw-r--r--arch/x86/lib/copy_user_64.S142
-rw-r--r--arch/x86/mm/fault.c15
-rw-r--r--arch/x86/mm/gup.c2
-rw-r--r--arch/x86/platform/intel-quark/imr.c4
-rw-r--r--block/bio.c9
-rw-r--r--block/blk-cgroup.c9
-rw-r--r--block/blk-mq.c6
-rw-r--r--block/blk-settings.c4
-rw-r--r--block/blk-sysfs.c5
-rw-r--r--block/deadline-iosched.c3
-rw-r--r--drivers/block/floppy.c67
-rw-r--r--drivers/block/null_blk.c8
-rw-r--r--drivers/block/xen-blkfront.c74
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/clk/Makefile2
-rw-r--r--drivers/clk/clk-gpio.c2
-rw-r--r--drivers/clk/clk-scpi.c2
-rw-r--r--drivers/clk/mvebu/dove-divider.c2
-rw-r--r--drivers/clk/qcom/gcc-apq8084.c1
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c1
-rw-r--r--drivers/clk/qcom/lcc-ipq806x.c1
-rw-r--r--drivers/clk/qcom/lcc-msm8960.c1
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c1
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c2
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3036.c26
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c26
-rw-r--r--drivers/clk/tegra/clk-emc.c6
-rw-r--r--drivers/clk/tegra/clk-id.h1
-rw-r--r--drivers/clk/tegra/clk-pll.c50
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c5
-rw-r--r--drivers/clk/tegra/clk-tegra-super-gen4.c6
-rw-r--r--drivers/clk/tegra/clk-tegra210.c132
-rw-r--r--drivers/clk/versatile/clk-icst.c3
-rw-r--r--drivers/dma/dw/core.c15
-rw-r--r--drivers/dma/dw/pci.c4
-rw-r--r--drivers/dma/edma.c41
-rw-r--r--drivers/dma/ioat/dma.c34
-rw-r--r--drivers/firmware/efi/efivars.c35
-rw-r--r--drivers/firmware/efi/vars.c144
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/drm_atomic.c44
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c49
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c37
-rw-r--r--drivers/gpu/drm/drm_irq.c73
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c32
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c72
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c8
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h15
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c8
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c16
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h13
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c65
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c22
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c48
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c4
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/iommu/intel-svm.c37
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/lightnvm/core.c25
-rw-r--r--drivers/lightnvm/rrpc.c4
-rw-r--r--drivers/lightnvm/rrpc.h5
-rw-r--r--drivers/nvme/host/Kconfig5
-rw-r--r--drivers/nvme/host/core.c1
-rw-r--r--drivers/nvme/host/lightnvm.c12
-rw-r--r--drivers/nvme/host/nvme.h4
-rw-r--r--drivers/nvme/host/pci.c13
-rw-r--r--drivers/of/irq.c9
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c9
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c5
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c1
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c48
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c1
-rw-r--r--drivers/s390/block/dasd.c1
-rw-r--r--drivers/s390/block/dasd_alias.c23
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_sysfs.c6
-rw-r--r--drivers/spi/spi-atmel.c1
-rw-r--r--drivers/spi/spi-bcm2835aux.c4
-rw-r--r--drivers/spi/spi-fsl-espi.c4
-rw-r--r--drivers/spi/spi-imx.c8
-rw-r--r--drivers/spi/spi-loopback-test.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c3
-rw-r--r--fs/btrfs/inode.c2
-rw-r--r--fs/cifs/cifs_dfs_ref.c2
-rw-r--r--fs/cifs/cifsencrypt.c2
-rw-r--r--fs/cifs/connect.c3
-rw-r--r--fs/direct-io.c2
-rw-r--r--fs/efivarfs/file.c70
-rw-r--r--fs/efivarfs/inode.c30
-rw-r--r--fs/efivarfs/internal.h3
-rw-r--r--fs/efivarfs/super.c16
-rw-r--r--fs/ext4/balloc.c7
-rw-r--r--fs/ext4/crypto.c56
-rw-r--r--fs/ext4/dir.c13
-rw-r--r--fs/ext4/ext4.h1
-rw-r--r--fs/ext4/extents.c4
-rw-r--r--fs/ext4/file.c9
-rw-r--r--fs/ext4/ialloc.c6
-rw-r--r--fs/ext4/inode.c72
-rw-r--r--fs/ext4/ioctl.c2
-rw-r--r--fs/ext4/mballoc.c2
-rw-r--r--fs/ext4/move_extent.c15
-rw-r--r--fs/ext4/namei.c26
-rw-r--r--fs/ext4/resize.c2
-rw-r--r--fs/fs-writeback.c15
-rw-r--r--fs/inode.c6
-rw-r--r--fs/notify/mark.c53
-rw-r--r--include/asm-generic/pgtable.h8
-rw-r--r--include/drm/drm_crtc.h8
-rw-r--r--include/dt-bindings/clock/tegra210-car.h2
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/efi.h5
-rw-r--r--include/linux/fsnotify_backend.h5
-rw-r--r--include/linux/ftrace.h6
-rw-r--r--include/linux/intel-iommu.h3
-rw-r--r--include/linux/lightnvm.h4
-rw-r--r--include/linux/tracepoint.h5
-rw-r--r--include/linux/ucs2_string.h4
-rw-r--r--ipc/shm.c53
-rw-r--r--kernel/events/core.c4
-rw-r--r--kernel/memremap.c2
-rw-r--r--kernel/module.c4
-rw-r--r--kernel/resource.c5
-rw-r--r--kernel/trace/ftrace.c36
-rw-r--r--lib/ucs2_string.c62
-rw-r--r--mm/huge_memory.c4
-rw-r--r--mm/hugetlb.c6
-rw-r--r--mm/mmap.c34
-rw-r--r--mm/slab.c12
-rw-r--r--mm/slab.h1
-rw-r--r--mm/slab_common.c1
-rw-r--r--mm/slob.c4
-rw-r--r--mm/slub.c38
-rw-r--r--sound/core/pcm_native.c16
-rw-r--r--sound/core/seq/seq_memory.c13
-rw-r--r--sound/core/seq/seq_ports.c13
-rw-r--r--sound/pci/hda/hda_intel.c4
-rwxr-xr-xtools/testing/selftests/efivarfs/efivarfs.sh19
-rw-r--r--tools/testing/selftests/efivarfs/open-unlink.c72
-rw-r--r--virt/kvm/arm/arch_timer.c9
221 files changed, 2124 insertions, 919 deletions
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
index ace05992a262..20df350b9ef3 100644
--- a/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
@@ -30,7 +30,7 @@ that they are defined using standard clock bindings with following
30clock-output-names: 30clock-output-names:
31 - "xin24m" - crystal input - required, 31 - "xin24m" - crystal input - required,
32 - "ext_i2s" - external I2S clock - optional, 32 - "ext_i2s" - external I2S clock - optional,
33 - "ext_gmac" - external GMAC clock - optional 33 - "rmii_clkin" - external EMAC clock - optional
34 34
35Example: Clock controller node: 35Example: Clock controller node:
36 36
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 7803e77d85cb..007a5b46256a 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -24,9 +24,8 @@ Main node required properties:
24 1 = edge triggered 24 1 = edge triggered
25 4 = level triggered 25 4 = level triggered
26 26
27 Cells 4 and beyond are reserved for future use. When the 1st cell 27 Cells 4 and beyond are reserved for future use and must have a value
28 has a value of 0 or 1, cells 4 and beyond act as padding, and may be 28 of 0 if present.
29 ignored. It is recommended that padding cells have a value of 0.
30 29
31- reg : Specifies base physical address(s) and size of the GIC 30- reg : Specifies base physical address(s) and size of the GIC
32 registers, in the following order: 31 registers, in the following order:
diff --git a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
index ac2fcd6ff4b8..1068ffce9f91 100644
--- a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
@@ -14,6 +14,10 @@ Required properties:
14 interrupt number is the rtc alarm interrupt and second interrupt number 14 interrupt number is the rtc alarm interrupt and second interrupt number
15 is the rtc tick interrupt. The number of cells representing a interrupt 15 is the rtc tick interrupt. The number of cells representing a interrupt
16 depends on the parent interrupt controller. 16 depends on the parent interrupt controller.
17- clocks: Must contain a list of phandle and clock specifier for the rtc
18 and source clocks.
19- clock-names: Must contain "rtc" and "rtc_src" entries sorted in the
20 same order as the clocks property.
17 21
18Example: 22Example:
19 23
@@ -21,4 +25,6 @@ Example:
21 compatible = "samsung,s3c6410-rtc"; 25 compatible = "samsung,s3c6410-rtc";
22 reg = <0x10070000 0x100>; 26 reg = <0x10070000 0x100>;
23 interrupts = <44 0 45 0>; 27 interrupts = <44 0 45 0>;
28 clocks = <&clock CLK_RTC>, <&s2mps11_osc S2MPS11_CLK_AP>;
29 clock-names = "rtc", "rtc_src";
24 }; 30 };
diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
index 35ae1fb3537f..ed94c217c98d 100644
--- a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
@@ -9,7 +9,7 @@ Optional properties:
9- fsl,uart-has-rtscts : Indicate the uart has rts and cts 9- fsl,uart-has-rtscts : Indicate the uart has rts and cts
10- fsl,irda-mode : Indicate the uart supports irda mode 10- fsl,irda-mode : Indicate the uart supports irda mode
11- fsl,dte-mode : Indicate the uart works in DTE mode. The uart works 11- fsl,dte-mode : Indicate the uart works in DTE mode. The uart works
12 is DCE mode by default. 12 in DCE mode by default.
13 13
14Note: Each uart controller should have an alias correctly numbered 14Note: Each uart controller should have an alias correctly numbered
15in "aliases" node. 15in "aliases" node.
diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt
index c477af086e65..686a64bba775 100644
--- a/Documentation/filesystems/efivarfs.txt
+++ b/Documentation/filesystems/efivarfs.txt
@@ -14,3 +14,10 @@ filesystem.
14efivarfs is typically mounted like this, 14efivarfs is typically mounted like this,
15 15
16 mount -t efivarfs none /sys/firmware/efi/efivars 16 mount -t efivarfs none /sys/firmware/efi/efivars
17
18Due to the presence of numerous firmware bugs where removing non-standard
19UEFI variables causes the system firmware to fail to POST, efivarfs
20files that are not well-known standardized variables are created
21as immutable files. This doesn't prevent removal - "chattr -i" will work -
22but it does prevent this kind of failure from being accomplished
23accidentally.
diff --git a/Documentation/timers/hpet.txt b/Documentation/timers/hpet.txt
index 767392ffd31e..a484d2c109d7 100644
--- a/Documentation/timers/hpet.txt
+++ b/Documentation/timers/hpet.txt
@@ -1,9 +1,7 @@
1 High Precision Event Timer Driver for Linux 1 High Precision Event Timer Driver for Linux
2 2
3The High Precision Event Timer (HPET) hardware follows a specification 3The High Precision Event Timer (HPET) hardware follows a specification
4by Intel and Microsoft which can be found at 4by Intel and Microsoft, revision 1.
5
6 http://www.intel.com/hardwaredesign/hpetspec_1.pdf
7 5
8Each HPET has one fixed-rate counter (at 10+ MHz, hence "High Precision") 6Each HPET has one fixed-rate counter (at 10+ MHz, hence "High Precision")
9and up to 32 comparators. Normally three or more comparators are provided, 7and up to 32 comparators. Normally three or more comparators are provided,
diff --git a/MAINTAINERS b/MAINTAINERS
index 28eb61bbecf4..4978dc19a4d2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6128,7 +6128,7 @@ F: include/uapi/linux/sunrpc/
6128 6128
6129KERNEL SELFTEST FRAMEWORK 6129KERNEL SELFTEST FRAMEWORK
6130M: Shuah Khan <shuahkh@osg.samsung.com> 6130M: Shuah Khan <shuahkh@osg.samsung.com>
6131L: linux-api@vger.kernel.org 6131L: linux-kselftest@vger.kernel.org
6132T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest 6132T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest
6133S: Maintained 6133S: Maintained
6134F: tools/testing/selftests 6134F: tools/testing/selftests
diff --git a/Makefile b/Makefile
index 701c36056ca8..fbe1b921798f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 5 2PATCHLEVEL = 5
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc4 4EXTRAVERSION = -rc5
5NAME = Blurry Fish Butt 5NAME = Blurry Fish Butt
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index b445a5d56f43..89a3a3e592d6 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -364,7 +364,7 @@ static struct crypto_alg aes_algs[] = { {
364 .cra_blkcipher = { 364 .cra_blkcipher = {
365 .min_keysize = AES_MIN_KEY_SIZE, 365 .min_keysize = AES_MIN_KEY_SIZE,
366 .max_keysize = AES_MAX_KEY_SIZE, 366 .max_keysize = AES_MAX_KEY_SIZE,
367 .ivsize = AES_BLOCK_SIZE, 367 .ivsize = 0,
368 .setkey = ce_aes_setkey, 368 .setkey = ce_aes_setkey,
369 .encrypt = ecb_encrypt, 369 .encrypt = ecb_encrypt,
370 .decrypt = ecb_decrypt, 370 .decrypt = ecb_decrypt,
@@ -441,7 +441,7 @@ static struct crypto_alg aes_algs[] = { {
441 .cra_ablkcipher = { 441 .cra_ablkcipher = {
442 .min_keysize = AES_MIN_KEY_SIZE, 442 .min_keysize = AES_MIN_KEY_SIZE,
443 .max_keysize = AES_MAX_KEY_SIZE, 443 .max_keysize = AES_MAX_KEY_SIZE,
444 .ivsize = AES_BLOCK_SIZE, 444 .ivsize = 0,
445 .setkey = ablk_set_key, 445 .setkey = ablk_set_key,
446 .encrypt = ablk_encrypt, 446 .encrypt = ablk_encrypt,
447 .decrypt = ablk_decrypt, 447 .decrypt = ablk_decrypt,
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 307237cfe728..b5e3f6d42b88 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -88,7 +88,7 @@ Image: vmlinux
88Image.%: vmlinux 88Image.%: vmlinux
89 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 89 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
90 90
91zinstall install: vmlinux 91zinstall install:
92 $(Q)$(MAKE) $(build)=$(boot) $@ 92 $(Q)$(MAKE) $(build)=$(boot) $@
93 93
94%.dtb: scripts 94%.dtb: scripts
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index abcbba2f01ba..305c552b5ec1 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -34,10 +34,10 @@ $(obj)/Image.lzma: $(obj)/Image FORCE
34$(obj)/Image.lzo: $(obj)/Image FORCE 34$(obj)/Image.lzo: $(obj)/Image FORCE
35 $(call if_changed,lzo) 35 $(call if_changed,lzo)
36 36
37install: $(obj)/Image 37install:
38 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ 38 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
39 $(obj)/Image System.map "$(INSTALL_PATH)" 39 $(obj)/Image System.map "$(INSTALL_PATH)"
40 40
41zinstall: $(obj)/Image.gz 41zinstall:
42 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ 42 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
43 $(obj)/Image.gz System.map "$(INSTALL_PATH)" 43 $(obj)/Image.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/arm64/boot/install.sh b/arch/arm64/boot/install.sh
index 12ed78aa6f0c..d91e1f022573 100644
--- a/arch/arm64/boot/install.sh
+++ b/arch/arm64/boot/install.sh
@@ -20,6 +20,20 @@
20# $4 - default install path (blank if root directory) 20# $4 - default install path (blank if root directory)
21# 21#
22 22
23verify () {
24 if [ ! -f "$1" ]; then
25 echo "" 1>&2
26 echo " *** Missing file: $1" 1>&2
27 echo ' *** You need to run "make" before "make install".' 1>&2
28 echo "" 1>&2
29 exit 1
30 fi
31}
32
33# Make sure the files actually exist
34verify "$2"
35verify "$3"
36
23# User may have a custom install script 37# User may have a custom install script
24if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi 38if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
25if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi 39if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 05d9e16c0dfd..7a3d22a46faf 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -294,7 +294,7 @@ static struct crypto_alg aes_algs[] = { {
294 .cra_blkcipher = { 294 .cra_blkcipher = {
295 .min_keysize = AES_MIN_KEY_SIZE, 295 .min_keysize = AES_MIN_KEY_SIZE,
296 .max_keysize = AES_MAX_KEY_SIZE, 296 .max_keysize = AES_MAX_KEY_SIZE,
297 .ivsize = AES_BLOCK_SIZE, 297 .ivsize = 0,
298 .setkey = aes_setkey, 298 .setkey = aes_setkey,
299 .encrypt = ecb_encrypt, 299 .encrypt = ecb_encrypt,
300 .decrypt = ecb_decrypt, 300 .decrypt = ecb_decrypt,
@@ -371,7 +371,7 @@ static struct crypto_alg aes_algs[] = { {
371 .cra_ablkcipher = { 371 .cra_ablkcipher = {
372 .min_keysize = AES_MIN_KEY_SIZE, 372 .min_keysize = AES_MIN_KEY_SIZE,
373 .max_keysize = AES_MAX_KEY_SIZE, 373 .max_keysize = AES_MAX_KEY_SIZE,
374 .ivsize = AES_BLOCK_SIZE, 374 .ivsize = 0,
375 .setkey = ablk_set_key, 375 .setkey = ablk_set_key,
376 .encrypt = ablk_encrypt, 376 .encrypt = ablk_encrypt,
377 .decrypt = ablk_decrypt, 377 .decrypt = ablk_decrypt,
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index bef6e9243c63..d201d4b396d1 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -107,8 +107,6 @@
107#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \ 107#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
108 TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ) 108 TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
109 109
110#define TCR_EL2_FLAGS (TCR_EL2_RES1 | TCR_EL2_PS_40B)
111
112/* VTCR_EL2 Registers bits */ 110/* VTCR_EL2 Registers bits */
113#define VTCR_EL2_RES1 (1 << 31) 111#define VTCR_EL2_RES1 (1 << 31)
114#define VTCR_EL2_PS_MASK (7 << 16) 112#define VTCR_EL2_PS_MASK (7 << 16)
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 8aee3aeec3e6..c536c9e307b9 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -226,11 +226,28 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
226 return retval; 226 return retval;
227} 227}
228 228
229static void send_user_sigtrap(int si_code)
230{
231 struct pt_regs *regs = current_pt_regs();
232 siginfo_t info = {
233 .si_signo = SIGTRAP,
234 .si_errno = 0,
235 .si_code = si_code,
236 .si_addr = (void __user *)instruction_pointer(regs),
237 };
238
239 if (WARN_ON(!user_mode(regs)))
240 return;
241
242 if (interrupts_enabled(regs))
243 local_irq_enable();
244
245 force_sig_info(SIGTRAP, &info, current);
246}
247
229static int single_step_handler(unsigned long addr, unsigned int esr, 248static int single_step_handler(unsigned long addr, unsigned int esr,
230 struct pt_regs *regs) 249 struct pt_regs *regs)
231{ 250{
232 siginfo_t info;
233
234 /* 251 /*
235 * If we are stepping a pending breakpoint, call the hw_breakpoint 252 * If we are stepping a pending breakpoint, call the hw_breakpoint
236 * handler first. 253 * handler first.
@@ -239,11 +256,7 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
239 return 0; 256 return 0;
240 257
241 if (user_mode(regs)) { 258 if (user_mode(regs)) {
242 info.si_signo = SIGTRAP; 259 send_user_sigtrap(TRAP_HWBKPT);
243 info.si_errno = 0;
244 info.si_code = TRAP_HWBKPT;
245 info.si_addr = (void __user *)instruction_pointer(regs);
246 force_sig_info(SIGTRAP, &info, current);
247 260
248 /* 261 /*
249 * ptrace will disable single step unless explicitly 262 * ptrace will disable single step unless explicitly
@@ -307,17 +320,8 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
307static int brk_handler(unsigned long addr, unsigned int esr, 320static int brk_handler(unsigned long addr, unsigned int esr,
308 struct pt_regs *regs) 321 struct pt_regs *regs)
309{ 322{
310 siginfo_t info;
311
312 if (user_mode(regs)) { 323 if (user_mode(regs)) {
313 info = (siginfo_t) { 324 send_user_sigtrap(TRAP_BRKPT);
314 .si_signo = SIGTRAP,
315 .si_errno = 0,
316 .si_code = TRAP_BRKPT,
317 .si_addr = (void __user *)instruction_pointer(regs),
318 };
319
320 force_sig_info(SIGTRAP, &info, current);
321 } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) { 325 } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
322 pr_warning("Unexpected kernel BRK exception at EL1\n"); 326 pr_warning("Unexpected kernel BRK exception at EL1\n");
323 return -EFAULT; 327 return -EFAULT;
@@ -328,7 +332,6 @@ static int brk_handler(unsigned long addr, unsigned int esr,
328 332
329int aarch32_break_handler(struct pt_regs *regs) 333int aarch32_break_handler(struct pt_regs *regs)
330{ 334{
331 siginfo_t info;
332 u32 arm_instr; 335 u32 arm_instr;
333 u16 thumb_instr; 336 u16 thumb_instr;
334 bool bp = false; 337 bool bp = false;
@@ -359,14 +362,7 @@ int aarch32_break_handler(struct pt_regs *regs)
359 if (!bp) 362 if (!bp)
360 return -EFAULT; 363 return -EFAULT;
361 364
362 info = (siginfo_t) { 365 send_user_sigtrap(TRAP_BRKPT);
363 .si_signo = SIGTRAP,
364 .si_errno = 0,
365 .si_code = TRAP_BRKPT,
366 .si_addr = pc,
367 };
368
369 force_sig_info(SIGTRAP, &info, current);
370 return 0; 366 return 0;
371} 367}
372 368
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index 999633bd7294..352f7abd91c9 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -89,6 +89,7 @@ __efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy);
89__efistub_memmove = KALLSYMS_HIDE(__pi_memmove); 89__efistub_memmove = KALLSYMS_HIDE(__pi_memmove);
90__efistub_memset = KALLSYMS_HIDE(__pi_memset); 90__efistub_memset = KALLSYMS_HIDE(__pi_memset);
91__efistub_strlen = KALLSYMS_HIDE(__pi_strlen); 91__efistub_strlen = KALLSYMS_HIDE(__pi_strlen);
92__efistub_strnlen = KALLSYMS_HIDE(__pi_strnlen);
92__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp); 93__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp);
93__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp); 94__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp);
94__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area); 95__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 4fad9787ab46..d9751a4769e7 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -44,14 +44,13 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
44 unsigned long irq_stack_ptr; 44 unsigned long irq_stack_ptr;
45 45
46 /* 46 /*
47 * Use raw_smp_processor_id() to avoid false-positives from 47 * Switching between stacks is valid when tracing current and in
48 * CONFIG_DEBUG_PREEMPT. get_wchan() calls unwind_frame() on sleeping 48 * non-preemptible context.
49 * task stacks, we can be pre-empted in this case, so
50 * {raw_,}smp_processor_id() may give us the wrong value. Sleeping
51 * tasks can't ever be on an interrupt stack, so regardless of cpu,
52 * the checks will always fail.
53 */ 49 */
54 irq_stack_ptr = IRQ_STACK_PTR(raw_smp_processor_id()); 50 if (tsk == current && !preemptible())
51 irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
52 else
53 irq_stack_ptr = 0;
55 54
56 low = frame->sp; 55 low = frame->sp;
57 /* irq stacks are not THREAD_SIZE aligned */ 56 /* irq stacks are not THREAD_SIZE aligned */
@@ -64,8 +63,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
64 return -EINVAL; 63 return -EINVAL;
65 64
66 frame->sp = fp + 0x10; 65 frame->sp = fp + 0x10;
67 frame->fp = *(unsigned long *)(fp); 66 frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
68 frame->pc = *(unsigned long *)(fp + 8); 67 frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
69 68
70#ifdef CONFIG_FUNCTION_GRAPH_TRACER 69#ifdef CONFIG_FUNCTION_GRAPH_TRACER
71 if (tsk && tsk->ret_stack && 70 if (tsk && tsk->ret_stack &&
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index cbedd724f48e..c5392081b49b 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -146,9 +146,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
146static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) 146static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
147{ 147{
148 struct stackframe frame; 148 struct stackframe frame;
149 unsigned long irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id()); 149 unsigned long irq_stack_ptr;
150 int skip; 150 int skip;
151 151
152 /*
153 * Switching between stacks is valid when tracing current and in
154 * non-preemptible context.
155 */
156 if (tsk == current && !preemptible())
157 irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
158 else
159 irq_stack_ptr = 0;
160
152 pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); 161 pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
153 162
154 if (!tsk) 163 if (!tsk)
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 3e568dcd907b..d073b5a216f7 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -64,7 +64,7 @@ __do_hyp_init:
64 mrs x4, tcr_el1 64 mrs x4, tcr_el1
65 ldr x5, =TCR_EL2_MASK 65 ldr x5, =TCR_EL2_MASK
66 and x4, x4, x5 66 and x4, x4, x5
67 ldr x5, =TCR_EL2_FLAGS 67 mov x5, #TCR_EL2_RES1
68 orr x4, x4, x5 68 orr x4, x4, x5
69 69
70#ifndef CONFIG_ARM64_VA_BITS_48 70#ifndef CONFIG_ARM64_VA_BITS_48
@@ -85,15 +85,17 @@ __do_hyp_init:
85 ldr_l x5, idmap_t0sz 85 ldr_l x5, idmap_t0sz
86 bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH 86 bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
87#endif 87#endif
88 msr tcr_el2, x4
89
90 ldr x4, =VTCR_EL2_FLAGS
91 /* 88 /*
92 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in 89 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
93 * VTCR_EL2. 90 * TCR_EL2 and VTCR_EL2.
94 */ 91 */
95 mrs x5, ID_AA64MMFR0_EL1 92 mrs x5, ID_AA64MMFR0_EL1
96 bfi x4, x5, #16, #3 93 bfi x4, x5, #16, #3
94
95 msr tcr_el2, x4
96
97 ldr x4, =VTCR_EL2_FLAGS
98 bfi x4, x5, #16, #3
97 /* 99 /*
98 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in 100 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in
99 * VTCR_EL2. 101 * VTCR_EL2.
diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S
index 2ca665711bf2..eae38da6e0bb 100644
--- a/arch/arm64/lib/strnlen.S
+++ b/arch/arm64/lib/strnlen.S
@@ -168,4 +168,4 @@ CPU_LE( lsr tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). */
168.Lhit_limit: 168.Lhit_limit:
169 mov len, limit 169 mov len, limit
170 ret 170 ret
171ENDPROC(strnlen) 171ENDPIPROC(strnlen)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 331c4ca6205c..a6e757cbab77 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -933,6 +933,10 @@ static int __init __iommu_dma_init(void)
933 ret = register_iommu_dma_ops_notifier(&platform_bus_type); 933 ret = register_iommu_dma_ops_notifier(&platform_bus_type);
934 if (!ret) 934 if (!ret)
935 ret = register_iommu_dma_ops_notifier(&amba_bustype); 935 ret = register_iommu_dma_ops_notifier(&amba_bustype);
936
937 /* handle devices queued before this arch_initcall */
938 if (!ret)
939 __iommu_attach_notifier(NULL, BUS_NOTIFY_ADD_DEVICE, NULL);
936 return ret; 940 return ret;
937} 941}
938arch_initcall(__iommu_dma_init); 942arch_initcall(__iommu_dma_init);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 92ddac1e8ca2..abe2a9542b3a 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -371,6 +371,13 @@ static int __kprobes do_translation_fault(unsigned long addr,
371 return 0; 371 return 0;
372} 372}
373 373
374static int do_alignment_fault(unsigned long addr, unsigned int esr,
375 struct pt_regs *regs)
376{
377 do_bad_area(addr, esr, regs);
378 return 0;
379}
380
374/* 381/*
375 * This abort handler always returns "fault". 382 * This abort handler always returns "fault".
376 */ 383 */
@@ -418,7 +425,7 @@ static struct fault_info {
418 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, 425 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
419 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, 426 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
420 { do_bad, SIGBUS, 0, "unknown 32" }, 427 { do_bad, SIGBUS, 0, "unknown 32" },
421 { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, 428 { do_alignment_fault, SIGBUS, BUS_ADRALN, "alignment fault" },
422 { do_bad, SIGBUS, 0, "unknown 34" }, 429 { do_bad, SIGBUS, 0, "unknown 34" },
423 { do_bad, SIGBUS, 0, "unknown 35" }, 430 { do_bad, SIGBUS, 0, "unknown 35" },
424 { do_bad, SIGBUS, 0, "unknown 36" }, 431 { do_bad, SIGBUS, 0, "unknown 36" },
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index fc96e814188e..d1fc4796025e 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -108,6 +108,8 @@ CONFIG_NFT_NAT=m
108CONFIG_NFT_QUEUE=m 108CONFIG_NFT_QUEUE=m
109CONFIG_NFT_REJECT=m 109CONFIG_NFT_REJECT=m
110CONFIG_NFT_COMPAT=m 110CONFIG_NFT_COMPAT=m
111CONFIG_NFT_DUP_NETDEV=m
112CONFIG_NFT_FWD_NETDEV=m
111CONFIG_NETFILTER_XT_SET=m 113CONFIG_NETFILTER_XT_SET=m
112CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 114CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
113CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 115CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -266,6 +268,12 @@ CONFIG_L2TP=m
266CONFIG_BRIDGE=m 268CONFIG_BRIDGE=m
267CONFIG_ATALK=m 269CONFIG_ATALK=m
268CONFIG_6LOWPAN=m 270CONFIG_6LOWPAN=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
272CONFIG_6LOWPAN_GHC_UDP=m
273CONFIG_6LOWPAN_GHC_ICMPV6=m
274CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
275CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
276CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
269CONFIG_DNS_RESOLVER=y 277CONFIG_DNS_RESOLVER=y
270CONFIG_BATMAN_ADV=m 278CONFIG_BATMAN_ADV=m
271CONFIG_BATMAN_ADV_DAT=y 279CONFIG_BATMAN_ADV_DAT=y
@@ -366,6 +374,7 @@ CONFIG_ARIADNE=y
366# CONFIG_NET_VENDOR_INTEL is not set 374# CONFIG_NET_VENDOR_INTEL is not set
367# CONFIG_NET_VENDOR_MARVELL is not set 375# CONFIG_NET_VENDOR_MARVELL is not set
368# CONFIG_NET_VENDOR_MICREL is not set 376# CONFIG_NET_VENDOR_MICREL is not set
377# CONFIG_NET_VENDOR_NETRONOME is not set
369CONFIG_HYDRA=y 378CONFIG_HYDRA=y
370CONFIG_APNE=y 379CONFIG_APNE=y
371CONFIG_ZORRO8390=y 380CONFIG_ZORRO8390=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 05c904f08d9d..9bfe8be3658c 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m
106CONFIG_NFT_QUEUE=m 106CONFIG_NFT_QUEUE=m
107CONFIG_NFT_REJECT=m 107CONFIG_NFT_REJECT=m
108CONFIG_NFT_COMPAT=m 108CONFIG_NFT_COMPAT=m
109CONFIG_NFT_DUP_NETDEV=m
110CONFIG_NFT_FWD_NETDEV=m
109CONFIG_NETFILTER_XT_SET=m 111CONFIG_NETFILTER_XT_SET=m
110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 112CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 113CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@ CONFIG_L2TP=m
264CONFIG_BRIDGE=m 266CONFIG_BRIDGE=m
265CONFIG_ATALK=m 267CONFIG_ATALK=m
266CONFIG_6LOWPAN=m 268CONFIG_6LOWPAN=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
270CONFIG_6LOWPAN_GHC_UDP=m
271CONFIG_6LOWPAN_GHC_ICMPV6=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
273CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
274CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
267CONFIG_DNS_RESOLVER=y 275CONFIG_DNS_RESOLVER=y
268CONFIG_BATMAN_ADV=m 276CONFIG_BATMAN_ADV=m
269CONFIG_BATMAN_ADV_DAT=y 277CONFIG_BATMAN_ADV_DAT=y
@@ -344,6 +352,7 @@ CONFIG_VETH=m
344# CONFIG_NET_VENDOR_MARVELL is not set 352# CONFIG_NET_VENDOR_MARVELL is not set
345# CONFIG_NET_VENDOR_MICREL is not set 353# CONFIG_NET_VENDOR_MICREL is not set
346# CONFIG_NET_VENDOR_NATSEMI is not set 354# CONFIG_NET_VENDOR_NATSEMI is not set
355# CONFIG_NET_VENDOR_NETRONOME is not set
347# CONFIG_NET_VENDOR_QUALCOMM is not set 356# CONFIG_NET_VENDOR_QUALCOMM is not set
348# CONFIG_NET_VENDOR_RENESAS is not set 357# CONFIG_NET_VENDOR_RENESAS is not set
349# CONFIG_NET_VENDOR_ROCKER is not set 358# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index d572b731c510..ebdcfae55580 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m
106CONFIG_NFT_QUEUE=m 106CONFIG_NFT_QUEUE=m
107CONFIG_NFT_REJECT=m 107CONFIG_NFT_REJECT=m
108CONFIG_NFT_COMPAT=m 108CONFIG_NFT_COMPAT=m
109CONFIG_NFT_DUP_NETDEV=m
110CONFIG_NFT_FWD_NETDEV=m
109CONFIG_NETFILTER_XT_SET=m 111CONFIG_NETFILTER_XT_SET=m
110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 112CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 113CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@ CONFIG_L2TP=m
264CONFIG_BRIDGE=m 266CONFIG_BRIDGE=m
265CONFIG_ATALK=m 267CONFIG_ATALK=m
266CONFIG_6LOWPAN=m 268CONFIG_6LOWPAN=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
270CONFIG_6LOWPAN_GHC_UDP=m
271CONFIG_6LOWPAN_GHC_ICMPV6=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
273CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
274CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
267CONFIG_DNS_RESOLVER=y 275CONFIG_DNS_RESOLVER=y
268CONFIG_BATMAN_ADV=m 276CONFIG_BATMAN_ADV=m
269CONFIG_BATMAN_ADV_DAT=y 277CONFIG_BATMAN_ADV_DAT=y
@@ -353,6 +361,7 @@ CONFIG_ATARILANCE=y
353# CONFIG_NET_VENDOR_INTEL is not set 361# CONFIG_NET_VENDOR_INTEL is not set
354# CONFIG_NET_VENDOR_MARVELL is not set 362# CONFIG_NET_VENDOR_MARVELL is not set
355# CONFIG_NET_VENDOR_MICREL is not set 363# CONFIG_NET_VENDOR_MICREL is not set
364# CONFIG_NET_VENDOR_NETRONOME is not set
356CONFIG_NE2000=y 365CONFIG_NE2000=y
357# CONFIG_NET_VENDOR_QUALCOMM is not set 366# CONFIG_NET_VENDOR_QUALCOMM is not set
358# CONFIG_NET_VENDOR_RENESAS is not set 367# CONFIG_NET_VENDOR_RENESAS is not set
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 11a30c65ad44..8acc65e54995 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m
104CONFIG_NFT_QUEUE=m 104CONFIG_NFT_QUEUE=m
105CONFIG_NFT_REJECT=m 105CONFIG_NFT_REJECT=m
106CONFIG_NFT_COMPAT=m 106CONFIG_NFT_COMPAT=m
107CONFIG_NFT_DUP_NETDEV=m
108CONFIG_NFT_FWD_NETDEV=m
107CONFIG_NETFILTER_XT_SET=m 109CONFIG_NETFILTER_XT_SET=m
108CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
109CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@ CONFIG_L2TP=m
262CONFIG_BRIDGE=m 264CONFIG_BRIDGE=m
263CONFIG_ATALK=m 265CONFIG_ATALK=m
264CONFIG_6LOWPAN=m 266CONFIG_6LOWPAN=m
267CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
268CONFIG_6LOWPAN_GHC_UDP=m
269CONFIG_6LOWPAN_GHC_ICMPV6=m
270CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
265CONFIG_DNS_RESOLVER=y 273CONFIG_DNS_RESOLVER=y
266CONFIG_BATMAN_ADV=m 274CONFIG_BATMAN_ADV=m
267CONFIG_BATMAN_ADV_DAT=y 275CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@ CONFIG_BVME6000_NET=y
343# CONFIG_NET_VENDOR_MARVELL is not set 351# CONFIG_NET_VENDOR_MARVELL is not set
344# CONFIG_NET_VENDOR_MICREL is not set 352# CONFIG_NET_VENDOR_MICREL is not set
345# CONFIG_NET_VENDOR_NATSEMI is not set 353# CONFIG_NET_VENDOR_NATSEMI is not set
354# CONFIG_NET_VENDOR_NETRONOME is not set
346# CONFIG_NET_VENDOR_QUALCOMM is not set 355# CONFIG_NET_VENDOR_QUALCOMM is not set
347# CONFIG_NET_VENDOR_RENESAS is not set 356# CONFIG_NET_VENDOR_RENESAS is not set
348# CONFIG_NET_VENDOR_ROCKER is not set 357# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 6630a5154b9d..0c6a3d52b26e 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m
106CONFIG_NFT_QUEUE=m 106CONFIG_NFT_QUEUE=m
107CONFIG_NFT_REJECT=m 107CONFIG_NFT_REJECT=m
108CONFIG_NFT_COMPAT=m 108CONFIG_NFT_COMPAT=m
109CONFIG_NFT_DUP_NETDEV=m
110CONFIG_NFT_FWD_NETDEV=m
109CONFIG_NETFILTER_XT_SET=m 111CONFIG_NETFILTER_XT_SET=m
110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 112CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 113CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@ CONFIG_L2TP=m
264CONFIG_BRIDGE=m 266CONFIG_BRIDGE=m
265CONFIG_ATALK=m 267CONFIG_ATALK=m
266CONFIG_6LOWPAN=m 268CONFIG_6LOWPAN=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
270CONFIG_6LOWPAN_GHC_UDP=m
271CONFIG_6LOWPAN_GHC_ICMPV6=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
273CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
274CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
267CONFIG_DNS_RESOLVER=y 275CONFIG_DNS_RESOLVER=y
268CONFIG_BATMAN_ADV=m 276CONFIG_BATMAN_ADV=m
269CONFIG_BATMAN_ADV_DAT=y 277CONFIG_BATMAN_ADV_DAT=y
@@ -345,6 +353,7 @@ CONFIG_HPLANCE=y
345# CONFIG_NET_VENDOR_MARVELL is not set 353# CONFIG_NET_VENDOR_MARVELL is not set
346# CONFIG_NET_VENDOR_MICREL is not set 354# CONFIG_NET_VENDOR_MICREL is not set
347# CONFIG_NET_VENDOR_NATSEMI is not set 355# CONFIG_NET_VENDOR_NATSEMI is not set
356# CONFIG_NET_VENDOR_NETRONOME is not set
348# CONFIG_NET_VENDOR_QUALCOMM is not set 357# CONFIG_NET_VENDOR_QUALCOMM is not set
349# CONFIG_NET_VENDOR_RENESAS is not set 358# CONFIG_NET_VENDOR_RENESAS is not set
350# CONFIG_NET_VENDOR_ROCKER is not set 359# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 1d90b71d0903..12a8a6cb32f4 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -105,6 +105,8 @@ CONFIG_NFT_NAT=m
105CONFIG_NFT_QUEUE=m 105CONFIG_NFT_QUEUE=m
106CONFIG_NFT_REJECT=m 106CONFIG_NFT_REJECT=m
107CONFIG_NFT_COMPAT=m 107CONFIG_NFT_COMPAT=m
108CONFIG_NFT_DUP_NETDEV=m
109CONFIG_NFT_FWD_NETDEV=m
108CONFIG_NETFILTER_XT_SET=m 110CONFIG_NETFILTER_XT_SET=m
109CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 111CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
110CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 112CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -266,6 +268,12 @@ CONFIG_DEV_APPLETALK=m
266CONFIG_IPDDP=m 268CONFIG_IPDDP=m
267CONFIG_IPDDP_ENCAP=y 269CONFIG_IPDDP_ENCAP=y
268CONFIG_6LOWPAN=m 270CONFIG_6LOWPAN=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
272CONFIG_6LOWPAN_GHC_UDP=m
273CONFIG_6LOWPAN_GHC_ICMPV6=m
274CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
275CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
276CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
269CONFIG_DNS_RESOLVER=y 277CONFIG_DNS_RESOLVER=y
270CONFIG_BATMAN_ADV=m 278CONFIG_BATMAN_ADV=m
271CONFIG_BATMAN_ADV_DAT=y 279CONFIG_BATMAN_ADV_DAT=y
@@ -362,6 +370,7 @@ CONFIG_MAC89x0=y
362# CONFIG_NET_VENDOR_MARVELL is not set 370# CONFIG_NET_VENDOR_MARVELL is not set
363# CONFIG_NET_VENDOR_MICREL is not set 371# CONFIG_NET_VENDOR_MICREL is not set
364CONFIG_MACSONIC=y 372CONFIG_MACSONIC=y
373# CONFIG_NET_VENDOR_NETRONOME is not set
365CONFIG_MAC8390=y 374CONFIG_MAC8390=y
366# CONFIG_NET_VENDOR_QUALCOMM is not set 375# CONFIG_NET_VENDOR_QUALCOMM is not set
367# CONFIG_NET_VENDOR_RENESAS is not set 376# CONFIG_NET_VENDOR_RENESAS is not set
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 1fd21c1ca87f..64ff2dcb34c8 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -115,6 +115,8 @@ CONFIG_NFT_NAT=m
115CONFIG_NFT_QUEUE=m 115CONFIG_NFT_QUEUE=m
116CONFIG_NFT_REJECT=m 116CONFIG_NFT_REJECT=m
117CONFIG_NFT_COMPAT=m 117CONFIG_NFT_COMPAT=m
118CONFIG_NFT_DUP_NETDEV=m
119CONFIG_NFT_FWD_NETDEV=m
118CONFIG_NETFILTER_XT_SET=m 120CONFIG_NETFILTER_XT_SET=m
119CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 121CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
120CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 122CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -276,6 +278,12 @@ CONFIG_DEV_APPLETALK=m
276CONFIG_IPDDP=m 278CONFIG_IPDDP=m
277CONFIG_IPDDP_ENCAP=y 279CONFIG_IPDDP_ENCAP=y
278CONFIG_6LOWPAN=m 280CONFIG_6LOWPAN=m
281CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
282CONFIG_6LOWPAN_GHC_UDP=m
283CONFIG_6LOWPAN_GHC_ICMPV6=m
284CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
285CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
286CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
279CONFIG_DNS_RESOLVER=y 287CONFIG_DNS_RESOLVER=y
280CONFIG_BATMAN_ADV=m 288CONFIG_BATMAN_ADV=m
281CONFIG_BATMAN_ADV_DAT=y 289CONFIG_BATMAN_ADV_DAT=y
@@ -404,6 +412,7 @@ CONFIG_MVME16x_NET=y
404# CONFIG_NET_VENDOR_MARVELL is not set 412# CONFIG_NET_VENDOR_MARVELL is not set
405# CONFIG_NET_VENDOR_MICREL is not set 413# CONFIG_NET_VENDOR_MICREL is not set
406CONFIG_MACSONIC=y 414CONFIG_MACSONIC=y
415# CONFIG_NET_VENDOR_NETRONOME is not set
407CONFIG_HYDRA=y 416CONFIG_HYDRA=y
408CONFIG_MAC8390=y 417CONFIG_MAC8390=y
409CONFIG_NE2000=y 418CONFIG_NE2000=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 74e10f79d7b1..07fc6abcfe0c 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -103,6 +103,8 @@ CONFIG_NFT_NAT=m
103CONFIG_NFT_QUEUE=m 103CONFIG_NFT_QUEUE=m
104CONFIG_NFT_REJECT=m 104CONFIG_NFT_REJECT=m
105CONFIG_NFT_COMPAT=m 105CONFIG_NFT_COMPAT=m
106CONFIG_NFT_DUP_NETDEV=m
107CONFIG_NFT_FWD_NETDEV=m
106CONFIG_NETFILTER_XT_SET=m 108CONFIG_NETFILTER_XT_SET=m
107CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 109CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
108CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 110CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -261,6 +263,12 @@ CONFIG_L2TP=m
261CONFIG_BRIDGE=m 263CONFIG_BRIDGE=m
262CONFIG_ATALK=m 264CONFIG_ATALK=m
263CONFIG_6LOWPAN=m 265CONFIG_6LOWPAN=m
266CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
267CONFIG_6LOWPAN_GHC_UDP=m
268CONFIG_6LOWPAN_GHC_ICMPV6=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
270CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
264CONFIG_DNS_RESOLVER=y 272CONFIG_DNS_RESOLVER=y
265CONFIG_BATMAN_ADV=m 273CONFIG_BATMAN_ADV=m
266CONFIG_BATMAN_ADV_DAT=y 274CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@ CONFIG_MVME147_NET=y
343# CONFIG_NET_VENDOR_MARVELL is not set 351# CONFIG_NET_VENDOR_MARVELL is not set
344# CONFIG_NET_VENDOR_MICREL is not set 352# CONFIG_NET_VENDOR_MICREL is not set
345# CONFIG_NET_VENDOR_NATSEMI is not set 353# CONFIG_NET_VENDOR_NATSEMI is not set
354# CONFIG_NET_VENDOR_NETRONOME is not set
346# CONFIG_NET_VENDOR_QUALCOMM is not set 355# CONFIG_NET_VENDOR_QUALCOMM is not set
347# CONFIG_NET_VENDOR_RENESAS is not set 356# CONFIG_NET_VENDOR_RENESAS is not set
348# CONFIG_NET_VENDOR_ROCKER is not set 357# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 7034e716f166..69903ded88f7 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m
104CONFIG_NFT_QUEUE=m 104CONFIG_NFT_QUEUE=m
105CONFIG_NFT_REJECT=m 105CONFIG_NFT_REJECT=m
106CONFIG_NFT_COMPAT=m 106CONFIG_NFT_COMPAT=m
107CONFIG_NFT_DUP_NETDEV=m
108CONFIG_NFT_FWD_NETDEV=m
107CONFIG_NETFILTER_XT_SET=m 109CONFIG_NETFILTER_XT_SET=m
108CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
109CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@ CONFIG_L2TP=m
262CONFIG_BRIDGE=m 264CONFIG_BRIDGE=m
263CONFIG_ATALK=m 265CONFIG_ATALK=m
264CONFIG_6LOWPAN=m 266CONFIG_6LOWPAN=m
267CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
268CONFIG_6LOWPAN_GHC_UDP=m
269CONFIG_6LOWPAN_GHC_ICMPV6=m
270CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
265CONFIG_DNS_RESOLVER=y 273CONFIG_DNS_RESOLVER=y
266CONFIG_BATMAN_ADV=m 274CONFIG_BATMAN_ADV=m
267CONFIG_BATMAN_ADV_DAT=y 275CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@ CONFIG_MVME16x_NET=y
343# CONFIG_NET_VENDOR_MARVELL is not set 351# CONFIG_NET_VENDOR_MARVELL is not set
344# CONFIG_NET_VENDOR_MICREL is not set 352# CONFIG_NET_VENDOR_MICREL is not set
345# CONFIG_NET_VENDOR_NATSEMI is not set 353# CONFIG_NET_VENDOR_NATSEMI is not set
354# CONFIG_NET_VENDOR_NETRONOME is not set
346# CONFIG_NET_VENDOR_QUALCOMM is not set 355# CONFIG_NET_VENDOR_QUALCOMM is not set
347# CONFIG_NET_VENDOR_RENESAS is not set 356# CONFIG_NET_VENDOR_RENESAS is not set
348# CONFIG_NET_VENDOR_ROCKER is not set 357# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index f7deb5f702a6..bd8401686dde 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m
104CONFIG_NFT_QUEUE=m 104CONFIG_NFT_QUEUE=m
105CONFIG_NFT_REJECT=m 105CONFIG_NFT_REJECT=m
106CONFIG_NFT_COMPAT=m 106CONFIG_NFT_COMPAT=m
107CONFIG_NFT_DUP_NETDEV=m
108CONFIG_NFT_FWD_NETDEV=m
107CONFIG_NETFILTER_XT_SET=m 109CONFIG_NETFILTER_XT_SET=m
108CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
109CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@ CONFIG_L2TP=m
262CONFIG_BRIDGE=m 264CONFIG_BRIDGE=m
263CONFIG_ATALK=m 265CONFIG_ATALK=m
264CONFIG_6LOWPAN=m 266CONFIG_6LOWPAN=m
267CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
268CONFIG_6LOWPAN_GHC_UDP=m
269CONFIG_6LOWPAN_GHC_ICMPV6=m
270CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
265CONFIG_DNS_RESOLVER=y 273CONFIG_DNS_RESOLVER=y
266CONFIG_BATMAN_ADV=m 274CONFIG_BATMAN_ADV=m
267CONFIG_BATMAN_ADV_DAT=y 275CONFIG_BATMAN_ADV_DAT=y
@@ -352,6 +360,7 @@ CONFIG_VETH=m
352# CONFIG_NET_VENDOR_INTEL is not set 360# CONFIG_NET_VENDOR_INTEL is not set
353# CONFIG_NET_VENDOR_MARVELL is not set 361# CONFIG_NET_VENDOR_MARVELL is not set
354# CONFIG_NET_VENDOR_MICREL is not set 362# CONFIG_NET_VENDOR_MICREL is not set
363# CONFIG_NET_VENDOR_NETRONOME is not set
355CONFIG_NE2000=y 364CONFIG_NE2000=y
356# CONFIG_NET_VENDOR_QUALCOMM is not set 365# CONFIG_NET_VENDOR_QUALCOMM is not set
357# CONFIG_NET_VENDOR_RENESAS is not set 366# CONFIG_NET_VENDOR_RENESAS is not set
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 0ce79eb0d805..5f9fb3ab9636 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -101,6 +101,8 @@ CONFIG_NFT_NAT=m
101CONFIG_NFT_QUEUE=m 101CONFIG_NFT_QUEUE=m
102CONFIG_NFT_REJECT=m 102CONFIG_NFT_REJECT=m
103CONFIG_NFT_COMPAT=m 103CONFIG_NFT_COMPAT=m
104CONFIG_NFT_DUP_NETDEV=m
105CONFIG_NFT_FWD_NETDEV=m
104CONFIG_NETFILTER_XT_SET=m 106CONFIG_NETFILTER_XT_SET=m
105CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 107CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
106CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 108CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -259,6 +261,12 @@ CONFIG_L2TP=m
259CONFIG_BRIDGE=m 261CONFIG_BRIDGE=m
260CONFIG_ATALK=m 262CONFIG_ATALK=m
261CONFIG_6LOWPAN=m 263CONFIG_6LOWPAN=m
264CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
265CONFIG_6LOWPAN_GHC_UDP=m
266CONFIG_6LOWPAN_GHC_ICMPV6=m
267CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
268CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
262CONFIG_DNS_RESOLVER=y 270CONFIG_DNS_RESOLVER=y
263CONFIG_BATMAN_ADV=m 271CONFIG_BATMAN_ADV=m
264CONFIG_BATMAN_ADV_DAT=y 272CONFIG_BATMAN_ADV_DAT=y
@@ -340,6 +348,7 @@ CONFIG_SUN3_82586=y
340# CONFIG_NET_VENDOR_MARVELL is not set 348# CONFIG_NET_VENDOR_MARVELL is not set
341# CONFIG_NET_VENDOR_MICREL is not set 349# CONFIG_NET_VENDOR_MICREL is not set
342# CONFIG_NET_VENDOR_NATSEMI is not set 350# CONFIG_NET_VENDOR_NATSEMI is not set
351# CONFIG_NET_VENDOR_NETRONOME is not set
343# CONFIG_NET_VENDOR_QUALCOMM is not set 352# CONFIG_NET_VENDOR_QUALCOMM is not set
344# CONFIG_NET_VENDOR_RENESAS is not set 353# CONFIG_NET_VENDOR_RENESAS is not set
345# CONFIG_NET_VENDOR_ROCKER is not set 354# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 4cb787e4991f..5d1c674530e2 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -101,6 +101,8 @@ CONFIG_NFT_NAT=m
101CONFIG_NFT_QUEUE=m 101CONFIG_NFT_QUEUE=m
102CONFIG_NFT_REJECT=m 102CONFIG_NFT_REJECT=m
103CONFIG_NFT_COMPAT=m 103CONFIG_NFT_COMPAT=m
104CONFIG_NFT_DUP_NETDEV=m
105CONFIG_NFT_FWD_NETDEV=m
104CONFIG_NETFILTER_XT_SET=m 106CONFIG_NETFILTER_XT_SET=m
105CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 107CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
106CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 108CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -259,6 +261,12 @@ CONFIG_L2TP=m
259CONFIG_BRIDGE=m 261CONFIG_BRIDGE=m
260CONFIG_ATALK=m 262CONFIG_ATALK=m
261CONFIG_6LOWPAN=m 263CONFIG_6LOWPAN=m
264CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
265CONFIG_6LOWPAN_GHC_UDP=m
266CONFIG_6LOWPAN_GHC_ICMPV6=m
267CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
268CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
262CONFIG_DNS_RESOLVER=y 270CONFIG_DNS_RESOLVER=y
263CONFIG_BATMAN_ADV=m 271CONFIG_BATMAN_ADV=m
264CONFIG_BATMAN_ADV_DAT=y 272CONFIG_BATMAN_ADV_DAT=y
@@ -341,6 +349,7 @@ CONFIG_SUN3LANCE=y
341# CONFIG_NET_VENDOR_MARVELL is not set 349# CONFIG_NET_VENDOR_MARVELL is not set
342# CONFIG_NET_VENDOR_MICREL is not set 350# CONFIG_NET_VENDOR_MICREL is not set
343# CONFIG_NET_VENDOR_NATSEMI is not set 351# CONFIG_NET_VENDOR_NATSEMI is not set
352# CONFIG_NET_VENDOR_NETRONOME is not set
344# CONFIG_NET_VENDOR_QUALCOMM is not set 353# CONFIG_NET_VENDOR_QUALCOMM is not set
345# CONFIG_NET_VENDOR_RENESAS is not set 354# CONFIG_NET_VENDOR_RENESAS is not set
346# CONFIG_NET_VENDOR_ROCKER is not set 355# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index f9d96bf86910..bafaff6dcd7b 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
4#include <uapi/asm/unistd.h> 4#include <uapi/asm/unistd.h>
5 5
6 6
7#define NR_syscalls 376 7#define NR_syscalls 377
8 8
9#define __ARCH_WANT_OLD_READDIR 9#define __ARCH_WANT_OLD_READDIR
10#define __ARCH_WANT_OLD_STAT 10#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 36cf129de663..0ca729665f29 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -381,5 +381,6 @@
381#define __NR_userfaultfd 373 381#define __NR_userfaultfd 373
382#define __NR_membarrier 374 382#define __NR_membarrier 374
383#define __NR_mlock2 375 383#define __NR_mlock2 375
384#define __NR_copy_file_range 376
384 385
385#endif /* _UAPI_ASM_M68K_UNISTD_H_ */ 386#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 282cd903f4c4..8bb94261ff97 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -396,3 +396,4 @@ ENTRY(sys_call_table)
396 .long sys_userfaultfd 396 .long sys_userfaultfd
397 .long sys_membarrier 397 .long sys_membarrier
398 .long sys_mlock2 /* 375 */ 398 .long sys_mlock2 /* 375 */
399 .long sys_copy_file_range
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e4824fd04bb7..9faa18c4f3f7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -557,7 +557,7 @@ choice
557 557
558config PPC_4K_PAGES 558config PPC_4K_PAGES
559 bool "4k page size" 559 bool "4k page size"
560 select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S 560 select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
561 561
562config PPC_16K_PAGES 562config PPC_16K_PAGES
563 bool "16k page size" 563 bool "16k page size"
@@ -566,7 +566,7 @@ config PPC_16K_PAGES
566config PPC_64K_PAGES 566config PPC_64K_PAGES
567 bool "64k page size" 567 bool "64k page size"
568 depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64) 568 depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64)
569 select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S 569 select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
570 570
571config PPC_256K_PAGES 571config PPC_256K_PAGES
572 bool "256k page size" 572 bool "256k page size"
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 8d1c41d28318..ac07a30a7934 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -281,6 +281,10 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
281extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 281extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
282 pmd_t *pmdp); 282 pmd_t *pmdp);
283 283
284#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
285extern void pmdp_huge_split_prepare(struct vm_area_struct *vma,
286 unsigned long address, pmd_t *pmdp);
287
284#define pmd_move_must_withdraw pmd_move_must_withdraw 288#define pmd_move_must_withdraw pmd_move_must_withdraw
285struct spinlock; 289struct spinlock;
286static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, 290static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index c5eb86f3d452..867c39b45df6 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -81,6 +81,7 @@ struct pci_dn;
81#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */ 81#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
82#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */ 82#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */
83#define EEH_PE_REMOVED (1 << 10) /* Removed permanently */ 83#define EEH_PE_REMOVED (1 << 10) /* Removed permanently */
84#define EEH_PE_PRI_BUS (1 << 11) /* Cached primary bus */
84 85
85struct eeh_pe { 86struct eeh_pe {
86 int type; /* PE type: PHB/Bus/Device */ 87 int type; /* PE type: PHB/Bus/Device */
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
index 8e86b48d0369..32e36b16773f 100644
--- a/arch/powerpc/include/asm/trace.h
+++ b/arch/powerpc/include/asm/trace.h
@@ -57,12 +57,14 @@ DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_exit,
57extern void hcall_tracepoint_regfunc(void); 57extern void hcall_tracepoint_regfunc(void);
58extern void hcall_tracepoint_unregfunc(void); 58extern void hcall_tracepoint_unregfunc(void);
59 59
60TRACE_EVENT_FN(hcall_entry, 60TRACE_EVENT_FN_COND(hcall_entry,
61 61
62 TP_PROTO(unsigned long opcode, unsigned long *args), 62 TP_PROTO(unsigned long opcode, unsigned long *args),
63 63
64 TP_ARGS(opcode, args), 64 TP_ARGS(opcode, args),
65 65
66 TP_CONDITION(cpu_online(raw_smp_processor_id())),
67
66 TP_STRUCT__entry( 68 TP_STRUCT__entry(
67 __field(unsigned long, opcode) 69 __field(unsigned long, opcode)
68 ), 70 ),
@@ -76,13 +78,15 @@ TRACE_EVENT_FN(hcall_entry,
76 hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc 78 hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc
77); 79);
78 80
79TRACE_EVENT_FN(hcall_exit, 81TRACE_EVENT_FN_COND(hcall_exit,
80 82
81 TP_PROTO(unsigned long opcode, unsigned long retval, 83 TP_PROTO(unsigned long opcode, unsigned long retval,
82 unsigned long *retbuf), 84 unsigned long *retbuf),
83 85
84 TP_ARGS(opcode, retval, retbuf), 86 TP_ARGS(opcode, retval, retbuf),
85 87
88 TP_CONDITION(cpu_online(raw_smp_processor_id())),
89
86 TP_STRUCT__entry( 90 TP_STRUCT__entry(
87 __field(unsigned long, opcode) 91 __field(unsigned long, opcode)
88 __field(unsigned long, retval) 92 __field(unsigned long, retval)
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 938742135ee0..301be3126ae3 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -564,6 +564,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
564 */ 564 */
565 eeh_pe_state_mark(pe, EEH_PE_KEEP); 565 eeh_pe_state_mark(pe, EEH_PE_KEEP);
566 if (bus) { 566 if (bus) {
567 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
567 pci_lock_rescan_remove(); 568 pci_lock_rescan_remove();
568 pcibios_remove_pci_devices(bus); 569 pcibios_remove_pci_devices(bus);
569 pci_unlock_rescan_remove(); 570 pci_unlock_rescan_remove();
@@ -803,6 +804,7 @@ perm_error:
803 * the their PCI config any more. 804 * the their PCI config any more.
804 */ 805 */
805 if (frozen_bus) { 806 if (frozen_bus) {
807 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
806 eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); 808 eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
807 809
808 pci_lock_rescan_remove(); 810 pci_lock_rescan_remove();
@@ -886,6 +888,7 @@ static void eeh_handle_special_event(void)
886 continue; 888 continue;
887 889
888 /* Notify all devices to be down */ 890 /* Notify all devices to be down */
891 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
889 bus = eeh_pe_bus_get(phb_pe); 892 bus = eeh_pe_bus_get(phb_pe);
890 eeh_pe_dev_traverse(pe, 893 eeh_pe_dev_traverse(pe,
891 eeh_report_failure, NULL); 894 eeh_report_failure, NULL);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index ca9e5371930e..98f81800e00c 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -928,7 +928,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
928 bus = pe->phb->bus; 928 bus = pe->phb->bus;
929 } else if (pe->type & EEH_PE_BUS || 929 } else if (pe->type & EEH_PE_BUS ||
930 pe->type & EEH_PE_DEVICE) { 930 pe->type & EEH_PE_DEVICE) {
931 if (pe->bus) { 931 if (pe->state & EEH_PE_PRI_BUS) {
932 bus = pe->bus; 932 bus = pe->bus;
933 goto out; 933 goto out;
934 } 934 }
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ac64ffdb52c8..08b7a40de5f8 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -340,7 +340,7 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
340 if (name[0] == '.') { 340 if (name[0] == '.') {
341 if (strcmp(name+1, "TOC.") == 0) 341 if (strcmp(name+1, "TOC.") == 0)
342 syms[i].st_shndx = SHN_ABS; 342 syms[i].st_shndx = SHN_ABS;
343 memmove(name, name+1, strlen(name)); 343 syms[i].st_name++;
344 } 344 }
345 } 345 }
346 } 346 }
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 3124a20d0fab..cdf2123d46db 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -646,6 +646,28 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
646 return pgtable; 646 return pgtable;
647} 647}
648 648
649void pmdp_huge_split_prepare(struct vm_area_struct *vma,
650 unsigned long address, pmd_t *pmdp)
651{
652 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
653 VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
654
655 /*
656 * We can't mark the pmd none here, because that will cause a race
657 * against exit_mmap. We need to continue mark pmd TRANS HUGE, while
658 * we spilt, but at the same time we wan't rest of the ppc64 code
659 * not to insert hash pte on this, because we will be modifying
660 * the deposited pgtable in the caller of this function. Hence
661 * clear the _PAGE_USER so that we move the fault handling to
662 * higher level function and that will serialize against ptl.
663 * We need to flush existing hash pte entries here even though,
664 * the translation is still valid, because we will withdraw
665 * pgtable_t after this.
666 */
667 pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_USER, 0);
668}
669
670
649/* 671/*
650 * set a new huge pmd. We should not be called for updating 672 * set a new huge pmd. We should not be called for updating
651 * an existing pmd entry. That should go via pmd_hugepage_update. 673 * an existing pmd entry. That should go via pmd_hugepage_update.
@@ -663,10 +685,20 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
663 return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); 685 return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
664} 686}
665 687
688/*
689 * We use this to invalidate a pmdp entry before switching from a
690 * hugepte to regular pmd entry.
691 */
666void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 692void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
667 pmd_t *pmdp) 693 pmd_t *pmdp)
668{ 694{
669 pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); 695 pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
696
697 /*
698 * This ensures that generic code that rely on IRQ disabling
699 * to prevent a parallel THP split work as expected.
700 */
701 kick_all_cpus_sync();
670} 702}
671 703
672/* 704/*
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 5f152b95ca0c..87f47e55aab6 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -444,9 +444,12 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
444 * PCI devices of the PE are expected to be removed prior 444 * PCI devices of the PE are expected to be removed prior
445 * to PE reset. 445 * to PE reset.
446 */ 446 */
447 if (!edev->pe->bus) 447 if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
448 edev->pe->bus = pci_find_bus(hose->global_number, 448 edev->pe->bus = pci_find_bus(hose->global_number,
449 pdn->busno); 449 pdn->busno);
450 if (edev->pe->bus)
451 edev->pe->state |= EEH_PE_PRI_BUS;
452 }
450 453
451 /* 454 /*
452 * Enable EEH explicitly so that we will do EEH check 455 * Enable EEH explicitly so that we will do EEH check
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 573ae1994097..f90dc04395bf 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3180,6 +3180,7 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
3180 3180
3181static const struct pci_controller_ops pnv_pci_ioda_controller_ops = { 3181static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
3182 .dma_dev_setup = pnv_pci_dma_dev_setup, 3182 .dma_dev_setup = pnv_pci_dma_dev_setup,
3183 .dma_bus_setup = pnv_pci_dma_bus_setup,
3183#ifdef CONFIG_PCI_MSI 3184#ifdef CONFIG_PCI_MSI
3184 .setup_msi_irqs = pnv_setup_msi_irqs, 3185 .setup_msi_irqs = pnv_setup_msi_irqs,
3185 .teardown_msi_irqs = pnv_teardown_msi_irqs, 3186 .teardown_msi_irqs = pnv_teardown_msi_irqs,
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 2f55c86df703..b1ef84a6c9d1 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -599,6 +599,9 @@ int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
599 u64 rpn = __pa(uaddr) >> tbl->it_page_shift; 599 u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
600 long i; 600 long i;
601 601
602 if (proto_tce & TCE_PCI_WRITE)
603 proto_tce |= TCE_PCI_READ;
604
602 for (i = 0; i < npages; i++) { 605 for (i = 0; i < npages; i++) {
603 unsigned long newtce = proto_tce | 606 unsigned long newtce = proto_tce |
604 ((rpn + i) << tbl->it_page_shift); 607 ((rpn + i) << tbl->it_page_shift);
@@ -620,6 +623,9 @@ int pnv_tce_xchg(struct iommu_table *tbl, long index,
620 623
621 BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl)); 624 BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
622 625
626 if (newtce & TCE_PCI_WRITE)
627 newtce |= TCE_PCI_READ;
628
623 oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)); 629 oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce));
624 *hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE); 630 *hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE);
625 *direction = iommu_tce_direction(oldtce); 631 *direction = iommu_tce_direction(oldtce);
@@ -760,6 +766,26 @@ void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
760 phb->dma_dev_setup(phb, pdev); 766 phb->dma_dev_setup(phb, pdev);
761} 767}
762 768
769void pnv_pci_dma_bus_setup(struct pci_bus *bus)
770{
771 struct pci_controller *hose = bus->sysdata;
772 struct pnv_phb *phb = hose->private_data;
773 struct pnv_ioda_pe *pe;
774
775 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
776 if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
777 continue;
778
779 if (!pe->pbus)
780 continue;
781
782 if (bus->number == ((pe->rid >> 8) & 0xFF)) {
783 pe->pbus = bus;
784 break;
785 }
786 }
787}
788
763void pnv_pci_shutdown(void) 789void pnv_pci_shutdown(void)
764{ 790{
765 struct pci_controller *hose; 791 struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 7f56313e8d72..00691a9b99af 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -242,6 +242,7 @@ extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
242extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option); 242extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
243 243
244extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev); 244extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
245extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
245extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type); 246extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
246extern void pnv_teardown_msi_irqs(struct pci_dev *pdev); 247extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
247 248
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
index 7aa799134a11..a52b6cca873d 100644
--- a/arch/s390/include/asm/livepatch.h
+++ b/arch/s390/include/asm/livepatch.h
@@ -37,7 +37,7 @@ static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
37 regs->psw.addr = ip; 37 regs->psw.addr = ip;
38} 38}
39#else 39#else
40#error Live patching support is disabled; check CONFIG_LIVEPATCH 40#error Include linux/livepatch.h, not asm/livepatch.h
41#endif 41#endif
42 42
43#endif 43#endif
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index cfcba2dd9bb5..0943b11a2f6e 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -260,12 +260,13 @@ static unsigned long __store_trace(struct perf_callchain_entry *entry,
260void perf_callchain_kernel(struct perf_callchain_entry *entry, 260void perf_callchain_kernel(struct perf_callchain_entry *entry,
261 struct pt_regs *regs) 261 struct pt_regs *regs)
262{ 262{
263 unsigned long head; 263 unsigned long head, frame_size;
264 struct stack_frame *head_sf; 264 struct stack_frame *head_sf;
265 265
266 if (user_mode(regs)) 266 if (user_mode(regs))
267 return; 267 return;
268 268
269 frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
269 head = regs->gprs[15]; 270 head = regs->gprs[15];
270 head_sf = (struct stack_frame *) head; 271 head_sf = (struct stack_frame *) head;
271 272
@@ -273,8 +274,9 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
273 return; 274 return;
274 275
275 head = head_sf->back_chain; 276 head = head_sf->back_chain;
276 head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE, 277 head = __store_trace(entry, head,
277 S390_lowcore.async_stack); 278 S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
279 S390_lowcore.async_stack + frame_size);
278 280
279 __store_trace(entry, head, S390_lowcore.thread_info, 281 __store_trace(entry, head, S390_lowcore.thread_info,
280 S390_lowcore.thread_info + THREAD_SIZE); 282 S390_lowcore.thread_info + THREAD_SIZE);
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 5acba3cb7220..8f64ebd63767 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -59,26 +59,32 @@ static unsigned long save_context_stack(struct stack_trace *trace,
59 } 59 }
60} 60}
61 61
62void save_stack_trace(struct stack_trace *trace) 62static void __save_stack_trace(struct stack_trace *trace, unsigned long sp)
63{ 63{
64 register unsigned long sp asm ("15"); 64 unsigned long new_sp, frame_size;
65 unsigned long orig_sp, new_sp;
66 65
67 orig_sp = sp; 66 frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
68 new_sp = save_context_stack(trace, orig_sp, 67 new_sp = save_context_stack(trace, sp,
69 S390_lowcore.panic_stack - PAGE_SIZE, 68 S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
70 S390_lowcore.panic_stack, 1); 69 S390_lowcore.panic_stack + frame_size, 1);
71 if (new_sp != orig_sp)
72 return;
73 new_sp = save_context_stack(trace, new_sp, 70 new_sp = save_context_stack(trace, new_sp,
74 S390_lowcore.async_stack - ASYNC_SIZE, 71 S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
75 S390_lowcore.async_stack, 1); 72 S390_lowcore.async_stack + frame_size, 1);
76 if (new_sp != orig_sp)
77 return;
78 save_context_stack(trace, new_sp, 73 save_context_stack(trace, new_sp,
79 S390_lowcore.thread_info, 74 S390_lowcore.thread_info,
80 S390_lowcore.thread_info + THREAD_SIZE, 1); 75 S390_lowcore.thread_info + THREAD_SIZE, 1);
81} 76}
77
78void save_stack_trace(struct stack_trace *trace)
79{
80 register unsigned long r15 asm ("15");
81 unsigned long sp;
82
83 sp = r15;
84 __save_stack_trace(trace, sp);
85 if (trace->nr_entries < trace->max_entries)
86 trace->entries[trace->nr_entries++] = ULONG_MAX;
87}
82EXPORT_SYMBOL_GPL(save_stack_trace); 88EXPORT_SYMBOL_GPL(save_stack_trace);
83 89
84void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 90void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
@@ -86,6 +92,10 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
86 unsigned long sp, low, high; 92 unsigned long sp, low, high;
87 93
88 sp = tsk->thread.ksp; 94 sp = tsk->thread.ksp;
95 if (tsk == current) {
96 /* Get current stack pointer. */
97 asm volatile("la %0,0(15)" : "=a" (sp));
98 }
89 low = (unsigned long) task_stack_page(tsk); 99 low = (unsigned long) task_stack_page(tsk);
90 high = (unsigned long) task_pt_regs(tsk); 100 high = (unsigned long) task_pt_regs(tsk);
91 save_context_stack(trace, sp, low, high, 0); 101 save_context_stack(trace, sp, low, high, 0);
@@ -93,3 +103,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
93 trace->entries[trace->nr_entries++] = ULONG_MAX; 103 trace->entries[trace->nr_entries++] = ULONG_MAX;
94} 104}
95EXPORT_SYMBOL_GPL(save_stack_trace_tsk); 105EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
106
107void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
108{
109 unsigned long sp;
110
111 sp = kernel_stack_pointer(regs);
112 __save_stack_trace(trace, sp);
113 if (trace->nr_entries < trace->max_entries)
114 trace->entries[trace->nr_entries++] = ULONG_MAX;
115}
116EXPORT_SYMBOL_GPL(save_stack_trace_regs);
diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c
index 21a5df99552b..dde7654f5c68 100644
--- a/arch/s390/kernel/trace.c
+++ b/arch/s390/kernel/trace.c
@@ -18,6 +18,9 @@ void trace_s390_diagnose_norecursion(int diag_nr)
18 unsigned long flags; 18 unsigned long flags;
19 unsigned int *depth; 19 unsigned int *depth;
20 20
21 /* Avoid lockdep recursion. */
22 if (IS_ENABLED(CONFIG_LOCKDEP))
23 return;
21 local_irq_save(flags); 24 local_irq_save(flags);
22 depth = this_cpu_ptr(&diagnose_trace_depth); 25 depth = this_cpu_ptr(&diagnose_trace_depth);
23 if (*depth == 0) { 26 if (*depth == 0) {
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index fec59c067d0d..792f9c63fbca 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -93,15 +93,19 @@ static int __memcpy_real(void *dest, void *src, size_t count)
93 */ 93 */
94int memcpy_real(void *dest, void *src, size_t count) 94int memcpy_real(void *dest, void *src, size_t count)
95{ 95{
96 int irqs_disabled, rc;
96 unsigned long flags; 97 unsigned long flags;
97 int rc;
98 98
99 if (!count) 99 if (!count)
100 return 0; 100 return 0;
101 local_irq_save(flags); 101 flags = __arch_local_irq_stnsm(0xf8UL);
102 __arch_local_irq_stnsm(0xfbUL); 102 irqs_disabled = arch_irqs_disabled_flags(flags);
103 if (!irqs_disabled)
104 trace_hardirqs_off();
103 rc = __memcpy_real(dest, src, count); 105 rc = __memcpy_real(dest, src, count);
104 local_irq_restore(flags); 106 if (!irqs_disabled)
107 trace_hardirqs_on();
108 __arch_local_irq_ssm(flags);
105 return rc; 109 return rc;
106} 110}
107 111
diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c
index fe0bfe370c45..1884e1759529 100644
--- a/arch/s390/oprofile/backtrace.c
+++ b/arch/s390/oprofile/backtrace.c
@@ -54,12 +54,13 @@ __show_trace(unsigned int *depth, unsigned long sp,
54 54
55void s390_backtrace(struct pt_regs * const regs, unsigned int depth) 55void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
56{ 56{
57 unsigned long head; 57 unsigned long head, frame_size;
58 struct stack_frame* head_sf; 58 struct stack_frame* head_sf;
59 59
60 if (user_mode(regs)) 60 if (user_mode(regs))
61 return; 61 return;
62 62
63 frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
63 head = regs->gprs[15]; 64 head = regs->gprs[15];
64 head_sf = (struct stack_frame*)head; 65 head_sf = (struct stack_frame*)head;
65 66
@@ -68,8 +69,9 @@ void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
68 69
69 head = head_sf->back_chain; 70 head = head_sf->back_chain;
70 71
71 head = __show_trace(&depth, head, S390_lowcore.async_stack - ASYNC_SIZE, 72 head = __show_trace(&depth, head,
72 S390_lowcore.async_stack); 73 S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
74 S390_lowcore.async_stack + frame_size);
73 75
74 __show_trace(&depth, head, S390_lowcore.thread_info, 76 __show_trace(&depth, head, S390_lowcore.thread_info,
75 S390_lowcore.thread_info + THREAD_SIZE); 77 S390_lowcore.thread_info + THREAD_SIZE);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ab2ed5328f0a..c46662f64c39 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -778,8 +778,8 @@ config HPET_TIMER
778 HPET is the next generation timer replacing legacy 8254s. 778 HPET is the next generation timer replacing legacy 8254s.
779 The HPET provides a stable time base on SMP 779 The HPET provides a stable time base on SMP
780 systems, unlike the TSC, but it is more expensive to access, 780 systems, unlike the TSC, but it is more expensive to access,
781 as it is off-chip. You can find the HPET spec at 781 as it is off-chip. The interface used is documented
782 <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>. 782 in the HPET spec, revision 1.
783 783
784 You can safely choose Y here. However, HPET will only be 784 You can safely choose Y here. However, HPET will only be
785 activated if the platform and the BIOS support this feature. 785 activated if the platform and the BIOS support this feature.
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index 19c099afa861..e795f5274217 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -41,7 +41,7 @@ static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
41 regs->ip = ip; 41 regs->ip = ip;
42} 42}
43#else 43#else
44#error Live patching support is disabled; check CONFIG_LIVEPATCH 44#error Include linux/livepatch.h, not asm/livepatch.h
45#endif 45#endif
46 46
47#endif /* _ASM_X86_LIVEPATCH_H */ 47#endif /* _ASM_X86_LIVEPATCH_H */
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index 49742746a6c9..8836fc9fa84b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -323,6 +323,8 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
323 return 0; 323 return 0;
324 324
325fail: 325fail:
326 if (amd_uncore_nb)
327 *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
326 kfree(uncore_nb); 328 kfree(uncore_nb);
327 return -ENOMEM; 329 return -ENOMEM;
328} 330}
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 982ce34f4a9b..27f89c79a44b 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -232,17 +232,31 @@ ENDPROC(copy_user_enhanced_fast_string)
232 232
233/* 233/*
234 * copy_user_nocache - Uncached memory copy with exception handling 234 * copy_user_nocache - Uncached memory copy with exception handling
235 * This will force destination/source out of cache for more performance. 235 * This will force destination out of cache for more performance.
236 *
237 * Note: Cached memory copy is used when destination or size is not
238 * naturally aligned. That is:
239 * - Require 8-byte alignment when size is 8 bytes or larger.
240 * - Require 4-byte alignment when size is 4 bytes.
236 */ 241 */
237ENTRY(__copy_user_nocache) 242ENTRY(__copy_user_nocache)
238 ASM_STAC 243 ASM_STAC
244
245 /* If size is less than 8 bytes, go to 4-byte copy */
239 cmpl $8,%edx 246 cmpl $8,%edx
240 jb 20f /* less then 8 bytes, go to byte copy loop */ 247 jb .L_4b_nocache_copy_entry
248
249 /* If destination is not 8-byte aligned, "cache" copy to align it */
241 ALIGN_DESTINATION 250 ALIGN_DESTINATION
251
252 /* Set 4x8-byte copy count and remainder */
242 movl %edx,%ecx 253 movl %edx,%ecx
243 andl $63,%edx 254 andl $63,%edx
244 shrl $6,%ecx 255 shrl $6,%ecx
245 jz 17f 256 jz .L_8b_nocache_copy_entry /* jump if count is 0 */
257
258 /* Perform 4x8-byte nocache loop-copy */
259.L_4x8b_nocache_copy_loop:
2461: movq (%rsi),%r8 2601: movq (%rsi),%r8
2472: movq 1*8(%rsi),%r9 2612: movq 1*8(%rsi),%r9
2483: movq 2*8(%rsi),%r10 2623: movq 2*8(%rsi),%r10
@@ -262,60 +276,106 @@ ENTRY(__copy_user_nocache)
262 leaq 64(%rsi),%rsi 276 leaq 64(%rsi),%rsi
263 leaq 64(%rdi),%rdi 277 leaq 64(%rdi),%rdi
264 decl %ecx 278 decl %ecx
265 jnz 1b 279 jnz .L_4x8b_nocache_copy_loop
26617: movl %edx,%ecx 280
281 /* Set 8-byte copy count and remainder */
282.L_8b_nocache_copy_entry:
283 movl %edx,%ecx
267 andl $7,%edx 284 andl $7,%edx
268 shrl $3,%ecx 285 shrl $3,%ecx
269 jz 20f 286 jz .L_4b_nocache_copy_entry /* jump if count is 0 */
27018: movq (%rsi),%r8 287
27119: movnti %r8,(%rdi) 288 /* Perform 8-byte nocache loop-copy */
289.L_8b_nocache_copy_loop:
29020: movq (%rsi),%r8
29121: movnti %r8,(%rdi)
272 leaq 8(%rsi),%rsi 292 leaq 8(%rsi),%rsi
273 leaq 8(%rdi),%rdi 293 leaq 8(%rdi),%rdi
274 decl %ecx 294 decl %ecx
275 jnz 18b 295 jnz .L_8b_nocache_copy_loop
27620: andl %edx,%edx 296
277 jz 23f 297 /* If no byte left, we're done */
298.L_4b_nocache_copy_entry:
299 andl %edx,%edx
300 jz .L_finish_copy
301
302 /* If destination is not 4-byte aligned, go to byte copy: */
303 movl %edi,%ecx
304 andl $3,%ecx
305 jnz .L_1b_cache_copy_entry
306
307 /* Set 4-byte copy count (1 or 0) and remainder */
278 movl %edx,%ecx 308 movl %edx,%ecx
27921: movb (%rsi),%al 309 andl $3,%edx
28022: movb %al,(%rdi) 310 shrl $2,%ecx
311 jz .L_1b_cache_copy_entry /* jump if count is 0 */
312
313 /* Perform 4-byte nocache copy: */
31430: movl (%rsi),%r8d
31531: movnti %r8d,(%rdi)
316 leaq 4(%rsi),%rsi
317 leaq 4(%rdi),%rdi
318
319 /* If no bytes left, we're done: */
320 andl %edx,%edx
321 jz .L_finish_copy
322
323 /* Perform byte "cache" loop-copy for the remainder */
324.L_1b_cache_copy_entry:
325 movl %edx,%ecx
326.L_1b_cache_copy_loop:
32740: movb (%rsi),%al
32841: movb %al,(%rdi)
281 incq %rsi 329 incq %rsi
282 incq %rdi 330 incq %rdi
283 decl %ecx 331 decl %ecx
284 jnz 21b 332 jnz .L_1b_cache_copy_loop
28523: xorl %eax,%eax 333
334 /* Finished copying; fence the prior stores */
335.L_finish_copy:
336 xorl %eax,%eax
286 ASM_CLAC 337 ASM_CLAC
287 sfence 338 sfence
288 ret 339 ret
289 340
290 .section .fixup,"ax" 341 .section .fixup,"ax"
29130: shll $6,%ecx 342.L_fixup_4x8b_copy:
343 shll $6,%ecx
292 addl %ecx,%edx 344 addl %ecx,%edx
293 jmp 60f 345 jmp .L_fixup_handle_tail
29440: lea (%rdx,%rcx,8),%rdx 346.L_fixup_8b_copy:
295 jmp 60f 347 lea (%rdx,%rcx,8),%rdx
29650: movl %ecx,%edx 348 jmp .L_fixup_handle_tail
29760: sfence 349.L_fixup_4b_copy:
350 lea (%rdx,%rcx,4),%rdx
351 jmp .L_fixup_handle_tail
352.L_fixup_1b_copy:
353 movl %ecx,%edx
354.L_fixup_handle_tail:
355 sfence
298 jmp copy_user_handle_tail 356 jmp copy_user_handle_tail
299 .previous 357 .previous
300 358
301 _ASM_EXTABLE(1b,30b) 359 _ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
302 _ASM_EXTABLE(2b,30b) 360 _ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
303 _ASM_EXTABLE(3b,30b) 361 _ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
304 _ASM_EXTABLE(4b,30b) 362 _ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
305 _ASM_EXTABLE(5b,30b) 363 _ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
306 _ASM_EXTABLE(6b,30b) 364 _ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
307 _ASM_EXTABLE(7b,30b) 365 _ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
308 _ASM_EXTABLE(8b,30b) 366 _ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
309 _ASM_EXTABLE(9b,30b) 367 _ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
310 _ASM_EXTABLE(10b,30b) 368 _ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
311 _ASM_EXTABLE(11b,30b) 369 _ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
312 _ASM_EXTABLE(12b,30b) 370 _ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
313 _ASM_EXTABLE(13b,30b) 371 _ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
314 _ASM_EXTABLE(14b,30b) 372 _ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
315 _ASM_EXTABLE(15b,30b) 373 _ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
316 _ASM_EXTABLE(16b,30b) 374 _ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
317 _ASM_EXTABLE(18b,40b) 375 _ASM_EXTABLE(20b,.L_fixup_8b_copy)
318 _ASM_EXTABLE(19b,40b) 376 _ASM_EXTABLE(21b,.L_fixup_8b_copy)
319 _ASM_EXTABLE(21b,50b) 377 _ASM_EXTABLE(30b,.L_fixup_4b_copy)
320 _ASM_EXTABLE(22b,50b) 378 _ASM_EXTABLE(31b,.L_fixup_4b_copy)
379 _ASM_EXTABLE(40b,.L_fixup_1b_copy)
380 _ASM_EXTABLE(41b,.L_fixup_1b_copy)
321ENDPROC(__copy_user_nocache) 381ENDPROC(__copy_user_nocache)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index eef44d9a3f77..e830c71a1323 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -287,6 +287,9 @@ static noinline int vmalloc_fault(unsigned long address)
287 if (!pmd_k) 287 if (!pmd_k)
288 return -1; 288 return -1;
289 289
290 if (pmd_huge(*pmd_k))
291 return 0;
292
290 pte_k = pte_offset_kernel(pmd_k, address); 293 pte_k = pte_offset_kernel(pmd_k, address);
291 if (!pte_present(*pte_k)) 294 if (!pte_present(*pte_k))
292 return -1; 295 return -1;
@@ -360,8 +363,6 @@ void vmalloc_sync_all(void)
360 * 64-bit: 363 * 64-bit:
361 * 364 *
362 * Handle a fault on the vmalloc area 365 * Handle a fault on the vmalloc area
363 *
364 * This assumes no large pages in there.
365 */ 366 */
366static noinline int vmalloc_fault(unsigned long address) 367static noinline int vmalloc_fault(unsigned long address)
367{ 368{
@@ -403,17 +404,23 @@ static noinline int vmalloc_fault(unsigned long address)
403 if (pud_none(*pud_ref)) 404 if (pud_none(*pud_ref))
404 return -1; 405 return -1;
405 406
406 if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref)) 407 if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
407 BUG(); 408 BUG();
408 409
410 if (pud_huge(*pud))
411 return 0;
412
409 pmd = pmd_offset(pud, address); 413 pmd = pmd_offset(pud, address);
410 pmd_ref = pmd_offset(pud_ref, address); 414 pmd_ref = pmd_offset(pud_ref, address);
411 if (pmd_none(*pmd_ref)) 415 if (pmd_none(*pmd_ref))
412 return -1; 416 return -1;
413 417
414 if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref)) 418 if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
415 BUG(); 419 BUG();
416 420
421 if (pmd_huge(*pmd))
422 return 0;
423
417 pte_ref = pte_offset_kernel(pmd_ref, address); 424 pte_ref = pte_offset_kernel(pmd_ref, address);
418 if (!pte_present(*pte_ref)) 425 if (!pte_present(*pte_ref))
419 return -1; 426 return -1;
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 6d5eb5900372..d8a798d8bf50 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -102,7 +102,6 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
102 return 0; 102 return 0;
103 } 103 }
104 104
105 page = pte_page(pte);
106 if (pte_devmap(pte)) { 105 if (pte_devmap(pte)) {
107 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); 106 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
108 if (unlikely(!pgmap)) { 107 if (unlikely(!pgmap)) {
@@ -115,6 +114,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
115 return 0; 114 return 0;
116 } 115 }
117 VM_BUG_ON(!pfn_valid(pte_pfn(pte))); 116 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
117 page = pte_page(pte);
118 get_page(page); 118 get_page(page);
119 put_dev_pagemap(pgmap); 119 put_dev_pagemap(pgmap);
120 SetPageReferenced(page); 120 SetPageReferenced(page);
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
index 0a3736f03edc..740445a53363 100644
--- a/arch/x86/platform/intel-quark/imr.c
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -580,14 +580,14 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
580 end = (unsigned long)__end_rodata - 1; 580 end = (unsigned long)__end_rodata - 1;
581 581
582 /* 582 /*
583 * Setup a locked IMR around the physical extent of the kernel 583 * Setup an unlocked IMR around the physical extent of the kernel
584 * from the beginning of the .text secton to the end of the 584 * from the beginning of the .text secton to the end of the
585 * .rodata section as one physically contiguous block. 585 * .rodata section as one physically contiguous block.
586 * 586 *
587 * We don't round up @size since it is already PAGE_SIZE aligned. 587 * We don't round up @size since it is already PAGE_SIZE aligned.
588 * See vmlinux.lds.S for details. 588 * See vmlinux.lds.S for details.
589 */ 589 */
590 ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true); 590 ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
591 if (ret < 0) { 591 if (ret < 0) {
592 pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n", 592 pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
593 size / 1024, start, end); 593 size / 1024, start, end);
diff --git a/block/bio.c b/block/bio.c
index dbabd48b1934..cf7591551b17 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -874,7 +874,7 @@ int submit_bio_wait(int rw, struct bio *bio)
874 bio->bi_private = &ret; 874 bio->bi_private = &ret;
875 bio->bi_end_io = submit_bio_wait_endio; 875 bio->bi_end_io = submit_bio_wait_endio;
876 submit_bio(rw, bio); 876 submit_bio(rw, bio);
877 wait_for_completion(&ret.event); 877 wait_for_completion_io(&ret.event);
878 878
879 return ret.error; 879 return ret.error;
880} 880}
@@ -1090,9 +1090,12 @@ int bio_uncopy_user(struct bio *bio)
1090 if (!bio_flagged(bio, BIO_NULL_MAPPED)) { 1090 if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1091 /* 1091 /*
1092 * if we're in a workqueue, the request is orphaned, so 1092 * if we're in a workqueue, the request is orphaned, so
1093 * don't copy into a random user address space, just free. 1093 * don't copy into a random user address space, just free
1094 * and return -EINTR so user space doesn't expect any data.
1094 */ 1095 */
1095 if (current->mm && bio_data_dir(bio) == READ) 1096 if (!current->mm)
1097 ret = -EINTR;
1098 else if (bio_data_dir(bio) == READ)
1096 ret = bio_copy_to_iter(bio, bmd->iter); 1099 ret = bio_copy_to_iter(bio, bmd->iter);
1097 if (bmd->is_our_pages) 1100 if (bmd->is_our_pages)
1098 bio_free_pages(bio); 1101 bio_free_pages(bio);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 5a37188b559f..66e6f1aae02e 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -788,6 +788,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
788{ 788{
789 struct gendisk *disk; 789 struct gendisk *disk;
790 struct blkcg_gq *blkg; 790 struct blkcg_gq *blkg;
791 struct module *owner;
791 unsigned int major, minor; 792 unsigned int major, minor;
792 int key_len, part, ret; 793 int key_len, part, ret;
793 char *body; 794 char *body;
@@ -804,7 +805,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
804 if (!disk) 805 if (!disk)
805 return -ENODEV; 806 return -ENODEV;
806 if (part) { 807 if (part) {
808 owner = disk->fops->owner;
807 put_disk(disk); 809 put_disk(disk);
810 module_put(owner);
808 return -ENODEV; 811 return -ENODEV;
809 } 812 }
810 813
@@ -820,7 +823,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
820 ret = PTR_ERR(blkg); 823 ret = PTR_ERR(blkg);
821 rcu_read_unlock(); 824 rcu_read_unlock();
822 spin_unlock_irq(disk->queue->queue_lock); 825 spin_unlock_irq(disk->queue->queue_lock);
826 owner = disk->fops->owner;
823 put_disk(disk); 827 put_disk(disk);
828 module_put(owner);
824 /* 829 /*
825 * If queue was bypassing, we should retry. Do so after a 830 * If queue was bypassing, we should retry. Do so after a
826 * short msleep(). It isn't strictly necessary but queue 831 * short msleep(). It isn't strictly necessary but queue
@@ -851,9 +856,13 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep);
851void blkg_conf_finish(struct blkg_conf_ctx *ctx) 856void blkg_conf_finish(struct blkg_conf_ctx *ctx)
852 __releases(ctx->disk->queue->queue_lock) __releases(rcu) 857 __releases(ctx->disk->queue->queue_lock) __releases(rcu)
853{ 858{
859 struct module *owner;
860
854 spin_unlock_irq(ctx->disk->queue->queue_lock); 861 spin_unlock_irq(ctx->disk->queue->queue_lock);
855 rcu_read_unlock(); 862 rcu_read_unlock();
863 owner = ctx->disk->fops->owner;
856 put_disk(ctx->disk); 864 put_disk(ctx->disk);
865 module_put(owner);
857} 866}
858EXPORT_SYMBOL_GPL(blkg_conf_finish); 867EXPORT_SYMBOL_GPL(blkg_conf_finish);
859 868
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4c0622fae413..56c0a726b619 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -599,8 +599,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
599 * If a request wasn't started before the queue was 599 * If a request wasn't started before the queue was
600 * marked dying, kill it here or it'll go unnoticed. 600 * marked dying, kill it here or it'll go unnoticed.
601 */ 601 */
602 if (unlikely(blk_queue_dying(rq->q))) 602 if (unlikely(blk_queue_dying(rq->q))) {
603 blk_mq_complete_request(rq, -EIO); 603 rq->errors = -EIO;
604 blk_mq_end_request(rq, rq->errors);
605 }
604 return; 606 return;
605 } 607 }
606 608
diff --git a/block/blk-settings.c b/block/blk-settings.c
index dd4973583978..c7bb666aafd1 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,8 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim)
91 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; 91 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
92 lim->virt_boundary_mask = 0; 92 lim->virt_boundary_mask = 0;
93 lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; 93 lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
94 lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors = 94 lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
95 BLK_SAFE_MAX_SECTORS; 95 lim->max_dev_sectors = 0;
96 lim->chunk_sectors = 0; 96 lim->chunk_sectors = 0;
97 lim->max_write_same_sectors = 0; 97 lim->max_write_same_sectors = 0;
98 lim->max_discard_sectors = 0; 98 lim->max_discard_sectors = 0;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e140cc487ce1..dd93763057ce 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -147,10 +147,9 @@ static ssize_t queue_discard_granularity_show(struct request_queue *q, char *pag
147 147
148static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page) 148static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
149{ 149{
150 unsigned long long val;
151 150
152 val = q->limits.max_hw_discard_sectors << 9; 151 return sprintf(page, "%llu\n",
153 return sprintf(page, "%llu\n", val); 152 (unsigned long long)q->limits.max_hw_discard_sectors << 9);
154} 153}
155 154
156static ssize_t queue_discard_max_show(struct request_queue *q, char *page) 155static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index a753df2b3fc2..d0dd7882d8c7 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -39,7 +39,6 @@ struct deadline_data {
39 */ 39 */
40 struct request *next_rq[2]; 40 struct request *next_rq[2];
41 unsigned int batching; /* number of sequential requests made */ 41 unsigned int batching; /* number of sequential requests made */
42 sector_t last_sector; /* head position */
43 unsigned int starved; /* times reads have starved writes */ 42 unsigned int starved; /* times reads have starved writes */
44 43
45 /* 44 /*
@@ -210,8 +209,6 @@ deadline_move_request(struct deadline_data *dd, struct request *rq)
210 dd->next_rq[WRITE] = NULL; 209 dd->next_rq[WRITE] = NULL;
211 dd->next_rq[data_dir] = deadline_latter_request(rq); 210 dd->next_rq[data_dir] = deadline_latter_request(rq);
212 211
213 dd->last_sector = rq_end_sector(rq);
214
215 /* 212 /*
216 * take it off the sort and fifo list, move 213 * take it off the sort and fifo list, move
217 * to dispatch queue 214 * to dispatch queue
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9e251201dd48..84708a5f8c52 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -866,7 +866,7 @@ static void set_fdc(int drive)
866} 866}
867 867
868/* locks the driver */ 868/* locks the driver */
869static int lock_fdc(int drive, bool interruptible) 869static int lock_fdc(int drive)
870{ 870{
871 if (WARN(atomic_read(&usage_count) == 0, 871 if (WARN(atomic_read(&usage_count) == 0,
872 "Trying to lock fdc while usage count=0\n")) 872 "Trying to lock fdc while usage count=0\n"))
@@ -2173,7 +2173,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
2173{ 2173{
2174 int ret; 2174 int ret;
2175 2175
2176 if (lock_fdc(drive, true)) 2176 if (lock_fdc(drive))
2177 return -EINTR; 2177 return -EINTR;
2178 2178
2179 set_floppy(drive); 2179 set_floppy(drive);
@@ -2960,7 +2960,7 @@ static int user_reset_fdc(int drive, int arg, bool interruptible)
2960{ 2960{
2961 int ret; 2961 int ret;
2962 2962
2963 if (lock_fdc(drive, interruptible)) 2963 if (lock_fdc(drive))
2964 return -EINTR; 2964 return -EINTR;
2965 2965
2966 if (arg == FD_RESET_ALWAYS) 2966 if (arg == FD_RESET_ALWAYS)
@@ -3243,7 +3243,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
3243 if (!capable(CAP_SYS_ADMIN)) 3243 if (!capable(CAP_SYS_ADMIN))
3244 return -EPERM; 3244 return -EPERM;
3245 mutex_lock(&open_lock); 3245 mutex_lock(&open_lock);
3246 if (lock_fdc(drive, true)) { 3246 if (lock_fdc(drive)) {
3247 mutex_unlock(&open_lock); 3247 mutex_unlock(&open_lock);
3248 return -EINTR; 3248 return -EINTR;
3249 } 3249 }
@@ -3263,7 +3263,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
3263 } else { 3263 } else {
3264 int oldStretch; 3264 int oldStretch;
3265 3265
3266 if (lock_fdc(drive, true)) 3266 if (lock_fdc(drive))
3267 return -EINTR; 3267 return -EINTR;
3268 if (cmd != FDDEFPRM) { 3268 if (cmd != FDDEFPRM) {
3269 /* notice a disk change immediately, else 3269 /* notice a disk change immediately, else
@@ -3349,7 +3349,7 @@ static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
3349 if (type) 3349 if (type)
3350 *g = &floppy_type[type]; 3350 *g = &floppy_type[type];
3351 else { 3351 else {
3352 if (lock_fdc(drive, false)) 3352 if (lock_fdc(drive))
3353 return -EINTR; 3353 return -EINTR;
3354 if (poll_drive(false, 0) == -EINTR) 3354 if (poll_drive(false, 0) == -EINTR)
3355 return -EINTR; 3355 return -EINTR;
@@ -3433,7 +3433,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3433 if (UDRS->fd_ref != 1) 3433 if (UDRS->fd_ref != 1)
3434 /* somebody else has this drive open */ 3434 /* somebody else has this drive open */
3435 return -EBUSY; 3435 return -EBUSY;
3436 if (lock_fdc(drive, true)) 3436 if (lock_fdc(drive))
3437 return -EINTR; 3437 return -EINTR;
3438 3438
3439 /* do the actual eject. Fails on 3439 /* do the actual eject. Fails on
@@ -3445,7 +3445,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3445 process_fd_request(); 3445 process_fd_request();
3446 return ret; 3446 return ret;
3447 case FDCLRPRM: 3447 case FDCLRPRM:
3448 if (lock_fdc(drive, true)) 3448 if (lock_fdc(drive))
3449 return -EINTR; 3449 return -EINTR;
3450 current_type[drive] = NULL; 3450 current_type[drive] = NULL;
3451 floppy_sizes[drive] = MAX_DISK_SIZE << 1; 3451 floppy_sizes[drive] = MAX_DISK_SIZE << 1;
@@ -3467,7 +3467,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3467 UDP->flags &= ~FTD_MSG; 3467 UDP->flags &= ~FTD_MSG;
3468 return 0; 3468 return 0;
3469 case FDFMTBEG: 3469 case FDFMTBEG:
3470 if (lock_fdc(drive, true)) 3470 if (lock_fdc(drive))
3471 return -EINTR; 3471 return -EINTR;
3472 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) 3472 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
3473 return -EINTR; 3473 return -EINTR;
@@ -3484,7 +3484,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3484 return do_format(drive, &inparam.f); 3484 return do_format(drive, &inparam.f);
3485 case FDFMTEND: 3485 case FDFMTEND:
3486 case FDFLUSH: 3486 case FDFLUSH:
3487 if (lock_fdc(drive, true)) 3487 if (lock_fdc(drive))
3488 return -EINTR; 3488 return -EINTR;
3489 return invalidate_drive(bdev); 3489 return invalidate_drive(bdev);
3490 case FDSETEMSGTRESH: 3490 case FDSETEMSGTRESH:
@@ -3507,7 +3507,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3507 outparam = UDP; 3507 outparam = UDP;
3508 break; 3508 break;
3509 case FDPOLLDRVSTAT: 3509 case FDPOLLDRVSTAT:
3510 if (lock_fdc(drive, true)) 3510 if (lock_fdc(drive))
3511 return -EINTR; 3511 return -EINTR;
3512 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) 3512 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
3513 return -EINTR; 3513 return -EINTR;
@@ -3530,7 +3530,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3530 case FDRAWCMD: 3530 case FDRAWCMD:
3531 if (type) 3531 if (type)
3532 return -EINVAL; 3532 return -EINVAL;
3533 if (lock_fdc(drive, true)) 3533 if (lock_fdc(drive))
3534 return -EINTR; 3534 return -EINTR;
3535 set_floppy(drive); 3535 set_floppy(drive);
3536 i = raw_cmd_ioctl(cmd, (void __user *)param); 3536 i = raw_cmd_ioctl(cmd, (void __user *)param);
@@ -3539,7 +3539,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3539 process_fd_request(); 3539 process_fd_request();
3540 return i; 3540 return i;
3541 case FDTWADDLE: 3541 case FDTWADDLE:
3542 if (lock_fdc(drive, true)) 3542 if (lock_fdc(drive))
3543 return -EINTR; 3543 return -EINTR;
3544 twaddle(); 3544 twaddle();
3545 process_fd_request(); 3545 process_fd_request();
@@ -3663,6 +3663,11 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
3663 3663
3664 opened_bdev[drive] = bdev; 3664 opened_bdev[drive] = bdev;
3665 3665
3666 if (!(mode & (FMODE_READ|FMODE_WRITE))) {
3667 res = -EINVAL;
3668 goto out;
3669 }
3670
3666 res = -ENXIO; 3671 res = -ENXIO;
3667 3672
3668 if (!floppy_track_buffer) { 3673 if (!floppy_track_buffer) {
@@ -3706,21 +3711,20 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
3706 if (UFDCS->rawcmd == 1) 3711 if (UFDCS->rawcmd == 1)
3707 UFDCS->rawcmd = 2; 3712 UFDCS->rawcmd = 2;
3708 3713
3709 if (!(mode & FMODE_NDELAY)) { 3714 UDRS->last_checked = 0;
3710 if (mode & (FMODE_READ|FMODE_WRITE)) { 3715 clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
3711 UDRS->last_checked = 0; 3716 check_disk_change(bdev);
3712 clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); 3717 if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
3713 check_disk_change(bdev); 3718 goto out;
3714 if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) 3719 if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
3715 goto out; 3720 goto out;
3716 if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) 3721
3717 goto out; 3722 res = -EROFS;
3718 } 3723
3719 res = -EROFS; 3724 if ((mode & FMODE_WRITE) &&
3720 if ((mode & FMODE_WRITE) && 3725 !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
3721 !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) 3726 goto out;
3722 goto out; 3727
3723 }
3724 mutex_unlock(&open_lock); 3728 mutex_unlock(&open_lock);
3725 mutex_unlock(&floppy_mutex); 3729 mutex_unlock(&floppy_mutex);
3726 return 0; 3730 return 0;
@@ -3748,7 +3752,8 @@ static unsigned int floppy_check_events(struct gendisk *disk,
3748 return DISK_EVENT_MEDIA_CHANGE; 3752 return DISK_EVENT_MEDIA_CHANGE;
3749 3753
3750 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { 3754 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
3751 lock_fdc(drive, false); 3755 if (lock_fdc(drive))
3756 return -EINTR;
3752 poll_drive(false, 0); 3757 poll_drive(false, 0);
3753 process_fd_request(); 3758 process_fd_request();
3754 } 3759 }
@@ -3847,7 +3852,9 @@ static int floppy_revalidate(struct gendisk *disk)
3847 "VFS: revalidate called on non-open device.\n")) 3852 "VFS: revalidate called on non-open device.\n"))
3848 return -EFAULT; 3853 return -EFAULT;
3849 3854
3850 lock_fdc(drive, false); 3855 res = lock_fdc(drive);
3856 if (res)
3857 return res;
3851 cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || 3858 cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
3852 test_bit(FD_VERIFY_BIT, &UDRS->flags)); 3859 test_bit(FD_VERIFY_BIT, &UDRS->flags));
3853 if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) { 3860 if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) {
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 8ba1e97d573c..64a7b5971b57 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -478,7 +478,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
478 id->ver_id = 0x1; 478 id->ver_id = 0x1;
479 id->vmnt = 0; 479 id->vmnt = 0;
480 id->cgrps = 1; 480 id->cgrps = 1;
481 id->cap = 0x3; 481 id->cap = 0x2;
482 id->dom = 0x1; 482 id->dom = 0x1;
483 483
484 id->ppaf.blk_offset = 0; 484 id->ppaf.blk_offset = 0;
@@ -707,9 +707,7 @@ static int null_add_dev(void)
707 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); 707 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
708 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); 708 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
709 709
710
711 mutex_lock(&lock); 710 mutex_lock(&lock);
712 list_add_tail(&nullb->list, &nullb_list);
713 nullb->index = nullb_indexes++; 711 nullb->index = nullb_indexes++;
714 mutex_unlock(&lock); 712 mutex_unlock(&lock);
715 713
@@ -743,6 +741,10 @@ static int null_add_dev(void)
743 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); 741 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
744 742
745 add_disk(disk); 743 add_disk(disk);
744
745 mutex_lock(&lock);
746 list_add_tail(&nullb->list, &nullb_list);
747 mutex_unlock(&lock);
746done: 748done:
747 return 0; 749 return 0;
748 750
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8a8dc91c39f7..83eb9e6bf8b0 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1873,6 +1873,43 @@ again:
1873 return err; 1873 return err;
1874} 1874}
1875 1875
1876static int negotiate_mq(struct blkfront_info *info)
1877{
1878 unsigned int backend_max_queues = 0;
1879 int err;
1880 unsigned int i;
1881
1882 BUG_ON(info->nr_rings);
1883
1884 /* Check if backend supports multiple queues. */
1885 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1886 "multi-queue-max-queues", "%u", &backend_max_queues);
1887 if (err < 0)
1888 backend_max_queues = 1;
1889
1890 info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1891 /* We need at least one ring. */
1892 if (!info->nr_rings)
1893 info->nr_rings = 1;
1894
1895 info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
1896 if (!info->rinfo) {
1897 xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
1898 return -ENOMEM;
1899 }
1900
1901 for (i = 0; i < info->nr_rings; i++) {
1902 struct blkfront_ring_info *rinfo;
1903
1904 rinfo = &info->rinfo[i];
1905 INIT_LIST_HEAD(&rinfo->indirect_pages);
1906 INIT_LIST_HEAD(&rinfo->grants);
1907 rinfo->dev_info = info;
1908 INIT_WORK(&rinfo->work, blkif_restart_queue);
1909 spin_lock_init(&rinfo->ring_lock);
1910 }
1911 return 0;
1912}
1876/** 1913/**
1877 * Entry point to this code when a new device is created. Allocate the basic 1914 * Entry point to this code when a new device is created. Allocate the basic
1878 * structures and the ring buffer for communication with the backend, and 1915 * structures and the ring buffer for communication with the backend, and
@@ -1883,9 +1920,7 @@ static int blkfront_probe(struct xenbus_device *dev,
1883 const struct xenbus_device_id *id) 1920 const struct xenbus_device_id *id)
1884{ 1921{
1885 int err, vdevice; 1922 int err, vdevice;
1886 unsigned int r_index;
1887 struct blkfront_info *info; 1923 struct blkfront_info *info;
1888 unsigned int backend_max_queues = 0;
1889 1924
1890 /* FIXME: Use dynamic device id if this is not set. */ 1925 /* FIXME: Use dynamic device id if this is not set. */
1891 err = xenbus_scanf(XBT_NIL, dev->nodename, 1926 err = xenbus_scanf(XBT_NIL, dev->nodename,
@@ -1936,33 +1971,10 @@ static int blkfront_probe(struct xenbus_device *dev,
1936 } 1971 }
1937 1972
1938 info->xbdev = dev; 1973 info->xbdev = dev;
1939 /* Check if backend supports multiple queues. */ 1974 err = negotiate_mq(info);
1940 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 1975 if (err) {
1941 "multi-queue-max-queues", "%u", &backend_max_queues);
1942 if (err < 0)
1943 backend_max_queues = 1;
1944
1945 info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1946 /* We need at least one ring. */
1947 if (!info->nr_rings)
1948 info->nr_rings = 1;
1949
1950 info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
1951 if (!info->rinfo) {
1952 xenbus_dev_fatal(dev, -ENOMEM, "allocating ring_info structure");
1953 kfree(info); 1976 kfree(info);
1954 return -ENOMEM; 1977 return err;
1955 }
1956
1957 for (r_index = 0; r_index < info->nr_rings; r_index++) {
1958 struct blkfront_ring_info *rinfo;
1959
1960 rinfo = &info->rinfo[r_index];
1961 INIT_LIST_HEAD(&rinfo->indirect_pages);
1962 INIT_LIST_HEAD(&rinfo->grants);
1963 rinfo->dev_info = info;
1964 INIT_WORK(&rinfo->work, blkif_restart_queue);
1965 spin_lock_init(&rinfo->ring_lock);
1966 } 1978 }
1967 1979
1968 mutex_init(&info->mutex); 1980 mutex_init(&info->mutex);
@@ -2123,12 +2135,16 @@ static int blkif_recover(struct blkfront_info *info)
2123static int blkfront_resume(struct xenbus_device *dev) 2135static int blkfront_resume(struct xenbus_device *dev)
2124{ 2136{
2125 struct blkfront_info *info = dev_get_drvdata(&dev->dev); 2137 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2126 int err; 2138 int err = 0;
2127 2139
2128 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); 2140 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
2129 2141
2130 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); 2142 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
2131 2143
2144 err = negotiate_mq(info);
2145 if (err)
2146 return err;
2147
2132 err = talk_to_blkback(dev, info); 2148 err = talk_to_blkback(dev, info);
2133 2149
2134 /* 2150 /*
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 240b6cf1d97c..be54e5331a45 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -42,7 +42,7 @@
42/* 42/*
43 * The High Precision Event Timer driver. 43 * The High Precision Event Timer driver.
44 * This driver is closely modelled after the rtc.c driver. 44 * This driver is closely modelled after the rtc.c driver.
45 * http://www.intel.com/hardwaredesign/hpetspec_1.pdf 45 * See HPET spec revision 1.
46 */ 46 */
47#define HPET_USER_FREQ (64) 47#define HPET_USER_FREQ (64)
48#define HPET_DRIFT (500) 48#define HPET_DRIFT (500)
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index b038e3666058..bae4be6501df 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o
43obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o 43obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
44obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o 44obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
45obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o 45obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o
46obj-$(CONFIG_ARCH_TANGOX) += clk-tango4.o 46obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o
47obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o 47obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
48obj-$(CONFIG_ARCH_U300) += clk-u300.o 48obj-$(CONFIG_ARCH_U300) += clk-u300.o
49obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o 49obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 19fed65587e8..7b09a265d79f 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -289,7 +289,7 @@ static void __init of_gpio_clk_setup(struct device_node *node,
289 289
290 num_parents = of_clk_get_parent_count(node); 290 num_parents = of_clk_get_parent_count(node);
291 if (num_parents < 0) 291 if (num_parents < 0)
292 return; 292 num_parents = 0;
293 293
294 data = kzalloc(sizeof(*data), GFP_KERNEL); 294 data = kzalloc(sizeof(*data), GFP_KERNEL);
295 if (!data) 295 if (!data)
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index cd0f2726f5e0..89e9ca78bb94 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -299,7 +299,7 @@ static int scpi_clocks_probe(struct platform_device *pdev)
299 /* Add the virtual cpufreq device */ 299 /* Add the virtual cpufreq device */
300 cpufreq_dev = platform_device_register_simple("scpi-cpufreq", 300 cpufreq_dev = platform_device_register_simple("scpi-cpufreq",
301 -1, NULL, 0); 301 -1, NULL, 0);
302 if (!cpufreq_dev) 302 if (IS_ERR(cpufreq_dev))
303 pr_warn("unable to register cpufreq device"); 303 pr_warn("unable to register cpufreq device");
304 304
305 return 0; 305 return 0;
diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c
index d5c5bfa35a5a..3e0b52daa35f 100644
--- a/drivers/clk/mvebu/dove-divider.c
+++ b/drivers/clk/mvebu/dove-divider.c
@@ -247,7 +247,7 @@ static struct clk_onecell_data dove_divider_data = {
247 247
248void __init dove_divider_clk_init(struct device_node *np) 248void __init dove_divider_clk_init(struct device_node *np)
249{ 249{
250 void *base; 250 void __iomem *base;
251 251
252 base = of_iomap(np, 0); 252 base = of_iomap(np, 0);
253 if (WARN_ON(!base)) 253 if (WARN_ON(!base))
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index cf73e539e9f6..070037a29ea5 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -3587,7 +3587,6 @@ static const struct regmap_config gcc_apq8084_regmap_config = {
3587 .val_bits = 32, 3587 .val_bits = 32,
3588 .max_register = 0x1fc0, 3588 .max_register = 0x1fc0,
3589 .fast_io = true, 3589 .fast_io = true,
3590 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3591}; 3590};
3592 3591
3593static const struct qcom_cc_desc gcc_apq8084_desc = { 3592static const struct qcom_cc_desc gcc_apq8084_desc = {
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index b692ae881d6a..dd5402bac620 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -3005,7 +3005,6 @@ static const struct regmap_config gcc_ipq806x_regmap_config = {
3005 .val_bits = 32, 3005 .val_bits = 32,
3006 .max_register = 0x3e40, 3006 .max_register = 0x3e40,
3007 .fast_io = true, 3007 .fast_io = true,
3008 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3009}; 3008};
3010 3009
3011static const struct qcom_cc_desc gcc_ipq806x_desc = { 3010static const struct qcom_cc_desc gcc_ipq806x_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index f6a2b14dfec4..ad413036f7c7 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -2702,7 +2702,6 @@ static const struct regmap_config gcc_msm8660_regmap_config = {
2702 .val_bits = 32, 2702 .val_bits = 32,
2703 .max_register = 0x363c, 2703 .max_register = 0x363c,
2704 .fast_io = true, 2704 .fast_io = true,
2705 .val_format_endian = REGMAP_ENDIAN_LITTLE,
2706}; 2705};
2707 2706
2708static const struct qcom_cc_desc gcc_msm8660_desc = { 2707static const struct qcom_cc_desc gcc_msm8660_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index e3bf09d7d0ef..8cc9b2868b41 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -3336,7 +3336,6 @@ static const struct regmap_config gcc_msm8916_regmap_config = {
3336 .val_bits = 32, 3336 .val_bits = 32,
3337 .max_register = 0x80000, 3337 .max_register = 0x80000,
3338 .fast_io = true, 3338 .fast_io = true,
3339 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3340}; 3339};
3341 3340
3342static const struct qcom_cc_desc gcc_msm8916_desc = { 3341static const struct qcom_cc_desc gcc_msm8916_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index f31111e32d44..983dd7dc89a7 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -3468,7 +3468,6 @@ static const struct regmap_config gcc_msm8960_regmap_config = {
3468 .val_bits = 32, 3468 .val_bits = 32,
3469 .max_register = 0x3660, 3469 .max_register = 0x3660,
3470 .fast_io = true, 3470 .fast_io = true,
3471 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3472}; 3471};
3473 3472
3474static const struct regmap_config gcc_apq8064_regmap_config = { 3473static const struct regmap_config gcc_apq8064_regmap_config = {
@@ -3477,7 +3476,6 @@ static const struct regmap_config gcc_apq8064_regmap_config = {
3477 .val_bits = 32, 3476 .val_bits = 32,
3478 .max_register = 0x3880, 3477 .max_register = 0x3880,
3479 .fast_io = true, 3478 .fast_io = true,
3480 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3481}; 3479};
3482 3480
3483static const struct qcom_cc_desc gcc_msm8960_desc = { 3481static const struct qcom_cc_desc gcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index df164d618e34..335952db309b 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -2680,7 +2680,6 @@ static const struct regmap_config gcc_msm8974_regmap_config = {
2680 .val_bits = 32, 2680 .val_bits = 32,
2681 .max_register = 0x1fc0, 2681 .max_register = 0x1fc0,
2682 .fast_io = true, 2682 .fast_io = true,
2683 .val_format_endian = REGMAP_ENDIAN_LITTLE,
2684}; 2683};
2685 2684
2686static const struct qcom_cc_desc gcc_msm8974_desc = { 2685static const struct qcom_cc_desc gcc_msm8974_desc = {
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 62e79fadd5f7..db3998e5e2d8 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -419,7 +419,6 @@ static const struct regmap_config lcc_ipq806x_regmap_config = {
419 .val_bits = 32, 419 .val_bits = 32,
420 .max_register = 0xfc, 420 .max_register = 0xfc,
421 .fast_io = true, 421 .fast_io = true,
422 .val_format_endian = REGMAP_ENDIAN_LITTLE,
423}; 422};
424 423
425static const struct qcom_cc_desc lcc_ipq806x_desc = { 424static const struct qcom_cc_desc lcc_ipq806x_desc = {
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index bf95bb0ea1b8..4fcf9d1d233c 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -524,7 +524,6 @@ static const struct regmap_config lcc_msm8960_regmap_config = {
524 .val_bits = 32, 524 .val_bits = 32,
525 .max_register = 0xfc, 525 .max_register = 0xfc,
526 .fast_io = true, 526 .fast_io = true,
527 .val_format_endian = REGMAP_ENDIAN_LITTLE,
528}; 527};
529 528
530static const struct qcom_cc_desc lcc_msm8960_desc = { 529static const struct qcom_cc_desc lcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index 1e703fda8a0f..30777f9f1a43 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -3368,7 +3368,6 @@ static const struct regmap_config mmcc_apq8084_regmap_config = {
3368 .val_bits = 32, 3368 .val_bits = 32,
3369 .max_register = 0x5104, 3369 .max_register = 0x5104,
3370 .fast_io = true, 3370 .fast_io = true,
3371 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3372}; 3371};
3373 3372
3374static const struct qcom_cc_desc mmcc_apq8084_desc = { 3373static const struct qcom_cc_desc mmcc_apq8084_desc = {
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index d73a048d3b9d..00e36192a1de 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -3029,7 +3029,6 @@ static const struct regmap_config mmcc_msm8960_regmap_config = {
3029 .val_bits = 32, 3029 .val_bits = 32,
3030 .max_register = 0x334, 3030 .max_register = 0x334,
3031 .fast_io = true, 3031 .fast_io = true,
3032 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3033}; 3032};
3034 3033
3035static const struct regmap_config mmcc_apq8064_regmap_config = { 3034static const struct regmap_config mmcc_apq8064_regmap_config = {
@@ -3038,7 +3037,6 @@ static const struct regmap_config mmcc_apq8064_regmap_config = {
3038 .val_bits = 32, 3037 .val_bits = 32,
3039 .max_register = 0x350, 3038 .max_register = 0x350,
3040 .fast_io = true, 3039 .fast_io = true,
3041 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3042}; 3040};
3043 3041
3044static const struct qcom_cc_desc mmcc_msm8960_desc = { 3042static const struct qcom_cc_desc mmcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index bbe28ed93669..9d790bcadf25 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -2594,7 +2594,6 @@ static const struct regmap_config mmcc_msm8974_regmap_config = {
2594 .val_bits = 32, 2594 .val_bits = 32,
2595 .max_register = 0x5104, 2595 .max_register = 0x5104,
2596 .fast_io = true, 2596 .fast_io = true,
2597 .val_format_endian = REGMAP_ENDIAN_LITTLE,
2598}; 2597};
2599 2598
2600static const struct qcom_cc_desc mmcc_msm8974_desc = { 2599static const struct qcom_cc_desc mmcc_msm8974_desc = {
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index ebce98033fbb..bc7fbac83ab7 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -133,7 +133,7 @@ PNAME(mux_spdif_p) = { "spdif_src", "spdif_frac", "xin12m" };
133PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; 133PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
134PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; 134PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
135PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" }; 135PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
136PNAME(mux_mac_p) = { "mac_pll_src", "ext_gmac" }; 136PNAME(mux_mac_p) = { "mac_pll_src", "rmii_clkin" };
137PNAME(mux_dclk_p) = { "dclk_lcdc", "dclk_cru" }; 137PNAME(mux_dclk_p) = { "dclk_lcdc", "dclk_cru" };
138 138
139static struct rockchip_pll_clock rk3036_pll_clks[] __initdata = { 139static struct rockchip_pll_clock rk3036_pll_clks[] __initdata = {
@@ -224,16 +224,16 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
224 RK2928_CLKGATE_CON(2), 2, GFLAGS), 224 RK2928_CLKGATE_CON(2), 2, GFLAGS),
225 225
226 COMPOSITE_NODIV(SCLK_TIMER0, "sclk_timer0", mux_timer_p, CLK_IGNORE_UNUSED, 226 COMPOSITE_NODIV(SCLK_TIMER0, "sclk_timer0", mux_timer_p, CLK_IGNORE_UNUSED,
227 RK2928_CLKSEL_CON(2), 4, 1, DFLAGS, 227 RK2928_CLKSEL_CON(2), 4, 1, MFLAGS,
228 RK2928_CLKGATE_CON(1), 0, GFLAGS), 228 RK2928_CLKGATE_CON(1), 0, GFLAGS),
229 COMPOSITE_NODIV(SCLK_TIMER1, "sclk_timer1", mux_timer_p, CLK_IGNORE_UNUSED, 229 COMPOSITE_NODIV(SCLK_TIMER1, "sclk_timer1", mux_timer_p, CLK_IGNORE_UNUSED,
230 RK2928_CLKSEL_CON(2), 5, 1, DFLAGS, 230 RK2928_CLKSEL_CON(2), 5, 1, MFLAGS,
231 RK2928_CLKGATE_CON(1), 1, GFLAGS), 231 RK2928_CLKGATE_CON(1), 1, GFLAGS),
232 COMPOSITE_NODIV(SCLK_TIMER2, "sclk_timer2", mux_timer_p, CLK_IGNORE_UNUSED, 232 COMPOSITE_NODIV(SCLK_TIMER2, "sclk_timer2", mux_timer_p, CLK_IGNORE_UNUSED,
233 RK2928_CLKSEL_CON(2), 6, 1, DFLAGS, 233 RK2928_CLKSEL_CON(2), 6, 1, MFLAGS,
234 RK2928_CLKGATE_CON(2), 4, GFLAGS), 234 RK2928_CLKGATE_CON(2), 4, GFLAGS),
235 COMPOSITE_NODIV(SCLK_TIMER3, "sclk_timer3", mux_timer_p, CLK_IGNORE_UNUSED, 235 COMPOSITE_NODIV(SCLK_TIMER3, "sclk_timer3", mux_timer_p, CLK_IGNORE_UNUSED,
236 RK2928_CLKSEL_CON(2), 7, 1, DFLAGS, 236 RK2928_CLKSEL_CON(2), 7, 1, MFLAGS,
237 RK2928_CLKGATE_CON(2), 5, GFLAGS), 237 RK2928_CLKGATE_CON(2), 5, GFLAGS),
238 238
239 MUX(0, "uart_pll_clk", mux_pll_src_apll_dpll_gpll_usb480m_p, 0, 239 MUX(0, "uart_pll_clk", mux_pll_src_apll_dpll_gpll_usb480m_p, 0,
@@ -242,11 +242,11 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
242 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, 242 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS,
243 RK2928_CLKGATE_CON(1), 8, GFLAGS), 243 RK2928_CLKGATE_CON(1), 8, GFLAGS),
244 COMPOSITE_NOMUX(0, "uart1_src", "uart_pll_clk", 0, 244 COMPOSITE_NOMUX(0, "uart1_src", "uart_pll_clk", 0,
245 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, 245 RK2928_CLKSEL_CON(14), 0, 7, DFLAGS,
246 RK2928_CLKGATE_CON(1), 8, GFLAGS), 246 RK2928_CLKGATE_CON(1), 10, GFLAGS),
247 COMPOSITE_NOMUX(0, "uart2_src", "uart_pll_clk", 0, 247 COMPOSITE_NOMUX(0, "uart2_src", "uart_pll_clk", 0,
248 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, 248 RK2928_CLKSEL_CON(15), 0, 7, DFLAGS,
249 RK2928_CLKGATE_CON(1), 8, GFLAGS), 249 RK2928_CLKGATE_CON(1), 12, GFLAGS),
250 COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT, 250 COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
251 RK2928_CLKSEL_CON(17), 0, 251 RK2928_CLKSEL_CON(17), 0,
252 RK2928_CLKGATE_CON(1), 9, GFLAGS, 252 RK2928_CLKGATE_CON(1), 9, GFLAGS,
@@ -279,13 +279,13 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
279 RK2928_CLKGATE_CON(3), 2, GFLAGS), 279 RK2928_CLKGATE_CON(3), 2, GFLAGS),
280 280
281 COMPOSITE_NODIV(0, "sclk_sdmmc_src", mux_mmc_src_p, 0, 281 COMPOSITE_NODIV(0, "sclk_sdmmc_src", mux_mmc_src_p, 0,
282 RK2928_CLKSEL_CON(12), 8, 2, DFLAGS, 282 RK2928_CLKSEL_CON(12), 8, 2, MFLAGS,
283 RK2928_CLKGATE_CON(2), 11, GFLAGS), 283 RK2928_CLKGATE_CON(2), 11, GFLAGS),
284 DIV(SCLK_SDMMC, "sclk_sdmmc", "sclk_sdmmc_src", 0, 284 DIV(SCLK_SDMMC, "sclk_sdmmc", "sclk_sdmmc_src", 0,
285 RK2928_CLKSEL_CON(11), 0, 7, DFLAGS), 285 RK2928_CLKSEL_CON(11), 0, 7, DFLAGS),
286 286
287 COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0, 287 COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0,
288 RK2928_CLKSEL_CON(12), 10, 2, DFLAGS, 288 RK2928_CLKSEL_CON(12), 10, 2, MFLAGS,
289 RK2928_CLKGATE_CON(2), 13, GFLAGS), 289 RK2928_CLKGATE_CON(2), 13, GFLAGS),
290 DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0, 290 DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0,
291 RK2928_CLKSEL_CON(11), 8, 7, DFLAGS), 291 RK2928_CLKSEL_CON(11), 8, 7, DFLAGS),
@@ -344,12 +344,12 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
344 RK2928_CLKGATE_CON(10), 5, GFLAGS), 344 RK2928_CLKGATE_CON(10), 5, GFLAGS),
345 345
346 COMPOSITE_NOGATE(0, "mac_pll_src", mux_pll_src_3plls_p, 0, 346 COMPOSITE_NOGATE(0, "mac_pll_src", mux_pll_src_3plls_p, 0,
347 RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 4, 5, DFLAGS), 347 RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 9, 5, DFLAGS),
348 MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT, 348 MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT,
349 RK2928_CLKSEL_CON(21), 3, 1, MFLAGS), 349 RK2928_CLKSEL_CON(21), 3, 1, MFLAGS),
350 350
351 COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0, 351 COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0,
352 RK2928_CLKSEL_CON(21), 9, 5, DFLAGS, 352 RK2928_CLKSEL_CON(21), 4, 5, DFLAGS,
353 RK2928_CLKGATE_CON(2), 6, GFLAGS), 353 RK2928_CLKGATE_CON(2), 6, GFLAGS),
354 354
355 MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0, 355 MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0,
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index be0ede522269..21f3ea909fab 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -780,13 +780,13 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
780 GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 0, GFLAGS), 780 GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 0, GFLAGS),
781 781
782 /* pclk_pd_alive gates */ 782 /* pclk_pd_alive gates */
783 GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 8, GFLAGS), 783 GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 13, GFLAGS),
784 GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 7, GFLAGS), 784 GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 12, GFLAGS),
785 GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 12, GFLAGS), 785 GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 9, GFLAGS),
786 GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 11, GFLAGS), 786 GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 8, GFLAGS),
787 GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 3, GFLAGS), 787 GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 3, GFLAGS),
788 GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 2, GFLAGS), 788 GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 2, GFLAGS),
789 GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 1, GFLAGS), 789 GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 1, GFLAGS),
790 790
791 /* 791 /*
792 * pclk_vio gates 792 * pclk_vio gates
@@ -796,12 +796,12 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
796 GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS), 796 GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS),
797 797
798 /* pclk_pd_pmu gates */ 798 /* pclk_pd_pmu gates */
799 GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 0, GFLAGS), 799 GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 5, GFLAGS),
800 GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(17), 4, GFLAGS), 800 GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(23), 4, GFLAGS),
801 GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 3, GFLAGS), 801 GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 3, GFLAGS),
802 GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS), 802 GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 2, GFLAGS),
803 GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 1, GFLAGS), 803 GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 1, GFLAGS),
804 GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS), 804 GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 0, GFLAGS),
805 805
806 /* timer gates */ 806 /* timer gates */
807 GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS), 807 GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS),
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index e1fe8f35d45c..74e7544f861b 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -450,8 +450,10 @@ static int load_timings_from_dt(struct tegra_clk_emc *tegra,
450 struct emc_timing *timing = tegra->timings + (i++); 450 struct emc_timing *timing = tegra->timings + (i++);
451 451
452 err = load_one_timing_from_dt(tegra, timing, child); 452 err = load_one_timing_from_dt(tegra, timing, child);
453 if (err) 453 if (err) {
454 of_node_put(child);
454 return err; 455 return err;
456 }
455 457
456 timing->ram_code = ram_code; 458 timing->ram_code = ram_code;
457 } 459 }
@@ -499,9 +501,9 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
499 * fuses until the apbmisc driver is loaded. 501 * fuses until the apbmisc driver is loaded.
500 */ 502 */
501 err = load_timings_from_dt(tegra, node, node_ram_code); 503 err = load_timings_from_dt(tegra, node, node_ram_code);
504 of_node_put(node);
502 if (err) 505 if (err)
503 return ERR_PTR(err); 506 return ERR_PTR(err);
504 of_node_put(node);
505 break; 507 break;
506 } 508 }
507 509
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index 19ce0738ee76..62ea38187b71 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -11,6 +11,7 @@ enum clk_id {
11 tegra_clk_afi, 11 tegra_clk_afi,
12 tegra_clk_amx, 12 tegra_clk_amx,
13 tegra_clk_amx1, 13 tegra_clk_amx1,
14 tegra_clk_apb2ape,
14 tegra_clk_apbdma, 15 tegra_clk_apbdma,
15 tegra_clk_apbif, 16 tegra_clk_apbif,
16 tegra_clk_ape, 17 tegra_clk_ape,
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index a534bfab30b3..6ac3f843e7ca 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -86,15 +86,21 @@
86#define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\ 86#define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\
87 PLLE_SS_CNTL_SSC_BYP) 87 PLLE_SS_CNTL_SSC_BYP)
88#define PLLE_SS_MAX_MASK 0x1ff 88#define PLLE_SS_MAX_MASK 0x1ff
89#define PLLE_SS_MAX_VAL 0x25 89#define PLLE_SS_MAX_VAL_TEGRA114 0x25
90#define PLLE_SS_MAX_VAL_TEGRA210 0x21
90#define PLLE_SS_INC_MASK (0xff << 16) 91#define PLLE_SS_INC_MASK (0xff << 16)
91#define PLLE_SS_INC_VAL (0x1 << 16) 92#define PLLE_SS_INC_VAL (0x1 << 16)
92#define PLLE_SS_INCINTRV_MASK (0x3f << 24) 93#define PLLE_SS_INCINTRV_MASK (0x3f << 24)
93#define PLLE_SS_INCINTRV_VAL (0x20 << 24) 94#define PLLE_SS_INCINTRV_VAL_TEGRA114 (0x20 << 24)
95#define PLLE_SS_INCINTRV_VAL_TEGRA210 (0x23 << 24)
94#define PLLE_SS_COEFFICIENTS_MASK \ 96#define PLLE_SS_COEFFICIENTS_MASK \
95 (PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK) 97 (PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK)
96#define PLLE_SS_COEFFICIENTS_VAL \ 98#define PLLE_SS_COEFFICIENTS_VAL_TEGRA114 \
97 (PLLE_SS_MAX_VAL | PLLE_SS_INC_VAL | PLLE_SS_INCINTRV_VAL) 99 (PLLE_SS_MAX_VAL_TEGRA114 | PLLE_SS_INC_VAL |\
100 PLLE_SS_INCINTRV_VAL_TEGRA114)
101#define PLLE_SS_COEFFICIENTS_VAL_TEGRA210 \
102 (PLLE_SS_MAX_VAL_TEGRA210 | PLLE_SS_INC_VAL |\
103 PLLE_SS_INCINTRV_VAL_TEGRA210)
98 104
99#define PLLE_AUX_PLLP_SEL BIT(2) 105#define PLLE_AUX_PLLP_SEL BIT(2)
100#define PLLE_AUX_USE_LOCKDET BIT(3) 106#define PLLE_AUX_USE_LOCKDET BIT(3)
@@ -880,7 +886,7 @@ static int clk_plle_training(struct tegra_clk_pll *pll)
880static int clk_plle_enable(struct clk_hw *hw) 886static int clk_plle_enable(struct clk_hw *hw)
881{ 887{
882 struct tegra_clk_pll *pll = to_clk_pll(hw); 888 struct tegra_clk_pll *pll = to_clk_pll(hw);
883 unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); 889 unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
884 struct tegra_clk_pll_freq_table sel; 890 struct tegra_clk_pll_freq_table sel;
885 u32 val; 891 u32 val;
886 int err; 892 int err;
@@ -1378,7 +1384,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1378 u32 val; 1384 u32 val;
1379 int ret; 1385 int ret;
1380 unsigned long flags = 0; 1386 unsigned long flags = 0;
1381 unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); 1387 unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
1382 1388
1383 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate)) 1389 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
1384 return -EINVAL; 1390 return -EINVAL;
@@ -1401,7 +1407,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1401 val |= PLLE_MISC_IDDQ_SW_CTRL; 1407 val |= PLLE_MISC_IDDQ_SW_CTRL;
1402 val &= ~PLLE_MISC_IDDQ_SW_VALUE; 1408 val &= ~PLLE_MISC_IDDQ_SW_VALUE;
1403 val |= PLLE_MISC_PLLE_PTS; 1409 val |= PLLE_MISC_PLLE_PTS;
1404 val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK; 1410 val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
1405 pll_writel_misc(val, pll); 1411 pll_writel_misc(val, pll);
1406 udelay(5); 1412 udelay(5);
1407 1413
@@ -1428,7 +1434,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1428 val = pll_readl(PLLE_SS_CTRL, pll); 1434 val = pll_readl(PLLE_SS_CTRL, pll);
1429 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT); 1435 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
1430 val &= ~PLLE_SS_COEFFICIENTS_MASK; 1436 val &= ~PLLE_SS_COEFFICIENTS_MASK;
1431 val |= PLLE_SS_COEFFICIENTS_VAL; 1437 val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA114;
1432 pll_writel(val, PLLE_SS_CTRL, pll); 1438 pll_writel(val, PLLE_SS_CTRL, pll);
1433 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS); 1439 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
1434 pll_writel(val, PLLE_SS_CTRL, pll); 1440 pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2012,9 +2018,9 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
2012 struct tegra_clk_pll *pll = to_clk_pll(hw); 2018 struct tegra_clk_pll *pll = to_clk_pll(hw);
2013 struct tegra_clk_pll_freq_table sel; 2019 struct tegra_clk_pll_freq_table sel;
2014 u32 val; 2020 u32 val;
2015 int ret; 2021 int ret = 0;
2016 unsigned long flags = 0; 2022 unsigned long flags = 0;
2017 unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); 2023 unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
2018 2024
2019 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate)) 2025 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
2020 return -EINVAL; 2026 return -EINVAL;
@@ -2022,22 +2028,20 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
2022 if (pll->lock) 2028 if (pll->lock)
2023 spin_lock_irqsave(pll->lock, flags); 2029 spin_lock_irqsave(pll->lock, flags);
2024 2030
2031 val = pll_readl(pll->params->aux_reg, pll);
2032 if (val & PLLE_AUX_SEQ_ENABLE)
2033 goto out;
2034
2025 val = pll_readl_base(pll); 2035 val = pll_readl_base(pll);
2026 val &= ~BIT(30); /* Disable lock override */ 2036 val &= ~BIT(30); /* Disable lock override */
2027 pll_writel_base(val, pll); 2037 pll_writel_base(val, pll);
2028 2038
2029 val = pll_readl(pll->params->aux_reg, pll);
2030 val |= PLLE_AUX_ENABLE_SWCTL;
2031 val &= ~PLLE_AUX_SEQ_ENABLE;
2032 pll_writel(val, pll->params->aux_reg, pll);
2033 udelay(1);
2034
2035 val = pll_readl_misc(pll); 2039 val = pll_readl_misc(pll);
2036 val |= PLLE_MISC_LOCK_ENABLE; 2040 val |= PLLE_MISC_LOCK_ENABLE;
2037 val |= PLLE_MISC_IDDQ_SW_CTRL; 2041 val |= PLLE_MISC_IDDQ_SW_CTRL;
2038 val &= ~PLLE_MISC_IDDQ_SW_VALUE; 2042 val &= ~PLLE_MISC_IDDQ_SW_VALUE;
2039 val |= PLLE_MISC_PLLE_PTS; 2043 val |= PLLE_MISC_PLLE_PTS;
2040 val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK; 2044 val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
2041 pll_writel_misc(val, pll); 2045 pll_writel_misc(val, pll);
2042 udelay(5); 2046 udelay(5);
2043 2047
@@ -2067,7 +2071,7 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
2067 val = pll_readl(PLLE_SS_CTRL, pll); 2071 val = pll_readl(PLLE_SS_CTRL, pll);
2068 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT); 2072 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
2069 val &= ~PLLE_SS_COEFFICIENTS_MASK; 2073 val &= ~PLLE_SS_COEFFICIENTS_MASK;
2070 val |= PLLE_SS_COEFFICIENTS_VAL; 2074 val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA210;
2071 pll_writel(val, PLLE_SS_CTRL, pll); 2075 pll_writel(val, PLLE_SS_CTRL, pll);
2072 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS); 2076 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
2073 pll_writel(val, PLLE_SS_CTRL, pll); 2077 pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2104,15 +2108,25 @@ static void clk_plle_tegra210_disable(struct clk_hw *hw)
2104 if (pll->lock) 2108 if (pll->lock)
2105 spin_lock_irqsave(pll->lock, flags); 2109 spin_lock_irqsave(pll->lock, flags);
2106 2110
2111 /* If PLLE HW sequencer is enabled, SW should not disable PLLE */
2112 val = pll_readl(pll->params->aux_reg, pll);
2113 if (val & PLLE_AUX_SEQ_ENABLE)
2114 goto out;
2115
2107 val = pll_readl_base(pll); 2116 val = pll_readl_base(pll);
2108 val &= ~PLLE_BASE_ENABLE; 2117 val &= ~PLLE_BASE_ENABLE;
2109 pll_writel_base(val, pll); 2118 pll_writel_base(val, pll);
2110 2119
2120 val = pll_readl(pll->params->aux_reg, pll);
2121 val |= PLLE_AUX_ENABLE_SWCTL | PLLE_AUX_SS_SWCTL;
2122 pll_writel(val, pll->params->aux_reg, pll);
2123
2111 val = pll_readl_misc(pll); 2124 val = pll_readl_misc(pll);
2112 val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE; 2125 val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE;
2113 pll_writel_misc(val, pll); 2126 pll_writel_misc(val, pll);
2114 udelay(1); 2127 udelay(1);
2115 2128
2129out:
2116 if (pll->lock) 2130 if (pll->lock)
2117 spin_unlock_irqrestore(pll->lock, flags); 2131 spin_unlock_irqrestore(pll->lock, flags);
2118} 2132}
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 6ad381a888a6..ea2b9cbf9e70 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -773,7 +773,7 @@ static struct tegra_periph_init_data periph_clks[] = {
773 XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src), 773 XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src),
774 XUSB("xusb_dev_src", mux_clkm_pllp_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src_8), 774 XUSB("xusb_dev_src", mux_clkm_pllp_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src_8),
775 MUX8("dbgapb", mux_pllp_clkm_2, CLK_SOURCE_DBGAPB, 185, TEGRA_PERIPH_NO_RESET, tegra_clk_dbgapb), 775 MUX8("dbgapb", mux_pllp_clkm_2, CLK_SOURCE_DBGAPB, 185, TEGRA_PERIPH_NO_RESET, tegra_clk_dbgapb),
776 MUX8("msenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc), 776 MUX8("nvenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc),
777 MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec), 777 MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec),
778 MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg), 778 MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg),
779 MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape), 779 MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape),
@@ -782,7 +782,7 @@ static struct tegra_periph_init_data periph_clks[] = {
782 NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock), 782 NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock),
783 MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy), 783 MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy),
784 MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi), 784 MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi),
785 MUX("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, TEGRA_PERIPH_ON_APB, tegra_clk_vi_i2c), 785 I2C("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, tegra_clk_vi_i2c),
786 MUX("mipibif", mux_pllp_clkm, CLK_SOURCE_MIPIBIF, 173, TEGRA_PERIPH_ON_APB, tegra_clk_mipibif), 786 MUX("mipibif", mux_pllp_clkm, CLK_SOURCE_MIPIBIF, 173, TEGRA_PERIPH_ON_APB, tegra_clk_mipibif),
787 MUX("uartape", mux_pllp_pllc_clkm, CLK_SOURCE_UARTAPE, 212, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_uartape), 787 MUX("uartape", mux_pllp_pllc_clkm, CLK_SOURCE_UARTAPE, 212, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_uartape),
788 MUX8("tsecb", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_TSECB, 206, 0, tegra_clk_tsecb), 788 MUX8("tsecb", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_TSECB, 206, 0, tegra_clk_tsecb),
@@ -829,6 +829,7 @@ static struct tegra_periph_init_data gate_clks[] = {
829 GATE("xusb_gate", "osc", 143, 0, tegra_clk_xusb_gate, 0), 829 GATE("xusb_gate", "osc", 143, 0, tegra_clk_xusb_gate, 0),
830 GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0), 830 GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0),
831 GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0), 831 GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0),
832 GATE("apb2ape", "clk_m", 107, 0, tegra_clk_apb2ape, 0),
832}; 833};
833 834
834static struct tegra_periph_init_data div_clks[] = { 835static struct tegra_periph_init_data div_clks[] = {
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 4559a20e3af6..474de0f0c26d 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -67,7 +67,7 @@ static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
67 "pll_p", "pll_p_out4", "unused", 67 "pll_p", "pll_p_out4", "unused",
68 "unused", "pll_x", "pll_x_out0" }; 68 "unused", "pll_x", "pll_x_out0" };
69 69
70const struct tegra_super_gen_info tegra_super_gen_info_gen4 = { 70static const struct tegra_super_gen_info tegra_super_gen_info_gen4 = {
71 .gen = gen4, 71 .gen = gen4,
72 .sclk_parents = sclk_parents, 72 .sclk_parents = sclk_parents,
73 .cclk_g_parents = cclk_g_parents, 73 .cclk_g_parents = cclk_g_parents,
@@ -93,7 +93,7 @@ static const char *cclk_lp_parents_gen5[] = { "clk_m", "unused", "clk_32k", "unu
93 "unused", "unused", "unused", "unused", 93 "unused", "unused", "unused", "unused",
94 "dfllCPU_out" }; 94 "dfllCPU_out" };
95 95
96const struct tegra_super_gen_info tegra_super_gen_info_gen5 = { 96static const struct tegra_super_gen_info tegra_super_gen_info_gen5 = {
97 .gen = gen5, 97 .gen = gen5,
98 .sclk_parents = sclk_parents_gen5, 98 .sclk_parents = sclk_parents_gen5,
99 .cclk_g_parents = cclk_g_parents_gen5, 99 .cclk_g_parents = cclk_g_parents_gen5,
@@ -171,7 +171,7 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
171 *dt_clk = clk; 171 *dt_clk = clk;
172} 172}
173 173
174void __init tegra_super_clk_init(void __iomem *clk_base, 174static void __init tegra_super_clk_init(void __iomem *clk_base,
175 void __iomem *pmc_base, 175 void __iomem *pmc_base,
176 struct tegra_clk *tegra_clks, 176 struct tegra_clk *tegra_clks,
177 struct tegra_clk_pll_params *params, 177 struct tegra_clk_pll_params *params,
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 58514c44ea83..637041fd53ad 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -59,8 +59,8 @@
59#define PLLC3_MISC3 0x50c 59#define PLLC3_MISC3 0x50c
60 60
61#define PLLM_BASE 0x90 61#define PLLM_BASE 0x90
62#define PLLM_MISC0 0x9c
63#define PLLM_MISC1 0x98 62#define PLLM_MISC1 0x98
63#define PLLM_MISC2 0x9c
64#define PLLP_BASE 0xa0 64#define PLLP_BASE 0xa0
65#define PLLP_MISC0 0xac 65#define PLLP_MISC0 0xac
66#define PLLP_MISC1 0x680 66#define PLLP_MISC1 0x680
@@ -99,7 +99,7 @@
99#define PLLC4_MISC0 0x5a8 99#define PLLC4_MISC0 0x5a8
100#define PLLC4_OUT 0x5e4 100#define PLLC4_OUT 0x5e4
101#define PLLMB_BASE 0x5e8 101#define PLLMB_BASE 0x5e8
102#define PLLMB_MISC0 0x5ec 102#define PLLMB_MISC1 0x5ec
103#define PLLA1_BASE 0x6a4 103#define PLLA1_BASE 0x6a4
104#define PLLA1_MISC0 0x6a8 104#define PLLA1_MISC0 0x6a8
105#define PLLA1_MISC1 0x6ac 105#define PLLA1_MISC1 0x6ac
@@ -243,7 +243,8 @@ static unsigned long tegra210_input_freq[] = {
243}; 243};
244 244
245static const char *mux_pllmcp_clkm[] = { 245static const char *mux_pllmcp_clkm[] = {
246 "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3", 246 "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb", "pll_mb",
247 "pll_p",
247}; 248};
248#define mux_pllmcp_clkm_idx NULL 249#define mux_pllmcp_clkm_idx NULL
249 250
@@ -367,12 +368,12 @@ static const char *mux_pllmcp_clkm[] = {
367/* PLLMB */ 368/* PLLMB */
368#define PLLMB_BASE_LOCK (1 << 27) 369#define PLLMB_BASE_LOCK (1 << 27)
369 370
370#define PLLMB_MISC0_LOCK_OVERRIDE (1 << 18) 371#define PLLMB_MISC1_LOCK_OVERRIDE (1 << 18)
371#define PLLMB_MISC0_IDDQ (1 << 17) 372#define PLLMB_MISC1_IDDQ (1 << 17)
372#define PLLMB_MISC0_LOCK_ENABLE (1 << 16) 373#define PLLMB_MISC1_LOCK_ENABLE (1 << 16)
373 374
374#define PLLMB_MISC0_DEFAULT_VALUE 0x00030000 375#define PLLMB_MISC1_DEFAULT_VALUE 0x00030000
375#define PLLMB_MISC0_WRITE_MASK 0x0007ffff 376#define PLLMB_MISC1_WRITE_MASK 0x0007ffff
376 377
377/* PLLP */ 378/* PLLP */
378#define PLLP_BASE_OVERRIDE (1 << 28) 379#define PLLP_BASE_OVERRIDE (1 << 28)
@@ -457,7 +458,8 @@ static void pllcx_check_defaults(struct tegra_clk_pll_params *params)
457 PLLCX_MISC3_WRITE_MASK); 458 PLLCX_MISC3_WRITE_MASK);
458} 459}
459 460
460void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx) 461static void tegra210_pllcx_set_defaults(const char *name,
462 struct tegra_clk_pll *pllcx)
461{ 463{
462 pllcx->params->defaults_set = true; 464 pllcx->params->defaults_set = true;
463 465
@@ -482,22 +484,22 @@ void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx)
482 udelay(1); 484 udelay(1);
483} 485}
484 486
485void _pllc_set_defaults(struct tegra_clk_pll *pllcx) 487static void _pllc_set_defaults(struct tegra_clk_pll *pllcx)
486{ 488{
487 tegra210_pllcx_set_defaults("PLL_C", pllcx); 489 tegra210_pllcx_set_defaults("PLL_C", pllcx);
488} 490}
489 491
490void _pllc2_set_defaults(struct tegra_clk_pll *pllcx) 492static void _pllc2_set_defaults(struct tegra_clk_pll *pllcx)
491{ 493{
492 tegra210_pllcx_set_defaults("PLL_C2", pllcx); 494 tegra210_pllcx_set_defaults("PLL_C2", pllcx);
493} 495}
494 496
495void _pllc3_set_defaults(struct tegra_clk_pll *pllcx) 497static void _pllc3_set_defaults(struct tegra_clk_pll *pllcx)
496{ 498{
497 tegra210_pllcx_set_defaults("PLL_C3", pllcx); 499 tegra210_pllcx_set_defaults("PLL_C3", pllcx);
498} 500}
499 501
500void _plla1_set_defaults(struct tegra_clk_pll *pllcx) 502static void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
501{ 503{
502 tegra210_pllcx_set_defaults("PLL_A1", pllcx); 504 tegra210_pllcx_set_defaults("PLL_A1", pllcx);
503} 505}
@@ -507,7 +509,7 @@ void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
507 * PLL with dynamic ramp and fractional SDM. Dynamic ramp is not used. 509 * PLL with dynamic ramp and fractional SDM. Dynamic ramp is not used.
508 * Fractional SDM is allowed to provide exact audio rates. 510 * Fractional SDM is allowed to provide exact audio rates.
509 */ 511 */
510void tegra210_plla_set_defaults(struct tegra_clk_pll *plla) 512static void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
511{ 513{
512 u32 mask; 514 u32 mask;
513 u32 val = readl_relaxed(clk_base + plla->params->base_reg); 515 u32 val = readl_relaxed(clk_base + plla->params->base_reg);
@@ -559,7 +561,7 @@ void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
559 * PLLD 561 * PLLD
560 * PLL with fractional SDM. 562 * PLL with fractional SDM.
561 */ 563 */
562void tegra210_plld_set_defaults(struct tegra_clk_pll *plld) 564static void tegra210_plld_set_defaults(struct tegra_clk_pll *plld)
563{ 565{
564 u32 val; 566 u32 val;
565 u32 mask = 0xffff; 567 u32 mask = 0xffff;
@@ -698,7 +700,7 @@ static void plldss_defaults(const char *pll_name, struct tegra_clk_pll *plldss,
698 udelay(1); 700 udelay(1);
699} 701}
700 702
701void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2) 703static void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
702{ 704{
703 plldss_defaults("PLL_D2", plld2, PLLD2_MISC0_DEFAULT_VALUE, 705 plldss_defaults("PLL_D2", plld2, PLLD2_MISC0_DEFAULT_VALUE,
704 PLLD2_MISC1_CFG_DEFAULT_VALUE, 706 PLLD2_MISC1_CFG_DEFAULT_VALUE,
@@ -706,7 +708,7 @@ void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
706 PLLD2_MISC3_CTRL2_DEFAULT_VALUE); 708 PLLD2_MISC3_CTRL2_DEFAULT_VALUE);
707} 709}
708 710
709void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp) 711static void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
710{ 712{
711 plldss_defaults("PLL_DP", plldp, PLLDP_MISC0_DEFAULT_VALUE, 713 plldss_defaults("PLL_DP", plldp, PLLDP_MISC0_DEFAULT_VALUE,
712 PLLDP_MISC1_CFG_DEFAULT_VALUE, 714 PLLDP_MISC1_CFG_DEFAULT_VALUE,
@@ -719,7 +721,7 @@ void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
719 * Base and misc0 layout is the same as PLLD2/PLLDP, but no SDM/SSC support. 721 * Base and misc0 layout is the same as PLLD2/PLLDP, but no SDM/SSC support.
720 * VCO is exposed to the clock tree via fixed 1/3 and 1/5 dividers. 722 * VCO is exposed to the clock tree via fixed 1/3 and 1/5 dividers.
721 */ 723 */
722void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4) 724static void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
723{ 725{
724 plldss_defaults("PLL_C4", pllc4, PLLC4_MISC0_DEFAULT_VALUE, 0, 0, 0); 726 plldss_defaults("PLL_C4", pllc4, PLLC4_MISC0_DEFAULT_VALUE, 0, 0, 0);
725} 727}
@@ -728,7 +730,7 @@ void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
728 * PLLRE 730 * PLLRE
729 * VCO is exposed to the clock tree directly along with post-divider output 731 * VCO is exposed to the clock tree directly along with post-divider output
730 */ 732 */
731void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre) 733static void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre)
732{ 734{
733 u32 mask; 735 u32 mask;
734 u32 val = readl_relaxed(clk_base + pllre->params->base_reg); 736 u32 val = readl_relaxed(clk_base + pllre->params->base_reg);
@@ -780,13 +782,13 @@ static void pllx_get_dyn_steps(struct clk_hw *hw, u32 *step_a, u32 *step_b)
780{ 782{
781 unsigned long input_rate; 783 unsigned long input_rate;
782 784
783 if (!IS_ERR_OR_NULL(hw->clk)) { 785 /* cf rate */
786 if (!IS_ERR_OR_NULL(hw->clk))
784 input_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); 787 input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
785 /* cf rate */ 788 else
786 input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
787 } else {
788 input_rate = 38400000; 789 input_rate = 38400000;
789 } 790
791 input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
790 792
791 switch (input_rate) { 793 switch (input_rate) {
792 case 12000000: 794 case 12000000:
@@ -841,7 +843,7 @@ static void pllx_check_defaults(struct tegra_clk_pll *pll)
841 PLLX_MISC5_WRITE_MASK); 843 PLLX_MISC5_WRITE_MASK);
842} 844}
843 845
844void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx) 846static void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
845{ 847{
846 u32 val; 848 u32 val;
847 u32 step_a, step_b; 849 u32 step_a, step_b;
@@ -901,7 +903,7 @@ void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
901} 903}
902 904
903/* PLLMB */ 905/* PLLMB */
904void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb) 906static void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
905{ 907{
906 u32 mask, val = readl_relaxed(clk_base + pllmb->params->base_reg); 908 u32 mask, val = readl_relaxed(clk_base + pllmb->params->base_reg);
907 909
@@ -914,15 +916,15 @@ void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
914 * PLL is ON: check if defaults already set, then set those 916 * PLL is ON: check if defaults already set, then set those
915 * that can be updated in flight. 917 * that can be updated in flight.
916 */ 918 */
917 val = PLLMB_MISC0_DEFAULT_VALUE & (~PLLMB_MISC0_IDDQ); 919 val = PLLMB_MISC1_DEFAULT_VALUE & (~PLLMB_MISC1_IDDQ);
918 mask = PLLMB_MISC0_LOCK_ENABLE | PLLMB_MISC0_LOCK_OVERRIDE; 920 mask = PLLMB_MISC1_LOCK_ENABLE | PLLMB_MISC1_LOCK_OVERRIDE;
919 _pll_misc_chk_default(clk_base, pllmb->params, 0, val, 921 _pll_misc_chk_default(clk_base, pllmb->params, 0, val,
920 ~mask & PLLMB_MISC0_WRITE_MASK); 922 ~mask & PLLMB_MISC1_WRITE_MASK);
921 923
922 /* Enable lock detect */ 924 /* Enable lock detect */
923 val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]); 925 val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]);
924 val &= ~mask; 926 val &= ~mask;
925 val |= PLLMB_MISC0_DEFAULT_VALUE & mask; 927 val |= PLLMB_MISC1_DEFAULT_VALUE & mask;
926 writel_relaxed(val, clk_base + pllmb->params->ext_misc_reg[0]); 928 writel_relaxed(val, clk_base + pllmb->params->ext_misc_reg[0]);
927 udelay(1); 929 udelay(1);
928 930
@@ -930,7 +932,7 @@ void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
930 } 932 }
931 933
932 /* set IDDQ, enable lock detect */ 934 /* set IDDQ, enable lock detect */
933 writel_relaxed(PLLMB_MISC0_DEFAULT_VALUE, 935 writel_relaxed(PLLMB_MISC1_DEFAULT_VALUE,
934 clk_base + pllmb->params->ext_misc_reg[0]); 936 clk_base + pllmb->params->ext_misc_reg[0]);
935 udelay(1); 937 udelay(1);
936} 938}
@@ -960,7 +962,7 @@ static void pllp_check_defaults(struct tegra_clk_pll *pll, bool enabled)
960 ~mask & PLLP_MISC1_WRITE_MASK); 962 ~mask & PLLP_MISC1_WRITE_MASK);
961} 963}
962 964
963void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp) 965static void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp)
964{ 966{
965 u32 mask; 967 u32 mask;
966 u32 val = readl_relaxed(clk_base + pllp->params->base_reg); 968 u32 val = readl_relaxed(clk_base + pllp->params->base_reg);
@@ -1022,7 +1024,7 @@ static void pllu_check_defaults(struct tegra_clk_pll *pll, bool hw_control)
1022 ~mask & PLLU_MISC1_WRITE_MASK); 1024 ~mask & PLLU_MISC1_WRITE_MASK);
1023} 1025}
1024 1026
1025void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu) 1027static void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu)
1026{ 1028{
1027 u32 val = readl_relaxed(clk_base + pllu->params->base_reg); 1029 u32 val = readl_relaxed(clk_base + pllu->params->base_reg);
1028 1030
@@ -1212,8 +1214,9 @@ static void tegra210_clk_pll_set_gain(struct tegra_clk_pll_freq_table *cfg)
1212 cfg->m *= PLL_SDM_COEFF; 1214 cfg->m *= PLL_SDM_COEFF;
1213} 1215}
1214 1216
1215unsigned long tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params, 1217static unsigned long
1216 unsigned long parent_rate) 1218tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params,
1219 unsigned long parent_rate)
1217{ 1220{
1218 unsigned long vco_min = params->vco_min; 1221 unsigned long vco_min = params->vco_min;
1219 1222
@@ -1386,7 +1389,7 @@ static struct tegra_clk_pll_params pll_c_params = {
1386 .mdiv_default = 3, 1389 .mdiv_default = 3,
1387 .div_nmp = &pllc_nmp, 1390 .div_nmp = &pllc_nmp,
1388 .freq_table = pll_cx_freq_table, 1391 .freq_table = pll_cx_freq_table,
1389 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1392 .flags = TEGRA_PLL_USE_LOCK,
1390 .set_defaults = _pllc_set_defaults, 1393 .set_defaults = _pllc_set_defaults,
1391 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1394 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1392}; 1395};
@@ -1425,7 +1428,7 @@ static struct tegra_clk_pll_params pll_c2_params = {
1425 .ext_misc_reg[2] = PLLC2_MISC2, 1428 .ext_misc_reg[2] = PLLC2_MISC2,
1426 .ext_misc_reg[3] = PLLC2_MISC3, 1429 .ext_misc_reg[3] = PLLC2_MISC3,
1427 .freq_table = pll_cx_freq_table, 1430 .freq_table = pll_cx_freq_table,
1428 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1431 .flags = TEGRA_PLL_USE_LOCK,
1429 .set_defaults = _pllc2_set_defaults, 1432 .set_defaults = _pllc2_set_defaults,
1430 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1433 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1431}; 1434};
@@ -1455,7 +1458,7 @@ static struct tegra_clk_pll_params pll_c3_params = {
1455 .ext_misc_reg[2] = PLLC3_MISC2, 1458 .ext_misc_reg[2] = PLLC3_MISC2,
1456 .ext_misc_reg[3] = PLLC3_MISC3, 1459 .ext_misc_reg[3] = PLLC3_MISC3,
1457 .freq_table = pll_cx_freq_table, 1460 .freq_table = pll_cx_freq_table,
1458 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1461 .flags = TEGRA_PLL_USE_LOCK,
1459 .set_defaults = _pllc3_set_defaults, 1462 .set_defaults = _pllc3_set_defaults,
1460 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1463 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1461}; 1464};
@@ -1505,7 +1508,6 @@ static struct tegra_clk_pll_params pll_c4_vco_params = {
1505 .base_reg = PLLC4_BASE, 1508 .base_reg = PLLC4_BASE,
1506 .misc_reg = PLLC4_MISC0, 1509 .misc_reg = PLLC4_MISC0,
1507 .lock_mask = PLL_BASE_LOCK, 1510 .lock_mask = PLL_BASE_LOCK,
1508 .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
1509 .lock_delay = 300, 1511 .lock_delay = 300,
1510 .max_p = PLL_QLIN_PDIV_MAX, 1512 .max_p = PLL_QLIN_PDIV_MAX,
1511 .ext_misc_reg[0] = PLLC4_MISC0, 1513 .ext_misc_reg[0] = PLLC4_MISC0,
@@ -1517,8 +1519,7 @@ static struct tegra_clk_pll_params pll_c4_vco_params = {
1517 .div_nmp = &pllss_nmp, 1519 .div_nmp = &pllss_nmp,
1518 .freq_table = pll_c4_vco_freq_table, 1520 .freq_table = pll_c4_vco_freq_table,
1519 .set_defaults = tegra210_pllc4_set_defaults, 1521 .set_defaults = tegra210_pllc4_set_defaults,
1520 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE | 1522 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
1521 TEGRA_PLL_VCO_OUT,
1522 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1523 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1523}; 1524};
1524 1525
@@ -1559,15 +1560,15 @@ static struct tegra_clk_pll_params pll_m_params = {
1559 .vco_min = 800000000, 1560 .vco_min = 800000000,
1560 .vco_max = 1866000000, 1561 .vco_max = 1866000000,
1561 .base_reg = PLLM_BASE, 1562 .base_reg = PLLM_BASE,
1562 .misc_reg = PLLM_MISC1, 1563 .misc_reg = PLLM_MISC2,
1563 .lock_mask = PLL_BASE_LOCK, 1564 .lock_mask = PLL_BASE_LOCK,
1564 .lock_enable_bit_idx = PLLM_MISC_LOCK_ENABLE, 1565 .lock_enable_bit_idx = PLLM_MISC_LOCK_ENABLE,
1565 .lock_delay = 300, 1566 .lock_delay = 300,
1566 .iddq_reg = PLLM_MISC0, 1567 .iddq_reg = PLLM_MISC2,
1567 .iddq_bit_idx = PLLM_IDDQ_BIT, 1568 .iddq_bit_idx = PLLM_IDDQ_BIT,
1568 .max_p = PLL_QLIN_PDIV_MAX, 1569 .max_p = PLL_QLIN_PDIV_MAX,
1569 .ext_misc_reg[0] = PLLM_MISC0, 1570 .ext_misc_reg[0] = PLLM_MISC2,
1570 .ext_misc_reg[0] = PLLM_MISC1, 1571 .ext_misc_reg[1] = PLLM_MISC1,
1571 .round_p_to_pdiv = pll_qlin_p_to_pdiv, 1572 .round_p_to_pdiv = pll_qlin_p_to_pdiv,
1572 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1573 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1573 .div_nmp = &pllm_nmp, 1574 .div_nmp = &pllm_nmp,
@@ -1586,19 +1587,18 @@ static struct tegra_clk_pll_params pll_mb_params = {
1586 .vco_min = 800000000, 1587 .vco_min = 800000000,
1587 .vco_max = 1866000000, 1588 .vco_max = 1866000000,
1588 .base_reg = PLLMB_BASE, 1589 .base_reg = PLLMB_BASE,
1589 .misc_reg = PLLMB_MISC0, 1590 .misc_reg = PLLMB_MISC1,
1590 .lock_mask = PLL_BASE_LOCK, 1591 .lock_mask = PLL_BASE_LOCK,
1591 .lock_enable_bit_idx = PLLMB_MISC_LOCK_ENABLE,
1592 .lock_delay = 300, 1592 .lock_delay = 300,
1593 .iddq_reg = PLLMB_MISC0, 1593 .iddq_reg = PLLMB_MISC1,
1594 .iddq_bit_idx = PLLMB_IDDQ_BIT, 1594 .iddq_bit_idx = PLLMB_IDDQ_BIT,
1595 .max_p = PLL_QLIN_PDIV_MAX, 1595 .max_p = PLL_QLIN_PDIV_MAX,
1596 .ext_misc_reg[0] = PLLMB_MISC0, 1596 .ext_misc_reg[0] = PLLMB_MISC1,
1597 .round_p_to_pdiv = pll_qlin_p_to_pdiv, 1597 .round_p_to_pdiv = pll_qlin_p_to_pdiv,
1598 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1598 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1599 .div_nmp = &pllm_nmp, 1599 .div_nmp = &pllm_nmp,
1600 .freq_table = pll_m_freq_table, 1600 .freq_table = pll_m_freq_table,
1601 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1601 .flags = TEGRA_PLL_USE_LOCK,
1602 .set_defaults = tegra210_pllmb_set_defaults, 1602 .set_defaults = tegra210_pllmb_set_defaults,
1603 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1603 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1604}; 1604};
@@ -1671,7 +1671,6 @@ static struct tegra_clk_pll_params pll_re_vco_params = {
1671 .base_reg = PLLRE_BASE, 1671 .base_reg = PLLRE_BASE,
1672 .misc_reg = PLLRE_MISC0, 1672 .misc_reg = PLLRE_MISC0,
1673 .lock_mask = PLLRE_MISC_LOCK, 1673 .lock_mask = PLLRE_MISC_LOCK,
1674 .lock_enable_bit_idx = PLLRE_MISC_LOCK_ENABLE,
1675 .lock_delay = 300, 1674 .lock_delay = 300,
1676 .max_p = PLL_QLIN_PDIV_MAX, 1675 .max_p = PLL_QLIN_PDIV_MAX,
1677 .ext_misc_reg[0] = PLLRE_MISC0, 1676 .ext_misc_reg[0] = PLLRE_MISC0,
@@ -1681,8 +1680,7 @@ static struct tegra_clk_pll_params pll_re_vco_params = {
1681 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1680 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1682 .div_nmp = &pllre_nmp, 1681 .div_nmp = &pllre_nmp,
1683 .freq_table = pll_re_vco_freq_table, 1682 .freq_table = pll_re_vco_freq_table,
1684 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | 1683 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | TEGRA_PLL_VCO_OUT,
1685 TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
1686 .set_defaults = tegra210_pllre_set_defaults, 1684 .set_defaults = tegra210_pllre_set_defaults,
1687 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1685 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1688}; 1686};
@@ -1712,7 +1710,6 @@ static struct tegra_clk_pll_params pll_p_params = {
1712 .base_reg = PLLP_BASE, 1710 .base_reg = PLLP_BASE,
1713 .misc_reg = PLLP_MISC0, 1711 .misc_reg = PLLP_MISC0,
1714 .lock_mask = PLL_BASE_LOCK, 1712 .lock_mask = PLL_BASE_LOCK,
1715 .lock_enable_bit_idx = PLLP_MISC_LOCK_ENABLE,
1716 .lock_delay = 300, 1713 .lock_delay = 300,
1717 .iddq_reg = PLLP_MISC0, 1714 .iddq_reg = PLLP_MISC0,
1718 .iddq_bit_idx = PLLXP_IDDQ_BIT, 1715 .iddq_bit_idx = PLLXP_IDDQ_BIT,
@@ -1721,8 +1718,7 @@ static struct tegra_clk_pll_params pll_p_params = {
1721 .div_nmp = &pllp_nmp, 1718 .div_nmp = &pllp_nmp,
1722 .freq_table = pll_p_freq_table, 1719 .freq_table = pll_p_freq_table,
1723 .fixed_rate = 408000000, 1720 .fixed_rate = 408000000,
1724 .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | 1721 .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
1725 TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
1726 .set_defaults = tegra210_pllp_set_defaults, 1722 .set_defaults = tegra210_pllp_set_defaults,
1727 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1723 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1728}; 1724};
@@ -1750,7 +1746,7 @@ static struct tegra_clk_pll_params pll_a1_params = {
1750 .ext_misc_reg[2] = PLLA1_MISC2, 1746 .ext_misc_reg[2] = PLLA1_MISC2,
1751 .ext_misc_reg[3] = PLLA1_MISC3, 1747 .ext_misc_reg[3] = PLLA1_MISC3,
1752 .freq_table = pll_cx_freq_table, 1748 .freq_table = pll_cx_freq_table,
1753 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1749 .flags = TEGRA_PLL_USE_LOCK,
1754 .set_defaults = _plla1_set_defaults, 1750 .set_defaults = _plla1_set_defaults,
1755 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1751 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1756}; 1752};
@@ -1787,7 +1783,6 @@ static struct tegra_clk_pll_params pll_a_params = {
1787 .base_reg = PLLA_BASE, 1783 .base_reg = PLLA_BASE,
1788 .misc_reg = PLLA_MISC0, 1784 .misc_reg = PLLA_MISC0,
1789 .lock_mask = PLL_BASE_LOCK, 1785 .lock_mask = PLL_BASE_LOCK,
1790 .lock_enable_bit_idx = PLLA_MISC_LOCK_ENABLE,
1791 .lock_delay = 300, 1786 .lock_delay = 300,
1792 .round_p_to_pdiv = pll_qlin_p_to_pdiv, 1787 .round_p_to_pdiv = pll_qlin_p_to_pdiv,
1793 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1788 .pdiv_tohw = pll_qlin_pdiv_to_hw,
@@ -1802,8 +1797,7 @@ static struct tegra_clk_pll_params pll_a_params = {
1802 .ext_misc_reg[1] = PLLA_MISC1, 1797 .ext_misc_reg[1] = PLLA_MISC1,
1803 .ext_misc_reg[2] = PLLA_MISC2, 1798 .ext_misc_reg[2] = PLLA_MISC2,
1804 .freq_table = pll_a_freq_table, 1799 .freq_table = pll_a_freq_table,
1805 .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW | 1800 .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW,
1806 TEGRA_PLL_HAS_LOCK_ENABLE,
1807 .set_defaults = tegra210_plla_set_defaults, 1801 .set_defaults = tegra210_plla_set_defaults,
1808 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1802 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1809 .set_gain = tegra210_clk_pll_set_gain, 1803 .set_gain = tegra210_clk_pll_set_gain,
@@ -1836,7 +1830,6 @@ static struct tegra_clk_pll_params pll_d_params = {
1836 .base_reg = PLLD_BASE, 1830 .base_reg = PLLD_BASE,
1837 .misc_reg = PLLD_MISC0, 1831 .misc_reg = PLLD_MISC0,
1838 .lock_mask = PLL_BASE_LOCK, 1832 .lock_mask = PLL_BASE_LOCK,
1839 .lock_enable_bit_idx = PLLD_MISC_LOCK_ENABLE,
1840 .lock_delay = 1000, 1833 .lock_delay = 1000,
1841 .iddq_reg = PLLD_MISC0, 1834 .iddq_reg = PLLD_MISC0,
1842 .iddq_bit_idx = PLLD_IDDQ_BIT, 1835 .iddq_bit_idx = PLLD_IDDQ_BIT,
@@ -1850,7 +1843,7 @@ static struct tegra_clk_pll_params pll_d_params = {
1850 .ext_misc_reg[0] = PLLD_MISC0, 1843 .ext_misc_reg[0] = PLLD_MISC0,
1851 .ext_misc_reg[1] = PLLD_MISC1, 1844 .ext_misc_reg[1] = PLLD_MISC1,
1852 .freq_table = pll_d_freq_table, 1845 .freq_table = pll_d_freq_table,
1853 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1846 .flags = TEGRA_PLL_USE_LOCK,
1854 .mdiv_default = 1, 1847 .mdiv_default = 1,
1855 .set_defaults = tegra210_plld_set_defaults, 1848 .set_defaults = tegra210_plld_set_defaults,
1856 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1849 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
@@ -1876,7 +1869,6 @@ static struct tegra_clk_pll_params pll_d2_params = {
1876 .base_reg = PLLD2_BASE, 1869 .base_reg = PLLD2_BASE,
1877 .misc_reg = PLLD2_MISC0, 1870 .misc_reg = PLLD2_MISC0,
1878 .lock_mask = PLL_BASE_LOCK, 1871 .lock_mask = PLL_BASE_LOCK,
1879 .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
1880 .lock_delay = 300, 1872 .lock_delay = 300,
1881 .iddq_reg = PLLD2_BASE, 1873 .iddq_reg = PLLD2_BASE,
1882 .iddq_bit_idx = PLLSS_IDDQ_BIT, 1874 .iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1897,7 +1889,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
1897 .mdiv_default = 1, 1889 .mdiv_default = 1,
1898 .freq_table = tegra210_pll_d2_freq_table, 1890 .freq_table = tegra210_pll_d2_freq_table,
1899 .set_defaults = tegra210_plld2_set_defaults, 1891 .set_defaults = tegra210_plld2_set_defaults,
1900 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1892 .flags = TEGRA_PLL_USE_LOCK,
1901 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1893 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1902 .set_gain = tegra210_clk_pll_set_gain, 1894 .set_gain = tegra210_clk_pll_set_gain,
1903 .adjust_vco = tegra210_clk_adjust_vco_min, 1895 .adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1920,7 +1912,6 @@ static struct tegra_clk_pll_params pll_dp_params = {
1920 .base_reg = PLLDP_BASE, 1912 .base_reg = PLLDP_BASE,
1921 .misc_reg = PLLDP_MISC, 1913 .misc_reg = PLLDP_MISC,
1922 .lock_mask = PLL_BASE_LOCK, 1914 .lock_mask = PLL_BASE_LOCK,
1923 .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
1924 .lock_delay = 300, 1915 .lock_delay = 300,
1925 .iddq_reg = PLLDP_BASE, 1916 .iddq_reg = PLLDP_BASE,
1926 .iddq_bit_idx = PLLSS_IDDQ_BIT, 1917 .iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1941,7 +1932,7 @@ static struct tegra_clk_pll_params pll_dp_params = {
1941 .mdiv_default = 1, 1932 .mdiv_default = 1,
1942 .freq_table = pll_dp_freq_table, 1933 .freq_table = pll_dp_freq_table,
1943 .set_defaults = tegra210_plldp_set_defaults, 1934 .set_defaults = tegra210_plldp_set_defaults,
1944 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1935 .flags = TEGRA_PLL_USE_LOCK,
1945 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1936 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1946 .set_gain = tegra210_clk_pll_set_gain, 1937 .set_gain = tegra210_clk_pll_set_gain,
1947 .adjust_vco = tegra210_clk_adjust_vco_min, 1938 .adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1973,7 +1964,6 @@ static struct tegra_clk_pll_params pll_u_vco_params = {
1973 .base_reg = PLLU_BASE, 1964 .base_reg = PLLU_BASE,
1974 .misc_reg = PLLU_MISC0, 1965 .misc_reg = PLLU_MISC0,
1975 .lock_mask = PLL_BASE_LOCK, 1966 .lock_mask = PLL_BASE_LOCK,
1976 .lock_enable_bit_idx = PLLU_MISC_LOCK_ENABLE,
1977 .lock_delay = 1000, 1967 .lock_delay = 1000,
1978 .iddq_reg = PLLU_MISC0, 1968 .iddq_reg = PLLU_MISC0,
1979 .iddq_bit_idx = PLLU_IDDQ_BIT, 1969 .iddq_bit_idx = PLLU_IDDQ_BIT,
@@ -1983,8 +1973,7 @@ static struct tegra_clk_pll_params pll_u_vco_params = {
1983 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1973 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1984 .div_nmp = &pllu_nmp, 1974 .div_nmp = &pllu_nmp,
1985 .freq_table = pll_u_freq_table, 1975 .freq_table = pll_u_freq_table,
1986 .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE | 1976 .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
1987 TEGRA_PLL_VCO_OUT,
1988 .set_defaults = tegra210_pllu_set_defaults, 1977 .set_defaults = tegra210_pllu_set_defaults,
1989 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1978 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1990}; 1979};
@@ -2218,6 +2207,7 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
2218 [tegra_clk_pll_c4_out1] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT1, .present = true }, 2207 [tegra_clk_pll_c4_out1] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT1, .present = true },
2219 [tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true }, 2208 [tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true },
2220 [tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true }, 2209 [tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true },
2210 [tegra_clk_apb2ape] = { .dt_id = TEGRA210_CLK_APB2APE, .present = true },
2221}; 2211};
2222 2212
2223static struct tegra_devclk devclks[] __initdata = { 2213static struct tegra_devclk devclks[] __initdata = {
@@ -2519,7 +2509,7 @@ static void __init tegra210_pll_init(void __iomem *clk_base,
2519 2509
2520 /* PLLU_VCO */ 2510 /* PLLU_VCO */
2521 val = readl(clk_base + pll_u_vco_params.base_reg); 2511 val = readl(clk_base + pll_u_vco_params.base_reg);
2522 val &= ~BIT(24); /* disable PLLU_OVERRIDE */ 2512 val &= ~PLLU_BASE_OVERRIDE; /* disable PLLU_OVERRIDE */
2523 writel(val, clk_base + pll_u_vco_params.base_reg); 2513 writel(val, clk_base + pll_u_vco_params.base_reg);
2524 2514
2525 clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc, 2515 clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc,
@@ -2738,8 +2728,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
2738 { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 }, 2728 { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
2739 { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 }, 2729 { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
2740 { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 }, 2730 { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 },
2741 { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
2742 { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
2743 { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 }, 2731 { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 },
2744 { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 }, 2732 { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 },
2745 { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 }, 2733 { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 },
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index e62f8cb2c9b5..3bca438ecd19 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -78,6 +78,9 @@ static int vco_set(struct clk_icst *icst, struct icst_vco vco)
78 ret = regmap_read(icst->map, icst->vcoreg_off, &val); 78 ret = regmap_read(icst->map, icst->vcoreg_off, &val);
79 if (ret) 79 if (ret)
80 return ret; 80 return ret;
81
82 /* Mask the 18 bits used by the VCO */
83 val &= ~0x7ffff;
81 val |= vco.v | (vco.r << 9) | (vco.s << 16); 84 val |= vco.v | (vco.r << 9) | (vco.s << 16);
82 85
83 /* This magic unlocks the VCO so it can be controlled */ 86 /* This magic unlocks the VCO so it can be controlled */
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e893318560db..5ad0ec1f0e29 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -156,7 +156,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
156 156
157 /* Enable interrupts */ 157 /* Enable interrupts */
158 channel_set_bit(dw, MASK.XFER, dwc->mask); 158 channel_set_bit(dw, MASK.XFER, dwc->mask);
159 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
160 channel_set_bit(dw, MASK.ERROR, dwc->mask); 159 channel_set_bit(dw, MASK.ERROR, dwc->mask);
161 160
162 dwc->initialized = true; 161 dwc->initialized = true;
@@ -588,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
588 587
589 spin_unlock_irqrestore(&dwc->lock, flags); 588 spin_unlock_irqrestore(&dwc->lock, flags);
590 } 589 }
590
591 /* Re-enable interrupts */
592 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
591} 593}
592 594
593/* ------------------------------------------------------------------------- */ 595/* ------------------------------------------------------------------------- */
@@ -618,11 +620,8 @@ static void dw_dma_tasklet(unsigned long data)
618 dwc_scan_descriptors(dw, dwc); 620 dwc_scan_descriptors(dw, dwc);
619 } 621 }
620 622
621 /* 623 /* Re-enable interrupts */
622 * Re-enable interrupts.
623 */
624 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); 624 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
625 channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
626 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); 625 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
627} 626}
628 627
@@ -1261,6 +1260,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
1261int dw_dma_cyclic_start(struct dma_chan *chan) 1260int dw_dma_cyclic_start(struct dma_chan *chan)
1262{ 1261{
1263 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1262 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1263 struct dw_dma *dw = to_dw_dma(chan->device);
1264 unsigned long flags; 1264 unsigned long flags;
1265 1265
1266 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { 1266 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
@@ -1269,7 +1269,12 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
1269 } 1269 }
1270 1270
1271 spin_lock_irqsave(&dwc->lock, flags); 1271 spin_lock_irqsave(&dwc->lock, flags);
1272
1273 /* Enable interrupts to perform cyclic transfer */
1274 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
1275
1272 dwc_dostart(dwc, dwc->cdesc->desc[0]); 1276 dwc_dostart(dwc, dwc->cdesc->desc[0]);
1277
1273 spin_unlock_irqrestore(&dwc->lock, flags); 1278 spin_unlock_irqrestore(&dwc->lock, flags);
1274 1279
1275 return 0; 1280 return 0;
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 4c30fdd092b3..358f9689a3f5 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -108,6 +108,10 @@ static const struct pci_device_id dw_pci_id_table[] = {
108 108
109 /* Haswell */ 109 /* Haswell */
110 { PCI_VDEVICE(INTEL, 0x9c60) }, 110 { PCI_VDEVICE(INTEL, 0x9c60) },
111
112 /* Broadwell */
113 { PCI_VDEVICE(INTEL, 0x9ce0) },
114
111 { } 115 { }
112}; 116};
113MODULE_DEVICE_TABLE(pci, dw_pci_id_table); 117MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index d92d65549406..e3d7fcb69b4c 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -113,6 +113,9 @@
113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ 113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
114#define CHMAP_EXIST BIT(24) 114#define CHMAP_EXIST BIT(24)
115 115
116/* CCSTAT register */
117#define EDMA_CCSTAT_ACTV BIT(4)
118
116/* 119/*
117 * Max of 20 segments per channel to conserve PaRAM slots 120 * Max of 20 segments per channel to conserve PaRAM slots
118 * Also note that MAX_NR_SG should be atleast the no.of periods 121 * Also note that MAX_NR_SG should be atleast the no.of periods
@@ -1680,9 +1683,20 @@ static void edma_issue_pending(struct dma_chan *chan)
1680 spin_unlock_irqrestore(&echan->vchan.lock, flags); 1683 spin_unlock_irqrestore(&echan->vchan.lock, flags);
1681} 1684}
1682 1685
1686/*
1687 * This limit exists to avoid a possible infinite loop when waiting for proof
1688 * that a particular transfer is completed. This limit can be hit if there
1689 * are large bursts to/from slow devices or the CPU is never able to catch
1690 * the DMA hardware idle. On an AM335x transfering 48 bytes from the UART
1691 * RX-FIFO, as many as 55 loops have been seen.
1692 */
1693#define EDMA_MAX_TR_WAIT_LOOPS 1000
1694
1683static u32 edma_residue(struct edma_desc *edesc) 1695static u32 edma_residue(struct edma_desc *edesc)
1684{ 1696{
1685 bool dst = edesc->direction == DMA_DEV_TO_MEM; 1697 bool dst = edesc->direction == DMA_DEV_TO_MEM;
1698 int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
1699 struct edma_chan *echan = edesc->echan;
1686 struct edma_pset *pset = edesc->pset; 1700 struct edma_pset *pset = edesc->pset;
1687 dma_addr_t done, pos; 1701 dma_addr_t done, pos;
1688 int i; 1702 int i;
@@ -1691,7 +1705,32 @@ static u32 edma_residue(struct edma_desc *edesc)
1691 * We always read the dst/src position from the first RamPar 1705 * We always read the dst/src position from the first RamPar
1692 * pset. That's the one which is active now. 1706 * pset. That's the one which is active now.
1693 */ 1707 */
1694 pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst); 1708 pos = edma_get_position(echan->ecc, echan->slot[0], dst);
1709
1710 /*
1711 * "pos" may represent a transfer request that is still being
1712 * processed by the EDMACC or EDMATC. We will busy wait until
1713 * any one of the situations occurs:
1714 * 1. the DMA hardware is idle
1715 * 2. a new transfer request is setup
1716 * 3. we hit the loop limit
1717 */
1718 while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
1719 /* check if a new transfer request is setup */
1720 if (edma_get_position(echan->ecc,
1721 echan->slot[0], dst) != pos) {
1722 break;
1723 }
1724
1725 if (!--loop_count) {
1726 dev_dbg_ratelimited(echan->vchan.chan.device->dev,
1727 "%s: timeout waiting for PaRAM update\n",
1728 __func__);
1729 break;
1730 }
1731
1732 cpu_relax();
1733 }
1695 1734
1696 /* 1735 /*
1697 * Cyclic is simple. Just subtract pset[0].addr from pos. 1736 * Cyclic is simple. Just subtract pset[0].addr from pos.
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1d5df2ef148b..21539d5c54c3 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -861,32 +861,42 @@ void ioat_timer_event(unsigned long data)
861 return; 861 return;
862 } 862 }
863 863
864 spin_lock_bh(&ioat_chan->cleanup_lock);
865
866 /* handle the no-actives case */
867 if (!ioat_ring_active(ioat_chan)) {
868 spin_lock_bh(&ioat_chan->prep_lock);
869 check_active(ioat_chan);
870 spin_unlock_bh(&ioat_chan->prep_lock);
871 spin_unlock_bh(&ioat_chan->cleanup_lock);
872 return;
873 }
874
864 /* if we haven't made progress and we have already 875 /* if we haven't made progress and we have already
865 * acknowledged a pending completion once, then be more 876 * acknowledged a pending completion once, then be more
866 * forceful with a restart 877 * forceful with a restart
867 */ 878 */
868 spin_lock_bh(&ioat_chan->cleanup_lock);
869 if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) 879 if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
870 __cleanup(ioat_chan, phys_complete); 880 __cleanup(ioat_chan, phys_complete);
871 else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) { 881 else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
882 u32 chanerr;
883
884 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
885 dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
886 dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
887 status, chanerr);
888 dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
889 ioat_ring_active(ioat_chan));
890
872 spin_lock_bh(&ioat_chan->prep_lock); 891 spin_lock_bh(&ioat_chan->prep_lock);
873 ioat_restart_channel(ioat_chan); 892 ioat_restart_channel(ioat_chan);
874 spin_unlock_bh(&ioat_chan->prep_lock); 893 spin_unlock_bh(&ioat_chan->prep_lock);
875 spin_unlock_bh(&ioat_chan->cleanup_lock); 894 spin_unlock_bh(&ioat_chan->cleanup_lock);
876 return; 895 return;
877 } else { 896 } else
878 set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); 897 set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
879 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
880 }
881
882 898
883 if (ioat_ring_active(ioat_chan)) 899 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
884 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
885 else {
886 spin_lock_bh(&ioat_chan->prep_lock);
887 check_active(ioat_chan);
888 spin_unlock_bh(&ioat_chan->prep_lock);
889 }
890 spin_unlock_bh(&ioat_chan->cleanup_lock); 900 spin_unlock_bh(&ioat_chan->cleanup_lock);
891} 901}
892 902
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 756eca8c4cf8..10e6774ab2a2 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -221,7 +221,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
221 } 221 }
222 222
223 if ((attributes & ~EFI_VARIABLE_MASK) != 0 || 223 if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
224 efivar_validate(name, data, size) == false) { 224 efivar_validate(vendor, name, data, size) == false) {
225 printk(KERN_ERR "efivars: Malformed variable content\n"); 225 printk(KERN_ERR "efivars: Malformed variable content\n");
226 return -EINVAL; 226 return -EINVAL;
227 } 227 }
@@ -447,7 +447,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
447 } 447 }
448 448
449 if ((attributes & ~EFI_VARIABLE_MASK) != 0 || 449 if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
450 efivar_validate(name, data, size) == false) { 450 efivar_validate(new_var->VendorGuid, name, data,
451 size) == false) {
451 printk(KERN_ERR "efivars: Malformed variable content\n"); 452 printk(KERN_ERR "efivars: Malformed variable content\n");
452 return -EINVAL; 453 return -EINVAL;
453 } 454 }
@@ -540,38 +541,30 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
540static int 541static int
541efivar_create_sysfs_entry(struct efivar_entry *new_var) 542efivar_create_sysfs_entry(struct efivar_entry *new_var)
542{ 543{
543 int i, short_name_size; 544 int short_name_size;
544 char *short_name; 545 char *short_name;
545 unsigned long variable_name_size; 546 unsigned long utf8_name_size;
546 efi_char16_t *variable_name; 547 efi_char16_t *variable_name = new_var->var.VariableName;
547 int ret; 548 int ret;
548 549
549 variable_name = new_var->var.VariableName;
550 variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
551
552 /* 550 /*
553 * Length of the variable bytes in ASCII, plus the '-' separator, 551 * Length of the variable bytes in UTF8, plus the '-' separator,
554 * plus the GUID, plus trailing NUL 552 * plus the GUID, plus trailing NUL
555 */ 553 */
556 short_name_size = variable_name_size / sizeof(efi_char16_t) 554 utf8_name_size = ucs2_utf8size(variable_name);
557 + 1 + EFI_VARIABLE_GUID_LEN + 1; 555 short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
558
559 short_name = kzalloc(short_name_size, GFP_KERNEL);
560 556
557 short_name = kmalloc(short_name_size, GFP_KERNEL);
561 if (!short_name) 558 if (!short_name)
562 return -ENOMEM; 559 return -ENOMEM;
563 560
564 /* Convert Unicode to normal chars (assume top bits are 0), 561 ucs2_as_utf8(short_name, variable_name, short_name_size);
565 ala UTF-8 */ 562
566 for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
567 short_name[i] = variable_name[i] & 0xFF;
568 }
569 /* This is ugly, but necessary to separate one vendor's 563 /* This is ugly, but necessary to separate one vendor's
570 private variables from another's. */ 564 private variables from another's. */
571 565 short_name[utf8_name_size] = '-';
572 *(short_name + strlen(short_name)) = '-';
573 efi_guid_to_str(&new_var->var.VendorGuid, 566 efi_guid_to_str(&new_var->var.VendorGuid,
574 short_name + strlen(short_name)); 567 short_name + utf8_name_size + 1);
575 568
576 new_var->kobj.kset = efivars_kset; 569 new_var->kobj.kset = efivars_kset;
577 570
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 70a0fb10517f..7f2ea21c730d 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -165,67 +165,133 @@ validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
165} 165}
166 166
167struct variable_validate { 167struct variable_validate {
168 efi_guid_t vendor;
168 char *name; 169 char *name;
169 bool (*validate)(efi_char16_t *var_name, int match, u8 *data, 170 bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
170 unsigned long len); 171 unsigned long len);
171}; 172};
172 173
174/*
175 * This is the list of variables we need to validate, as well as the
176 * whitelist for what we think is safe not to default to immutable.
177 *
178 * If it has a validate() method that's not NULL, it'll go into the
179 * validation routine. If not, it is assumed valid, but still used for
180 * whitelisting.
181 *
182 * Note that it's sorted by {vendor,name}, but globbed names must come after
183 * any other name with the same prefix.
184 */
173static const struct variable_validate variable_validate[] = { 185static const struct variable_validate variable_validate[] = {
174 { "BootNext", validate_uint16 }, 186 { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
175 { "BootOrder", validate_boot_order }, 187 { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
176 { "DriverOrder", validate_boot_order }, 188 { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
177 { "Boot*", validate_load_option }, 189 { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
178 { "Driver*", validate_load_option }, 190 { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
179 { "ConIn", validate_device_path }, 191 { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
180 { "ConInDev", validate_device_path }, 192 { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
181 { "ConOut", validate_device_path }, 193 { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
182 { "ConOutDev", validate_device_path }, 194 { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
183 { "ErrOut", validate_device_path }, 195 { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
184 { "ErrOutDev", validate_device_path }, 196 { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
185 { "Timeout", validate_uint16 }, 197 { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
186 { "Lang", validate_ascii_string }, 198 { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
187 { "PlatformLang", validate_ascii_string }, 199 { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
188 { "", NULL }, 200 { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
201 { LINUX_EFI_CRASH_GUID, "*", NULL },
202 { NULL_GUID, "", NULL },
189}; 203};
190 204
205static bool
206variable_matches(const char *var_name, size_t len, const char *match_name,
207 int *match)
208{
209 for (*match = 0; ; (*match)++) {
210 char c = match_name[*match];
211 char u = var_name[*match];
212
213 /* Wildcard in the matching name means we've matched */
214 if (c == '*')
215 return true;
216
217 /* Case sensitive match */
218 if (!c && *match == len)
219 return true;
220
221 if (c != u)
222 return false;
223
224 if (!c)
225 return true;
226 }
227 return true;
228}
229
191bool 230bool
192efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len) 231efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
232 unsigned long data_size)
193{ 233{
194 int i; 234 int i;
195 u16 *unicode_name = var_name; 235 unsigned long utf8_size;
236 u8 *utf8_name;
196 237
197 for (i = 0; variable_validate[i].validate != NULL; i++) { 238 utf8_size = ucs2_utf8size(var_name);
198 const char *name = variable_validate[i].name; 239 utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
199 int match; 240 if (!utf8_name)
241 return false;
200 242
201 for (match = 0; ; match++) { 243 ucs2_as_utf8(utf8_name, var_name, utf8_size);
202 char c = name[match]; 244 utf8_name[utf8_size] = '\0';
203 u16 u = unicode_name[match];
204 245
205 /* All special variables are plain ascii */ 246 for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
206 if (u > 127) 247 const char *name = variable_validate[i].name;
207 return true; 248 int match = 0;
208 249
209 /* Wildcard in the matching name means we've matched */ 250 if (efi_guidcmp(vendor, variable_validate[i].vendor))
210 if (c == '*') 251 continue;
211 return variable_validate[i].validate(var_name,
212 match, data, len);
213 252
214 /* Case sensitive match */ 253 if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
215 if (c != u) 254 if (variable_validate[i].validate == NULL)
216 break; 255 break;
217 256 kfree(utf8_name);
218 /* Reached the end of the string while matching */ 257 return variable_validate[i].validate(var_name, match,
219 if (!c) 258 data, data_size);
220 return variable_validate[i].validate(var_name,
221 match, data, len);
222 } 259 }
223 } 260 }
224 261 kfree(utf8_name);
225 return true; 262 return true;
226} 263}
227EXPORT_SYMBOL_GPL(efivar_validate); 264EXPORT_SYMBOL_GPL(efivar_validate);
228 265
266bool
267efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
268 size_t len)
269{
270 int i;
271 bool found = false;
272 int match = 0;
273
274 /*
275 * Check if our variable is in the validated variables list
276 */
277 for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
278 if (efi_guidcmp(variable_validate[i].vendor, vendor))
279 continue;
280
281 if (variable_matches(var_name, len,
282 variable_validate[i].name, &match)) {
283 found = true;
284 break;
285 }
286 }
287
288 /*
289 * If it's in our list, it is removable.
290 */
291 return found;
292}
293EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
294
229static efi_status_t 295static efi_status_t
230check_var_size(u32 attributes, unsigned long size) 296check_var_size(u32 attributes, unsigned long size)
231{ 297{
@@ -852,7 +918,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
852 918
853 *set = false; 919 *set = false;
854 920
855 if (efivar_validate(name, data, *size) == false) 921 if (efivar_validate(*vendor, name, data, *size) == false)
856 return -EINVAL; 922 return -EINVAL;
857 923
858 /* 924 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6442a06d6fdc..1cbb16e15307 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -712,7 +712,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
712 0, PAGE_SIZE, 712 0, PAGE_SIZE,
713 PCI_DMA_BIDIRECTIONAL); 713 PCI_DMA_BIDIRECTIONAL);
714 if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) { 714 if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
715 while (--i) { 715 while (i--) {
716 pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i], 716 pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
717 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 717 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
718 gtt->ttm.dma_address[i] = 0; 718 gtt->ttm.dma_address[i] = 0;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3f74193885f1..9a7b44616b55 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -65,8 +65,6 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
65 */ 65 */
66 state->allow_modeset = true; 66 state->allow_modeset = true;
67 67
68 state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
69
70 state->crtcs = kcalloc(dev->mode_config.num_crtc, 68 state->crtcs = kcalloc(dev->mode_config.num_crtc,
71 sizeof(*state->crtcs), GFP_KERNEL); 69 sizeof(*state->crtcs), GFP_KERNEL);
72 if (!state->crtcs) 70 if (!state->crtcs)
@@ -83,16 +81,6 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
83 sizeof(*state->plane_states), GFP_KERNEL); 81 sizeof(*state->plane_states), GFP_KERNEL);
84 if (!state->plane_states) 82 if (!state->plane_states)
85 goto fail; 83 goto fail;
86 state->connectors = kcalloc(state->num_connector,
87 sizeof(*state->connectors),
88 GFP_KERNEL);
89 if (!state->connectors)
90 goto fail;
91 state->connector_states = kcalloc(state->num_connector,
92 sizeof(*state->connector_states),
93 GFP_KERNEL);
94 if (!state->connector_states)
95 goto fail;
96 84
97 state->dev = dev; 85 state->dev = dev;
98 86
@@ -823,19 +811,27 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
823 811
824 index = drm_connector_index(connector); 812 index = drm_connector_index(connector);
825 813
826 /*
827 * Construction of atomic state updates can race with a connector
828 * hot-add which might overflow. In this case flip the table and just
829 * restart the entire ioctl - no one is fast enough to livelock a cpu
830 * with physical hotplug events anyway.
831 *
832 * Note that we only grab the indexes once we have the right lock to
833 * prevent hotplug/unplugging of connectors. So removal is no problem,
834 * at most the array is a bit too large.
835 */
836 if (index >= state->num_connector) { 814 if (index >= state->num_connector) {
837 DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n"); 815 struct drm_connector **c;
838 return ERR_PTR(-EAGAIN); 816 struct drm_connector_state **cs;
817 int alloc = max(index + 1, config->num_connector);
818
819 c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
820 if (!c)
821 return ERR_PTR(-ENOMEM);
822
823 state->connectors = c;
824 memset(&state->connectors[state->num_connector], 0,
825 sizeof(*state->connectors) * (alloc - state->num_connector));
826
827 cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL);
828 if (!cs)
829 return ERR_PTR(-ENOMEM);
830
831 state->connector_states = cs;
832 memset(&state->connector_states[state->num_connector], 0,
833 sizeof(*state->connector_states) * (alloc - state->num_connector));
834 state->num_connector = alloc;
839 } 835 }
840 836
841 if (state->connector_states[index]) 837 if (state->connector_states[index])
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 7c523060a076..4f2d3e161593 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1493,7 +1493,7 @@ void drm_atomic_helper_swap_state(struct drm_device *dev,
1493{ 1493{
1494 int i; 1494 int i;
1495 1495
1496 for (i = 0; i < dev->mode_config.num_connector; i++) { 1496 for (i = 0; i < state->num_connector; i++) {
1497 struct drm_connector *connector = state->connectors[i]; 1497 struct drm_connector *connector = state->connectors[i];
1498 1498
1499 if (!connector) 1499 if (!connector)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d40bab29747e..f6191215b2cb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -918,12 +918,19 @@ int drm_connector_init(struct drm_device *dev,
918 connector->base.properties = &connector->properties; 918 connector->base.properties = &connector->properties;
919 connector->dev = dev; 919 connector->dev = dev;
920 connector->funcs = funcs; 920 connector->funcs = funcs;
921
922 connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
923 if (connector->connector_id < 0) {
924 ret = connector->connector_id;
925 goto out_put;
926 }
927
921 connector->connector_type = connector_type; 928 connector->connector_type = connector_type;
922 connector->connector_type_id = 929 connector->connector_type_id =
923 ida_simple_get(connector_ida, 1, 0, GFP_KERNEL); 930 ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
924 if (connector->connector_type_id < 0) { 931 if (connector->connector_type_id < 0) {
925 ret = connector->connector_type_id; 932 ret = connector->connector_type_id;
926 goto out_put; 933 goto out_put_id;
927 } 934 }
928 connector->name = 935 connector->name =
929 kasprintf(GFP_KERNEL, "%s-%d", 936 kasprintf(GFP_KERNEL, "%s-%d",
@@ -931,7 +938,7 @@ int drm_connector_init(struct drm_device *dev,
931 connector->connector_type_id); 938 connector->connector_type_id);
932 if (!connector->name) { 939 if (!connector->name) {
933 ret = -ENOMEM; 940 ret = -ENOMEM;
934 goto out_put; 941 goto out_put_type_id;
935 } 942 }
936 943
937 INIT_LIST_HEAD(&connector->probed_modes); 944 INIT_LIST_HEAD(&connector->probed_modes);
@@ -959,7 +966,12 @@ int drm_connector_init(struct drm_device *dev,
959 } 966 }
960 967
961 connector->debugfs_entry = NULL; 968 connector->debugfs_entry = NULL;
962 969out_put_type_id:
970 if (ret)
971 ida_remove(connector_ida, connector->connector_type_id);
972out_put_id:
973 if (ret)
974 ida_remove(&config->connector_ida, connector->connector_id);
963out_put: 975out_put:
964 if (ret) 976 if (ret)
965 drm_mode_object_put(dev, &connector->base); 977 drm_mode_object_put(dev, &connector->base);
@@ -996,6 +1008,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
996 ida_remove(&drm_connector_enum_list[connector->connector_type].ida, 1008 ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
997 connector->connector_type_id); 1009 connector->connector_type_id);
998 1010
1011 ida_remove(&dev->mode_config.connector_ida,
1012 connector->connector_id);
1013
999 kfree(connector->display_info.bus_formats); 1014 kfree(connector->display_info.bus_formats);
1000 drm_mode_object_put(dev, &connector->base); 1015 drm_mode_object_put(dev, &connector->base);
1001 kfree(connector->name); 1016 kfree(connector->name);
@@ -1013,32 +1028,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
1013EXPORT_SYMBOL(drm_connector_cleanup); 1028EXPORT_SYMBOL(drm_connector_cleanup);
1014 1029
1015/** 1030/**
1016 * drm_connector_index - find the index of a registered connector
1017 * @connector: connector to find index for
1018 *
1019 * Given a registered connector, return the index of that connector within a DRM
1020 * device's list of connectors.
1021 */
1022unsigned int drm_connector_index(struct drm_connector *connector)
1023{
1024 unsigned int index = 0;
1025 struct drm_connector *tmp;
1026 struct drm_mode_config *config = &connector->dev->mode_config;
1027
1028 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
1029
1030 drm_for_each_connector(tmp, connector->dev) {
1031 if (tmp == connector)
1032 return index;
1033
1034 index++;
1035 }
1036
1037 BUG();
1038}
1039EXPORT_SYMBOL(drm_connector_index);
1040
1041/**
1042 * drm_connector_register - register a connector 1031 * drm_connector_register - register a connector
1043 * @connector: the connector to register 1032 * @connector: the connector to register
1044 * 1033 *
@@ -5789,6 +5778,7 @@ void drm_mode_config_init(struct drm_device *dev)
5789 INIT_LIST_HEAD(&dev->mode_config.plane_list); 5778 INIT_LIST_HEAD(&dev->mode_config.plane_list);
5790 idr_init(&dev->mode_config.crtc_idr); 5779 idr_init(&dev->mode_config.crtc_idr);
5791 idr_init(&dev->mode_config.tile_idr); 5780 idr_init(&dev->mode_config.tile_idr);
5781 ida_init(&dev->mode_config.connector_ida);
5792 5782
5793 drm_modeset_lock_all(dev); 5783 drm_modeset_lock_all(dev);
5794 drm_mode_create_standard_properties(dev); 5784 drm_mode_create_standard_properties(dev);
@@ -5869,6 +5859,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
5869 crtc->funcs->destroy(crtc); 5859 crtc->funcs->destroy(crtc);
5870 } 5860 }
5871 5861
5862 ida_destroy(&dev->mode_config.connector_ida);
5872 idr_destroy(&dev->mode_config.tile_idr); 5863 idr_destroy(&dev->mode_config.tile_idr);
5873 idr_destroy(&dev->mode_config.crtc_idr); 5864 idr_destroy(&dev->mode_config.crtc_idr);
5874 drm_modeset_lock_fini(&dev->mode_config.connection_mutex); 5865 drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 8ae13de272c4..27fbd79d0daf 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1159,11 +1159,13 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1159 drm_dp_put_port(port); 1159 drm_dp_put_port(port);
1160 goto out; 1160 goto out;
1161 } 1161 }
1162 1162 if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
1163 drm_mode_connector_set_tile_property(port->connector); 1163 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
1164 1164 drm_mode_connector_set_tile_property(port->connector);
1165 }
1165 (*mstb->mgr->cbs->register_connector)(port->connector); 1166 (*mstb->mgr->cbs->register_connector)(port->connector);
1166 } 1167 }
1168
1167out: 1169out:
1168 /* put reference to this port */ 1170 /* put reference to this port */
1169 drm_dp_put_port(port); 1171 drm_dp_put_port(port);
@@ -1188,8 +1190,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
1188 port->ddps = conn_stat->displayport_device_plug_status; 1190 port->ddps = conn_stat->displayport_device_plug_status;
1189 1191
1190 if (old_ddps != port->ddps) { 1192 if (old_ddps != port->ddps) {
1191 dowork = true;
1192 if (port->ddps) { 1193 if (port->ddps) {
1194 dowork = true;
1193 } else { 1195 } else {
1194 port->available_pbn = 0; 1196 port->available_pbn = 0;
1195 } 1197 }
@@ -1294,13 +1296,8 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
1294 if (port->input) 1296 if (port->input)
1295 continue; 1297 continue;
1296 1298
1297 if (!port->ddps) { 1299 if (!port->ddps)
1298 if (port->cached_edid) {
1299 kfree(port->cached_edid);
1300 port->cached_edid = NULL;
1301 }
1302 continue; 1300 continue;
1303 }
1304 1301
1305 if (!port->available_pbn) 1302 if (!port->available_pbn)
1306 drm_dp_send_enum_path_resources(mgr, mstb, port); 1303 drm_dp_send_enum_path_resources(mgr, mstb, port);
@@ -1311,12 +1308,6 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
1311 drm_dp_check_and_send_link_address(mgr, mstb_child); 1308 drm_dp_check_and_send_link_address(mgr, mstb_child);
1312 drm_dp_put_mst_branch_device(mstb_child); 1309 drm_dp_put_mst_branch_device(mstb_child);
1313 } 1310 }
1314 } else if (port->pdt == DP_PEER_DEVICE_SST_SINK ||
1315 port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) {
1316 if (!port->cached_edid) {
1317 port->cached_edid =
1318 drm_get_edid(port->connector, &port->aux.ddc);
1319 }
1320 } 1311 }
1321 } 1312 }
1322} 1313}
@@ -1336,8 +1327,6 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
1336 drm_dp_check_and_send_link_address(mgr, mstb); 1327 drm_dp_check_and_send_link_address(mgr, mstb);
1337 drm_dp_put_mst_branch_device(mstb); 1328 drm_dp_put_mst_branch_device(mstb);
1338 } 1329 }
1339
1340 (*mgr->cbs->hotplug)(mgr);
1341} 1330}
1342 1331
1343static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, 1332static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -1597,6 +1586,7 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1597 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { 1586 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1598 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); 1587 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
1599 } 1588 }
1589 (*mgr->cbs->hotplug)(mgr);
1600 } 1590 }
1601 } else { 1591 } else {
1602 mstb->link_address_sent = false; 1592 mstb->link_address_sent = false;
@@ -2293,6 +2283,8 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2293 drm_dp_update_port(mstb, &msg.u.conn_stat); 2283 drm_dp_update_port(mstb, &msg.u.conn_stat);
2294 2284
2295 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); 2285 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2286 (*mgr->cbs->hotplug)(mgr);
2287
2296 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { 2288 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2297 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); 2289 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2298 if (!mstb) 2290 if (!mstb)
@@ -2379,6 +2371,10 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
2379 2371
2380 case DP_PEER_DEVICE_SST_SINK: 2372 case DP_PEER_DEVICE_SST_SINK:
2381 status = connector_status_connected; 2373 status = connector_status_connected;
2374 /* for logical ports - cache the EDID */
2375 if (port->port_num >= 8 && !port->cached_edid) {
2376 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
2377 }
2382 break; 2378 break;
2383 case DP_PEER_DEVICE_DP_LEGACY_CONV: 2379 case DP_PEER_DEVICE_DP_LEGACY_CONV:
2384 if (port->ldps) 2380 if (port->ldps)
@@ -2433,7 +2429,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
2433 2429
2434 if (port->cached_edid) 2430 if (port->cached_edid)
2435 edid = drm_edid_duplicate(port->cached_edid); 2431 edid = drm_edid_duplicate(port->cached_edid);
2436 2432 else {
2433 edid = drm_get_edid(connector, &port->aux.ddc);
2434 drm_mode_connector_set_tile_property(connector);
2435 }
2437 port->has_audio = drm_detect_monitor_audio(edid); 2436 port->has_audio = drm_detect_monitor_audio(edid);
2438 drm_dp_put_port(port); 2437 drm_dp_put_port(port);
2439 return edid; 2438 return edid;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index d12a4efa651b..1fe14579e8c9 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -224,6 +224,64 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
224 diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0; 224 diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
225 } 225 }
226 226
227 /*
228 * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
229 * interval? If so then vblank irqs keep running and it will likely
230 * happen that the hardware vblank counter is not trustworthy as it
231 * might reset at some point in that interval and vblank timestamps
232 * are not trustworthy either in that interval. Iow. this can result
233 * in a bogus diff >> 1 which must be avoided as it would cause
234 * random large forward jumps of the software vblank counter.
235 */
236 if (diff > 1 && (vblank->inmodeset & 0x2)) {
237 DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u"
238 " due to pre-modeset.\n", pipe, diff);
239 diff = 1;
240 }
241
242 /*
243 * FIMXE: Need to replace this hack with proper seqlocks.
244 *
245 * Restrict the bump of the software vblank counter to a safe maximum
246 * value of +1 whenever there is the possibility that concurrent readers
247 * of vblank timestamps could be active at the moment, as the current
248 * implementation of the timestamp caching and updating is not safe
249 * against concurrent readers for calls to store_vblank() with a bump
250 * of anything but +1. A bump != 1 would very likely return corrupted
251 * timestamps to userspace, because the same slot in the cache could
252 * be concurrently written by store_vblank() and read by one of those
253 * readers without the read-retry logic detecting the collision.
254 *
255 * Concurrent readers can exist when we are called from the
256 * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
257 * irq callers. However, all those calls to us are happening with the
258 * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
259 * can't increase while we are executing. Therefore a zero refcount at
260 * this point is safe for arbitrary counter bumps if we are called
261 * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
262 * we must also accept a refcount of 1, as whenever we are called from
263 * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
264 * we must let that one pass through in order to not lose vblank counts
265 * during vblank irq off - which would completely defeat the whole
266 * point of this routine.
267 *
268 * Whenever we are called from vblank irq, we have to assume concurrent
269 * readers exist or can show up any time during our execution, even if
270 * the refcount is currently zero, as vblank irqs are usually only
271 * enabled due to the presence of readers, and because when we are called
272 * from vblank irq we can't hold the vbl_lock to protect us from sudden
273 * bumps in vblank refcount. Therefore also restrict bumps to +1 when
274 * called from vblank irq.
275 */
276 if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
277 (flags & DRM_CALLED_FROM_VBLIRQ))) {
278 DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
279 "refcount %u, vblirq %u\n", pipe, diff,
280 atomic_read(&vblank->refcount),
281 (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
282 diff = 1;
283 }
284
227 DRM_DEBUG_VBL("updating vblank count on crtc %u:" 285 DRM_DEBUG_VBL("updating vblank count on crtc %u:"
228 " current=%u, diff=%u, hw=%u hw_last=%u\n", 286 " current=%u, diff=%u, hw=%u hw_last=%u\n",
229 pipe, vblank->count, diff, cur_vblank, vblank->last); 287 pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -1316,7 +1374,13 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
1316 spin_lock_irqsave(&dev->event_lock, irqflags); 1374 spin_lock_irqsave(&dev->event_lock, irqflags);
1317 1375
1318 spin_lock(&dev->vbl_lock); 1376 spin_lock(&dev->vbl_lock);
1319 vblank_disable_and_save(dev, pipe); 1377 DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
1378 pipe, vblank->enabled, vblank->inmodeset);
1379
1380 /* Avoid redundant vblank disables without previous drm_vblank_on(). */
1381 if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
1382 vblank_disable_and_save(dev, pipe);
1383
1320 wake_up(&vblank->queue); 1384 wake_up(&vblank->queue);
1321 1385
1322 /* 1386 /*
@@ -1418,6 +1482,9 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
1418 return; 1482 return;
1419 1483
1420 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1484 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1485 DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
1486 pipe, vblank->enabled, vblank->inmodeset);
1487
1421 /* Drop our private "prevent drm_vblank_get" refcount */ 1488 /* Drop our private "prevent drm_vblank_get" refcount */
1422 if (vblank->inmodeset) { 1489 if (vblank->inmodeset) {
1423 atomic_dec(&vblank->refcount); 1490 atomic_dec(&vblank->refcount);
@@ -1430,8 +1497,7 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
1430 * re-enable interrupts if there are users left, or the 1497 * re-enable interrupts if there are users left, or the
1431 * user wishes vblank interrupts to be enabled all the time. 1498 * user wishes vblank interrupts to be enabled all the time.
1432 */ 1499 */
1433 if (atomic_read(&vblank->refcount) != 0 || 1500 if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
1434 (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
1435 WARN_ON(drm_vblank_enable(dev, pipe)); 1501 WARN_ON(drm_vblank_enable(dev, pipe));
1436 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1502 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1437} 1503}
@@ -1526,6 +1592,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
1526 if (vblank->inmodeset) { 1592 if (vblank->inmodeset) {
1527 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1593 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1528 dev->vblank_disable_allowed = true; 1594 dev->vblank_disable_allowed = true;
1595 drm_reset_vblank_timestamp(dev, pipe);
1529 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1596 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1530 1597
1531 if (vblank->inmodeset & 0x2) 1598 if (vblank->inmodeset & 0x2)
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 83efca941388..f17d39279596 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,6 +1,6 @@
1config DRM_EXYNOS 1config DRM_EXYNOS
2 tristate "DRM Support for Samsung SoC EXYNOS Series" 2 tristate "DRM Support for Samsung SoC EXYNOS Series"
3 depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM) 3 depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER 5 select DRM_KMS_FB_HELPER
6 select FB_CFB_FILLRECT 6 select FB_CFB_FILLRECT
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 1bf6a21130c7..162ab93e99cb 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -93,7 +93,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
93 if (test_bit(BIT_SUSPENDED, &ctx->flags)) 93 if (test_bit(BIT_SUSPENDED, &ctx->flags))
94 return -EPERM; 94 return -EPERM;
95 95
96 if (test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) { 96 if (!test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) {
97 val = VIDINTCON0_INTEN; 97 val = VIDINTCON0_INTEN;
98 if (ctx->out_type == IFTYPE_I80) 98 if (ctx->out_type == IFTYPE_I80)
99 val |= VIDINTCON0_FRAMEDONE; 99 val |= VIDINTCON0_FRAMEDONE;
@@ -402,8 +402,6 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
402 decon_enable_vblank(ctx->crtc); 402 decon_enable_vblank(ctx->crtc);
403 403
404 decon_commit(ctx->crtc); 404 decon_commit(ctx->crtc);
405
406 set_bit(BIT_SUSPENDED, &ctx->flags);
407} 405}
408 406
409static void decon_disable(struct exynos_drm_crtc *crtc) 407static void decon_disable(struct exynos_drm_crtc *crtc)
@@ -582,9 +580,9 @@ out:
582static int exynos5433_decon_suspend(struct device *dev) 580static int exynos5433_decon_suspend(struct device *dev)
583{ 581{
584 struct decon_context *ctx = dev_get_drvdata(dev); 582 struct decon_context *ctx = dev_get_drvdata(dev);
585 int i; 583 int i = ARRAY_SIZE(decon_clks_name);
586 584
587 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) 585 while (--i >= 0)
588 clk_disable_unprepare(ctx->clks[i]); 586 clk_disable_unprepare(ctx->clks[i]);
589 587
590 return 0; 588 return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index e977a81af2e6..26e81d191f56 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1782,6 +1782,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
1782 1782
1783 bridge = of_drm_find_bridge(dsi->bridge_node); 1783 bridge = of_drm_find_bridge(dsi->bridge_node);
1784 if (bridge) { 1784 if (bridge) {
1785 encoder->bridge = bridge;
1785 drm_bridge_attach(drm_dev, bridge); 1786 drm_bridge_attach(drm_dev, bridge);
1786 } 1787 }
1787 1788
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index f6118baa8e3e..8baabd813ff5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -50,7 +50,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
50 if (vm_size > exynos_gem->size) 50 if (vm_size > exynos_gem->size)
51 return -EINVAL; 51 return -EINVAL;
52 52
53 ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->pages, 53 ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->cookie,
54 exynos_gem->dma_addr, exynos_gem->size, 54 exynos_gem->dma_addr, exynos_gem->size,
55 &exynos_gem->dma_attrs); 55 &exynos_gem->dma_attrs);
56 if (ret < 0) { 56 if (ret < 0) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index c747824f3c98..8a4f4a0211d0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1723,7 +1723,7 @@ static int fimc_probe(struct platform_device *pdev)
1723 goto err_put_clk; 1723 goto err_put_clk;
1724 } 1724 }
1725 1725
1726 DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv); 1726 DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
1727 1727
1728 spin_lock_init(&ctx->lock); 1728 spin_lock_init(&ctx->lock);
1729 platform_set_drvdata(pdev, ctx); 1729 platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index c17efdb238a6..8dfe6e113a88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1166,7 +1166,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
1166 goto err_free_event; 1166 goto err_free_event;
1167 } 1167 }
1168 1168
1169 cmd = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd; 1169 cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd;
1170 1170
1171 if (copy_from_user(cmdlist->data + cmdlist->last, 1171 if (copy_from_user(cmdlist->data + cmdlist->last,
1172 (void __user *)cmd, 1172 (void __user *)cmd,
@@ -1184,7 +1184,8 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
1184 if (req->cmd_buf_nr) { 1184 if (req->cmd_buf_nr) {
1185 struct drm_exynos_g2d_cmd *cmd_buf; 1185 struct drm_exynos_g2d_cmd *cmd_buf;
1186 1186
1187 cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf; 1187 cmd_buf = (struct drm_exynos_g2d_cmd *)
1188 (unsigned long)req->cmd_buf;
1188 1189
1189 if (copy_from_user(cmdlist->data + cmdlist->last, 1190 if (copy_from_user(cmdlist->data + cmdlist->last,
1190 (void __user *)cmd_buf, 1191 (void __user *)cmd_buf,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 32358c5e3db4..26b5e4bd55b6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -218,7 +218,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
218 return ERR_PTR(ret); 218 return ERR_PTR(ret);
219 } 219 }
220 220
221 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); 221 DRM_DEBUG_KMS("created file object = %p\n", obj->filp);
222 222
223 return exynos_gem; 223 return exynos_gem;
224} 224}
@@ -335,7 +335,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
335 if (vm_size > exynos_gem->size) 335 if (vm_size > exynos_gem->size)
336 return -EINVAL; 336 return -EINVAL;
337 337
338 ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->pages, 338 ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->cookie,
339 exynos_gem->dma_addr, exynos_gem->size, 339 exynos_gem->dma_addr, exynos_gem->size,
340 &exynos_gem->dma_attrs); 340 &exynos_gem->dma_attrs);
341 if (ret < 0) { 341 if (ret < 0) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 7aecd23cfa11..5d20da8f957e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1723,7 +1723,7 @@ static int gsc_probe(struct platform_device *pdev)
1723 return ret; 1723 return ret;
1724 } 1724 }
1725 1725
1726 DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv); 1726 DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
1727 1727
1728 mutex_init(&ctx->lock); 1728 mutex_init(&ctx->lock);
1729 platform_set_drvdata(pdev, ctx); 1729 platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 67d24236e745..95eeb9116f10 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -208,7 +208,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
208 * e.g PAUSE state, queue buf, command control. 208 * e.g PAUSE state, queue buf, command control.
209 */ 209 */
210 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 210 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
211 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv); 211 DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv);
212 212
213 mutex_lock(&ippdrv->cmd_lock); 213 mutex_lock(&ippdrv->cmd_lock);
214 list_for_each_entry(c_node, &ippdrv->cmd_list, list) { 214 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
@@ -388,8 +388,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
388 } 388 }
389 property->prop_id = ret; 389 property->prop_id = ret;
390 390
391 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n", 391 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n",
392 property->prop_id, property->cmd, (int)ippdrv); 392 property->prop_id, property->cmd, ippdrv);
393 393
394 /* stored property information and ippdrv in private data */ 394 /* stored property information and ippdrv in private data */
395 c_node->property = *property; 395 c_node->property = *property;
@@ -518,7 +518,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
518{ 518{
519 int i; 519 int i;
520 520
521 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); 521 DRM_DEBUG_KMS("node[%p]\n", m_node);
522 522
523 if (!m_node) { 523 if (!m_node) {
524 DRM_ERROR("invalid dequeue node.\n"); 524 DRM_ERROR("invalid dequeue node.\n");
@@ -562,7 +562,7 @@ static struct drm_exynos_ipp_mem_node
562 m_node->buf_id = qbuf->buf_id; 562 m_node->buf_id = qbuf->buf_id;
563 INIT_LIST_HEAD(&m_node->list); 563 INIT_LIST_HEAD(&m_node->list);
564 564
565 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id); 565 DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id);
566 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); 566 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
567 567
568 for_each_ipp_planar(i) { 568 for_each_ipp_planar(i) {
@@ -582,8 +582,8 @@ static struct drm_exynos_ipp_mem_node
582 582
583 buf_info->handles[i] = qbuf->handle[i]; 583 buf_info->handles[i] = qbuf->handle[i];
584 buf_info->base[i] = *addr; 584 buf_info->base[i] = *addr;
585 DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i, 585 DRM_DEBUG_KMS("i[%d]base[%pad]hd[0x%lx]\n", i,
586 buf_info->base[i], buf_info->handles[i]); 586 &buf_info->base[i], buf_info->handles[i]);
587 } 587 }
588 } 588 }
589 589
@@ -664,7 +664,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
664 664
665 mutex_lock(&c_node->event_lock); 665 mutex_lock(&c_node->event_lock);
666 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { 666 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
667 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e); 667 DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e);
668 668
669 /* 669 /*
670 * qbuf == NULL condition means all event deletion. 670 * qbuf == NULL condition means all event deletion.
@@ -755,7 +755,7 @@ static struct drm_exynos_ipp_mem_node
755 755
756 /* find memory node from memory list */ 756 /* find memory node from memory list */
757 list_for_each_entry(m_node, head, list) { 757 list_for_each_entry(m_node, head, list) {
758 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node); 758 DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node);
759 759
760 /* compare buffer id */ 760 /* compare buffer id */
761 if (m_node->buf_id == qbuf->buf_id) 761 if (m_node->buf_id == qbuf->buf_id)
@@ -772,7 +772,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
772 struct exynos_drm_ipp_ops *ops = NULL; 772 struct exynos_drm_ipp_ops *ops = NULL;
773 int ret = 0; 773 int ret = 0;
774 774
775 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); 775 DRM_DEBUG_KMS("node[%p]\n", m_node);
776 776
777 if (!m_node) { 777 if (!m_node) {
778 DRM_ERROR("invalid queue node.\n"); 778 DRM_ERROR("invalid queue node.\n");
@@ -1237,7 +1237,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1237 m_node = list_first_entry(head, 1237 m_node = list_first_entry(head,
1238 struct drm_exynos_ipp_mem_node, list); 1238 struct drm_exynos_ipp_mem_node, list);
1239 1239
1240 DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node); 1240 DRM_DEBUG_KMS("m_node[%p]\n", m_node);
1241 1241
1242 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1242 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1243 if (ret) { 1243 if (ret) {
@@ -1610,8 +1610,8 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1610 } 1610 }
1611 ippdrv->prop_list.ipp_id = ret; 1611 ippdrv->prop_list.ipp_id = ret;
1612 1612
1613 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n", 1613 DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n",
1614 count++, (int)ippdrv, ret); 1614 count++, ippdrv, ret);
1615 1615
1616 /* store parent device for node */ 1616 /* store parent device for node */
1617 ippdrv->parent_dev = dev; 1617 ippdrv->parent_dev = dev;
@@ -1668,7 +1668,7 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1668 1668
1669 file_priv->ipp_dev = dev; 1669 file_priv->ipp_dev = dev;
1670 1670
1671 DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev); 1671 DRM_DEBUG_KMS("done priv[%p]\n", dev);
1672 1672
1673 return 0; 1673 return 0;
1674} 1674}
@@ -1685,8 +1685,8 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1685 mutex_lock(&ippdrv->cmd_lock); 1685 mutex_lock(&ippdrv->cmd_lock);
1686 list_for_each_entry_safe(c_node, tc_node, 1686 list_for_each_entry_safe(c_node, tc_node,
1687 &ippdrv->cmd_list, list) { 1687 &ippdrv->cmd_list, list) {
1688 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", 1688 DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n",
1689 count++, (int)ippdrv); 1689 count++, ippdrv);
1690 1690
1691 if (c_node->filp == file) { 1691 if (c_node->filp == file) {
1692 /* 1692 /*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 4eaef36aec5a..9869d70e9e54 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -18,6 +18,7 @@
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/of_graph.h> 19#include <linux/of_graph.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/component.h>
21#include <drm/drmP.h> 22#include <drm/drmP.h>
22#include <linux/mfd/syscon.h> 23#include <linux/mfd/syscon.h>
23#include <linux/regmap.h> 24#include <linux/regmap.h>
@@ -306,9 +307,9 @@ exit:
306 return ret; 307 return ret;
307} 308}
308 309
309void mic_disable(struct drm_bridge *bridge) { } 310static void mic_disable(struct drm_bridge *bridge) { }
310 311
311void mic_post_disable(struct drm_bridge *bridge) 312static void mic_post_disable(struct drm_bridge *bridge)
312{ 313{
313 struct exynos_mic *mic = bridge->driver_private; 314 struct exynos_mic *mic = bridge->driver_private;
314 int i; 315 int i;
@@ -328,7 +329,7 @@ already_disabled:
328 mutex_unlock(&mic_mutex); 329 mutex_unlock(&mic_mutex);
329} 330}
330 331
331void mic_pre_enable(struct drm_bridge *bridge) 332static void mic_pre_enable(struct drm_bridge *bridge)
332{ 333{
333 struct exynos_mic *mic = bridge->driver_private; 334 struct exynos_mic *mic = bridge->driver_private;
334 int ret, i; 335 int ret, i;
@@ -371,11 +372,35 @@ already_enabled:
371 mutex_unlock(&mic_mutex); 372 mutex_unlock(&mic_mutex);
372} 373}
373 374
374void mic_enable(struct drm_bridge *bridge) { } 375static void mic_enable(struct drm_bridge *bridge) { }
375 376
376void mic_destroy(struct drm_bridge *bridge) 377static const struct drm_bridge_funcs mic_bridge_funcs = {
378 .disable = mic_disable,
379 .post_disable = mic_post_disable,
380 .pre_enable = mic_pre_enable,
381 .enable = mic_enable,
382};
383
384static int exynos_mic_bind(struct device *dev, struct device *master,
385 void *data)
377{ 386{
378 struct exynos_mic *mic = bridge->driver_private; 387 struct exynos_mic *mic = dev_get_drvdata(dev);
388 int ret;
389
390 mic->bridge.funcs = &mic_bridge_funcs;
391 mic->bridge.of_node = dev->of_node;
392 mic->bridge.driver_private = mic;
393 ret = drm_bridge_add(&mic->bridge);
394 if (ret)
395 DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
396
397 return ret;
398}
399
400static void exynos_mic_unbind(struct device *dev, struct device *master,
401 void *data)
402{
403 struct exynos_mic *mic = dev_get_drvdata(dev);
379 int i; 404 int i;
380 405
381 mutex_lock(&mic_mutex); 406 mutex_lock(&mic_mutex);
@@ -387,16 +412,16 @@ void mic_destroy(struct drm_bridge *bridge)
387 412
388already_disabled: 413already_disabled:
389 mutex_unlock(&mic_mutex); 414 mutex_unlock(&mic_mutex);
415
416 drm_bridge_remove(&mic->bridge);
390} 417}
391 418
392static const struct drm_bridge_funcs mic_bridge_funcs = { 419static const struct component_ops exynos_mic_component_ops = {
393 .disable = mic_disable, 420 .bind = exynos_mic_bind,
394 .post_disable = mic_post_disable, 421 .unbind = exynos_mic_unbind,
395 .pre_enable = mic_pre_enable,
396 .enable = mic_enable,
397}; 422};
398 423
399int exynos_mic_probe(struct platform_device *pdev) 424static int exynos_mic_probe(struct platform_device *pdev)
400{ 425{
401 struct device *dev = &pdev->dev; 426 struct device *dev = &pdev->dev;
402 struct exynos_mic *mic; 427 struct exynos_mic *mic;
@@ -435,17 +460,8 @@ int exynos_mic_probe(struct platform_device *pdev)
435 goto err; 460 goto err;
436 } 461 }
437 462
438 mic->bridge.funcs = &mic_bridge_funcs;
439 mic->bridge.of_node = dev->of_node;
440 mic->bridge.driver_private = mic;
441 ret = drm_bridge_add(&mic->bridge);
442 if (ret) {
443 DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
444 goto err;
445 }
446
447 for (i = 0; i < NUM_CLKS; i++) { 463 for (i = 0; i < NUM_CLKS; i++) {
448 mic->clks[i] = of_clk_get_by_name(dev->of_node, clk_names[i]); 464 mic->clks[i] = devm_clk_get(dev, clk_names[i]);
449 if (IS_ERR(mic->clks[i])) { 465 if (IS_ERR(mic->clks[i])) {
450 DRM_ERROR("mic: Failed to get clock (%s)\n", 466 DRM_ERROR("mic: Failed to get clock (%s)\n",
451 clk_names[i]); 467 clk_names[i]);
@@ -454,7 +470,10 @@ int exynos_mic_probe(struct platform_device *pdev)
454 } 470 }
455 } 471 }
456 472
473 platform_set_drvdata(pdev, mic);
474
457 DRM_DEBUG_KMS("MIC has been probed\n"); 475 DRM_DEBUG_KMS("MIC has been probed\n");
476 return component_add(dev, &exynos_mic_component_ops);
458 477
459err: 478err:
460 return ret; 479 return ret;
@@ -462,14 +481,7 @@ err:
462 481
463static int exynos_mic_remove(struct platform_device *pdev) 482static int exynos_mic_remove(struct platform_device *pdev)
464{ 483{
465 struct exynos_mic *mic = platform_get_drvdata(pdev); 484 component_del(&pdev->dev, &exynos_mic_component_ops);
466 int i;
467
468 drm_bridge_remove(&mic->bridge);
469
470 for (i = NUM_CLKS - 1; i > -1; i--)
471 clk_put(mic->clks[i]);
472
473 return 0; 485 return 0;
474} 486}
475 487
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index bea0f7826d30..ce59f4443394 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -754,7 +754,7 @@ static int rotator_probe(struct platform_device *pdev)
754 goto err_ippdrv_register; 754 goto err_ippdrv_register;
755 } 755 }
756 756
757 DRM_DEBUG_KMS("ippdrv[0x%x]\n", (int)ippdrv); 757 DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv);
758 758
759 platform_set_drvdata(pdev, rot); 759 platform_set_drvdata(pdev, rot);
760 760
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 62ac4e5fa51d..b605bd7395ec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -223,7 +223,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
223 } 223 }
224} 224}
225 225
226static int vidi_show_connection(struct device *dev, 226static ssize_t vidi_show_connection(struct device *dev,
227 struct device_attribute *attr, char *buf) 227 struct device_attribute *attr, char *buf)
228{ 228{
229 struct vidi_context *ctx = dev_get_drvdata(dev); 229 struct vidi_context *ctx = dev_get_drvdata(dev);
@@ -238,7 +238,7 @@ static int vidi_show_connection(struct device *dev,
238 return rc; 238 return rc;
239} 239}
240 240
241static int vidi_store_connection(struct device *dev, 241static ssize_t vidi_store_connection(struct device *dev,
242 struct device_attribute *attr, 242 struct device_attribute *attr,
243 const char *buf, size_t len) 243 const char *buf, size_t len)
244{ 244{
@@ -294,7 +294,9 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
294 } 294 }
295 295
296 if (vidi->connection) { 296 if (vidi->connection) {
297 struct edid *raw_edid = (struct edid *)(uint32_t)vidi->edid; 297 struct edid *raw_edid;
298
299 raw_edid = (struct edid *)(unsigned long)vidi->edid;
298 if (!drm_edid_is_valid(raw_edid)) { 300 if (!drm_edid_is_valid(raw_edid)) {
299 DRM_DEBUG_KMS("edid data is invalid.\n"); 301 DRM_DEBUG_KMS("edid data is invalid.\n");
300 return -EINVAL; 302 return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b9a564b76528..4897728713f6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3287,19 +3287,20 @@ enum skl_disp_power_wells {
3287 3287
3288#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114) 3288#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114)
3289/* 3289/*
3290 * HDMI/DP bits are gen4+ 3290 * HDMI/DP bits are g4x+
3291 * 3291 *
3292 * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused. 3292 * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
3293 * Please check the detailed lore in the commit message for for experimental 3293 * Please check the detailed lore in the commit message for for experimental
3294 * evidence. 3294 * evidence.
3295 */ 3295 */
3296#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29) 3296/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
3297#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
3298#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
3299#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
3300/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
3301#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
3297#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28) 3302#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
3298#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27) 3303#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
3299/* VLV DP/HDMI bits again match Bspec */
3300#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
3301#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
3302#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
3303#define PORTD_HOTPLUG_INT_STATUS (3 << 21) 3304#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
3304#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21) 3305#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
3305#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21) 3306#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 796e3d313cb9..1bbd67b046da 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4493,20 +4493,20 @@ static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4493 return I915_READ(PORT_HOTPLUG_STAT) & bit; 4493 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4494} 4494}
4495 4495
4496static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv, 4496static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4497 struct intel_digital_port *port) 4497 struct intel_digital_port *port)
4498{ 4498{
4499 u32 bit; 4499 u32 bit;
4500 4500
4501 switch (port->port) { 4501 switch (port->port) {
4502 case PORT_B: 4502 case PORT_B:
4503 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV; 4503 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4504 break; 4504 break;
4505 case PORT_C: 4505 case PORT_C:
4506 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV; 4506 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4507 break; 4507 break;
4508 case PORT_D: 4508 case PORT_D:
4509 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; 4509 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4510 break; 4510 break;
4511 default: 4511 default:
4512 MISSING_CASE(port->port); 4512 MISSING_CASE(port->port);
@@ -4558,8 +4558,8 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4558 return cpt_digital_port_connected(dev_priv, port); 4558 return cpt_digital_port_connected(dev_priv, port);
4559 else if (IS_BROXTON(dev_priv)) 4559 else if (IS_BROXTON(dev_priv))
4560 return bxt_digital_port_connected(dev_priv, port); 4560 return bxt_digital_port_connected(dev_priv, port);
4561 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4561 else if (IS_GM45(dev_priv))
4562 return vlv_digital_port_connected(dev_priv, port); 4562 return gm45_digital_port_connected(dev_priv, port);
4563 else 4563 else
4564 return g4x_digital_port_connected(dev_priv, port); 4564 return g4x_digital_port_connected(dev_priv, port);
4565} 4565}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 78f520d05de9..e3acc35e3805 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1520,7 +1520,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1520 DMA_BIDIRECTIONAL); 1520 DMA_BIDIRECTIONAL);
1521 1521
1522 if (dma_mapping_error(pdev, addr)) { 1522 if (dma_mapping_error(pdev, addr)) {
1523 while (--i) { 1523 while (i--) {
1524 dma_unmap_page(pdev, ttm_dma->dma_address[i], 1524 dma_unmap_page(pdev, ttm_dma->dma_address[i],
1525 PAGE_SIZE, DMA_BIDIRECTIONAL); 1525 PAGE_SIZE, DMA_BIDIRECTIONAL);
1526 ttm_dma->dma_address[i] = 0; 1526 ttm_dma->dma_address[i] = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 24be27d3cd18..20935eb2a09e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -635,10 +635,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
635 nv_crtc->lut.depth = 0; 635 nv_crtc->lut.depth = 0;
636 } 636 }
637 637
638 /* Make sure that drm and hw vblank irqs get resumed if needed. */
639 for (head = 0; head < dev->mode_config.num_crtc; head++)
640 drm_vblank_on(dev, head);
641
642 /* This should ensure we don't hit a locking problem when someone 638 /* This should ensure we don't hit a locking problem when someone
643 * wakes us up via a connector. We should never go into suspend 639 * wakes us up via a connector. We should never go into suspend
644 * while the display is on anyways. 640 * while the display is on anyways.
@@ -648,6 +644,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
648 644
649 drm_helper_resume_force_mode(dev); 645 drm_helper_resume_force_mode(dev);
650 646
647 /* Make sure that drm and hw vblank irqs get resumed if needed. */
648 for (head = 0; head < dev->mode_config.num_crtc; head++)
649 drm_vblank_on(dev, head);
650
651 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 651 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
652 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 652 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
653 653
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 2ae8577497ca..7c2e78201ead 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
168 cmd->command_size)) 168 cmd->command_size))
169 return -EFAULT; 169 return -EFAULT;
170 170
171 reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL); 171 reloc_info = kmalloc_array(cmd->relocs_num,
172 sizeof(struct qxl_reloc_info), GFP_KERNEL);
172 if (!reloc_info) 173 if (!reloc_info)
173 return -ENOMEM; 174 return -ENOMEM;
174 175
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 3d031b50a8fd..9f029dda1f07 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -68,5 +68,5 @@ int qxl_gem_prime_mmap(struct drm_gem_object *obj,
68 struct vm_area_struct *area) 68 struct vm_area_struct *area)
69{ 69{
70 WARN_ONCE(1, "not implemented"); 70 WARN_ONCE(1, "not implemented");
71 return ENOSYS; 71 return -ENOSYS;
72} 72}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 460c8f2989da..248c5a9fb0b6 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -276,8 +276,12 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
276 if (rdev->irq.installed) { 276 if (rdev->irq.installed) {
277 for (i = 0; i < rdev->num_crtc; i++) { 277 for (i = 0; i < rdev->num_crtc; i++) {
278 if (rdev->pm.active_crtcs & (1 << i)) { 278 if (rdev->pm.active_crtcs & (1 << i)) {
279 rdev->pm.req_vblank |= (1 << i); 279 /* This can fail if a modeset is in progress */
280 drm_vblank_get(rdev->ddev, i); 280 if (drm_vblank_get(rdev->ddev, i) == 0)
281 rdev->pm.req_vblank |= (1 << i);
282 else
283 DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
284 i);
281 } 285 }
282 } 286 }
283 } 287 }
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e34307459e50..e06ac546a90f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -758,7 +758,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
758 0, PAGE_SIZE, 758 0, PAGE_SIZE,
759 PCI_DMA_BIDIRECTIONAL); 759 PCI_DMA_BIDIRECTIONAL);
760 if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { 760 if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
761 while (--i) { 761 while (i--) {
762 pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], 762 pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
763 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 763 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
764 gtt->ttm.dma_address[i] = 0; 764 gtt->ttm.dma_address[i] = 0;
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 18dfe3ec9a62..22278bcfc60e 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -215,7 +215,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
215 struct drm_gem_cma_object *cma_obj; 215 struct drm_gem_cma_object *cma_obj;
216 216
217 if (size == 0) 217 if (size == 0)
218 return NULL; 218 return ERR_PTR(-EINVAL);
219 219
220 /* First, try to get a vc4_bo from the kernel BO cache. */ 220 /* First, try to get a vc4_bo from the kernel BO cache. */
221 if (from_cache) { 221 if (from_cache) {
@@ -237,7 +237,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
237 if (IS_ERR(cma_obj)) { 237 if (IS_ERR(cma_obj)) {
238 DRM_ERROR("Failed to allocate from CMA:\n"); 238 DRM_ERROR("Failed to allocate from CMA:\n");
239 vc4_bo_stats_dump(vc4); 239 vc4_bo_stats_dump(vc4);
240 return NULL; 240 return ERR_PTR(-ENOMEM);
241 } 241 }
242 } 242 }
243 243
@@ -259,8 +259,8 @@ int vc4_dumb_create(struct drm_file *file_priv,
259 args->size = args->pitch * args->height; 259 args->size = args->pitch * args->height;
260 260
261 bo = vc4_bo_create(dev, args->size, false); 261 bo = vc4_bo_create(dev, args->size, false);
262 if (!bo) 262 if (IS_ERR(bo))
263 return -ENOMEM; 263 return PTR_ERR(bo);
264 264
265 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); 265 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
266 drm_gem_object_unreference_unlocked(&bo->base.base); 266 drm_gem_object_unreference_unlocked(&bo->base.base);
@@ -443,8 +443,8 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
443 * get zeroed, and that might leak data between users. 443 * get zeroed, and that might leak data between users.
444 */ 444 */
445 bo = vc4_bo_create(dev, args->size, false); 445 bo = vc4_bo_create(dev, args->size, false);
446 if (!bo) 446 if (IS_ERR(bo))
447 return -ENOMEM; 447 return PTR_ERR(bo);
448 448
449 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); 449 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
450 drm_gem_object_unreference_unlocked(&bo->base.base); 450 drm_gem_object_unreference_unlocked(&bo->base.base);
@@ -496,8 +496,8 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
496 } 496 }
497 497
498 bo = vc4_bo_create(dev, args->size, true); 498 bo = vc4_bo_create(dev, args->size, true);
499 if (!bo) 499 if (IS_ERR(bo))
500 return -ENOMEM; 500 return PTR_ERR(bo);
501 501
502 ret = copy_from_user(bo->base.vaddr, 502 ret = copy_from_user(bo->base.vaddr,
503 (void __user *)(uintptr_t)args->data, 503 (void __user *)(uintptr_t)args->data,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 080865ec2bae..51a63330d4f8 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -91,8 +91,12 @@ struct vc4_dev {
91 struct vc4_bo *overflow_mem; 91 struct vc4_bo *overflow_mem;
92 struct work_struct overflow_mem_work; 92 struct work_struct overflow_mem_work;
93 93
94 int power_refcount;
95
96 /* Mutex controlling the power refcount. */
97 struct mutex power_lock;
98
94 struct { 99 struct {
95 uint32_t last_ct0ca, last_ct1ca;
96 struct timer_list timer; 100 struct timer_list timer;
97 struct work_struct reset_work; 101 struct work_struct reset_work;
98 } hangcheck; 102 } hangcheck;
@@ -142,6 +146,7 @@ struct vc4_seqno_cb {
142}; 146};
143 147
144struct vc4_v3d { 148struct vc4_v3d {
149 struct vc4_dev *vc4;
145 struct platform_device *pdev; 150 struct platform_device *pdev;
146 void __iomem *regs; 151 void __iomem *regs;
147}; 152};
@@ -192,6 +197,11 @@ struct vc4_exec_info {
192 /* Sequence number for this bin/render job. */ 197 /* Sequence number for this bin/render job. */
193 uint64_t seqno; 198 uint64_t seqno;
194 199
200 /* Last current addresses the hardware was processing when the
201 * hangcheck timer checked on us.
202 */
203 uint32_t last_ct0ca, last_ct1ca;
204
195 /* Kernel-space copy of the ioctl arguments */ 205 /* Kernel-space copy of the ioctl arguments */
196 struct drm_vc4_submit_cl *args; 206 struct drm_vc4_submit_cl *args;
197 207
@@ -434,7 +444,6 @@ void vc4_plane_async_set_fb(struct drm_plane *plane,
434extern struct platform_driver vc4_v3d_driver; 444extern struct platform_driver vc4_v3d_driver;
435int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused); 445int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
436int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused); 446int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
437int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);
438 447
439/* vc4_validate.c */ 448/* vc4_validate.c */
440int 449int
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 48ce30a6f4b5..202aa1544acc 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/pm_runtime.h>
26#include <linux/device.h> 27#include <linux/device.h>
27#include <linux/io.h> 28#include <linux/io.h>
28 29
@@ -228,8 +229,16 @@ vc4_reset(struct drm_device *dev)
228 struct vc4_dev *vc4 = to_vc4_dev(dev); 229 struct vc4_dev *vc4 = to_vc4_dev(dev);
229 230
230 DRM_INFO("Resetting GPU.\n"); 231 DRM_INFO("Resetting GPU.\n");
231 vc4_v3d_set_power(vc4, false); 232
232 vc4_v3d_set_power(vc4, true); 233 mutex_lock(&vc4->power_lock);
234 if (vc4->power_refcount) {
235 /* Power the device off and back on the by dropping the
236 * reference on runtime PM.
237 */
238 pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
239 pm_runtime_get_sync(&vc4->v3d->pdev->dev);
240 }
241 mutex_unlock(&vc4->power_lock);
233 242
234 vc4_irq_reset(dev); 243 vc4_irq_reset(dev);
235 244
@@ -257,10 +266,17 @@ vc4_hangcheck_elapsed(unsigned long data)
257 struct drm_device *dev = (struct drm_device *)data; 266 struct drm_device *dev = (struct drm_device *)data;
258 struct vc4_dev *vc4 = to_vc4_dev(dev); 267 struct vc4_dev *vc4 = to_vc4_dev(dev);
259 uint32_t ct0ca, ct1ca; 268 uint32_t ct0ca, ct1ca;
269 unsigned long irqflags;
270 struct vc4_exec_info *exec;
271
272 spin_lock_irqsave(&vc4->job_lock, irqflags);
273 exec = vc4_first_job(vc4);
260 274
261 /* If idle, we can stop watching for hangs. */ 275 /* If idle, we can stop watching for hangs. */
262 if (list_empty(&vc4->job_list)) 276 if (!exec) {
277 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
263 return; 278 return;
279 }
264 280
265 ct0ca = V3D_READ(V3D_CTNCA(0)); 281 ct0ca = V3D_READ(V3D_CTNCA(0));
266 ct1ca = V3D_READ(V3D_CTNCA(1)); 282 ct1ca = V3D_READ(V3D_CTNCA(1));
@@ -268,14 +284,16 @@ vc4_hangcheck_elapsed(unsigned long data)
268 /* If we've made any progress in execution, rearm the timer 284 /* If we've made any progress in execution, rearm the timer
269 * and wait. 285 * and wait.
270 */ 286 */
271 if (ct0ca != vc4->hangcheck.last_ct0ca || 287 if (ct0ca != exec->last_ct0ca || ct1ca != exec->last_ct1ca) {
272 ct1ca != vc4->hangcheck.last_ct1ca) { 288 exec->last_ct0ca = ct0ca;
273 vc4->hangcheck.last_ct0ca = ct0ca; 289 exec->last_ct1ca = ct1ca;
274 vc4->hangcheck.last_ct1ca = ct1ca; 290 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
275 vc4_queue_hangcheck(dev); 291 vc4_queue_hangcheck(dev);
276 return; 292 return;
277 } 293 }
278 294
295 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
296
279 /* We've gone too long with no progress, reset. This has to 297 /* We've gone too long with no progress, reset. This has to
280 * be done from a work struct, since resetting can sleep and 298 * be done from a work struct, since resetting can sleep and
281 * this timer hook isn't allowed to. 299 * this timer hook isn't allowed to.
@@ -340,12 +358,7 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
340 finish_wait(&vc4->job_wait_queue, &wait); 358 finish_wait(&vc4->job_wait_queue, &wait);
341 trace_vc4_wait_for_seqno_end(dev, seqno); 359 trace_vc4_wait_for_seqno_end(dev, seqno);
342 360
343 if (ret && ret != -ERESTARTSYS) { 361 return ret;
344 DRM_ERROR("timeout waiting for render thread idle\n");
345 return ret;
346 }
347
348 return 0;
349} 362}
350 363
351static void 364static void
@@ -578,9 +591,9 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
578 } 591 }
579 592
580 bo = vc4_bo_create(dev, exec_size, true); 593 bo = vc4_bo_create(dev, exec_size, true);
581 if (!bo) { 594 if (IS_ERR(bo)) {
582 DRM_ERROR("Couldn't allocate BO for binning\n"); 595 DRM_ERROR("Couldn't allocate BO for binning\n");
583 ret = -ENOMEM; 596 ret = PTR_ERR(bo);
584 goto fail; 597 goto fail;
585 } 598 }
586 exec->exec_bo = &bo->base; 599 exec->exec_bo = &bo->base;
@@ -617,6 +630,7 @@ fail:
617static void 630static void
618vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) 631vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
619{ 632{
633 struct vc4_dev *vc4 = to_vc4_dev(dev);
620 unsigned i; 634 unsigned i;
621 635
622 /* Need the struct lock for drm_gem_object_unreference(). */ 636 /* Need the struct lock for drm_gem_object_unreference(). */
@@ -635,6 +649,11 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
635 } 649 }
636 mutex_unlock(&dev->struct_mutex); 650 mutex_unlock(&dev->struct_mutex);
637 651
652 mutex_lock(&vc4->power_lock);
653 if (--vc4->power_refcount == 0)
654 pm_runtime_put(&vc4->v3d->pdev->dev);
655 mutex_unlock(&vc4->power_lock);
656
638 kfree(exec); 657 kfree(exec);
639} 658}
640 659
@@ -746,6 +765,9 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
746 struct drm_gem_object *gem_obj; 765 struct drm_gem_object *gem_obj;
747 struct vc4_bo *bo; 766 struct vc4_bo *bo;
748 767
768 if (args->pad != 0)
769 return -EINVAL;
770
749 gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle); 771 gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
750 if (!gem_obj) { 772 if (!gem_obj) {
751 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); 773 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
@@ -772,7 +794,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
772 struct vc4_dev *vc4 = to_vc4_dev(dev); 794 struct vc4_dev *vc4 = to_vc4_dev(dev);
773 struct drm_vc4_submit_cl *args = data; 795 struct drm_vc4_submit_cl *args = data;
774 struct vc4_exec_info *exec; 796 struct vc4_exec_info *exec;
775 int ret; 797 int ret = 0;
776 798
777 if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) { 799 if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
778 DRM_ERROR("Unknown flags: 0x%02x\n", args->flags); 800 DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
@@ -785,6 +807,15 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
785 return -ENOMEM; 807 return -ENOMEM;
786 } 808 }
787 809
810 mutex_lock(&vc4->power_lock);
811 if (vc4->power_refcount++ == 0)
812 ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
813 mutex_unlock(&vc4->power_lock);
814 if (ret < 0) {
815 kfree(exec);
816 return ret;
817 }
818
788 exec->args = args; 819 exec->args = args;
789 INIT_LIST_HEAD(&exec->unref_list); 820 INIT_LIST_HEAD(&exec->unref_list);
790 821
@@ -839,6 +870,8 @@ vc4_gem_init(struct drm_device *dev)
839 (unsigned long)dev); 870 (unsigned long)dev);
840 871
841 INIT_WORK(&vc4->job_done_work, vc4_job_done_work); 872 INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
873
874 mutex_init(&vc4->power_lock);
842} 875}
843 876
844void 877void
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index b68060e758db..78a21357fb2d 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -57,7 +57,7 @@ vc4_overflow_mem_work(struct work_struct *work)
57 struct vc4_bo *bo; 57 struct vc4_bo *bo;
58 58
59 bo = vc4_bo_create(dev, 256 * 1024, true); 59 bo = vc4_bo_create(dev, 256 * 1024, true);
60 if (!bo) { 60 if (IS_ERR(bo)) {
61 DRM_ERROR("Couldn't allocate binner overflow mem\n"); 61 DRM_ERROR("Couldn't allocate binner overflow mem\n");
62 return; 62 return;
63 } 63 }
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 8a2a312e2c1b..0f12418725e5 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -316,20 +316,11 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
316 size += xtiles * ytiles * loop_body_size; 316 size += xtiles * ytiles * loop_body_size;
317 317
318 setup->rcl = &vc4_bo_create(dev, size, true)->base; 318 setup->rcl = &vc4_bo_create(dev, size, true)->base;
319 if (!setup->rcl) 319 if (IS_ERR(setup->rcl))
320 return -ENOMEM; 320 return PTR_ERR(setup->rcl);
321 list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head, 321 list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
322 &exec->unref_list); 322 &exec->unref_list);
323 323
324 rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
325 rcl_u32(setup,
326 (setup->color_write ? (setup->color_write->paddr +
327 args->color_write.offset) :
328 0));
329 rcl_u16(setup, args->width);
330 rcl_u16(setup, args->height);
331 rcl_u16(setup, args->color_write.bits);
332
333 /* The tile buffer gets cleared when the previous tile is stored. If 324 /* The tile buffer gets cleared when the previous tile is stored. If
334 * the clear values changed between frames, then the tile buffer has 325 * the clear values changed between frames, then the tile buffer has
335 * stale clear values in it, so we have to do a store in None mode (no 326 * stale clear values in it, so we have to do a store in None mode (no
@@ -349,6 +340,15 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
349 rcl_u32(setup, 0); /* no address, since we're in None mode */ 340 rcl_u32(setup, 0); /* no address, since we're in None mode */
350 } 341 }
351 342
343 rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
344 rcl_u32(setup,
345 (setup->color_write ? (setup->color_write->paddr +
346 args->color_write.offset) :
347 0));
348 rcl_u16(setup, args->width);
349 rcl_u16(setup, args->height);
350 rcl_u16(setup, args->color_write.bits);
351
352 for (y = min_y_tile; y <= max_y_tile; y++) { 352 for (y = min_y_tile; y <= max_y_tile; y++) {
353 for (x = min_x_tile; x <= max_x_tile; x++) { 353 for (x = min_x_tile; x <= max_x_tile; x++) {
354 bool first = (x == min_x_tile && y == min_y_tile); 354 bool first = (x == min_x_tile && y == min_y_tile);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 314ff71db978..31de5d17bc85 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -17,6 +17,7 @@
17 */ 17 */
18 18
19#include "linux/component.h" 19#include "linux/component.h"
20#include "linux/pm_runtime.h"
20#include "vc4_drv.h" 21#include "vc4_drv.h"
21#include "vc4_regs.h" 22#include "vc4_regs.h"
22 23
@@ -144,18 +145,6 @@ int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
144} 145}
145#endif /* CONFIG_DEBUG_FS */ 146#endif /* CONFIG_DEBUG_FS */
146 147
147int
148vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
149{
150 /* XXX: This interface is needed for GPU reset, and the way to
151 * do it is to turn our power domain off and back on. We
152 * can't just reset from within the driver, because the reset
153 * bits are in the power domain's register area, and get set
154 * during the poweron process.
155 */
156 return 0;
157}
158
159static void vc4_v3d_init_hw(struct drm_device *dev) 148static void vc4_v3d_init_hw(struct drm_device *dev)
160{ 149{
161 struct vc4_dev *vc4 = to_vc4_dev(dev); 150 struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -167,6 +156,29 @@ static void vc4_v3d_init_hw(struct drm_device *dev)
167 V3D_WRITE(V3D_VPMBASE, 0); 156 V3D_WRITE(V3D_VPMBASE, 0);
168} 157}
169 158
159#ifdef CONFIG_PM
160static int vc4_v3d_runtime_suspend(struct device *dev)
161{
162 struct vc4_v3d *v3d = dev_get_drvdata(dev);
163 struct vc4_dev *vc4 = v3d->vc4;
164
165 vc4_irq_uninstall(vc4->dev);
166
167 return 0;
168}
169
170static int vc4_v3d_runtime_resume(struct device *dev)
171{
172 struct vc4_v3d *v3d = dev_get_drvdata(dev);
173 struct vc4_dev *vc4 = v3d->vc4;
174
175 vc4_v3d_init_hw(vc4->dev);
176 vc4_irq_postinstall(vc4->dev);
177
178 return 0;
179}
180#endif
181
170static int vc4_v3d_bind(struct device *dev, struct device *master, void *data) 182static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
171{ 183{
172 struct platform_device *pdev = to_platform_device(dev); 184 struct platform_device *pdev = to_platform_device(dev);
@@ -179,6 +191,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
179 if (!v3d) 191 if (!v3d)
180 return -ENOMEM; 192 return -ENOMEM;
181 193
194 dev_set_drvdata(dev, v3d);
195
182 v3d->pdev = pdev; 196 v3d->pdev = pdev;
183 197
184 v3d->regs = vc4_ioremap_regs(pdev, 0); 198 v3d->regs = vc4_ioremap_regs(pdev, 0);
@@ -186,6 +200,7 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
186 return PTR_ERR(v3d->regs); 200 return PTR_ERR(v3d->regs);
187 201
188 vc4->v3d = v3d; 202 vc4->v3d = v3d;
203 v3d->vc4 = vc4;
189 204
190 if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) { 205 if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
191 DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n", 206 DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
@@ -207,6 +222,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
207 return ret; 222 return ret;
208 } 223 }
209 224
225 pm_runtime_enable(dev);
226
210 return 0; 227 return 0;
211} 228}
212 229
@@ -216,6 +233,8 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master,
216 struct drm_device *drm = dev_get_drvdata(master); 233 struct drm_device *drm = dev_get_drvdata(master);
217 struct vc4_dev *vc4 = to_vc4_dev(drm); 234 struct vc4_dev *vc4 = to_vc4_dev(drm);
218 235
236 pm_runtime_disable(dev);
237
219 drm_irq_uninstall(drm); 238 drm_irq_uninstall(drm);
220 239
221 /* Disable the binner's overflow memory address, so the next 240 /* Disable the binner's overflow memory address, so the next
@@ -228,6 +247,10 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master,
228 vc4->v3d = NULL; 247 vc4->v3d = NULL;
229} 248}
230 249
250static const struct dev_pm_ops vc4_v3d_pm_ops = {
251 SET_RUNTIME_PM_OPS(vc4_v3d_runtime_suspend, vc4_v3d_runtime_resume, NULL)
252};
253
231static const struct component_ops vc4_v3d_ops = { 254static const struct component_ops vc4_v3d_ops = {
232 .bind = vc4_v3d_bind, 255 .bind = vc4_v3d_bind,
233 .unbind = vc4_v3d_unbind, 256 .unbind = vc4_v3d_unbind,
@@ -255,5 +278,6 @@ struct platform_driver vc4_v3d_driver = {
255 .driver = { 278 .driver = {
256 .name = "vc4_v3d", 279 .name = "vc4_v3d",
257 .of_match_table = vc4_v3d_dt_match, 280 .of_match_table = vc4_v3d_dt_match,
281 .pm = &vc4_v3d_pm_ops,
258 }, 282 },
259}; 283};
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index e26d9f6face3..24c2c746e8f3 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -401,8 +401,8 @@ validate_tile_binning_config(VALIDATE_ARGS)
401 tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size, 401 tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size,
402 true); 402 true);
403 exec->tile_bo = &tile_bo->base; 403 exec->tile_bo = &tile_bo->base;
404 if (!exec->tile_bo) 404 if (IS_ERR(exec->tile_bo))
405 return -ENOMEM; 405 return PTR_ERR(exec->tile_bo);
406 list_add_tail(&tile_bo->unref_head, &exec->unref_list); 406 list_add_tail(&tile_bo->unref_head, &exec->unref_list);
407 407
408 /* tile alloc address. */ 408 /* tile alloc address. */
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 62a400c5ba06..fb092f3f11cb 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1353,7 +1353,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
1353 1353
1354 raw_spin_lock_irqsave(&iommu->register_lock, flags); 1354 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1355 1355
1356 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); 1356 sts = readl(iommu->reg + DMAR_GSTS_REG);
1357 if (!(sts & DMA_GSTS_QIES)) 1357 if (!(sts & DMA_GSTS_QIES))
1358 goto end; 1358 goto end;
1359 1359
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 50464833d0b8..d9939fa9b588 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -249,12 +249,30 @@ static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *s
249static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) 249static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
250{ 250{
251 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); 251 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
252 struct intel_svm_dev *sdev;
252 253
254 /* This might end up being called from exit_mmap(), *before* the page
255 * tables are cleared. And __mmu_notifier_release() will delete us from
256 * the list of notifiers so that our invalidate_range() callback doesn't
257 * get called when the page tables are cleared. So we need to protect
258 * against hardware accessing those page tables.
259 *
260 * We do it by clearing the entry in the PASID table and then flushing
261 * the IOTLB and the PASID table caches. This might upset hardware;
262 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
263 * page) so that we end up taking a fault that the hardware really
264 * *has* to handle gracefully without affecting other processes.
265 */
253 svm->iommu->pasid_table[svm->pasid].val = 0; 266 svm->iommu->pasid_table[svm->pasid].val = 0;
267 wmb();
268
269 rcu_read_lock();
270 list_for_each_entry_rcu(sdev, &svm->devs, list) {
271 intel_flush_pasid_dev(svm, sdev, svm->pasid);
272 intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
273 }
274 rcu_read_unlock();
254 275
255 /* There's no need to do any flush because we can't get here if there
256 * are any devices left anyway. */
257 WARN_ON(!list_empty(&svm->devs));
258} 276}
259 277
260static const struct mmu_notifier_ops intel_mmuops = { 278static const struct mmu_notifier_ops intel_mmuops = {
@@ -379,7 +397,6 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
379 goto out; 397 goto out;
380 } 398 }
381 iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1; 399 iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
382 mm = NULL;
383 } else 400 } else
384 iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11); 401 iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
385 wmb(); 402 wmb();
@@ -442,11 +459,11 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
442 kfree_rcu(sdev, rcu); 459 kfree_rcu(sdev, rcu);
443 460
444 if (list_empty(&svm->devs)) { 461 if (list_empty(&svm->devs)) {
445 mmu_notifier_unregister(&svm->notifier, svm->mm);
446 462
447 idr_remove(&svm->iommu->pasid_idr, svm->pasid); 463 idr_remove(&svm->iommu->pasid_idr, svm->pasid);
448 if (svm->mm) 464 if (svm->mm)
449 mmput(svm->mm); 465 mmu_notifier_unregister(&svm->notifier, svm->mm);
466
450 /* We mandate that no page faults may be outstanding 467 /* We mandate that no page faults may be outstanding
451 * for the PASID when intel_svm_unbind_mm() is called. 468 * for the PASID when intel_svm_unbind_mm() is called.
452 * If that is not obeyed, subtle errors will happen. 469 * If that is not obeyed, subtle errors will happen.
@@ -507,6 +524,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
507 struct intel_svm *svm = NULL; 524 struct intel_svm *svm = NULL;
508 int head, tail, handled = 0; 525 int head, tail, handled = 0;
509 526
527 /* Clear PPR bit before reading head/tail registers, to
528 * ensure that we get a new interrupt if needed. */
529 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
530
510 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; 531 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
511 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; 532 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
512 while (head != tail) { 533 while (head != tail) {
@@ -551,6 +572,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
551 * any faults on kernel addresses. */ 572 * any faults on kernel addresses. */
552 if (!svm->mm) 573 if (!svm->mm)
553 goto bad_req; 574 goto bad_req;
575 /* If the mm is already defunct, don't handle faults. */
576 if (!atomic_inc_not_zero(&svm->mm->mm_users))
577 goto bad_req;
554 down_read(&svm->mm->mmap_sem); 578 down_read(&svm->mm->mmap_sem);
555 vma = find_extend_vma(svm->mm, address); 579 vma = find_extend_vma(svm->mm, address);
556 if (!vma || address < vma->vm_start) 580 if (!vma || address < vma->vm_start)
@@ -567,6 +591,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
567 result = QI_RESP_SUCCESS; 591 result = QI_RESP_SUCCESS;
568 invalid: 592 invalid:
569 up_read(&svm->mm->mmap_sem); 593 up_read(&svm->mm->mmap_sem);
594 mmput(svm->mm);
570 bad_req: 595 bad_req:
571 /* Accounting for major/minor faults? */ 596 /* Accounting for major/minor faults? */
572 rcu_read_lock(); 597 rcu_read_lock();
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index c12ba4516df2..ac596928f6b4 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -629,7 +629,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
629 629
630 raw_spin_lock_irqsave(&iommu->register_lock, flags); 630 raw_spin_lock_irqsave(&iommu->register_lock, flags);
631 631
632 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); 632 sts = readl(iommu->reg + DMAR_GSTS_REG);
633 if (!(sts & DMA_GSTS_IRES)) 633 if (!(sts & DMA_GSTS_IRES))
634 goto end; 634 goto end;
635 635
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 33224cb91c5b..9f6acd5d1d2e 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -572,11 +572,13 @@ int nvm_register(struct request_queue *q, char *disk_name,
572 } 572 }
573 } 573 }
574 574
575 ret = nvm_get_sysblock(dev, &dev->sb); 575 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
576 if (!ret) 576 ret = nvm_get_sysblock(dev, &dev->sb);
577 pr_err("nvm: device not initialized.\n"); 577 if (!ret)
578 else if (ret < 0) 578 pr_err("nvm: device not initialized.\n");
579 pr_err("nvm: err (%d) on device initialization\n", ret); 579 else if (ret < 0)
580 pr_err("nvm: err (%d) on device initialization\n", ret);
581 }
580 582
581 /* register device with a supported media manager */ 583 /* register device with a supported media manager */
582 down_write(&nvm_lock); 584 down_write(&nvm_lock);
@@ -1055,9 +1057,11 @@ static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
1055 strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN); 1057 strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
1056 info.fs_ppa.ppa = -1; 1058 info.fs_ppa.ppa = -1;
1057 1059
1058 ret = nvm_init_sysblock(dev, &info); 1060 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
1059 if (ret) 1061 ret = nvm_init_sysblock(dev, &info);
1060 return ret; 1062 if (ret)
1063 return ret;
1064 }
1061 1065
1062 memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info)); 1066 memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
1063 1067
@@ -1117,7 +1121,10 @@ static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1117 dev->mt = NULL; 1121 dev->mt = NULL;
1118 } 1122 }
1119 1123
1120 return nvm_dev_factory(dev, fact.flags); 1124 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
1125 return nvm_dev_factory(dev, fact.flags);
1126
1127 return 0;
1121} 1128}
1122 1129
1123static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg) 1130static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index d8c75958ced3..307db1ea22de 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -300,8 +300,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
300 } 300 }
301 301
302 page = mempool_alloc(rrpc->page_pool, GFP_NOIO); 302 page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
303 if (!page) 303 if (!page) {
304 bio_put(bio);
304 return -ENOMEM; 305 return -ENOMEM;
306 }
305 307
306 while ((slot = find_first_zero_bit(rblk->invalid_pages, 308 while ((slot = find_first_zero_bit(rblk->invalid_pages,
307 nr_pgs_per_blk)) < nr_pgs_per_blk) { 309 nr_pgs_per_blk)) < nr_pgs_per_blk) {
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index ef13ac7700c8..f7b37336353f 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -174,8 +174,7 @@ static inline sector_t rrpc_get_sector(sector_t laddr)
174static inline int request_intersects(struct rrpc_inflight_rq *r, 174static inline int request_intersects(struct rrpc_inflight_rq *r,
175 sector_t laddr_start, sector_t laddr_end) 175 sector_t laddr_start, sector_t laddr_end)
176{ 176{
177 return (laddr_end >= r->l_start && laddr_end <= r->l_end) && 177 return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
178 (laddr_start >= r->l_start && laddr_start <= r->l_end);
179} 178}
180 179
181static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr, 180static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
@@ -184,6 +183,8 @@ static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
184 sector_t laddr_end = laddr + pages - 1; 183 sector_t laddr_end = laddr + pages - 1;
185 struct rrpc_inflight_rq *rtmp; 184 struct rrpc_inflight_rq *rtmp;
186 185
186 WARN_ON(irqs_disabled());
187
187 spin_lock_irq(&rrpc->inflights.lock); 188 spin_lock_irq(&rrpc->inflights.lock);
188 list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) { 189 list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
189 if (unlikely(request_intersects(rtmp, laddr, laddr_end))) { 190 if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 5d6237391dcd..b586d84f2518 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -17,5 +17,6 @@ config BLK_DEV_NVME_SCSI
17 and block devices nodes, as well a a translation for a small 17 and block devices nodes, as well a a translation for a small
18 number of selected SCSI commands to NVMe commands to the NVMe 18 number of selected SCSI commands to NVMe commands to the NVMe
19 driver. If you don't know what this means you probably want 19 driver. If you don't know what this means you probably want
20 to say N here, and if you know what it means you probably 20 to say N here, unless you run a distro that abuses the SCSI
21 want to say N as well. 21 emulation to provide stable device names for mount by id, like
22 some OpenSuSE and SLES versions.
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c5bf001af559..3cd921e6121e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1121,7 +1121,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1121 ns->queue = blk_mq_init_queue(ctrl->tagset); 1121 ns->queue = blk_mq_init_queue(ctrl->tagset);
1122 if (IS_ERR(ns->queue)) 1122 if (IS_ERR(ns->queue))
1123 goto out_free_ns; 1123 goto out_free_ns;
1124 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
1125 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); 1124 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
1126 ns->queue->queuedata = ns; 1125 ns->queue->queuedata = ns;
1127 ns->ctrl = ctrl; 1126 ns->ctrl = ctrl;
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 5cd3725e2fa4..6bb15e4926dc 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -146,9 +146,10 @@ struct nvme_nvm_command {
146 }; 146 };
147}; 147};
148 148
149#define NVME_NVM_LP_MLC_PAIRS 886
149struct nvme_nvm_lp_mlc { 150struct nvme_nvm_lp_mlc {
150 __u16 num_pairs; 151 __u16 num_pairs;
151 __u8 pairs[886]; 152 __u8 pairs[NVME_NVM_LP_MLC_PAIRS];
152}; 153};
153 154
154struct nvme_nvm_lp_tbl { 155struct nvme_nvm_lp_tbl {
@@ -282,9 +283,14 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
282 memcpy(dst->lptbl.id, src->lptbl.id, 8); 283 memcpy(dst->lptbl.id, src->lptbl.id, 8);
283 dst->lptbl.mlc.num_pairs = 284 dst->lptbl.mlc.num_pairs =
284 le16_to_cpu(src->lptbl.mlc.num_pairs); 285 le16_to_cpu(src->lptbl.mlc.num_pairs);
285 /* 4 bits per pair */ 286
287 if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
288 pr_err("nvm: number of MLC pairs not supported\n");
289 return -EINVAL;
290 }
291
286 memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs, 292 memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
287 dst->lptbl.mlc.num_pairs >> 1); 293 dst->lptbl.mlc.num_pairs);
288 } 294 }
289 } 295 }
290 296
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 4fb5bb737868..9664d07d807d 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -139,9 +139,9 @@ static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
139 u32 val = 0; 139 u32 val = 0;
140 140
141 if (ctrl->ops->io_incapable(ctrl)) 141 if (ctrl->ops->io_incapable(ctrl))
142 return false; 142 return true;
143 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val)) 143 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
144 return false; 144 return true;
145 return val & NVME_CSTS_CFS; 145 return val & NVME_CSTS_CFS;
146} 146}
147 147
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 72ef8322d32a..a128672472ec 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -678,6 +678,11 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
678 blk_mq_start_request(req); 678 blk_mq_start_request(req);
679 679
680 spin_lock_irq(&nvmeq->q_lock); 680 spin_lock_irq(&nvmeq->q_lock);
681 if (unlikely(nvmeq->cq_vector < 0)) {
682 ret = BLK_MQ_RQ_QUEUE_BUSY;
683 spin_unlock_irq(&nvmeq->q_lock);
684 goto out;
685 }
681 __nvme_submit_cmd(nvmeq, &cmnd); 686 __nvme_submit_cmd(nvmeq, &cmnd);
682 nvme_process_cq(nvmeq); 687 nvme_process_cq(nvmeq);
683 spin_unlock_irq(&nvmeq->q_lock); 688 spin_unlock_irq(&nvmeq->q_lock);
@@ -999,7 +1004,7 @@ static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved
999 if (!blk_mq_request_started(req)) 1004 if (!blk_mq_request_started(req))
1000 return; 1005 return;
1001 1006
1002 dev_warn(nvmeq->q_dmadev, 1007 dev_dbg_ratelimited(nvmeq->q_dmadev,
1003 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid); 1008 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
1004 1009
1005 status = NVME_SC_ABORT_REQ; 1010 status = NVME_SC_ABORT_REQ;
@@ -2111,16 +2116,12 @@ static void nvme_remove(struct pci_dev *pdev)
2111{ 2116{
2112 struct nvme_dev *dev = pci_get_drvdata(pdev); 2117 struct nvme_dev *dev = pci_get_drvdata(pdev);
2113 2118
2114 spin_lock(&dev_list_lock);
2115 list_del_init(&dev->node);
2116 spin_unlock(&dev_list_lock);
2117
2118 pci_set_drvdata(pdev, NULL); 2119 pci_set_drvdata(pdev, NULL);
2119 flush_work(&dev->reset_work);
2120 flush_work(&dev->scan_work); 2120 flush_work(&dev->scan_work);
2121 nvme_remove_namespaces(&dev->ctrl); 2121 nvme_remove_namespaces(&dev->ctrl);
2122 nvme_uninit_ctrl(&dev->ctrl); 2122 nvme_uninit_ctrl(&dev->ctrl);
2123 nvme_dev_disable(dev, true); 2123 nvme_dev_disable(dev, true);
2124 flush_work(&dev->reset_work);
2124 nvme_dev_remove_admin(dev); 2125 nvme_dev_remove_admin(dev);
2125 nvme_free_queues(dev, 0); 2126 nvme_free_queues(dev, 0);
2126 nvme_release_cmb(dev); 2127 nvme_release_cmb(dev);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 7ee21ae305ae..e7bfc175b8e1 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -635,6 +635,13 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
635 msi_base = be32_to_cpup(msi_map + 2); 635 msi_base = be32_to_cpup(msi_map + 2);
636 rid_len = be32_to_cpup(msi_map + 3); 636 rid_len = be32_to_cpup(msi_map + 3);
637 637
638 if (rid_base & ~map_mask) {
639 dev_err(parent_dev,
640 "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
641 map_mask, rid_base);
642 return rid_out;
643 }
644
638 msi_controller_node = of_find_node_by_phandle(phandle); 645 msi_controller_node = of_find_node_by_phandle(phandle);
639 646
640 matched = (masked_rid >= rid_base && 647 matched = (masked_rid >= rid_base &&
@@ -654,7 +661,7 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
654 if (!matched) 661 if (!matched)
655 return rid_out; 662 return rid_out;
656 663
657 rid_out = masked_rid + msi_base; 664 rid_out = masked_rid - rid_base + msi_base;
658 dev_dbg(dev, 665 dev_dbg(dev,
659 "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n", 666 "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
660 dev_name(parent_dev), map_mask, rid_base, msi_base, 667 dev_name(parent_dev), map_mask, rid_base, msi_base,
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 16d48a4ed225..e96e86d2e745 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -347,6 +347,7 @@ static int mtk_pconf_parse_conf(struct pinctrl_dev *pctldev,
347 ret = mtk_pconf_set_pull_select(pctl, pin, true, false, arg); 347 ret = mtk_pconf_set_pull_select(pctl, pin, true, false, arg);
348 break; 348 break;
349 case PIN_CONFIG_INPUT_ENABLE: 349 case PIN_CONFIG_INPUT_ENABLE:
350 mtk_pmx_gpio_set_direction(pctldev, NULL, pin, true);
350 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param); 351 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param);
351 break; 352 break;
352 case PIN_CONFIG_OUTPUT: 353 case PIN_CONFIG_OUTPUT:
@@ -354,6 +355,7 @@ static int mtk_pconf_parse_conf(struct pinctrl_dev *pctldev,
354 ret = mtk_pmx_gpio_set_direction(pctldev, NULL, pin, false); 355 ret = mtk_pmx_gpio_set_direction(pctldev, NULL, pin, false);
355 break; 356 break;
356 case PIN_CONFIG_INPUT_SCHMITT_ENABLE: 357 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
358 mtk_pmx_gpio_set_direction(pctldev, NULL, pin, true);
357 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param); 359 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param);
358 break; 360 break;
359 case PIN_CONFIG_DRIVE_STRENGTH: 361 case PIN_CONFIG_DRIVE_STRENGTH:
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index e4d473811bb3..3ef798fac81b 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -666,16 +666,19 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
666 struct mvebu_mpp_ctrl_setting *set = &mode->settings[0]; 666 struct mvebu_mpp_ctrl_setting *set = &mode->settings[0];
667 struct mvebu_pinctrl_group *grp; 667 struct mvebu_pinctrl_group *grp;
668 unsigned num_settings; 668 unsigned num_settings;
669 unsigned supp_settings;
669 670
670 for (num_settings = 0; ; set++) { 671 for (num_settings = 0, supp_settings = 0; ; set++) {
671 if (!set->name) 672 if (!set->name)
672 break; 673 break;
673 674
675 num_settings++;
676
674 /* skip unsupported settings for this variant */ 677 /* skip unsupported settings for this variant */
675 if (pctl->variant && !(pctl->variant & set->variant)) 678 if (pctl->variant && !(pctl->variant & set->variant))
676 continue; 679 continue;
677 680
678 num_settings++; 681 supp_settings++;
679 682
680 /* find gpio/gpo/gpi settings */ 683 /* find gpio/gpo/gpi settings */
681 if (strcmp(set->name, "gpio") == 0) 684 if (strcmp(set->name, "gpio") == 0)
@@ -688,7 +691,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
688 } 691 }
689 692
690 /* skip modes with no settings for this variant */ 693 /* skip modes with no settings for this variant */
691 if (!num_settings) 694 if (!supp_settings)
692 continue; 695 continue;
693 696
694 grp = mvebu_pinctrl_find_group_by_pid(pctl, mode->pid); 697 grp = mvebu_pinctrl_find_group_by_pid(pctl, mode->pid);
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 085e60106ec2..1f7469c9857d 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -191,6 +191,7 @@ static void abx500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
191 dev_err(pct->dev, "%s write failed (%d)\n", __func__, ret); 191 dev_err(pct->dev, "%s write failed (%d)\n", __func__, ret);
192} 192}
193 193
194#ifdef CONFIG_DEBUG_FS
194static int abx500_get_pull_updown(struct abx500_pinctrl *pct, int offset, 195static int abx500_get_pull_updown(struct abx500_pinctrl *pct, int offset,
195 enum abx500_gpio_pull_updown *pull_updown) 196 enum abx500_gpio_pull_updown *pull_updown)
196{ 197{
@@ -226,6 +227,7 @@ out:
226 227
227 return ret; 228 return ret;
228} 229}
230#endif
229 231
230static int abx500_set_pull_updown(struct abx500_pinctrl *pct, 232static int abx500_set_pull_updown(struct abx500_pinctrl *pct,
231 int offset, enum abx500_gpio_pull_updown val) 233 int offset, enum abx500_gpio_pull_updown val)
@@ -468,6 +470,7 @@ out:
468 return ret; 470 return ret;
469} 471}
470 472
473#ifdef CONFIG_DEBUG_FS
471static int abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip, 474static int abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
472 unsigned gpio) 475 unsigned gpio)
473{ 476{
@@ -553,8 +556,6 @@ out:
553 return ret; 556 return ret;
554} 557}
555 558
556#ifdef CONFIG_DEBUG_FS
557
558#include <linux/seq_file.h> 559#include <linux/seq_file.h>
559 560
560static void abx500_gpio_dbg_show_one(struct seq_file *s, 561static void abx500_gpio_dbg_show_one(struct seq_file *s,
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index d90e205cf809..216f227c6009 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -426,6 +426,7 @@ int pxa2xx_pinctrl_init(struct platform_device *pdev,
426 426
427 return 0; 427 return 0;
428} 428}
429EXPORT_SYMBOL(pxa2xx_pinctrl_init);
429 430
430int pxa2xx_pinctrl_exit(struct platform_device *pdev) 431int pxa2xx_pinctrl_exit(struct platform_device *pdev)
431{ 432{
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index f67b1e958589..5cc97f85db02 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -514,25 +514,35 @@ static const struct pinconf_ops samsung_pinconf_ops = {
514 .pin_config_group_set = samsung_pinconf_group_set, 514 .pin_config_group_set = samsung_pinconf_group_set,
515}; 515};
516 516
517/* gpiolib gpio_set callback function */ 517/*
518static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value) 518 * The samsung_gpio_set_vlaue() should be called with "bank->slock" held
519 * to avoid race condition.
520 */
521static void samsung_gpio_set_value(struct gpio_chip *gc,
522 unsigned offset, int value)
519{ 523{
520 struct samsung_pin_bank *bank = gpiochip_get_data(gc); 524 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
521 const struct samsung_pin_bank_type *type = bank->type; 525 const struct samsung_pin_bank_type *type = bank->type;
522 unsigned long flags;
523 void __iomem *reg; 526 void __iomem *reg;
524 u32 data; 527 u32 data;
525 528
526 reg = bank->drvdata->virt_base + bank->pctl_offset; 529 reg = bank->drvdata->virt_base + bank->pctl_offset;
527 530
528 spin_lock_irqsave(&bank->slock, flags);
529
530 data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]); 531 data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]);
531 data &= ~(1 << offset); 532 data &= ~(1 << offset);
532 if (value) 533 if (value)
533 data |= 1 << offset; 534 data |= 1 << offset;
534 writel(data, reg + type->reg_offset[PINCFG_TYPE_DAT]); 535 writel(data, reg + type->reg_offset[PINCFG_TYPE_DAT]);
536}
537
538/* gpiolib gpio_set callback function */
539static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
540{
541 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
542 unsigned long flags;
535 543
544 spin_lock_irqsave(&bank->slock, flags);
545 samsung_gpio_set_value(gc, offset, value);
536 spin_unlock_irqrestore(&bank->slock, flags); 546 spin_unlock_irqrestore(&bank->slock, flags);
537} 547}
538 548
@@ -553,6 +563,8 @@ static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset)
553} 563}
554 564
555/* 565/*
566 * The samsung_gpio_set_direction() should be called with "bank->slock" held
567 * to avoid a race condition.
556 * The calls to gpio_direction_output() and gpio_direction_input() 568 * The calls to gpio_direction_output() and gpio_direction_input()
557 * leads to this function call. 569 * leads to this function call.
558 */ 570 */
@@ -564,7 +576,6 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc,
564 struct samsung_pinctrl_drv_data *drvdata; 576 struct samsung_pinctrl_drv_data *drvdata;
565 void __iomem *reg; 577 void __iomem *reg;
566 u32 data, mask, shift; 578 u32 data, mask, shift;
567 unsigned long flags;
568 579
569 bank = gpiochip_get_data(gc); 580 bank = gpiochip_get_data(gc);
570 type = bank->type; 581 type = bank->type;
@@ -581,31 +592,42 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc,
581 reg += 4; 592 reg += 4;
582 } 593 }
583 594
584 spin_lock_irqsave(&bank->slock, flags);
585
586 data = readl(reg); 595 data = readl(reg);
587 data &= ~(mask << shift); 596 data &= ~(mask << shift);
588 if (!input) 597 if (!input)
589 data |= FUNC_OUTPUT << shift; 598 data |= FUNC_OUTPUT << shift;
590 writel(data, reg); 599 writel(data, reg);
591 600
592 spin_unlock_irqrestore(&bank->slock, flags);
593
594 return 0; 601 return 0;
595} 602}
596 603
597/* gpiolib gpio_direction_input callback function. */ 604/* gpiolib gpio_direction_input callback function. */
598static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset) 605static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
599{ 606{
600 return samsung_gpio_set_direction(gc, offset, true); 607 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
608 unsigned long flags;
609 int ret;
610
611 spin_lock_irqsave(&bank->slock, flags);
612 ret = samsung_gpio_set_direction(gc, offset, true);
613 spin_unlock_irqrestore(&bank->slock, flags);
614 return ret;
601} 615}
602 616
603/* gpiolib gpio_direction_output callback function. */ 617/* gpiolib gpio_direction_output callback function. */
604static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset, 618static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
605 int value) 619 int value)
606{ 620{
607 samsung_gpio_set(gc, offset, value); 621 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
608 return samsung_gpio_set_direction(gc, offset, false); 622 unsigned long flags;
623 int ret;
624
625 spin_lock_irqsave(&bank->slock, flags);
626 samsung_gpio_set_value(gc, offset, value);
627 ret = samsung_gpio_set_direction(gc, offset, false);
628 spin_unlock_irqrestore(&bank->slock, flags);
629
630 return ret;
609} 631}
610 632
611/* 633/*
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
index 77d4cf047cee..11760bbe9d51 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
@@ -492,6 +492,7 @@ static const struct sunxi_pinctrl_desc sun8i_h3_pinctrl_data = {
492 .pins = sun8i_h3_pins, 492 .pins = sun8i_h3_pins,
493 .npins = ARRAY_SIZE(sun8i_h3_pins), 493 .npins = ARRAY_SIZE(sun8i_h3_pins),
494 .irq_banks = 2, 494 .irq_banks = 2,
495 .irq_read_needs_mux = true
495}; 496};
496 497
497static int sun8i_h3_pinctrl_probe(struct platform_device *pdev) 498static int sun8i_h3_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 41605dac8309..c78db05e75b1 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3035,6 +3035,7 @@ static void dasd_setup_queue(struct dasd_block *block)
3035 max = block->base->discipline->max_blocks << block->s2b_shift; 3035 max = block->base->discipline->max_blocks << block->s2b_shift;
3036 } 3036 }
3037 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue); 3037 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
3038 block->request_queue->limits.max_dev_sectors = max;
3038 blk_queue_logical_block_size(block->request_queue, 3039 blk_queue_logical_block_size(block->request_queue,
3039 block->bp_block); 3040 block->bp_block);
3040 blk_queue_max_hw_sectors(block->request_queue, max); 3041 blk_queue_max_hw_sectors(block->request_queue, max);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 184b1dbeb554..286782c60da4 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
264 spin_unlock_irqrestore(&lcu->lock, flags); 264 spin_unlock_irqrestore(&lcu->lock, flags);
265 cancel_work_sync(&lcu->suc_data.worker); 265 cancel_work_sync(&lcu->suc_data.worker);
266 spin_lock_irqsave(&lcu->lock, flags); 266 spin_lock_irqsave(&lcu->lock, flags);
267 if (device == lcu->suc_data.device) 267 if (device == lcu->suc_data.device) {
268 dasd_put_device(device);
268 lcu->suc_data.device = NULL; 269 lcu->suc_data.device = NULL;
270 }
269 } 271 }
270 was_pending = 0; 272 was_pending = 0;
271 if (device == lcu->ruac_data.device) { 273 if (device == lcu->ruac_data.device) {
@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
273 was_pending = 1; 275 was_pending = 1;
274 cancel_delayed_work_sync(&lcu->ruac_data.dwork); 276 cancel_delayed_work_sync(&lcu->ruac_data.dwork);
275 spin_lock_irqsave(&lcu->lock, flags); 277 spin_lock_irqsave(&lcu->lock, flags);
276 if (device == lcu->ruac_data.device) 278 if (device == lcu->ruac_data.device) {
279 dasd_put_device(device);
277 lcu->ruac_data.device = NULL; 280 lcu->ruac_data.device = NULL;
281 }
278 } 282 }
279 private->lcu = NULL; 283 private->lcu = NULL;
280 spin_unlock_irqrestore(&lcu->lock, flags); 284 spin_unlock_irqrestore(&lcu->lock, flags);
@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
549 if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) { 553 if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
550 DBF_DEV_EVENT(DBF_WARNING, device, "could not update" 554 DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
551 " alias data in lcu (rc = %d), retry later", rc); 555 " alias data in lcu (rc = %d), retry later", rc);
552 schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ); 556 if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
557 dasd_put_device(device);
553 } else { 558 } else {
559 dasd_put_device(device);
554 lcu->ruac_data.device = NULL; 560 lcu->ruac_data.device = NULL;
555 lcu->flags &= ~UPDATE_PENDING; 561 lcu->flags &= ~UPDATE_PENDING;
556 } 562 }
@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
593 */ 599 */
594 if (!usedev) 600 if (!usedev)
595 return -EINVAL; 601 return -EINVAL;
602 dasd_get_device(usedev);
596 lcu->ruac_data.device = usedev; 603 lcu->ruac_data.device = usedev;
597 schedule_delayed_work(&lcu->ruac_data.dwork, 0); 604 if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
605 dasd_put_device(usedev);
598 return 0; 606 return 0;
599} 607}
600 608
@@ -723,7 +731,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
723 ASCEBC((char *) &cqr->magic, 4); 731 ASCEBC((char *) &cqr->magic, 4);
724 ccw = cqr->cpaddr; 732 ccw = cqr->cpaddr;
725 ccw->cmd_code = DASD_ECKD_CCW_RSCK; 733 ccw->cmd_code = DASD_ECKD_CCW_RSCK;
726 ccw->flags = 0 ; 734 ccw->flags = CCW_FLAG_SLI;
727 ccw->count = 16; 735 ccw->count = 16;
728 ccw->cda = (__u32)(addr_t) cqr->data; 736 ccw->cda = (__u32)(addr_t) cqr->data;
729 ((char *)cqr->data)[0] = reason; 737 ((char *)cqr->data)[0] = reason;
@@ -930,6 +938,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
930 /* 3. read new alias configuration */ 938 /* 3. read new alias configuration */
931 _schedule_lcu_update(lcu, device); 939 _schedule_lcu_update(lcu, device);
932 lcu->suc_data.device = NULL; 940 lcu->suc_data.device = NULL;
941 dasd_put_device(device);
933 spin_unlock_irqrestore(&lcu->lock, flags); 942 spin_unlock_irqrestore(&lcu->lock, flags);
934} 943}
935 944
@@ -989,6 +998,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
989 } 998 }
990 lcu->suc_data.reason = reason; 999 lcu->suc_data.reason = reason;
991 lcu->suc_data.device = device; 1000 lcu->suc_data.device = device;
1001 dasd_get_device(device);
992 spin_unlock(&lcu->lock); 1002 spin_unlock(&lcu->lock);
993 schedule_work(&lcu->suc_data.worker); 1003 if (!schedule_work(&lcu->suc_data.worker))
1004 dasd_put_device(device);
994}; 1005};
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index da2e068ee47d..bbfbfd9e5aa3 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -206,6 +206,7 @@ static struct {
206 {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 206 {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
207 {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, 207 {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
208 {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES}, 208 {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
209 {"Marvell", "91xx Config", "1.01", BLIST_SKIP_VPD_PAGES},
209 {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 210 {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
210 {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 211 {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
211 {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 212 {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 4f18a851e2c7..00bc7218a7f8 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1272,16 +1272,18 @@ static void __scsi_remove_target(struct scsi_target *starget)
1272void scsi_remove_target(struct device *dev) 1272void scsi_remove_target(struct device *dev)
1273{ 1273{
1274 struct Scsi_Host *shost = dev_to_shost(dev->parent); 1274 struct Scsi_Host *shost = dev_to_shost(dev->parent);
1275 struct scsi_target *starget; 1275 struct scsi_target *starget, *last_target = NULL;
1276 unsigned long flags; 1276 unsigned long flags;
1277 1277
1278restart: 1278restart:
1279 spin_lock_irqsave(shost->host_lock, flags); 1279 spin_lock_irqsave(shost->host_lock, flags);
1280 list_for_each_entry(starget, &shost->__targets, siblings) { 1280 list_for_each_entry(starget, &shost->__targets, siblings) {
1281 if (starget->state == STARGET_DEL) 1281 if (starget->state == STARGET_DEL ||
1282 starget == last_target)
1282 continue; 1283 continue;
1283 if (starget->dev.parent == dev || &starget->dev == dev) { 1284 if (starget->dev.parent == dev || &starget->dev == dev) {
1284 kref_get(&starget->reap_ref); 1285 kref_get(&starget->reap_ref);
1286 last_target = starget;
1285 spin_unlock_irqrestore(shost->host_lock, flags); 1287 spin_unlock_irqrestore(shost->host_lock, flags);
1286 __scsi_remove_target(starget); 1288 __scsi_remove_target(starget);
1287 scsi_target_reap(starget); 1289 scsi_target_reap(starget);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index aebad36391c9..8feac599e9ab 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1571,6 +1571,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
1571 1571
1572 as->use_cs_gpios = true; 1572 as->use_cs_gpios = true;
1573 if (atmel_spi_is_v2(as) && 1573 if (atmel_spi_is_v2(as) &&
1574 pdev->dev.of_node &&
1574 !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) { 1575 !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
1575 as->use_cs_gpios = false; 1576 as->use_cs_gpios = false;
1576 master->num_chipselect = 4; 1577 master->num_chipselect = 4;
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 7de6f8472a81..ecc73c0a97cf 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -73,8 +73,8 @@
73 73
74/* Bitfields in CNTL1 */ 74/* Bitfields in CNTL1 */
75#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700 75#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700
76#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000080 76#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000080
77#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000040 77#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000040
78#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002 78#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002
79#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001 79#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001
80 80
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 7fd6a4c009d2..7cb0c1921495 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -84,7 +84,7 @@ struct fsl_espi_transfer {
84/* SPCOM register values */ 84/* SPCOM register values */
85#define SPCOM_CS(x) ((x) << 30) 85#define SPCOM_CS(x) ((x) << 30)
86#define SPCOM_TRANLEN(x) ((x) << 0) 86#define SPCOM_TRANLEN(x) ((x) << 0)
87#define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */ 87#define SPCOM_TRANLEN_MAX 0x10000 /* Max transaction length */
88 88
89#define AUTOSUSPEND_TIMEOUT 2000 89#define AUTOSUSPEND_TIMEOUT 2000
90 90
@@ -233,7 +233,7 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
233 reinit_completion(&mpc8xxx_spi->done); 233 reinit_completion(&mpc8xxx_spi->done);
234 234
235 /* Set SPCOM[CS] and SPCOM[TRANLEN] field */ 235 /* Set SPCOM[CS] and SPCOM[TRANLEN] field */
236 if ((t->len - 1) > SPCOM_TRANLEN_MAX) { 236 if (t->len > SPCOM_TRANLEN_MAX) {
237 dev_err(mpc8xxx_spi->dev, "Transaction length (%d)" 237 dev_err(mpc8xxx_spi->dev, "Transaction length (%d)"
238 " beyond the SPCOM[TRANLEN] field\n", t->len); 238 " beyond the SPCOM[TRANLEN] field\n", t->len);
239 return -EINVAL; 239 return -EINVAL;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index d98c33cb64f9..6a4ff27f4357 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -929,7 +929,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
929 tx->sgl, tx->nents, DMA_MEM_TO_DEV, 929 tx->sgl, tx->nents, DMA_MEM_TO_DEV,
930 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 930 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
931 if (!desc_tx) 931 if (!desc_tx)
932 goto no_dma; 932 goto tx_nodma;
933 933
934 desc_tx->callback = spi_imx_dma_tx_callback; 934 desc_tx->callback = spi_imx_dma_tx_callback;
935 desc_tx->callback_param = (void *)spi_imx; 935 desc_tx->callback_param = (void *)spi_imx;
@@ -941,7 +941,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
941 rx->sgl, rx->nents, DMA_DEV_TO_MEM, 941 rx->sgl, rx->nents, DMA_DEV_TO_MEM,
942 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 942 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
943 if (!desc_rx) 943 if (!desc_rx)
944 goto no_dma; 944 goto rx_nodma;
945 945
946 desc_rx->callback = spi_imx_dma_rx_callback; 946 desc_rx->callback = spi_imx_dma_rx_callback;
947 desc_rx->callback_param = (void *)spi_imx; 947 desc_rx->callback_param = (void *)spi_imx;
@@ -1008,7 +1008,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
1008 1008
1009 return ret; 1009 return ret;
1010 1010
1011no_dma: 1011rx_nodma:
1012 dmaengine_terminate_all(master->dma_tx);
1013tx_nodma:
1012 pr_warn_once("%s %s: DMA not available, falling back to PIO\n", 1014 pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
1013 dev_driver_string(&master->dev), 1015 dev_driver_string(&master->dev),
1014 dev_name(&master->dev)); 1016 dev_name(&master->dev));
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 894616f687b0..cf4bb36bee25 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -761,6 +761,7 @@ static int spi_test_run_iter(struct spi_device *spi,
761 test.iterate_transfer_mask = 1; 761 test.iterate_transfer_mask = 1;
762 762
763 /* count number of transfers with tx/rx_buf != NULL */ 763 /* count number of transfers with tx/rx_buf != NULL */
764 rx_count = tx_count = 0;
764 for (i = 0; i < test.transfer_count; i++) { 765 for (i = 0; i < test.transfer_count; i++) {
765 if (test.transfers[i].tx_buf) 766 if (test.transfers[i].tx_buf)
766 tx_count++; 767 tx_count++;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7273820275e9..0caa3c8bef46 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1490,6 +1490,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
1490 return status; 1490 return status;
1491 1491
1492disable_pm: 1492disable_pm:
1493 pm_runtime_dont_use_autosuspend(&pdev->dev);
1494 pm_runtime_put_sync(&pdev->dev);
1493 pm_runtime_disable(&pdev->dev); 1495 pm_runtime_disable(&pdev->dev);
1494free_master: 1496free_master:
1495 spi_master_put(master); 1497 spi_master_put(master);
@@ -1501,6 +1503,7 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
1501 struct spi_master *master = platform_get_drvdata(pdev); 1503 struct spi_master *master = platform_get_drvdata(pdev);
1502 struct omap2_mcspi *mcspi = spi_master_get_devdata(master); 1504 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
1503 1505
1506 pm_runtime_dont_use_autosuspend(mcspi->dev);
1504 pm_runtime_put_sync(mcspi->dev); 1507 pm_runtime_put_sync(mcspi->dev);
1505 pm_runtime_disable(&pdev->dev); 1508 pm_runtime_disable(&pdev->dev);
1506 1509
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 151b7c71b868..d96f5cf38a2d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7986,6 +7986,7 @@ static void btrfs_endio_direct_read(struct bio *bio)
7986 7986
7987 kfree(dip); 7987 kfree(dip);
7988 7988
7989 dio_bio->bi_error = bio->bi_error;
7989 dio_end_io(dio_bio, bio->bi_error); 7990 dio_end_io(dio_bio, bio->bi_error);
7990 7991
7991 if (io_bio->end_io) 7992 if (io_bio->end_io)
@@ -8040,6 +8041,7 @@ static void btrfs_endio_direct_write(struct bio *bio)
8040 8041
8041 kfree(dip); 8042 kfree(dip);
8042 8043
8044 dio_bio->bi_error = bio->bi_error;
8043 dio_end_io(dio_bio, bio->bi_error); 8045 dio_end_io(dio_bio, bio->bi_error);
8044 bio_put(bio); 8046 bio_put(bio);
8045} 8047}
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 7dc886c9a78f..e956cba94338 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -175,7 +175,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
175 * string to the length of the original string to allow for worst case. 175 * string to the length of the original string to allow for worst case.
176 */ 176 */
177 md_len = strlen(sb_mountdata) + INET6_ADDRSTRLEN; 177 md_len = strlen(sb_mountdata) + INET6_ADDRSTRLEN;
178 mountdata = kzalloc(md_len + 1, GFP_KERNEL); 178 mountdata = kzalloc(md_len + sizeof("ip=") + 1, GFP_KERNEL);
179 if (mountdata == NULL) { 179 if (mountdata == NULL) {
180 rc = -ENOMEM; 180 rc = -ENOMEM;
181 goto compose_mount_options_err; 181 goto compose_mount_options_err;
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index afa09fce8151..e682b36a210f 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -714,7 +714,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
714 714
715 ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL); 715 ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
716 if (!ses->auth_key.response) { 716 if (!ses->auth_key.response) {
717 rc = ENOMEM; 717 rc = -ENOMEM;
718 ses->auth_key.len = 0; 718 ses->auth_key.len = 0;
719 goto setup_ntlmv2_rsp_ret; 719 goto setup_ntlmv2_rsp_ret;
720 } 720 }
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 4fbd92d2e113..a763cd3d9e7c 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2999,8 +2999,7 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
2999 if (ses_init_buf) { 2999 if (ses_init_buf) {
3000 ses_init_buf->trailer.session_req.called_len = 32; 3000 ses_init_buf->trailer.session_req.called_len = 32;
3001 3001
3002 if (server->server_RFC1001_name && 3002 if (server->server_RFC1001_name[0] != 0)
3003 server->server_RFC1001_name[0] != 0)
3004 rfc1002mangle(ses_init_buf->trailer. 3003 rfc1002mangle(ses_init_buf->trailer.
3005 session_req.called_name, 3004 session_req.called_name,
3006 server->server_RFC1001_name, 3005 server->server_RFC1001_name,
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1b2f7ffc8b84..d6a9012d42ad 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -472,8 +472,8 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
472 dio->io_error = -EIO; 472 dio->io_error = -EIO;
473 473
474 if (dio->is_async && dio->rw == READ && dio->should_dirty) { 474 if (dio->is_async && dio->rw == READ && dio->should_dirty) {
475 bio_check_pages_dirty(bio); /* transfers ownership */
476 err = bio->bi_error; 475 err = bio->bi_error;
476 bio_check_pages_dirty(bio); /* transfers ownership */
477 } else { 477 } else {
478 bio_for_each_segment_all(bvec, bio, i) { 478 bio_for_each_segment_all(bvec, bio, i) {
479 struct page *page = bvec->bv_page; 479 struct page *page = bvec->bv_page;
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index c424e4813ec8..d48e0d261d78 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -10,6 +10,7 @@
10#include <linux/efi.h> 10#include <linux/efi.h>
11#include <linux/fs.h> 11#include <linux/fs.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/mount.h>
13 14
14#include "internal.h" 15#include "internal.h"
15 16
@@ -103,9 +104,78 @@ out_free:
103 return size; 104 return size;
104} 105}
105 106
107static int
108efivarfs_ioc_getxflags(struct file *file, void __user *arg)
109{
110 struct inode *inode = file->f_mapping->host;
111 unsigned int i_flags;
112 unsigned int flags = 0;
113
114 i_flags = inode->i_flags;
115 if (i_flags & S_IMMUTABLE)
116 flags |= FS_IMMUTABLE_FL;
117
118 if (copy_to_user(arg, &flags, sizeof(flags)))
119 return -EFAULT;
120 return 0;
121}
122
123static int
124efivarfs_ioc_setxflags(struct file *file, void __user *arg)
125{
126 struct inode *inode = file->f_mapping->host;
127 unsigned int flags;
128 unsigned int i_flags = 0;
129 int error;
130
131 if (!inode_owner_or_capable(inode))
132 return -EACCES;
133
134 if (copy_from_user(&flags, arg, sizeof(flags)))
135 return -EFAULT;
136
137 if (flags & ~FS_IMMUTABLE_FL)
138 return -EOPNOTSUPP;
139
140 if (!capable(CAP_LINUX_IMMUTABLE))
141 return -EPERM;
142
143 if (flags & FS_IMMUTABLE_FL)
144 i_flags |= S_IMMUTABLE;
145
146
147 error = mnt_want_write_file(file);
148 if (error)
149 return error;
150
151 inode_lock(inode);
152 inode_set_flags(inode, i_flags, S_IMMUTABLE);
153 inode_unlock(inode);
154
155 mnt_drop_write_file(file);
156
157 return 0;
158}
159
160long
161efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
162{
163 void __user *arg = (void __user *)p;
164
165 switch (cmd) {
166 case FS_IOC_GETFLAGS:
167 return efivarfs_ioc_getxflags(file, arg);
168 case FS_IOC_SETFLAGS:
169 return efivarfs_ioc_setxflags(file, arg);
170 }
171
172 return -ENOTTY;
173}
174
106const struct file_operations efivarfs_file_operations = { 175const struct file_operations efivarfs_file_operations = {
107 .open = simple_open, 176 .open = simple_open,
108 .read = efivarfs_file_read, 177 .read = efivarfs_file_read,
109 .write = efivarfs_file_write, 178 .write = efivarfs_file_write,
110 .llseek = no_llseek, 179 .llseek = no_llseek,
180 .unlocked_ioctl = efivarfs_file_ioctl,
111}; 181};
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index 3381b9da9ee6..e2ab6d0497f2 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -15,7 +15,8 @@
15#include "internal.h" 15#include "internal.h"
16 16
17struct inode *efivarfs_get_inode(struct super_block *sb, 17struct inode *efivarfs_get_inode(struct super_block *sb,
18 const struct inode *dir, int mode, dev_t dev) 18 const struct inode *dir, int mode,
19 dev_t dev, bool is_removable)
19{ 20{
20 struct inode *inode = new_inode(sb); 21 struct inode *inode = new_inode(sb);
21 22
@@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
23 inode->i_ino = get_next_ino(); 24 inode->i_ino = get_next_ino();
24 inode->i_mode = mode; 25 inode->i_mode = mode;
25 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 26 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
27 inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
26 switch (mode & S_IFMT) { 28 switch (mode & S_IFMT) {
27 case S_IFREG: 29 case S_IFREG:
28 inode->i_fop = &efivarfs_file_operations; 30 inode->i_fop = &efivarfs_file_operations;
@@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
102static int efivarfs_create(struct inode *dir, struct dentry *dentry, 104static int efivarfs_create(struct inode *dir, struct dentry *dentry,
103 umode_t mode, bool excl) 105 umode_t mode, bool excl)
104{ 106{
105 struct inode *inode; 107 struct inode *inode = NULL;
106 struct efivar_entry *var; 108 struct efivar_entry *var;
107 int namelen, i = 0, err = 0; 109 int namelen, i = 0, err = 0;
110 bool is_removable = false;
108 111
109 if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len)) 112 if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
110 return -EINVAL; 113 return -EINVAL;
111 114
112 inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
113 if (!inode)
114 return -ENOMEM;
115
116 var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL); 115 var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
117 if (!var) { 116 if (!var)
118 err = -ENOMEM; 117 return -ENOMEM;
119 goto out;
120 }
121 118
122 /* length of the variable name itself: remove GUID and separator */ 119 /* length of the variable name itself: remove GUID and separator */
123 namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1; 120 namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
@@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
125 efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1, 122 efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
126 &var->var.VendorGuid); 123 &var->var.VendorGuid);
127 124
125 if (efivar_variable_is_removable(var->var.VendorGuid,
126 dentry->d_name.name, namelen))
127 is_removable = true;
128
129 inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
130 if (!inode) {
131 err = -ENOMEM;
132 goto out;
133 }
134
128 for (i = 0; i < namelen; i++) 135 for (i = 0; i < namelen; i++)
129 var->var.VariableName[i] = dentry->d_name.name[i]; 136 var->var.VariableName[i] = dentry->d_name.name[i];
130 137
@@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
138out: 145out:
139 if (err) { 146 if (err) {
140 kfree(var); 147 kfree(var);
141 iput(inode); 148 if (inode)
149 iput(inode);
142 } 150 }
143 return err; 151 return err;
144} 152}
diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
index b5ff16addb7c..b4505188e799 100644
--- a/fs/efivarfs/internal.h
+++ b/fs/efivarfs/internal.h
@@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations;
15extern const struct inode_operations efivarfs_dir_inode_operations; 15extern const struct inode_operations efivarfs_dir_inode_operations;
16extern bool efivarfs_valid_name(const char *str, int len); 16extern bool efivarfs_valid_name(const char *str, int len);
17extern struct inode *efivarfs_get_inode(struct super_block *sb, 17extern struct inode *efivarfs_get_inode(struct super_block *sb,
18 const struct inode *dir, int mode, dev_t dev); 18 const struct inode *dir, int mode, dev_t dev,
19 bool is_removable);
19 20
20extern struct list_head efivarfs_list; 21extern struct list_head efivarfs_list;
21 22
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index b8a564f29107..dd029d13ea61 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -118,8 +118,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
118 struct dentry *dentry, *root = sb->s_root; 118 struct dentry *dentry, *root = sb->s_root;
119 unsigned long size = 0; 119 unsigned long size = 0;
120 char *name; 120 char *name;
121 int len, i; 121 int len;
122 int err = -ENOMEM; 122 int err = -ENOMEM;
123 bool is_removable = false;
123 124
124 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 125 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
125 if (!entry) 126 if (!entry)
@@ -128,15 +129,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
128 memcpy(entry->var.VariableName, name16, name_size); 129 memcpy(entry->var.VariableName, name16, name_size);
129 memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); 130 memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
130 131
131 len = ucs2_strlen(entry->var.VariableName); 132 len = ucs2_utf8size(entry->var.VariableName);
132 133
133 /* name, plus '-', plus GUID, plus NUL*/ 134 /* name, plus '-', plus GUID, plus NUL*/
134 name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL); 135 name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
135 if (!name) 136 if (!name)
136 goto fail; 137 goto fail;
137 138
138 for (i = 0; i < len; i++) 139 ucs2_as_utf8(name, entry->var.VariableName, len);
139 name[i] = entry->var.VariableName[i] & 0xFF; 140
141 if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
142 is_removable = true;
140 143
141 name[len] = '-'; 144 name[len] = '-';
142 145
@@ -144,7 +147,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
144 147
145 name[len + EFI_VARIABLE_GUID_LEN+1] = '\0'; 148 name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
146 149
147 inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0); 150 inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
151 is_removable);
148 if (!inode) 152 if (!inode)
149 goto fail_name; 153 goto fail_name;
150 154
@@ -200,7 +204,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
200 sb->s_d_op = &efivarfs_d_ops; 204 sb->s_d_op = &efivarfs_d_ops;
201 sb->s_time_gran = 1; 205 sb->s_time_gran = 1;
202 206
203 inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0); 207 inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true);
204 if (!inode) 208 if (!inode)
205 return -ENOMEM; 209 return -ENOMEM;
206 inode->i_op = &efivarfs_dir_inode_operations; 210 inode->i_op = &efivarfs_dir_inode_operations;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index ec0668a60678..fe1f50fe764f 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -191,7 +191,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
191 /* If checksum is bad mark all blocks used to prevent allocation 191 /* If checksum is bad mark all blocks used to prevent allocation
192 * essentially implementing a per-group read-only flag. */ 192 * essentially implementing a per-group read-only flag. */
193 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { 193 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
194 ext4_error(sb, "Checksum bad for group %u", block_group);
195 grp = ext4_get_group_info(sb, block_group); 194 grp = ext4_get_group_info(sb, block_group);
196 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 195 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
197 percpu_counter_sub(&sbi->s_freeclusters_counter, 196 percpu_counter_sub(&sbi->s_freeclusters_counter,
@@ -442,14 +441,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
442 } 441 }
443 ext4_lock_group(sb, block_group); 442 ext4_lock_group(sb, block_group);
444 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 443 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
445
446 err = ext4_init_block_bitmap(sb, bh, block_group, desc); 444 err = ext4_init_block_bitmap(sb, bh, block_group, desc);
447 set_bitmap_uptodate(bh); 445 set_bitmap_uptodate(bh);
448 set_buffer_uptodate(bh); 446 set_buffer_uptodate(bh);
449 ext4_unlock_group(sb, block_group); 447 ext4_unlock_group(sb, block_group);
450 unlock_buffer(bh); 448 unlock_buffer(bh);
451 if (err) 449 if (err) {
450 ext4_error(sb, "Failed to init block bitmap for group "
451 "%u: %d", block_group, err);
452 goto out; 452 goto out;
453 }
453 goto verify; 454 goto verify;
454 } 455 }
455 ext4_unlock_group(sb, block_group); 456 ext4_unlock_group(sb, block_group);
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index c8021208a7eb..38f7562489bb 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -467,3 +467,59 @@ uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
467 return size; 467 return size;
468 return 0; 468 return 0;
469} 469}
470
471/*
472 * Validate dentries for encrypted directories to make sure we aren't
473 * potentially caching stale data after a key has been added or
474 * removed.
475 */
476static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
477{
478 struct inode *dir = d_inode(dentry->d_parent);
479 struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
480 int dir_has_key, cached_with_key;
481
482 if (!ext4_encrypted_inode(dir))
483 return 0;
484
485 if (ci && ci->ci_keyring_key &&
486 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
487 (1 << KEY_FLAG_REVOKED) |
488 (1 << KEY_FLAG_DEAD))))
489 ci = NULL;
490
491 /* this should eventually be an flag in d_flags */
492 cached_with_key = dentry->d_fsdata != NULL;
493 dir_has_key = (ci != NULL);
494
495 /*
496 * If the dentry was cached without the key, and it is a
497 * negative dentry, it might be a valid name. We can't check
498 * if the key has since been made available due to locking
499 * reasons, so we fail the validation so ext4_lookup() can do
500 * this check.
501 *
502 * We also fail the validation if the dentry was created with
503 * the key present, but we no longer have the key, or vice versa.
504 */
505 if ((!cached_with_key && d_is_negative(dentry)) ||
506 (!cached_with_key && dir_has_key) ||
507 (cached_with_key && !dir_has_key)) {
508#if 0 /* Revalidation debug */
509 char buf[80];
510 char *cp = simple_dname(dentry, buf, sizeof(buf));
511
512 if (IS_ERR(cp))
513 cp = (char *) "???";
514 pr_err("revalidate: %s %p %d %d %d\n", cp, dentry->d_fsdata,
515 cached_with_key, d_is_negative(dentry),
516 dir_has_key);
517#endif
518 return 0;
519 }
520 return 1;
521}
522
523const struct dentry_operations ext4_encrypted_d_ops = {
524 .d_revalidate = ext4_d_revalidate,
525};
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 1d1bca74f844..33f5e2a50cf8 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -111,6 +111,12 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
111 int dir_has_error = 0; 111 int dir_has_error = 0;
112 struct ext4_str fname_crypto_str = {.name = NULL, .len = 0}; 112 struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
113 113
114 if (ext4_encrypted_inode(inode)) {
115 err = ext4_get_encryption_info(inode);
116 if (err && err != -ENOKEY)
117 return err;
118 }
119
114 if (is_dx_dir(inode)) { 120 if (is_dx_dir(inode)) {
115 err = ext4_dx_readdir(file, ctx); 121 err = ext4_dx_readdir(file, ctx);
116 if (err != ERR_BAD_DX_DIR) { 122 if (err != ERR_BAD_DX_DIR) {
@@ -157,8 +163,11 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
157 index, 1); 163 index, 1);
158 file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; 164 file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
159 bh = ext4_bread(NULL, inode, map.m_lblk, 0); 165 bh = ext4_bread(NULL, inode, map.m_lblk, 0);
160 if (IS_ERR(bh)) 166 if (IS_ERR(bh)) {
161 return PTR_ERR(bh); 167 err = PTR_ERR(bh);
168 bh = NULL;
169 goto errout;
170 }
162 } 171 }
163 172
164 if (!bh) { 173 if (!bh) {
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0662b285dc8a..157b458a69d4 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2302,6 +2302,7 @@ struct page *ext4_encrypt(struct inode *inode,
2302int ext4_decrypt(struct page *page); 2302int ext4_decrypt(struct page *page);
2303int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk, 2303int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
2304 ext4_fsblk_t pblk, ext4_lblk_t len); 2304 ext4_fsblk_t pblk, ext4_lblk_t len);
2305extern const struct dentry_operations ext4_encrypted_d_ops;
2305 2306
2306#ifdef CONFIG_EXT4_FS_ENCRYPTION 2307#ifdef CONFIG_EXT4_FS_ENCRYPTION
2307int ext4_init_crypto(void); 2308int ext4_init_crypto(void);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0ffabaf90aa5..3753ceb0b0dd 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3928,7 +3928,7 @@ static int
3928convert_initialized_extent(handle_t *handle, struct inode *inode, 3928convert_initialized_extent(handle_t *handle, struct inode *inode,
3929 struct ext4_map_blocks *map, 3929 struct ext4_map_blocks *map,
3930 struct ext4_ext_path **ppath, int flags, 3930 struct ext4_ext_path **ppath, int flags,
3931 unsigned int allocated, ext4_fsblk_t newblock) 3931 unsigned int allocated)
3932{ 3932{
3933 struct ext4_ext_path *path = *ppath; 3933 struct ext4_ext_path *path = *ppath;
3934 struct ext4_extent *ex; 3934 struct ext4_extent *ex;
@@ -4347,7 +4347,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4347 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { 4347 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
4348 allocated = convert_initialized_extent( 4348 allocated = convert_initialized_extent(
4349 handle, inode, map, &path, 4349 handle, inode, map, &path,
4350 flags, allocated, newblock); 4350 flags, allocated);
4351 goto out2; 4351 goto out2;
4352 } else if (!ext4_ext_is_unwritten(ex)) 4352 } else if (!ext4_ext_is_unwritten(ex))
4353 goto out; 4353 goto out;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 1126436dada1..474f1a4d2ca8 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -350,6 +350,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
350 struct super_block *sb = inode->i_sb; 350 struct super_block *sb = inode->i_sb;
351 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 351 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
352 struct vfsmount *mnt = filp->f_path.mnt; 352 struct vfsmount *mnt = filp->f_path.mnt;
353 struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
353 struct path path; 354 struct path path;
354 char buf[64], *cp; 355 char buf[64], *cp;
355 int ret; 356 int ret;
@@ -393,6 +394,14 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
393 if (ext4_encryption_info(inode) == NULL) 394 if (ext4_encryption_info(inode) == NULL)
394 return -ENOKEY; 395 return -ENOKEY;
395 } 396 }
397 if (ext4_encrypted_inode(dir) &&
398 !ext4_is_child_context_consistent_with_parent(dir, inode)) {
399 ext4_warning(inode->i_sb,
400 "Inconsistent encryption contexts: %lu/%lu\n",
401 (unsigned long) dir->i_ino,
402 (unsigned long) inode->i_ino);
403 return -EPERM;
404 }
396 /* 405 /*
397 * Set up the jbd2_inode if we are opening the inode for 406 * Set up the jbd2_inode if we are opening the inode for
398 * writing and the journal is present 407 * writing and the journal is present
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 3fcfd50a2e8a..acc0ad56bf2f 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -76,7 +76,6 @@ static int ext4_init_inode_bitmap(struct super_block *sb,
76 /* If checksum is bad mark all blocks and inodes use to prevent 76 /* If checksum is bad mark all blocks and inodes use to prevent
77 * allocation, essentially implementing a per-group read-only flag. */ 77 * allocation, essentially implementing a per-group read-only flag. */
78 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { 78 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
79 ext4_error(sb, "Checksum bad for group %u", block_group);
80 grp = ext4_get_group_info(sb, block_group); 79 grp = ext4_get_group_info(sb, block_group);
81 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 80 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
82 percpu_counter_sub(&sbi->s_freeclusters_counter, 81 percpu_counter_sub(&sbi->s_freeclusters_counter,
@@ -191,8 +190,11 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
191 set_buffer_verified(bh); 190 set_buffer_verified(bh);
192 ext4_unlock_group(sb, block_group); 191 ext4_unlock_group(sb, block_group);
193 unlock_buffer(bh); 192 unlock_buffer(bh);
194 if (err) 193 if (err) {
194 ext4_error(sb, "Failed to init inode bitmap for group "
195 "%u: %d", block_group, err);
195 goto out; 196 goto out;
197 }
196 return bh; 198 return bh;
197 } 199 }
198 ext4_unlock_group(sb, block_group); 200 ext4_unlock_group(sb, block_group);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 83bc8bfb3bea..9cc57c3b4661 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -686,6 +686,34 @@ out_sem:
686 return retval; 686 return retval;
687} 687}
688 688
689/*
690 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
691 * we have to be careful as someone else may be manipulating b_state as well.
692 */
693static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
694{
695 unsigned long old_state;
696 unsigned long new_state;
697
698 flags &= EXT4_MAP_FLAGS;
699
700 /* Dummy buffer_head? Set non-atomically. */
701 if (!bh->b_page) {
702 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
703 return;
704 }
705 /*
706 * Someone else may be modifying b_state. Be careful! This is ugly but
707 * once we get rid of using bh as a container for mapping information
708 * to pass to / from get_block functions, this can go away.
709 */
710 do {
711 old_state = READ_ONCE(bh->b_state);
712 new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
713 } while (unlikely(
714 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
715}
716
689/* Maximum number of blocks we map for direct IO at once. */ 717/* Maximum number of blocks we map for direct IO at once. */
690#define DIO_MAX_BLOCKS 4096 718#define DIO_MAX_BLOCKS 4096
691 719
@@ -722,7 +750,7 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
722 ext4_io_end_t *io_end = ext4_inode_aio(inode); 750 ext4_io_end_t *io_end = ext4_inode_aio(inode);
723 751
724 map_bh(bh, inode->i_sb, map.m_pblk); 752 map_bh(bh, inode->i_sb, map.m_pblk);
725 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; 753 ext4_update_bh_state(bh, map.m_flags);
726 if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN) 754 if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
727 set_buffer_defer_completion(bh); 755 set_buffer_defer_completion(bh);
728 bh->b_size = inode->i_sb->s_blocksize * map.m_len; 756 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
@@ -1685,7 +1713,7 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1685 return ret; 1713 return ret;
1686 1714
1687 map_bh(bh, inode->i_sb, map.m_pblk); 1715 map_bh(bh, inode->i_sb, map.m_pblk);
1688 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; 1716 ext4_update_bh_state(bh, map.m_flags);
1689 1717
1690 if (buffer_unwritten(bh)) { 1718 if (buffer_unwritten(bh)) {
1691 /* A delayed write to unwritten bh should be marked 1719 /* A delayed write to unwritten bh should be marked
@@ -3253,29 +3281,29 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
3253 * case, we allocate an io_end structure to hook to the iocb. 3281 * case, we allocate an io_end structure to hook to the iocb.
3254 */ 3282 */
3255 iocb->private = NULL; 3283 iocb->private = NULL;
3256 ext4_inode_aio_set(inode, NULL);
3257 if (!is_sync_kiocb(iocb)) {
3258 io_end = ext4_init_io_end(inode, GFP_NOFS);
3259 if (!io_end) {
3260 ret = -ENOMEM;
3261 goto retake_lock;
3262 }
3263 /*
3264 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
3265 */
3266 iocb->private = ext4_get_io_end(io_end);
3267 /*
3268 * we save the io structure for current async direct
3269 * IO, so that later ext4_map_blocks() could flag the
3270 * io structure whether there is a unwritten extents
3271 * needs to be converted when IO is completed.
3272 */
3273 ext4_inode_aio_set(inode, io_end);
3274 }
3275
3276 if (overwrite) { 3284 if (overwrite) {
3277 get_block_func = ext4_get_block_overwrite; 3285 get_block_func = ext4_get_block_overwrite;
3278 } else { 3286 } else {
3287 ext4_inode_aio_set(inode, NULL);
3288 if (!is_sync_kiocb(iocb)) {
3289 io_end = ext4_init_io_end(inode, GFP_NOFS);
3290 if (!io_end) {
3291 ret = -ENOMEM;
3292 goto retake_lock;
3293 }
3294 /*
3295 * Grab reference for DIO. Will be dropped in
3296 * ext4_end_io_dio()
3297 */
3298 iocb->private = ext4_get_io_end(io_end);
3299 /*
3300 * we save the io structure for current async direct
3301 * IO, so that later ext4_map_blocks() could flag the
3302 * io structure whether there is a unwritten extents
3303 * needs to be converted when IO is completed.
3304 */
3305 ext4_inode_aio_set(inode, io_end);
3306 }
3279 get_block_func = ext4_get_block_write; 3307 get_block_func = ext4_get_block_write;
3280 dio_flags = DIO_LOCKING; 3308 dio_flags = DIO_LOCKING;
3281 } 3309 }
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 0f6c36922c24..a99b010e2194 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -208,7 +208,7 @@ static int ext4_ioctl_setflags(struct inode *inode,
208{ 208{
209 struct ext4_inode_info *ei = EXT4_I(inode); 209 struct ext4_inode_info *ei = EXT4_I(inode);
210 handle_t *handle = NULL; 210 handle_t *handle = NULL;
211 int err = EPERM, migrate = 0; 211 int err = -EPERM, migrate = 0;
212 struct ext4_iloc iloc; 212 struct ext4_iloc iloc;
213 unsigned int oldflags, mask, i; 213 unsigned int oldflags, mask, i;
214 unsigned int jflag; 214 unsigned int jflag;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 61eaf74dca37..4424b7bf8ac6 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2285,7 +2285,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2285 if (group == 0) 2285 if (group == 0)
2286 seq_puts(seq, "#group: free frags first [" 2286 seq_puts(seq, "#group: free frags first ["
2287 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 2287 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
2288 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]"); 2288 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
2289 2289
2290 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2290 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2291 sizeof(struct ext4_group_info); 2291 sizeof(struct ext4_group_info);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index fb6f11709ae6..e032a0423e35 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -265,11 +265,12 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
265 ext4_lblk_t orig_blk_offset, donor_blk_offset; 265 ext4_lblk_t orig_blk_offset, donor_blk_offset;
266 unsigned long blocksize = orig_inode->i_sb->s_blocksize; 266 unsigned long blocksize = orig_inode->i_sb->s_blocksize;
267 unsigned int tmp_data_size, data_size, replaced_size; 267 unsigned int tmp_data_size, data_size, replaced_size;
268 int err2, jblocks, retries = 0; 268 int i, err2, jblocks, retries = 0;
269 int replaced_count = 0; 269 int replaced_count = 0;
270 int from = data_offset_in_page << orig_inode->i_blkbits; 270 int from = data_offset_in_page << orig_inode->i_blkbits;
271 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; 271 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
272 struct super_block *sb = orig_inode->i_sb; 272 struct super_block *sb = orig_inode->i_sb;
273 struct buffer_head *bh = NULL;
273 274
274 /* 275 /*
275 * It needs twice the amount of ordinary journal buffers because 276 * It needs twice the amount of ordinary journal buffers because
@@ -380,8 +381,16 @@ data_copy:
380 } 381 }
381 /* Perform all necessary steps similar write_begin()/write_end() 382 /* Perform all necessary steps similar write_begin()/write_end()
382 * but keeping in mind that i_size will not change */ 383 * but keeping in mind that i_size will not change */
383 *err = __block_write_begin(pagep[0], from, replaced_size, 384 if (!page_has_buffers(pagep[0]))
384 ext4_get_block); 385 create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
386 bh = page_buffers(pagep[0]);
387 for (i = 0; i < data_offset_in_page; i++)
388 bh = bh->b_this_page;
389 for (i = 0; i < block_len_in_page; i++) {
390 *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
391 if (*err < 0)
392 break;
393 }
385 if (!*err) 394 if (!*err)
386 *err = block_commit_write(pagep[0], from, from + replaced_size); 395 *err = block_commit_write(pagep[0], from, from + replaced_size);
387 396
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 06574dd77614..48e4b8907826 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1558,6 +1558,24 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
1558 struct ext4_dir_entry_2 *de; 1558 struct ext4_dir_entry_2 *de;
1559 struct buffer_head *bh; 1559 struct buffer_head *bh;
1560 1560
1561 if (ext4_encrypted_inode(dir)) {
1562 int res = ext4_get_encryption_info(dir);
1563
1564 /*
1565 * This should be a properly defined flag for
1566 * dentry->d_flags when we uplift this to the VFS.
1567 * d_fsdata is set to (void *) 1 if if the dentry is
1568 * created while the directory was encrypted and we
1569 * don't have access to the key.
1570 */
1571 dentry->d_fsdata = NULL;
1572 if (ext4_encryption_info(dir))
1573 dentry->d_fsdata = (void *) 1;
1574 d_set_d_op(dentry, &ext4_encrypted_d_ops);
1575 if (res && res != -ENOKEY)
1576 return ERR_PTR(res);
1577 }
1578
1561 if (dentry->d_name.len > EXT4_NAME_LEN) 1579 if (dentry->d_name.len > EXT4_NAME_LEN)
1562 return ERR_PTR(-ENAMETOOLONG); 1580 return ERR_PTR(-ENAMETOOLONG);
1563 1581
@@ -1585,11 +1603,15 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
1585 return ERR_PTR(-EFSCORRUPTED); 1603 return ERR_PTR(-EFSCORRUPTED);
1586 } 1604 }
1587 if (!IS_ERR(inode) && ext4_encrypted_inode(dir) && 1605 if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
1588 (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1606 (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
1589 S_ISLNK(inode->i_mode)) &&
1590 !ext4_is_child_context_consistent_with_parent(dir, 1607 !ext4_is_child_context_consistent_with_parent(dir,
1591 inode)) { 1608 inode)) {
1609 int nokey = ext4_encrypted_inode(inode) &&
1610 !ext4_encryption_info(inode);
1611
1592 iput(inode); 1612 iput(inode);
1613 if (nokey)
1614 return ERR_PTR(-ENOKEY);
1593 ext4_warning(inode->i_sb, 1615 ext4_warning(inode->i_sb,
1594 "Inconsistent encryption contexts: %lu/%lu\n", 1616 "Inconsistent encryption contexts: %lu/%lu\n",
1595 (unsigned long) dir->i_ino, 1617 (unsigned long) dir->i_ino,
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index ad62d7acc315..34038e3598d5 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -198,7 +198,7 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
198 if (flex_gd == NULL) 198 if (flex_gd == NULL)
199 goto out3; 199 goto out3;
200 200
201 if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data)) 201 if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
202 goto out2; 202 goto out2;
203 flex_gd->count = flexbg_size; 203 flex_gd->count = flexbg_size;
204 204
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 6915c950e6e8..1f76d8950a57 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -317,6 +317,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
317 struct inode_switch_wbs_context *isw = 317 struct inode_switch_wbs_context *isw =
318 container_of(work, struct inode_switch_wbs_context, work); 318 container_of(work, struct inode_switch_wbs_context, work);
319 struct inode *inode = isw->inode; 319 struct inode *inode = isw->inode;
320 struct super_block *sb = inode->i_sb;
320 struct address_space *mapping = inode->i_mapping; 321 struct address_space *mapping = inode->i_mapping;
321 struct bdi_writeback *old_wb = inode->i_wb; 322 struct bdi_writeback *old_wb = inode->i_wb;
322 struct bdi_writeback *new_wb = isw->new_wb; 323 struct bdi_writeback *new_wb = isw->new_wb;
@@ -423,6 +424,7 @@ skip_switch:
423 wb_put(new_wb); 424 wb_put(new_wb);
424 425
425 iput(inode); 426 iput(inode);
427 deactivate_super(sb);
426 kfree(isw); 428 kfree(isw);
427} 429}
428 430
@@ -469,11 +471,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
469 471
470 /* while holding I_WB_SWITCH, no one else can update the association */ 472 /* while holding I_WB_SWITCH, no one else can update the association */
471 spin_lock(&inode->i_lock); 473 spin_lock(&inode->i_lock);
474
472 if (inode->i_state & (I_WB_SWITCH | I_FREEING) || 475 if (inode->i_state & (I_WB_SWITCH | I_FREEING) ||
473 inode_to_wb(inode) == isw->new_wb) { 476 inode_to_wb(inode) == isw->new_wb)
474 spin_unlock(&inode->i_lock); 477 goto out_unlock;
475 goto out_free; 478
476 } 479 if (!atomic_inc_not_zero(&inode->i_sb->s_active))
480 goto out_unlock;
481
477 inode->i_state |= I_WB_SWITCH; 482 inode->i_state |= I_WB_SWITCH;
478 spin_unlock(&inode->i_lock); 483 spin_unlock(&inode->i_lock);
479 484
@@ -489,6 +494,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
489 call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn); 494 call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
490 return; 495 return;
491 496
497out_unlock:
498 spin_unlock(&inode->i_lock);
492out_free: 499out_free:
493 if (isw->new_wb) 500 if (isw->new_wb)
494 wb_put(isw->new_wb); 501 wb_put(isw->new_wb);
diff --git a/fs/inode.c b/fs/inode.c
index 9f62db3bcc3e..69b8b526c194 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -154,6 +154,12 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
154 inode->i_rdev = 0; 154 inode->i_rdev = 0;
155 inode->dirtied_when = 0; 155 inode->dirtied_when = 0;
156 156
157#ifdef CONFIG_CGROUP_WRITEBACK
158 inode->i_wb_frn_winner = 0;
159 inode->i_wb_frn_avg_time = 0;
160 inode->i_wb_frn_history = 0;
161#endif
162
157 if (security_inode_alloc(inode)) 163 if (security_inode_alloc(inode))
158 goto out; 164 goto out;
159 spin_lock_init(&inode->i_lock); 165 spin_lock_init(&inode->i_lock);
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index cfcbf114676e..7115c5d7d373 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -91,7 +91,14 @@
91#include <linux/fsnotify_backend.h> 91#include <linux/fsnotify_backend.h>
92#include "fsnotify.h" 92#include "fsnotify.h"
93 93
94#define FSNOTIFY_REAPER_DELAY (1) /* 1 jiffy */
95
94struct srcu_struct fsnotify_mark_srcu; 96struct srcu_struct fsnotify_mark_srcu;
97static DEFINE_SPINLOCK(destroy_lock);
98static LIST_HEAD(destroy_list);
99
100static void fsnotify_mark_destroy(struct work_struct *work);
101static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy);
95 102
96void fsnotify_get_mark(struct fsnotify_mark *mark) 103void fsnotify_get_mark(struct fsnotify_mark *mark)
97{ 104{
@@ -165,19 +172,10 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
165 atomic_dec(&group->num_marks); 172 atomic_dec(&group->num_marks);
166} 173}
167 174
168static void
169fsnotify_mark_free_rcu(struct rcu_head *rcu)
170{
171 struct fsnotify_mark *mark;
172
173 mark = container_of(rcu, struct fsnotify_mark, g_rcu);
174 fsnotify_put_mark(mark);
175}
176
177/* 175/*
178 * Free fsnotify mark. The freeing is actually happening from a call_srcu 176 * Free fsnotify mark. The freeing is actually happening from a kthread which
179 * callback. Caller must have a reference to the mark or be protected by 177 * first waits for srcu period end. Caller must have a reference to the mark
180 * fsnotify_mark_srcu. 178 * or be protected by fsnotify_mark_srcu.
181 */ 179 */
182void fsnotify_free_mark(struct fsnotify_mark *mark) 180void fsnotify_free_mark(struct fsnotify_mark *mark)
183{ 181{
@@ -192,7 +190,11 @@ void fsnotify_free_mark(struct fsnotify_mark *mark)
192 mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE; 190 mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
193 spin_unlock(&mark->lock); 191 spin_unlock(&mark->lock);
194 192
195 call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu); 193 spin_lock(&destroy_lock);
194 list_add(&mark->g_list, &destroy_list);
195 spin_unlock(&destroy_lock);
196 queue_delayed_work(system_unbound_wq, &reaper_work,
197 FSNOTIFY_REAPER_DELAY);
196 198
197 /* 199 /*
198 * Some groups like to know that marks are being freed. This is a 200 * Some groups like to know that marks are being freed. This is a
@@ -388,7 +390,12 @@ err:
388 390
389 spin_unlock(&mark->lock); 391 spin_unlock(&mark->lock);
390 392
391 call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu); 393 spin_lock(&destroy_lock);
394 list_add(&mark->g_list, &destroy_list);
395 spin_unlock(&destroy_lock);
396 queue_delayed_work(system_unbound_wq, &reaper_work,
397 FSNOTIFY_REAPER_DELAY);
398
392 return ret; 399 return ret;
393} 400}
394 401
@@ -491,3 +498,21 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
491 atomic_set(&mark->refcnt, 1); 498 atomic_set(&mark->refcnt, 1);
492 mark->free_mark = free_mark; 499 mark->free_mark = free_mark;
493} 500}
501
502static void fsnotify_mark_destroy(struct work_struct *work)
503{
504 struct fsnotify_mark *mark, *next;
505 struct list_head private_destroy_list;
506
507 spin_lock(&destroy_lock);
508 /* exchange the list head */
509 list_replace_init(&destroy_list, &private_destroy_list);
510 spin_unlock(&destroy_lock);
511
512 synchronize_srcu(&fsnotify_mark_srcu);
513
514 list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
515 list_del_init(&mark->g_list);
516 fsnotify_put_mark(mark);
517 }
518}
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 0b3c0d39ef75..c370b261c720 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -239,6 +239,14 @@ extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
239 pmd_t *pmdp); 239 pmd_t *pmdp);
240#endif 240#endif
241 241
242#ifndef __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
243static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
244 unsigned long address, pmd_t *pmdp)
245{
246
247}
248#endif
249
242#ifndef __HAVE_ARCH_PTE_SAME 250#ifndef __HAVE_ARCH_PTE_SAME
243static inline int pte_same(pte_t pte_a, pte_t pte_b) 251static inline int pte_same(pte_t pte_a, pte_t pte_b)
244{ 252{
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index c65a212db77e..c5b4b81a831b 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -1166,6 +1166,7 @@ struct drm_connector {
1166 struct drm_mode_object base; 1166 struct drm_mode_object base;
1167 1167
1168 char *name; 1168 char *name;
1169 int connector_id;
1169 int connector_type; 1170 int connector_type;
1170 int connector_type_id; 1171 int connector_type_id;
1171 bool interlace_allowed; 1172 bool interlace_allowed;
@@ -2047,6 +2048,7 @@ struct drm_mode_config {
2047 struct list_head fb_list; 2048 struct list_head fb_list;
2048 2049
2049 int num_connector; 2050 int num_connector;
2051 struct ida connector_ida;
2050 struct list_head connector_list; 2052 struct list_head connector_list;
2051 int num_encoder; 2053 int num_encoder;
2052 struct list_head encoder_list; 2054 struct list_head encoder_list;
@@ -2200,7 +2202,11 @@ int drm_connector_register(struct drm_connector *connector);
2200void drm_connector_unregister(struct drm_connector *connector); 2202void drm_connector_unregister(struct drm_connector *connector);
2201 2203
2202extern void drm_connector_cleanup(struct drm_connector *connector); 2204extern void drm_connector_cleanup(struct drm_connector *connector);
2203extern unsigned int drm_connector_index(struct drm_connector *connector); 2205static inline unsigned drm_connector_index(struct drm_connector *connector)
2206{
2207 return connector->connector_id;
2208}
2209
2204/* helper to unplug all connectors from sysfs for device */ 2210/* helper to unplug all connectors from sysfs for device */
2205extern void drm_connector_unplug_all(struct drm_device *dev); 2211extern void drm_connector_unplug_all(struct drm_device *dev);
2206 2212
diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h
index 6f45aea49e4f..0a05b0d36ae7 100644
--- a/include/dt-bindings/clock/tegra210-car.h
+++ b/include/dt-bindings/clock/tegra210-car.h
@@ -126,7 +126,7 @@
126/* 104 */ 126/* 104 */
127/* 105 */ 127/* 105 */
128#define TEGRA210_CLK_D_AUDIO 106 128#define TEGRA210_CLK_D_AUDIO 106
129/* 107 ( affects abp -> ape) */ 129#define TEGRA210_CLK_APB2APE 107
130/* 108 */ 130/* 108 */
131/* 109 */ 131/* 109 */
132/* 110 */ 132/* 110 */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 00b042c49ccd..48f5aab117ae 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -144,7 +144,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
144 */ 144 */
145#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) 145#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
146#define __trace_if(cond) \ 146#define __trace_if(cond) \
147 if (__builtin_constant_p((cond)) ? !!(cond) : \ 147 if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
148 ({ \ 148 ({ \
149 int ______r; \ 149 int ______r; \
150 static struct ftrace_branch_data \ 150 static struct ftrace_branch_data \
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 569b5a866bb1..47be3ad7d3e5 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1199,7 +1199,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
1199struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, 1199struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
1200 struct list_head *head, bool remove); 1200 struct list_head *head, bool remove);
1201 1201
1202bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len); 1202bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
1203 unsigned long data_size);
1204bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
1205 size_t len);
1203 1206
1204extern struct work_struct efivar_work; 1207extern struct work_struct efivar_work;
1205void efivar_run_worker(void); 1208void efivar_run_worker(void);
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 6b7e89f45aa4..533c4408529a 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -220,10 +220,7 @@ struct fsnotify_mark {
220 /* List of marks by group->i_fsnotify_marks. Also reused for queueing 220 /* List of marks by group->i_fsnotify_marks. Also reused for queueing
221 * mark into destroy_list when it's waiting for the end of SRCU period 221 * mark into destroy_list when it's waiting for the end of SRCU period
222 * before it can be freed. [group->mark_mutex] */ 222 * before it can be freed. [group->mark_mutex] */
223 union { 223 struct list_head g_list;
224 struct list_head g_list;
225 struct rcu_head g_rcu;
226 };
227 /* Protects inode / mnt pointers, flags, masks */ 224 /* Protects inode / mnt pointers, flags, masks */
228 spinlock_t lock; 225 spinlock_t lock;
229 /* List of marks for inode / vfsmount [obj_lock] */ 226 /* List of marks for inode / vfsmount [obj_lock] */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 81de7123959d..c2b340e23f62 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -603,6 +603,7 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size);
603 603
604extern int skip_trace(unsigned long ip); 604extern int skip_trace(unsigned long ip);
605extern void ftrace_module_init(struct module *mod); 605extern void ftrace_module_init(struct module *mod);
606extern void ftrace_module_enable(struct module *mod);
606extern void ftrace_release_mod(struct module *mod); 607extern void ftrace_release_mod(struct module *mod);
607 608
608extern void ftrace_disable_daemon(void); 609extern void ftrace_disable_daemon(void);
@@ -612,8 +613,9 @@ static inline int skip_trace(unsigned long ip) { return 0; }
612static inline int ftrace_force_update(void) { return 0; } 613static inline int ftrace_force_update(void) { return 0; }
613static inline void ftrace_disable_daemon(void) { } 614static inline void ftrace_disable_daemon(void) { }
614static inline void ftrace_enable_daemon(void) { } 615static inline void ftrace_enable_daemon(void) { }
615static inline void ftrace_release_mod(struct module *mod) {} 616static inline void ftrace_module_init(struct module *mod) { }
616static inline void ftrace_module_init(struct module *mod) {} 617static inline void ftrace_module_enable(struct module *mod) { }
618static inline void ftrace_release_mod(struct module *mod) { }
617static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) 619static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
618{ 620{
619 return -EINVAL; 621 return -EINVAL;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 821273ca4873..2d9b650047a5 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -235,6 +235,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
235/* low 64 bit */ 235/* low 64 bit */
236#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT)) 236#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
237 237
238/* PRS_REG */
239#define DMA_PRS_PPR ((u32)1)
240
238#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ 241#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
239do { \ 242do { \
240 cycles_t start_time = get_cycles(); \ 243 cycles_t start_time = get_cycles(); \
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index d6750111e48e..2190419bdf0a 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -135,6 +135,10 @@ enum {
135 /* Memory types */ 135 /* Memory types */
136 NVM_ID_FMTYPE_SLC = 0, 136 NVM_ID_FMTYPE_SLC = 0,
137 NVM_ID_FMTYPE_MLC = 1, 137 NVM_ID_FMTYPE_MLC = 1,
138
139 /* Device capabilities */
140 NVM_ID_DCAP_BBLKMGMT = 0x1,
141 NVM_UD_DCAP_ECC = 0x2,
138}; 142};
139 143
140struct nvm_id_lp_mlc { 144struct nvm_id_lp_mlc {
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index acd522a91539..acfdbf353a0b 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -14,8 +14,10 @@
14 * See the file COPYING for more details. 14 * See the file COPYING for more details.
15 */ 15 */
16 16
17#include <linux/smp.h>
17#include <linux/errno.h> 18#include <linux/errno.h>
18#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/cpumask.h>
19#include <linux/rcupdate.h> 21#include <linux/rcupdate.h>
20#include <linux/tracepoint-defs.h> 22#include <linux/tracepoint-defs.h>
21 23
@@ -132,6 +134,9 @@ extern void syscall_unregfunc(void);
132 void *it_func; \ 134 void *it_func; \
133 void *__data; \ 135 void *__data; \
134 \ 136 \
137 if (!cpu_online(raw_smp_processor_id())) \
138 return; \
139 \
135 if (!(cond)) \ 140 if (!(cond)) \
136 return; \ 141 return; \
137 prercu; \ 142 prercu; \
diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
index cbb20afdbc01..bb679b48f408 100644
--- a/include/linux/ucs2_string.h
+++ b/include/linux/ucs2_string.h
@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s);
11unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength); 11unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
12int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len); 12int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
13 13
14unsigned long ucs2_utf8size(const ucs2_char_t *src);
15unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
16 unsigned long maxlength);
17
14#endif /* _LINUX_UCS2_STRING_H_ */ 18#endif /* _LINUX_UCS2_STRING_H_ */
diff --git a/ipc/shm.c b/ipc/shm.c
index ed3027d0f277..331fc1b0b3c7 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -156,11 +156,12 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
156 struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id); 156 struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
157 157
158 /* 158 /*
159 * We raced in the idr lookup or with shm_destroy(). Either way, the 159 * Callers of shm_lock() must validate the status of the returned ipc
160 * ID is busted. 160 * object pointer (as returned by ipc_lock()), and error out as
161 * appropriate.
161 */ 162 */
162 WARN_ON(IS_ERR(ipcp)); 163 if (IS_ERR(ipcp))
163 164 return (void *)ipcp;
164 return container_of(ipcp, struct shmid_kernel, shm_perm); 165 return container_of(ipcp, struct shmid_kernel, shm_perm);
165} 166}
166 167
@@ -186,18 +187,33 @@ static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
186} 187}
187 188
188 189
189/* This is called by fork, once for every shm attach. */ 190static int __shm_open(struct vm_area_struct *vma)
190static void shm_open(struct vm_area_struct *vma)
191{ 191{
192 struct file *file = vma->vm_file; 192 struct file *file = vma->vm_file;
193 struct shm_file_data *sfd = shm_file_data(file); 193 struct shm_file_data *sfd = shm_file_data(file);
194 struct shmid_kernel *shp; 194 struct shmid_kernel *shp;
195 195
196 shp = shm_lock(sfd->ns, sfd->id); 196 shp = shm_lock(sfd->ns, sfd->id);
197
198 if (IS_ERR(shp))
199 return PTR_ERR(shp);
200
197 shp->shm_atim = get_seconds(); 201 shp->shm_atim = get_seconds();
198 shp->shm_lprid = task_tgid_vnr(current); 202 shp->shm_lprid = task_tgid_vnr(current);
199 shp->shm_nattch++; 203 shp->shm_nattch++;
200 shm_unlock(shp); 204 shm_unlock(shp);
205 return 0;
206}
207
208/* This is called by fork, once for every shm attach. */
209static void shm_open(struct vm_area_struct *vma)
210{
211 int err = __shm_open(vma);
212 /*
213 * We raced in the idr lookup or with shm_destroy().
214 * Either way, the ID is busted.
215 */
216 WARN_ON_ONCE(err);
201} 217}
202 218
203/* 219/*
@@ -260,6 +276,14 @@ static void shm_close(struct vm_area_struct *vma)
260 down_write(&shm_ids(ns).rwsem); 276 down_write(&shm_ids(ns).rwsem);
261 /* remove from the list of attaches of the shm segment */ 277 /* remove from the list of attaches of the shm segment */
262 shp = shm_lock(ns, sfd->id); 278 shp = shm_lock(ns, sfd->id);
279
280 /*
281 * We raced in the idr lookup or with shm_destroy().
282 * Either way, the ID is busted.
283 */
284 if (WARN_ON_ONCE(IS_ERR(shp)))
285 goto done; /* no-op */
286
263 shp->shm_lprid = task_tgid_vnr(current); 287 shp->shm_lprid = task_tgid_vnr(current);
264 shp->shm_dtim = get_seconds(); 288 shp->shm_dtim = get_seconds();
265 shp->shm_nattch--; 289 shp->shm_nattch--;
@@ -267,6 +291,7 @@ static void shm_close(struct vm_area_struct *vma)
267 shm_destroy(ns, shp); 291 shm_destroy(ns, shp);
268 else 292 else
269 shm_unlock(shp); 293 shm_unlock(shp);
294done:
270 up_write(&shm_ids(ns).rwsem); 295 up_write(&shm_ids(ns).rwsem);
271} 296}
272 297
@@ -388,17 +413,25 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
388 struct shm_file_data *sfd = shm_file_data(file); 413 struct shm_file_data *sfd = shm_file_data(file);
389 int ret; 414 int ret;
390 415
416 /*
417 * In case of remap_file_pages() emulation, the file can represent
418 * removed IPC ID: propogate shm_lock() error to caller.
419 */
420 ret =__shm_open(vma);
421 if (ret)
422 return ret;
423
391 ret = sfd->file->f_op->mmap(sfd->file, vma); 424 ret = sfd->file->f_op->mmap(sfd->file, vma);
392 if (ret != 0) 425 if (ret) {
426 shm_close(vma);
393 return ret; 427 return ret;
428 }
394 sfd->vm_ops = vma->vm_ops; 429 sfd->vm_ops = vma->vm_ops;
395#ifdef CONFIG_MMU 430#ifdef CONFIG_MMU
396 WARN_ON(!sfd->vm_ops->fault); 431 WARN_ON(!sfd->vm_ops->fault);
397#endif 432#endif
398 vma->vm_ops = &shm_vm_ops; 433 vma->vm_ops = &shm_vm_ops;
399 shm_open(vma); 434 return 0;
400
401 return ret;
402} 435}
403 436
404static int shm_release(struct inode *ino, struct file *file) 437static int shm_release(struct inode *ino, struct file *file)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5946460b2425..0d58522103cd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9206,7 +9206,7 @@ static void perf_event_init_cpu(int cpu)
9206 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 9206 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
9207 9207
9208 mutex_lock(&swhash->hlist_mutex); 9208 mutex_lock(&swhash->hlist_mutex);
9209 if (swhash->hlist_refcount > 0) { 9209 if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
9210 struct swevent_hlist *hlist; 9210 struct swevent_hlist *hlist;
9211 9211
9212 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu)); 9212 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
@@ -9282,11 +9282,9 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
9282 switch (action & ~CPU_TASKS_FROZEN) { 9282 switch (action & ~CPU_TASKS_FROZEN) {
9283 9283
9284 case CPU_UP_PREPARE: 9284 case CPU_UP_PREPARE:
9285 case CPU_DOWN_FAILED:
9286 perf_event_init_cpu(cpu); 9285 perf_event_init_cpu(cpu);
9287 break; 9286 break;
9288 9287
9289 case CPU_UP_CANCELED:
9290 case CPU_DOWN_PREPARE: 9288 case CPU_DOWN_PREPARE:
9291 perf_event_exit_cpu(cpu); 9289 perf_event_exit_cpu(cpu);
9292 break; 9290 break;
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 2c468dea60bc..7a1b5c3ef14e 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(memunmap);
114 114
115static void devm_memremap_release(struct device *dev, void *res) 115static void devm_memremap_release(struct device *dev, void *res)
116{ 116{
117 memunmap(res); 117 memunmap(*(void **)res);
118} 118}
119 119
120static int devm_memremap_match(struct device *dev, void *res, void *match_data) 120static int devm_memremap_match(struct device *dev, void *res, void *match_data)
diff --git a/kernel/module.c b/kernel/module.c
index 9537da37ce87..794ebe8e878d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -984,6 +984,8 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
984 mod->exit(); 984 mod->exit();
985 blocking_notifier_call_chain(&module_notify_list, 985 blocking_notifier_call_chain(&module_notify_list,
986 MODULE_STATE_GOING, mod); 986 MODULE_STATE_GOING, mod);
987 ftrace_release_mod(mod);
988
987 async_synchronize_full(); 989 async_synchronize_full();
988 990
989 /* Store the name of the last unloaded module for diagnostic purposes */ 991 /* Store the name of the last unloaded module for diagnostic purposes */
@@ -3313,6 +3315,7 @@ fail:
3313 module_put(mod); 3315 module_put(mod);
3314 blocking_notifier_call_chain(&module_notify_list, 3316 blocking_notifier_call_chain(&module_notify_list,
3315 MODULE_STATE_GOING, mod); 3317 MODULE_STATE_GOING, mod);
3318 ftrace_release_mod(mod);
3316 free_module(mod); 3319 free_module(mod);
3317 wake_up_all(&module_wq); 3320 wake_up_all(&module_wq);
3318 return ret; 3321 return ret;
@@ -3389,6 +3392,7 @@ static int complete_formation(struct module *mod, struct load_info *info)
3389 mod->state = MODULE_STATE_COMING; 3392 mod->state = MODULE_STATE_COMING;
3390 mutex_unlock(&module_mutex); 3393 mutex_unlock(&module_mutex);
3391 3394
3395 ftrace_module_enable(mod);
3392 blocking_notifier_call_chain(&module_notify_list, 3396 blocking_notifier_call_chain(&module_notify_list,
3393 MODULE_STATE_COMING, mod); 3397 MODULE_STATE_COMING, mod);
3394 return 0; 3398 return 0;
diff --git a/kernel/resource.c b/kernel/resource.c
index 09c0597840b0..3669d1bfc425 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1083,9 +1083,10 @@ struct resource * __request_region(struct resource *parent,
1083 if (!conflict) 1083 if (!conflict)
1084 break; 1084 break;
1085 if (conflict != parent) { 1085 if (conflict != parent) {
1086 parent = conflict; 1086 if (!(conflict->flags & IORESOURCE_BUSY)) {
1087 if (!(conflict->flags & IORESOURCE_BUSY)) 1087 parent = conflict;
1088 continue; 1088 continue;
1089 }
1089 } 1090 }
1090 if (conflict->flags & flags & IORESOURCE_MUXED) { 1091 if (conflict->flags & flags & IORESOURCE_MUXED) {
1091 add_wait_queue(&muxed_resource_wait, &wait); 1092 add_wait_queue(&muxed_resource_wait, &wait);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eca592f977b2..57a6eea84694 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4961,7 +4961,7 @@ void ftrace_release_mod(struct module *mod)
4961 mutex_unlock(&ftrace_lock); 4961 mutex_unlock(&ftrace_lock);
4962} 4962}
4963 4963
4964static void ftrace_module_enable(struct module *mod) 4964void ftrace_module_enable(struct module *mod)
4965{ 4965{
4966 struct dyn_ftrace *rec; 4966 struct dyn_ftrace *rec;
4967 struct ftrace_page *pg; 4967 struct ftrace_page *pg;
@@ -5038,38 +5038,8 @@ void ftrace_module_init(struct module *mod)
5038 ftrace_process_locs(mod, mod->ftrace_callsites, 5038 ftrace_process_locs(mod, mod->ftrace_callsites,
5039 mod->ftrace_callsites + mod->num_ftrace_callsites); 5039 mod->ftrace_callsites + mod->num_ftrace_callsites);
5040} 5040}
5041
5042static int ftrace_module_notify(struct notifier_block *self,
5043 unsigned long val, void *data)
5044{
5045 struct module *mod = data;
5046
5047 switch (val) {
5048 case MODULE_STATE_COMING:
5049 ftrace_module_enable(mod);
5050 break;
5051 case MODULE_STATE_GOING:
5052 ftrace_release_mod(mod);
5053 break;
5054 default:
5055 break;
5056 }
5057
5058 return 0;
5059}
5060#else
5061static int ftrace_module_notify(struct notifier_block *self,
5062 unsigned long val, void *data)
5063{
5064 return 0;
5065}
5066#endif /* CONFIG_MODULES */ 5041#endif /* CONFIG_MODULES */
5067 5042
5068struct notifier_block ftrace_module_nb = {
5069 .notifier_call = ftrace_module_notify,
5070 .priority = INT_MIN, /* Run after anything that can remove kprobes */
5071};
5072
5073void __init ftrace_init(void) 5043void __init ftrace_init(void)
5074{ 5044{
5075 extern unsigned long __start_mcount_loc[]; 5045 extern unsigned long __start_mcount_loc[];
@@ -5098,10 +5068,6 @@ void __init ftrace_init(void)
5098 __start_mcount_loc, 5068 __start_mcount_loc,
5099 __stop_mcount_loc); 5069 __stop_mcount_loc);
5100 5070
5101 ret = register_module_notifier(&ftrace_module_nb);
5102 if (ret)
5103 pr_warning("Failed to register trace ftrace module exit notifier\n");
5104
5105 set_ftrace_early_filters(); 5071 set_ftrace_early_filters();
5106 5072
5107 return; 5073 return;
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
index 6f500ef2301d..f0b323abb4c6 100644
--- a/lib/ucs2_string.c
+++ b/lib/ucs2_string.c
@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
49 } 49 }
50} 50}
51EXPORT_SYMBOL(ucs2_strncmp); 51EXPORT_SYMBOL(ucs2_strncmp);
52
53unsigned long
54ucs2_utf8size(const ucs2_char_t *src)
55{
56 unsigned long i;
57 unsigned long j = 0;
58
59 for (i = 0; i < ucs2_strlen(src); i++) {
60 u16 c = src[i];
61
62 if (c >= 0x800)
63 j += 3;
64 else if (c >= 0x80)
65 j += 2;
66 else
67 j += 1;
68 }
69
70 return j;
71}
72EXPORT_SYMBOL(ucs2_utf8size);
73
74/*
75 * copy at most maxlength bytes of whole utf8 characters to dest from the
76 * ucs2 string src.
77 *
78 * The return value is the number of characters copied, not including the
79 * final NUL character.
80 */
81unsigned long
82ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
83{
84 unsigned int i;
85 unsigned long j = 0;
86 unsigned long limit = ucs2_strnlen(src, maxlength);
87
88 for (i = 0; maxlength && i < limit; i++) {
89 u16 c = src[i];
90
91 if (c >= 0x800) {
92 if (maxlength < 3)
93 break;
94 maxlength -= 3;
95 dest[j++] = 0xe0 | (c & 0xf000) >> 12;
96 dest[j++] = 0x80 | (c & 0x0fc0) >> 6;
97 dest[j++] = 0x80 | (c & 0x003f);
98 } else if (c >= 0x80) {
99 if (maxlength < 2)
100 break;
101 maxlength -= 2;
102 dest[j++] = 0xc0 | (c & 0x7c0) >> 6;
103 dest[j++] = 0x80 | (c & 0x03f);
104 } else {
105 maxlength -= 1;
106 dest[j++] = c & 0x7f;
107 }
108 }
109 if (maxlength)
110 dest[j] = '\0';
111 return j;
112}
113EXPORT_SYMBOL(ucs2_as_utf8);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 08fc0ba2207e..1c317b85ea7d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1700,7 +1700,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1700 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 1700 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1701 VM_BUG_ON(!pmd_none(*new_pmd)); 1701 VM_BUG_ON(!pmd_none(*new_pmd));
1702 1702
1703 if (pmd_move_must_withdraw(new_ptl, old_ptl)) { 1703 if (pmd_move_must_withdraw(new_ptl, old_ptl) &&
1704 vma_is_anonymous(vma)) {
1704 pgtable_t pgtable; 1705 pgtable_t pgtable;
1705 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 1706 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1706 pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 1707 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
@@ -2860,6 +2861,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2860 young = pmd_young(*pmd); 2861 young = pmd_young(*pmd);
2861 dirty = pmd_dirty(*pmd); 2862 dirty = pmd_dirty(*pmd);
2862 2863
2864 pmdp_huge_split_prepare(vma, haddr, pmd);
2863 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2865 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2864 pmd_populate(mm, &_pmd, pgtable); 2866 pmd_populate(mm, &_pmd, pgtable);
2865 2867
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 06ae13e869d0..01f2b48c8618 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2630,8 +2630,10 @@ static int __init hugetlb_init(void)
2630 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 2630 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2631 } 2631 }
2632 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size)); 2632 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2633 if (default_hstate_max_huge_pages) 2633 if (default_hstate_max_huge_pages) {
2634 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 2634 if (!default_hstate.max_huge_pages)
2635 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2636 }
2635 2637
2636 hugetlb_init_hstates(); 2638 hugetlb_init_hstates();
2637 gather_bootmem_prealloc(); 2639 gather_bootmem_prealloc();
diff --git a/mm/mmap.c b/mm/mmap.c
index 2f2415a7a688..76d1ec29149b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2664,12 +2664,29 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
2664 if (!vma || !(vma->vm_flags & VM_SHARED)) 2664 if (!vma || !(vma->vm_flags & VM_SHARED))
2665 goto out; 2665 goto out;
2666 2666
2667 if (start < vma->vm_start || start + size > vma->vm_end) 2667 if (start < vma->vm_start)
2668 goto out; 2668 goto out;
2669 2669
2670 if (pgoff == linear_page_index(vma, start)) { 2670 if (start + size > vma->vm_end) {
2671 ret = 0; 2671 struct vm_area_struct *next;
2672 goto out; 2672
2673 for (next = vma->vm_next; next; next = next->vm_next) {
2674 /* hole between vmas ? */
2675 if (next->vm_start != next->vm_prev->vm_end)
2676 goto out;
2677
2678 if (next->vm_file != vma->vm_file)
2679 goto out;
2680
2681 if (next->vm_flags != vma->vm_flags)
2682 goto out;
2683
2684 if (start + size <= next->vm_end)
2685 break;
2686 }
2687
2688 if (!next)
2689 goto out;
2673 } 2690 }
2674 2691
2675 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; 2692 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
@@ -2679,9 +2696,16 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
2679 flags &= MAP_NONBLOCK; 2696 flags &= MAP_NONBLOCK;
2680 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; 2697 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
2681 if (vma->vm_flags & VM_LOCKED) { 2698 if (vma->vm_flags & VM_LOCKED) {
2699 struct vm_area_struct *tmp;
2682 flags |= MAP_LOCKED; 2700 flags |= MAP_LOCKED;
2701
2683 /* drop PG_Mlocked flag for over-mapped range */ 2702 /* drop PG_Mlocked flag for over-mapped range */
2684 munlock_vma_pages_range(vma, start, start + size); 2703 for (tmp = vma; tmp->vm_start >= start + size;
2704 tmp = tmp->vm_next) {
2705 munlock_vma_pages_range(tmp,
2706 max(tmp->vm_start, start),
2707 min(tmp->vm_end, start + size));
2708 }
2685 } 2709 }
2686 2710
2687 file = get_file(vma->vm_file); 2711 file = get_file(vma->vm_file);
diff --git a/mm/slab.c b/mm/slab.c
index 6ecc697a8bc4..621fbcb35a36 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2275,7 +2275,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2275 2275
2276 err = setup_cpu_cache(cachep, gfp); 2276 err = setup_cpu_cache(cachep, gfp);
2277 if (err) { 2277 if (err) {
2278 __kmem_cache_shutdown(cachep); 2278 __kmem_cache_release(cachep);
2279 return err; 2279 return err;
2280 } 2280 }
2281 2281
@@ -2414,12 +2414,13 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
2414 2414
2415int __kmem_cache_shutdown(struct kmem_cache *cachep) 2415int __kmem_cache_shutdown(struct kmem_cache *cachep)
2416{ 2416{
2417 return __kmem_cache_shrink(cachep, false);
2418}
2419
2420void __kmem_cache_release(struct kmem_cache *cachep)
2421{
2417 int i; 2422 int i;
2418 struct kmem_cache_node *n; 2423 struct kmem_cache_node *n;
2419 int rc = __kmem_cache_shrink(cachep, false);
2420
2421 if (rc)
2422 return rc;
2423 2424
2424 free_percpu(cachep->cpu_cache); 2425 free_percpu(cachep->cpu_cache);
2425 2426
@@ -2430,7 +2431,6 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
2430 kfree(n); 2431 kfree(n);
2431 cachep->node[i] = NULL; 2432 cachep->node[i] = NULL;
2432 } 2433 }
2433 return 0;
2434} 2434}
2435 2435
2436/* 2436/*
diff --git a/mm/slab.h b/mm/slab.h
index 834ad240c0bb..2eedacea439d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -140,6 +140,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
140#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS) 140#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
141 141
142int __kmem_cache_shutdown(struct kmem_cache *); 142int __kmem_cache_shutdown(struct kmem_cache *);
143void __kmem_cache_release(struct kmem_cache *);
143int __kmem_cache_shrink(struct kmem_cache *, bool); 144int __kmem_cache_shrink(struct kmem_cache *, bool);
144void slab_kmem_cache_release(struct kmem_cache *); 145void slab_kmem_cache_release(struct kmem_cache *);
145 146
diff --git a/mm/slab_common.c b/mm/slab_common.c
index b50aef01ccf7..065b7bdabdc3 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -693,6 +693,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s,
693 693
694void slab_kmem_cache_release(struct kmem_cache *s) 694void slab_kmem_cache_release(struct kmem_cache *s)
695{ 695{
696 __kmem_cache_release(s);
696 destroy_memcg_params(s); 697 destroy_memcg_params(s);
697 kfree_const(s->name); 698 kfree_const(s->name);
698 kmem_cache_free(kmem_cache, s); 699 kmem_cache_free(kmem_cache, s);
diff --git a/mm/slob.c b/mm/slob.c
index 17e8f8cc7c53..5ec158054ffe 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -630,6 +630,10 @@ int __kmem_cache_shutdown(struct kmem_cache *c)
630 return 0; 630 return 0;
631} 631}
632 632
633void __kmem_cache_release(struct kmem_cache *c)
634{
635}
636
633int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate) 637int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
634{ 638{
635 return 0; 639 return 0;
diff --git a/mm/slub.c b/mm/slub.c
index 2e1355ac056b..d8fbd4a6ed59 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1592,18 +1592,12 @@ static inline void add_partial(struct kmem_cache_node *n,
1592 __add_partial(n, page, tail); 1592 __add_partial(n, page, tail);
1593} 1593}
1594 1594
1595static inline void
1596__remove_partial(struct kmem_cache_node *n, struct page *page)
1597{
1598 list_del(&page->lru);
1599 n->nr_partial--;
1600}
1601
1602static inline void remove_partial(struct kmem_cache_node *n, 1595static inline void remove_partial(struct kmem_cache_node *n,
1603 struct page *page) 1596 struct page *page)
1604{ 1597{
1605 lockdep_assert_held(&n->list_lock); 1598 lockdep_assert_held(&n->list_lock);
1606 __remove_partial(n, page); 1599 list_del(&page->lru);
1600 n->nr_partial--;
1607} 1601}
1608 1602
1609/* 1603/*
@@ -3184,6 +3178,12 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
3184 } 3178 }
3185} 3179}
3186 3180
3181void __kmem_cache_release(struct kmem_cache *s)
3182{
3183 free_percpu(s->cpu_slab);
3184 free_kmem_cache_nodes(s);
3185}
3186
3187static int init_kmem_cache_nodes(struct kmem_cache *s) 3187static int init_kmem_cache_nodes(struct kmem_cache *s)
3188{ 3188{
3189 int node; 3189 int node;
@@ -3443,28 +3443,31 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
3443 3443
3444/* 3444/*
3445 * Attempt to free all partial slabs on a node. 3445 * Attempt to free all partial slabs on a node.
3446 * This is called from kmem_cache_close(). We must be the last thread 3446 * This is called from __kmem_cache_shutdown(). We must take list_lock
3447 * using the cache and therefore we do not need to lock anymore. 3447 * because sysfs file might still access partial list after the shutdowning.
3448 */ 3448 */
3449static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 3449static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3450{ 3450{
3451 struct page *page, *h; 3451 struct page *page, *h;
3452 3452
3453 BUG_ON(irqs_disabled());
3454 spin_lock_irq(&n->list_lock);
3453 list_for_each_entry_safe(page, h, &n->partial, lru) { 3455 list_for_each_entry_safe(page, h, &n->partial, lru) {
3454 if (!page->inuse) { 3456 if (!page->inuse) {
3455 __remove_partial(n, page); 3457 remove_partial(n, page);
3456 discard_slab(s, page); 3458 discard_slab(s, page);
3457 } else { 3459 } else {
3458 list_slab_objects(s, page, 3460 list_slab_objects(s, page,
3459 "Objects remaining in %s on kmem_cache_close()"); 3461 "Objects remaining in %s on __kmem_cache_shutdown()");
3460 } 3462 }
3461 } 3463 }
3464 spin_unlock_irq(&n->list_lock);
3462} 3465}
3463 3466
3464/* 3467/*
3465 * Release all resources used by a slab cache. 3468 * Release all resources used by a slab cache.
3466 */ 3469 */
3467static inline int kmem_cache_close(struct kmem_cache *s) 3470int __kmem_cache_shutdown(struct kmem_cache *s)
3468{ 3471{
3469 int node; 3472 int node;
3470 struct kmem_cache_node *n; 3473 struct kmem_cache_node *n;
@@ -3476,16 +3479,9 @@ static inline int kmem_cache_close(struct kmem_cache *s)
3476 if (n->nr_partial || slabs_node(s, node)) 3479 if (n->nr_partial || slabs_node(s, node))
3477 return 1; 3480 return 1;
3478 } 3481 }
3479 free_percpu(s->cpu_slab);
3480 free_kmem_cache_nodes(s);
3481 return 0; 3482 return 0;
3482} 3483}
3483 3484
3484int __kmem_cache_shutdown(struct kmem_cache *s)
3485{
3486 return kmem_cache_close(s);
3487}
3488
3489/******************************************************************** 3485/********************************************************************
3490 * Kmalloc subsystem 3486 * Kmalloc subsystem
3491 *******************************************************************/ 3487 *******************************************************************/
@@ -3980,7 +3976,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
3980 memcg_propagate_slab_attrs(s); 3976 memcg_propagate_slab_attrs(s);
3981 err = sysfs_slab_add(s); 3977 err = sysfs_slab_add(s);
3982 if (err) 3978 if (err)
3983 kmem_cache_close(s); 3979 __kmem_cache_release(s);
3984 3980
3985 return err; 3981 return err;
3986} 3982}
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index fadd3eb8e8bb..9106d8e2300e 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -74,6 +74,18 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
74static DEFINE_RWLOCK(snd_pcm_link_rwlock); 74static DEFINE_RWLOCK(snd_pcm_link_rwlock);
75static DECLARE_RWSEM(snd_pcm_link_rwsem); 75static DECLARE_RWSEM(snd_pcm_link_rwsem);
76 76
77/* Writer in rwsem may block readers even during its waiting in queue,
78 * and this may lead to a deadlock when the code path takes read sem
79 * twice (e.g. one in snd_pcm_action_nonatomic() and another in
80 * snd_pcm_stream_lock()). As a (suboptimal) workaround, let writer to
81 * spin until it gets the lock.
82 */
83static inline void down_write_nonblock(struct rw_semaphore *lock)
84{
85 while (!down_write_trylock(lock))
86 cond_resched();
87}
88
77/** 89/**
78 * snd_pcm_stream_lock - Lock the PCM stream 90 * snd_pcm_stream_lock - Lock the PCM stream
79 * @substream: PCM substream 91 * @substream: PCM substream
@@ -1813,7 +1825,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
1813 res = -ENOMEM; 1825 res = -ENOMEM;
1814 goto _nolock; 1826 goto _nolock;
1815 } 1827 }
1816 down_write(&snd_pcm_link_rwsem); 1828 down_write_nonblock(&snd_pcm_link_rwsem);
1817 write_lock_irq(&snd_pcm_link_rwlock); 1829 write_lock_irq(&snd_pcm_link_rwlock);
1818 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN || 1830 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1819 substream->runtime->status->state != substream1->runtime->status->state || 1831 substream->runtime->status->state != substream1->runtime->status->state ||
@@ -1860,7 +1872,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
1860 struct snd_pcm_substream *s; 1872 struct snd_pcm_substream *s;
1861 int res = 0; 1873 int res = 0;
1862 1874
1863 down_write(&snd_pcm_link_rwsem); 1875 down_write_nonblock(&snd_pcm_link_rwsem);
1864 write_lock_irq(&snd_pcm_link_rwlock); 1876 write_lock_irq(&snd_pcm_link_rwlock);
1865 if (!snd_pcm_stream_linked(substream)) { 1877 if (!snd_pcm_stream_linked(substream)) {
1866 res = -EALREADY; 1878 res = -EALREADY;
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 801076687bb1..c850345c43b5 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -383,15 +383,20 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
383 383
384 if (snd_BUG_ON(!pool)) 384 if (snd_BUG_ON(!pool))
385 return -EINVAL; 385 return -EINVAL;
386 if (pool->ptr) /* should be atomic? */
387 return 0;
388 386
389 pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size); 387 cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
390 if (!pool->ptr) 388 if (!cellptr)
391 return -ENOMEM; 389 return -ENOMEM;
392 390
393 /* add new cells to the free cell list */ 391 /* add new cells to the free cell list */
394 spin_lock_irqsave(&pool->lock, flags); 392 spin_lock_irqsave(&pool->lock, flags);
393 if (pool->ptr) {
394 spin_unlock_irqrestore(&pool->lock, flags);
395 vfree(cellptr);
396 return 0;
397 }
398
399 pool->ptr = cellptr;
395 pool->free = NULL; 400 pool->free = NULL;
396 401
397 for (cell = 0; cell < pool->size; cell++) { 402 for (cell = 0; cell < pool->size; cell++) {
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index 921fb2bd8fad..fe686ee41c6d 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -535,19 +535,22 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
535 bool is_src, bool ack) 535 bool is_src, bool ack)
536{ 536{
537 struct snd_seq_port_subs_info *grp; 537 struct snd_seq_port_subs_info *grp;
538 struct list_head *list;
539 bool empty;
538 540
539 grp = is_src ? &port->c_src : &port->c_dest; 541 grp = is_src ? &port->c_src : &port->c_dest;
542 list = is_src ? &subs->src_list : &subs->dest_list;
540 down_write(&grp->list_mutex); 543 down_write(&grp->list_mutex);
541 write_lock_irq(&grp->list_lock); 544 write_lock_irq(&grp->list_lock);
542 if (is_src) 545 empty = list_empty(list);
543 list_del(&subs->src_list); 546 if (!empty)
544 else 547 list_del_init(list);
545 list_del(&subs->dest_list);
546 grp->exclusive = 0; 548 grp->exclusive = 0;
547 write_unlock_irq(&grp->list_lock); 549 write_unlock_irq(&grp->list_lock);
548 up_write(&grp->list_mutex); 550 up_write(&grp->list_mutex);
549 551
550 unsubscribe_port(client, port, grp, &subs->info, ack); 552 if (!empty)
553 unsubscribe_port(client, port, grp, &subs->info, ack);
551} 554}
552 555
553/* connect two ports */ 556/* connect two ports */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4045dca3d699..ce6b97f31390 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2168,10 +2168,10 @@ static void azx_remove(struct pci_dev *pci)
2168 struct hda_intel *hda; 2168 struct hda_intel *hda;
2169 2169
2170 if (card) { 2170 if (card) {
2171 /* flush the pending probing work */ 2171 /* cancel the pending probing work */
2172 chip = card->private_data; 2172 chip = card->private_data;
2173 hda = container_of(chip, struct hda_intel, chip); 2173 hda = container_of(chip, struct hda_intel, chip);
2174 flush_work(&hda->probe_work); 2174 cancel_work_sync(&hda->probe_work);
2175 2175
2176 snd_card_free(card); 2176 snd_card_free(card);
2177 } 2177 }
diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
index 77edcdcc016b..057278448515 100755
--- a/tools/testing/selftests/efivarfs/efivarfs.sh
+++ b/tools/testing/selftests/efivarfs/efivarfs.sh
@@ -88,7 +88,11 @@ test_delete()
88 exit 1 88 exit 1
89 fi 89 fi
90 90
91 rm $file 91 rm $file 2>/dev/null
92 if [ $? -ne 0 ]; then
93 chattr -i $file
94 rm $file
95 fi
92 96
93 if [ -e $file ]; then 97 if [ -e $file ]; then
94 echo "$file couldn't be deleted" >&2 98 echo "$file couldn't be deleted" >&2
@@ -111,6 +115,7 @@ test_zero_size_delete()
111 exit 1 115 exit 1
112 fi 116 fi
113 117
118 chattr -i $file
114 printf "$attrs" > $file 119 printf "$attrs" > $file
115 120
116 if [ -e $file ]; then 121 if [ -e $file ]; then
@@ -141,7 +146,11 @@ test_valid_filenames()
141 echo "$file could not be created" >&2 146 echo "$file could not be created" >&2
142 ret=1 147 ret=1
143 else 148 else
144 rm $file 149 rm $file 2>/dev/null
150 if [ $? -ne 0 ]; then
151 chattr -i $file
152 rm $file
153 fi
145 fi 154 fi
146 done 155 done
147 156
@@ -174,7 +183,11 @@ test_invalid_filenames()
174 183
175 if [ -e $file ]; then 184 if [ -e $file ]; then
176 echo "Creating $file should have failed" >&2 185 echo "Creating $file should have failed" >&2
177 rm $file 186 rm $file 2>/dev/null
187 if [ $? -ne 0 ]; then
188 chattr -i $file
189 rm $file
190 fi
178 ret=1 191 ret=1
179 fi 192 fi
180 done 193 done
diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
index 8c0764407b3c..4af74f733036 100644
--- a/tools/testing/selftests/efivarfs/open-unlink.c
+++ b/tools/testing/selftests/efivarfs/open-unlink.c
@@ -1,10 +1,68 @@
1#include <errno.h>
1#include <stdio.h> 2#include <stdio.h>
2#include <stdint.h> 3#include <stdint.h>
3#include <stdlib.h> 4#include <stdlib.h>
4#include <unistd.h> 5#include <unistd.h>
6#include <sys/ioctl.h>
5#include <sys/types.h> 7#include <sys/types.h>
6#include <sys/stat.h> 8#include <sys/stat.h>
7#include <fcntl.h> 9#include <fcntl.h>
10#include <linux/fs.h>
11
12static int set_immutable(const char *path, int immutable)
13{
14 unsigned int flags;
15 int fd;
16 int rc;
17 int error;
18
19 fd = open(path, O_RDONLY);
20 if (fd < 0)
21 return fd;
22
23 rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
24 if (rc < 0) {
25 error = errno;
26 close(fd);
27 errno = error;
28 return rc;
29 }
30
31 if (immutable)
32 flags |= FS_IMMUTABLE_FL;
33 else
34 flags &= ~FS_IMMUTABLE_FL;
35
36 rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
37 error = errno;
38 close(fd);
39 errno = error;
40 return rc;
41}
42
43static int get_immutable(const char *path)
44{
45 unsigned int flags;
46 int fd;
47 int rc;
48 int error;
49
50 fd = open(path, O_RDONLY);
51 if (fd < 0)
52 return fd;
53
54 rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
55 if (rc < 0) {
56 error = errno;
57 close(fd);
58 errno = error;
59 return rc;
60 }
61 close(fd);
62 if (flags & FS_IMMUTABLE_FL)
63 return 1;
64 return 0;
65}
8 66
9int main(int argc, char **argv) 67int main(int argc, char **argv)
10{ 68{
@@ -27,7 +85,7 @@ int main(int argc, char **argv)
27 buf[4] = 0; 85 buf[4] = 0;
28 86
29 /* create a test variable */ 87 /* create a test variable */
30 fd = open(path, O_WRONLY | O_CREAT); 88 fd = open(path, O_WRONLY | O_CREAT, 0600);
31 if (fd < 0) { 89 if (fd < 0) {
32 perror("open(O_WRONLY)"); 90 perror("open(O_WRONLY)");
33 return EXIT_FAILURE; 91 return EXIT_FAILURE;
@@ -41,6 +99,18 @@ int main(int argc, char **argv)
41 99
42 close(fd); 100 close(fd);
43 101
102 rc = get_immutable(path);
103 if (rc < 0) {
104 perror("ioctl(FS_IOC_GETFLAGS)");
105 return EXIT_FAILURE;
106 } else if (rc) {
107 rc = set_immutable(path, 0);
108 if (rc < 0) {
109 perror("ioctl(FS_IOC_SETFLAGS)");
110 return EXIT_FAILURE;
111 }
112 }
113
44 fd = open(path, O_RDONLY); 114 fd = open(path, O_RDONLY);
45 if (fd < 0) { 115 if (fd < 0) {
46 perror("open"); 116 perror("open");
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 69bca185c471..ea6064696fe4 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -143,7 +143,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
143 * Check if there was a change in the timer state (should we raise or lower 143 * Check if there was a change in the timer state (should we raise or lower
144 * the line level to the GIC). 144 * the line level to the GIC).
145 */ 145 */
146static void kvm_timer_update_state(struct kvm_vcpu *vcpu) 146static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
147{ 147{
148 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 148 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
149 149
@@ -154,10 +154,12 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
154 * until we call this function from kvm_timer_flush_hwstate. 154 * until we call this function from kvm_timer_flush_hwstate.
155 */ 155 */
156 if (!vgic_initialized(vcpu->kvm)) 156 if (!vgic_initialized(vcpu->kvm))
157 return; 157 return -ENODEV;
158 158
159 if (kvm_timer_should_fire(vcpu) != timer->irq.level) 159 if (kvm_timer_should_fire(vcpu) != timer->irq.level)
160 kvm_timer_update_irq(vcpu, !timer->irq.level); 160 kvm_timer_update_irq(vcpu, !timer->irq.level);
161
162 return 0;
161} 163}
162 164
163/* 165/*
@@ -218,7 +220,8 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
218 bool phys_active; 220 bool phys_active;
219 int ret; 221 int ret;
220 222
221 kvm_timer_update_state(vcpu); 223 if (kvm_timer_update_state(vcpu))
224 return;
222 225
223 /* 226 /*
224 * If we enter the guest with the virtual input level to the VGIC 227 * If we enter the guest with the virtual input level to the VGIC