aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/device-mapper/dm-raid.txt1
-rw-r--r--Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt1
-rw-r--r--Documentation/devicetree/bindings/mmc/img-dw-mshc.txt1
-rw-r--r--Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt16
-rw-r--r--Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt1
-rw-r--r--Makefile2
-rw-r--r--arch/arc/mm/dma.c2
-rw-r--r--arch/arm/include/asm/kexec.h5
-rw-r--r--arch/arm/include/asm/ucontext.h6
-rw-r--r--arch/arm/kernel/machine_kexec.c11
-rw-r--r--arch/arm/kernel/setup.c3
-rw-r--r--arch/arm/kernel/signal.c78
-rw-r--r--arch/arm/mach-omap2/hsmmc.c239
-rw-r--r--arch/arm/mach-omap2/hsmmc.h9
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c1
-rw-r--r--arch/arm/mm/dma-mapping-nommu.c45
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm64/include/asm/atomic_lse.h2
-rw-r--r--arch/arm64/include/asm/sysreg.h4
-rw-r--r--arch/arm64/include/asm/uaccess.h2
-rw-r--r--arch/arm64/kernel/cpu_ops.c4
-rw-r--r--arch/arm64/kernel/smp.c12
-rw-r--r--arch/arm64/kernel/topology.c22
-rw-r--r--arch/arm64/kernel/traps.c8
-rw-r--r--arch/arm64/lib/copy_page.S9
-rw-r--r--arch/arm64/mm/dma-mapping.c4
-rw-r--r--arch/arm64/mm/mmu.c18
-rw-r--r--arch/arm64/mm/numa.c7
-rw-r--r--arch/mips/mm/dma-default.c2
-rw-r--r--arch/parisc/configs/712_defconfig41
-rw-r--r--arch/parisc/configs/a500_defconfig50
-rw-r--r--arch/parisc/configs/b180_defconfig17
-rw-r--r--arch/parisc/configs/c3000_defconfig38
-rw-r--r--arch/parisc/configs/c8000_defconfig17
-rw-r--r--arch/parisc/configs/default_defconfig49
-rw-r--r--arch/parisc/configs/generic-32bit_defconfig21
-rw-r--r--arch/parisc/configs/generic-64bit_defconfig48
-rw-r--r--arch/parisc/include/asm/pdcpat.h16
-rw-r--r--arch/parisc/kernel/cache.c36
-rw-r--r--arch/parisc/kernel/firmware.c36
-rw-r--r--arch/parisc/kernel/irq.c8
-rw-r--r--arch/parisc/kernel/pdt.c23
-rw-r--r--arch/parisc/kernel/process.c2
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S2
-rw-r--r--arch/powerpc/Makefile25
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h15
-rw-r--r--arch/powerpc/include/asm/mmu_context.h18
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S59
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c5
-rw-r--r--arch/powerpc/mm/pgtable-radix.c34
-rw-r--r--arch/powerpc/mm/subpage-prot.c2
-rw-r--r--arch/powerpc/mm/tlb-radix.c45
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c1
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c8
-rw-r--r--arch/s390/mm/pgtable.c6
-rw-r--r--arch/x86/boot/compressed/Makefile1
-rw-r--r--arch/x86/boot/string.c9
-rw-r--r--arch/x86/entry/entry_64.S1
-rw-r--r--arch/x86/events/intel/uncore_snbep.c51
-rw-r--r--arch/x86/include/asm/entry_arch.h2
-rw-r--r--arch/x86/include/asm/hardirq.h1
-rw-r--r--arch/x86/include/asm/hw_irq.h2
-rw-r--r--arch/x86/include/asm/irq_vectors.h3
-rw-r--r--arch/x86/kernel/irq.c19
-rw-r--r--arch/x86/kernel/irqinit.c2
-rw-r--r--arch/x86/kernel/kprobes/core.c10
-rw-r--r--arch/x86/kernel/reboot.c6
-rw-r--r--arch/x86/kvm/lapic.c17
-rw-r--r--arch/x86/kvm/vmx.c25
-rw-r--r--arch/x86/kvm/x86.c4
-rw-r--r--block/blk-core.c4
-rw-r--r--block/blk-mq-cpumap.c4
-rw-r--r--crypto/authencesn.c5
-rw-r--r--drivers/acpi/acpi_apd.c4
-rw-r--r--drivers/acpi/acpi_lpss.c14
-rw-r--r--drivers/acpi/acpi_watchdog.c7
-rw-r--r--drivers/acpi/ec.c39
-rw-r--r--drivers/acpi/internal.h4
-rw-r--r--drivers/acpi/numa.c2
-rw-r--r--drivers/acpi/sleep.c6
-rw-r--r--drivers/base/dma-coherent.c164
-rw-r--r--drivers/base/dma-mapping.c2
-rw-r--r--drivers/block/nbd.c18
-rw-r--r--drivers/block/virtio_blk.c7
-rw-r--r--drivers/block/xen-blkfront.c25
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/crypto/bcm/spu2.c1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c3
-rw-r--r--drivers/crypto/inside-secure/safexcel.c5
-rw-r--r--drivers/dax/super.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c24
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c12
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c5
-rw-r--r--drivers/gpu/drm/exynos/Kconfig1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c24
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c22
-rw-r--r--drivers/gpu/drm/i915/i915_gem_clflush.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_clflush.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c24
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h2
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c4
-rw-r--r--drivers/gpu/drm/i915/intel_display.c86
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c4
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig19
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c4
-rw-r--r--drivers/gpu/host1x/dev.c8
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-ortek.c6
-rw-r--r--drivers/hid/usbhid/hid-core.c16
-rw-r--r--drivers/lightnvm/pblk-rb.c4
-rw-r--r--drivers/lightnvm/pblk-read.c23
-rw-r--r--drivers/lightnvm/pblk.h2
-rw-r--r--drivers/md/dm-bufio.c3
-rw-r--r--drivers/md/dm-integrity.c22
-rw-r--r--drivers/md/dm-raid.c29
-rw-r--r--drivers/md/dm-table.c35
-rw-r--r--drivers/md/dm-verity-fec.c21
-rw-r--r--drivers/md/dm-zoned-metadata.c12
-rw-r--r--drivers/md/dm-zoned-reclaim.c2
-rw-r--r--drivers/md/dm-zoned-target.c8
-rw-r--r--drivers/md/md.c2
-rw-r--r--drivers/md/md.h54
-rw-r--r--drivers/md/raid1-10.c81
-rw-r--r--drivers/md/raid1.c68
-rw-r--r--drivers/md/raid10.c25
-rw-r--r--drivers/md/raid5.c11
-rw-r--r--drivers/mmc/host/dw_mmc.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c11
-rw-r--r--drivers/mmc/host/sunxi-mmc.c8
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/nvme/host/core.c6
-rw-r--r--drivers/nvme/host/fc.c121
-rw-r--r--drivers/nvme/host/pci.c6
-rw-r--r--drivers/nvme/target/fc.c101
-rw-r--r--drivers/of/irq.c2
-rw-r--r--drivers/parisc/pdc_stable.c2
-rw-r--r--drivers/perf/arm_pmu.c41
-rw-r--r--drivers/perf/arm_pmu_platform.c9
-rw-r--r--drivers/perf/qcom_l2_pmu.c2
-rw-r--r--drivers/s390/cio/chp.c1
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/sg.c7
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h2
-rw-r--r--drivers/thunderbolt/switch.c8
-rw-r--r--drivers/thunderbolt/tb.h4
-rw-r--r--drivers/thunderbolt/tb_msgs.h12
-rw-r--r--drivers/virtio/virtio_balloon.c28
-rw-r--r--drivers/xen/events/events_base.c13
-rw-r--r--drivers/xen/xen-selfballoon.c4
-rw-r--r--drivers/xen/xenfs/super.c1
-rw-r--r--fs/btrfs/extent-tree.c11
-rw-r--r--fs/btrfs/tree-log.c3
-rw-r--r--fs/btrfs/volumes.c4
-rw-r--r--fs/jfs/acl.c24
-rw-r--r--fs/jfs/resize.c4
-rw-r--r--fs/jfs/super.c4
-rw-r--r--fs/nfs/file.c4
-rw-r--r--fs/nfs/nfs4proc.c14
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c21
-rw-r--r--fs/xfs/libxfs/xfs_btree.c6
-rw-r--r--fs/xfs/libxfs/xfs_dir2_data.c4
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c4
-rw-r--r--fs/xfs/xfs_qm.c3
-rw-r--r--fs/xfs/xfs_reflink.c4
-rw-r--r--include/acpi/acpi_numa.h1
-rw-r--r--include/linux/dax.h1
-rw-r--r--include/linux/dma-mapping.h40
-rw-r--r--include/linux/irq.h7
-rw-r--r--include/linux/kvm_host.h1
-rw-r--r--include/linux/nvme-fc.h19
-rw-r--r--include/linux/nvme.h2
-rw-r--r--include/linux/perf/arm_pmu.h4
-rw-r--r--include/linux/platform_data/hsmmc-omap.h10
-rw-r--r--include/linux/uuid.h14
-rw-r--r--include/linux/wait.h8
-rw-r--r--kernel/irq/cpuhotplug.c9
-rw-r--r--kernel/sched/core.c2
-rw-r--r--lib/test_uuid.c2
-rwxr-xr-xscripts/dtc/dtx_diff2
-rw-r--r--sound/pci/fm801.c4
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/pci/hda/patch_hdmi.c27
-rw-r--r--sound/pci/hda/patch_realtek.c144
-rwxr-xr-xtools/kvm/kvm_stat/kvm_stat22
-rw-r--r--virt/kvm/kvm_main.c35
226 files changed, 1930 insertions, 1509 deletions
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 7e06e65586d4..4a0a7469fdd7 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -343,3 +343,4 @@ Version History
3431.11.0 Fix table line argument order 3431.11.0 Fix table line argument order
344 (wrong raid10_copies/raid10_format sequence) 344 (wrong raid10_copies/raid10_format sequence)
3451.11.1 Add raid4/5/6 journal write-back support via journal_mode option 3451.11.1 Add raid4/5/6 journal write-back support via journal_mode option
3461.12.1 fix for MD deadlock between mddev_suspend() and md_write_start() available
diff --git a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
index f69773f4252b..941bb6a6fb13 100644
--- a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
+++ b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
@@ -8,7 +8,6 @@ Required properties:
8 8
9Optional properties: 9Optional properties:
10- clocks: Reference to the crypto engine clock. 10- clocks: Reference to the crypto engine clock.
11- dma-mask: The address mask limitation. Defaults to 64.
12 11
13Example: 12Example:
14 13
@@ -24,6 +23,5 @@ Example:
24 interrupt-names = "mem", "ring0", "ring1", "ring2", "ring3", 23 interrupt-names = "mem", "ring0", "ring1", "ring2", "ring3",
25 "eip"; 24 "eip";
26 clocks = <&cpm_syscon0 1 26>; 25 clocks = <&cpm_syscon0 1 26>;
27 dma-mask = <0xff 0xffffffff>;
28 status = "disabled"; 26 status = "disabled";
29 }; 27 };
diff --git a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
index aad98442788b..a58c173b7ab9 100644
--- a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
@@ -78,7 +78,6 @@ Example:
78 }; 78 };
79 79
80 dwmmc0@12200000 { 80 dwmmc0@12200000 {
81 num-slots = <1>;
82 cap-mmc-highspeed; 81 cap-mmc-highspeed;
83 cap-sd-highspeed; 82 cap-sd-highspeed;
84 broken-cd; 83 broken-cd;
diff --git a/Documentation/devicetree/bindings/mmc/img-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/img-dw-mshc.txt
index 85de99fcaa2f..c54e577eea07 100644
--- a/Documentation/devicetree/bindings/mmc/img-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/img-dw-mshc.txt
@@ -24,6 +24,5 @@ Example:
24 24
25 fifo-depth = <0x20>; 25 fifo-depth = <0x20>;
26 bus-width = <4>; 26 bus-width = <4>;
27 num-slots = <1>;
28 disable-wp; 27 disable-wp;
29 }; 28 };
diff --git a/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt
index 8af1afcb86dc..07242d141773 100644
--- a/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt
@@ -36,7 +36,6 @@ Example:
36 36
37 /* Board portion */ 37 /* Board portion */
38 dwmmc0@fcd03000 { 38 dwmmc0@fcd03000 {
39 num-slots = <1>;
40 vmmc-supply = <&ldo12>; 39 vmmc-supply = <&ldo12>;
41 fifo-depth = <0x100>; 40 fifo-depth = <0x100>;
42 pinctrl-names = "default"; 41 pinctrl-names = "default";
@@ -52,7 +51,6 @@ Example:
52 51
53 dwmmc_1: dwmmc1@f723e000 { 52 dwmmc_1: dwmmc1@f723e000 {
54 compatible = "hisilicon,hi6220-dw-mshc"; 53 compatible = "hisilicon,hi6220-dw-mshc";
55 num-slots = <0x1>;
56 bus-width = <0x4>; 54 bus-width = <0x4>;
57 disable-wp; 55 disable-wp;
58 cap-sd-highspeed; 56 cap-sd-highspeed;
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index 9cb55ca57461..ef3e5f14067a 100644
--- a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -12,12 +12,12 @@ Required Properties:
12* #address-cells: should be 1. 12* #address-cells: should be 1.
13* #size-cells: should be 0. 13* #size-cells: should be 0.
14 14
15# Slots: The slot specific information are contained within child-nodes with 15# Slots (DEPRECATED): The slot specific information are contained within
16 each child-node representing a supported slot. There should be atleast one 16 child-nodes with each child-node representing a supported slot. There should
17 child node representing a card slot. The name of the child node representing 17 be atleast one child node representing a card slot. The name of the child node
18 the slot is recommended to be slot@n where n is the unique number of the slot 18 representing the slot is recommended to be slot@n where n is the unique number
19 connected to the controller. The following are optional properties which 19 of the slot connected to the controller. The following are optional properties
20 can be included in the slot child node. 20 which can be included in the slot child node.
21 21
22 * reg: specifies the physical slot number. The valid values of this 22 * reg: specifies the physical slot number. The valid values of this
23 property is 0 to (num-slots -1), where num-slots is the value 23 property is 0 to (num-slots -1), where num-slots is the value
@@ -63,7 +63,7 @@ Optional properties:
63 clock(cclk_out). If it's not specified, max is 200MHZ and min is 400KHz by default. 63 clock(cclk_out). If it's not specified, max is 200MHZ and min is 400KHz by default.
64 (Use the "max-frequency" instead of "clock-freq-min-max".) 64 (Use the "max-frequency" instead of "clock-freq-min-max".)
65 65
66* num-slots: specifies the number of slots supported by the controller. 66* num-slots (DEPRECATED): specifies the number of slots supported by the controller.
67 The number of physical slots actually used could be equal or less than the 67 The number of physical slots actually used could be equal or less than the
68 value specified by num-slots. If this property is not specified, the value 68 value specified by num-slots. If this property is not specified, the value
69 of num-slot property is assumed to be 1. 69 of num-slot property is assumed to be 1.
@@ -124,7 +124,6 @@ board specific portions as listed below.
124 dwmmc0@12200000 { 124 dwmmc0@12200000 {
125 clock-frequency = <400000000>; 125 clock-frequency = <400000000>;
126 clock-freq-min-max = <400000 200000000>; 126 clock-freq-min-max = <400000 200000000>;
127 num-slots = <1>;
128 broken-cd; 127 broken-cd;
129 fifo-depth = <0x80>; 128 fifo-depth = <0x80>;
130 card-detect-delay = <200>; 129 card-detect-delay = <200>;
@@ -139,7 +138,6 @@ board specific portions as listed below.
139 dwmmc0@12200000 { 138 dwmmc0@12200000 {
140 clock-frequency = <400000000>; 139 clock-frequency = <400000000>;
141 clock-freq-min-max = <400000 200000000>; 140 clock-freq-min-max = <400000 200000000>;
142 num-slots = <1>;
143 broken-cd; 141 broken-cd;
144 fifo-depth = <0x80>; 142 fifo-depth = <0x80>;
145 card-detect-delay = <200>; 143 card-detect-delay = <200>;
diff --git a/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt
index eaade0e5adeb..906819a90c2b 100644
--- a/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt
@@ -25,7 +25,6 @@ Example:
25 clock-frequency = <50000000>; 25 clock-frequency = <50000000>;
26 clocks = <&topcrm SD0_AHB>, <&topcrm SD0_WCLK>; 26 clocks = <&topcrm SD0_AHB>, <&topcrm SD0_WCLK>;
27 clock-names = "biu", "ciu"; 27 clock-names = "biu", "ciu";
28 num-slots = <1>;
29 max-frequency = <50000000>; 28 max-frequency = <50000000>;
30 cap-sdio-irq; 29 cap-sdio-irq;
31 cap-sd-highspeed; 30 cap-sd-highspeed;
diff --git a/Makefile b/Makefile
index 0662b5201d3e..37087b4657b7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 13 2PATCHLEVEL = 13
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc2 4EXTRAVERSION = -rc3
5NAME = Fearless Coyote 5NAME = Fearless Coyote
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 2a07e6ecafbd..71d3efff99d3 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -117,7 +117,7 @@ static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
117 117
118 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 118 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
119 119
120 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) 120 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
121 return ret; 121 return ret;
122 122
123 if (off < count && user_count <= (count - off)) { 123 if (off < count && user_count <= (count - off)) {
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index 1869af6bac5c..25021b798a1e 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -19,6 +19,11 @@
19 19
20#ifndef __ASSEMBLY__ 20#ifndef __ASSEMBLY__
21 21
22#define ARCH_HAS_KIMAGE_ARCH
23struct kimage_arch {
24 u32 kernel_r2;
25};
26
22/** 27/**
23 * crash_setup_regs() - save registers for the panic kernel 28 * crash_setup_regs() - save registers for the panic kernel
24 * @newregs: registers are saved here 29 * @newregs: registers are saved here
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index 14749aec94bf..921d8274855c 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -35,6 +35,12 @@ struct ucontext {
35 * bytes, to prevent unpredictable padding in the signal frame. 35 * bytes, to prevent unpredictable padding in the signal frame.
36 */ 36 */
37 37
38/*
39 * Dummy padding block: if this magic is encountered, the block should
40 * be skipped using the corresponding size field.
41 */
42#define DUMMY_MAGIC 0xb0d9ed01
43
38#ifdef CONFIG_CRUNCH 44#ifdef CONFIG_CRUNCH
39#define CRUNCH_MAGIC 0x5065cf03 45#define CRUNCH_MAGIC 0x5065cf03
40#define CRUNCH_STORAGE_SIZE (CRUNCH_SIZE + 8) 46#define CRUNCH_STORAGE_SIZE (CRUNCH_SIZE + 8)
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 15495887ca14..fe1419eeb932 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -30,7 +30,6 @@ extern unsigned long kexec_boot_atags;
30 30
31static atomic_t waiting_for_crash_ipi; 31static atomic_t waiting_for_crash_ipi;
32 32
33static unsigned long dt_mem;
34/* 33/*
35 * Provide a dummy crash_notes definition while crash dump arrives to arm. 34 * Provide a dummy crash_notes definition while crash dump arrives to arm.
36 * This prevents breakage of crash_notes attribute in kernel/ksysfs.c. 35 * This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
@@ -42,6 +41,9 @@ int machine_kexec_prepare(struct kimage *image)
42 __be32 header; 41 __be32 header;
43 int i, err; 42 int i, err;
44 43
44 image->arch.kernel_r2 = image->start - KEXEC_ARM_ZIMAGE_OFFSET
45 + KEXEC_ARM_ATAGS_OFFSET;
46
45 /* 47 /*
46 * Validate that if the current HW supports SMP, then the SW supports 48 * Validate that if the current HW supports SMP, then the SW supports
47 * and implements CPU hotplug for the current HW. If not, we won't be 49 * and implements CPU hotplug for the current HW. If not, we won't be
@@ -66,8 +68,8 @@ int machine_kexec_prepare(struct kimage *image)
66 if (err) 68 if (err)
67 return err; 69 return err;
68 70
69 if (be32_to_cpu(header) == OF_DT_HEADER) 71 if (header == cpu_to_be32(OF_DT_HEADER))
70 dt_mem = current_segment->mem; 72 image->arch.kernel_r2 = current_segment->mem;
71 } 73 }
72 return 0; 74 return 0;
73} 75}
@@ -165,8 +167,7 @@ void machine_kexec(struct kimage *image)
165 kexec_start_address = image->start; 167 kexec_start_address = image->start;
166 kexec_indirection_page = page_list; 168 kexec_indirection_page = page_list;
167 kexec_mach_type = machine_arch_type; 169 kexec_mach_type = machine_arch_type;
168 kexec_boot_atags = dt_mem ?: image->start - KEXEC_ARM_ZIMAGE_OFFSET 170 kexec_boot_atags = image->arch.kernel_r2;
169 + KEXEC_ARM_ATAGS_OFFSET;
170 171
171 /* copy our kernel relocation code to the control code page */ 172 /* copy our kernel relocation code to the control code page */
172 reboot_entry = fncpy(reboot_code_buffer, 173 reboot_entry = fncpy(reboot_code_buffer,
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 4e80bf7420d4..8e9a3e40d949 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -987,6 +987,9 @@ static void __init reserve_crashkernel(void)
987 987
988 if (crash_base <= 0) { 988 if (crash_base <= 0) {
989 unsigned long long crash_max = idmap_to_phys((u32)~0); 989 unsigned long long crash_max = idmap_to_phys((u32)~0);
990 unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
991 if (crash_max > lowmem_max)
992 crash_max = lowmem_max;
990 crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max, 993 crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
991 crash_size, CRASH_ALIGN); 994 crash_size, CRASH_ALIGN);
992 if (!crash_base) { 995 if (!crash_base) {
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 7b8f2141427b..5814298ef0b7 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -40,8 +40,10 @@ static int preserve_crunch_context(struct crunch_sigframe __user *frame)
40 return __copy_to_user(frame, kframe, sizeof(*frame)); 40 return __copy_to_user(frame, kframe, sizeof(*frame));
41} 41}
42 42
43static int restore_crunch_context(struct crunch_sigframe __user *frame) 43static int restore_crunch_context(char __user **auxp)
44{ 44{
45 struct crunch_sigframe __user *frame =
46 (struct crunch_sigframe __user *)*auxp;
45 char kbuf[sizeof(*frame) + 8]; 47 char kbuf[sizeof(*frame) + 8];
46 struct crunch_sigframe *kframe; 48 struct crunch_sigframe *kframe;
47 49
@@ -52,6 +54,7 @@ static int restore_crunch_context(struct crunch_sigframe __user *frame)
52 if (kframe->magic != CRUNCH_MAGIC || 54 if (kframe->magic != CRUNCH_MAGIC ||
53 kframe->size != CRUNCH_STORAGE_SIZE) 55 kframe->size != CRUNCH_STORAGE_SIZE)
54 return -1; 56 return -1;
57 *auxp += CRUNCH_STORAGE_SIZE;
55 crunch_task_restore(current_thread_info(), &kframe->storage); 58 crunch_task_restore(current_thread_info(), &kframe->storage);
56 return 0; 59 return 0;
57} 60}
@@ -59,21 +62,39 @@ static int restore_crunch_context(struct crunch_sigframe __user *frame)
59 62
60#ifdef CONFIG_IWMMXT 63#ifdef CONFIG_IWMMXT
61 64
62static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame) 65static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
63{ 66{
64 char kbuf[sizeof(*frame) + 8]; 67 char kbuf[sizeof(*frame) + 8];
65 struct iwmmxt_sigframe *kframe; 68 struct iwmmxt_sigframe *kframe;
69 int err = 0;
66 70
67 /* the iWMMXt context must be 64 bit aligned */ 71 /* the iWMMXt context must be 64 bit aligned */
68 kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7); 72 kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
69 kframe->magic = IWMMXT_MAGIC; 73
70 kframe->size = IWMMXT_STORAGE_SIZE; 74 if (test_thread_flag(TIF_USING_IWMMXT)) {
71 iwmmxt_task_copy(current_thread_info(), &kframe->storage); 75 kframe->magic = IWMMXT_MAGIC;
72 return __copy_to_user(frame, kframe, sizeof(*frame)); 76 kframe->size = IWMMXT_STORAGE_SIZE;
77 iwmmxt_task_copy(current_thread_info(), &kframe->storage);
78
79 err = __copy_to_user(frame, kframe, sizeof(*frame));
80 } else {
81 /*
82 * For bug-compatibility with older kernels, some space
83 * has to be reserved for iWMMXt even if it's not used.
84 * Set the magic and size appropriately so that properly
85 * written userspace can skip it reliably:
86 */
87 __put_user_error(DUMMY_MAGIC, &frame->magic, err);
88 __put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
89 }
90
91 return err;
73} 92}
74 93
75static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame) 94static int restore_iwmmxt_context(char __user **auxp)
76{ 95{
96 struct iwmmxt_sigframe __user *frame =
97 (struct iwmmxt_sigframe __user *)*auxp;
77 char kbuf[sizeof(*frame) + 8]; 98 char kbuf[sizeof(*frame) + 8];
78 struct iwmmxt_sigframe *kframe; 99 struct iwmmxt_sigframe *kframe;
79 100
@@ -81,10 +102,28 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
81 kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7); 102 kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
82 if (__copy_from_user(kframe, frame, sizeof(*frame))) 103 if (__copy_from_user(kframe, frame, sizeof(*frame)))
83 return -1; 104 return -1;
84 if (kframe->magic != IWMMXT_MAGIC || 105
85 kframe->size != IWMMXT_STORAGE_SIZE) 106 /*
107 * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
108 * block is discarded for compatibility with setup_sigframe() if
109 * present, but we don't mandate its presence. If some other
110 * magic is here, it's not for us:
111 */
112 if (!test_thread_flag(TIF_USING_IWMMXT) &&
113 kframe->magic != DUMMY_MAGIC)
114 return 0;
115
116 if (kframe->size != IWMMXT_STORAGE_SIZE)
86 return -1; 117 return -1;
87 iwmmxt_task_restore(current_thread_info(), &kframe->storage); 118
119 if (test_thread_flag(TIF_USING_IWMMXT)) {
120 if (kframe->magic != IWMMXT_MAGIC)
121 return -1;
122
123 iwmmxt_task_restore(current_thread_info(), &kframe->storage);
124 }
125
126 *auxp += IWMMXT_STORAGE_SIZE;
88 return 0; 127 return 0;
89} 128}
90 129
@@ -107,8 +146,10 @@ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
107 return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc); 146 return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
108} 147}
109 148
110static int restore_vfp_context(struct vfp_sigframe __user *frame) 149static int restore_vfp_context(char __user **auxp)
111{ 150{
151 struct vfp_sigframe __user *frame =
152 (struct vfp_sigframe __user *)*auxp;
112 unsigned long magic; 153 unsigned long magic;
113 unsigned long size; 154 unsigned long size;
114 int err = 0; 155 int err = 0;
@@ -121,6 +162,7 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
121 if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) 162 if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
122 return -EINVAL; 163 return -EINVAL;
123 164
165 *auxp += size;
124 return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc); 166 return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
125} 167}
126 168
@@ -141,7 +183,7 @@ struct rt_sigframe {
141 183
142static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) 184static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
143{ 185{
144 struct aux_sigframe __user *aux; 186 char __user *aux;
145 sigset_t set; 187 sigset_t set;
146 int err; 188 int err;
147 189
@@ -169,18 +211,18 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
169 211
170 err |= !valid_user_regs(regs); 212 err |= !valid_user_regs(regs);
171 213
172 aux = (struct aux_sigframe __user *) sf->uc.uc_regspace; 214 aux = (char __user *) sf->uc.uc_regspace;
173#ifdef CONFIG_CRUNCH 215#ifdef CONFIG_CRUNCH
174 if (err == 0) 216 if (err == 0)
175 err |= restore_crunch_context(&aux->crunch); 217 err |= restore_crunch_context(&aux);
176#endif 218#endif
177#ifdef CONFIG_IWMMXT 219#ifdef CONFIG_IWMMXT
178 if (err == 0 && test_thread_flag(TIF_USING_IWMMXT)) 220 if (err == 0)
179 err |= restore_iwmmxt_context(&aux->iwmmxt); 221 err |= restore_iwmmxt_context(&aux);
180#endif 222#endif
181#ifdef CONFIG_VFP 223#ifdef CONFIG_VFP
182 if (err == 0) 224 if (err == 0)
183 err |= restore_vfp_context(&aux->vfp); 225 err |= restore_vfp_context(&aux);
184#endif 226#endif
185 227
186 return err; 228 return err;
@@ -286,7 +328,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
286 err |= preserve_crunch_context(&aux->crunch); 328 err |= preserve_crunch_context(&aux->crunch);
287#endif 329#endif
288#ifdef CONFIG_IWMMXT 330#ifdef CONFIG_IWMMXT
289 if (err == 0 && test_thread_flag(TIF_USING_IWMMXT)) 331 if (err == 0)
290 err |= preserve_iwmmxt_context(&aux->iwmmxt); 332 err |= preserve_iwmmxt_context(&aux->iwmmxt);
291#endif 333#endif
292#ifdef CONFIG_VFP 334#ifdef CONFIG_VFP
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index be517b048762..5b614388d72f 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -32,120 +32,6 @@ static u16 control_devconf1_offset;
32 32
33#define HSMMC_NAME_LEN 9 33#define HSMMC_NAME_LEN 9
34 34
35static void omap_hsmmc1_before_set_reg(struct device *dev,
36 int power_on, int vdd)
37{
38 u32 reg, prog_io;
39 struct omap_hsmmc_platform_data *mmc = dev->platform_data;
40
41 if (mmc->remux)
42 mmc->remux(dev, power_on);
43
44 /*
45 * Assume we power both OMAP VMMC1 (for CMD, CLK, DAT0..3) and the
46 * card with Vcc regulator (from twl4030 or whatever). OMAP has both
47 * 1.8V and 3.0V modes, controlled by the PBIAS register.
48 *
49 * In 8-bit modes, OMAP VMMC1A (for DAT4..7) needs a supply, which
50 * is most naturally TWL VSIM; those pins also use PBIAS.
51 *
52 * FIXME handle VMMC1A as needed ...
53 */
54 if (power_on) {
55 if (cpu_is_omap2430()) {
56 reg = omap_ctrl_readl(OMAP243X_CONTROL_DEVCONF1);
57 if ((1 << vdd) >= MMC_VDD_30_31)
58 reg |= OMAP243X_MMC1_ACTIVE_OVERWRITE;
59 else
60 reg &= ~OMAP243X_MMC1_ACTIVE_OVERWRITE;
61 omap_ctrl_writel(reg, OMAP243X_CONTROL_DEVCONF1);
62 }
63
64 if (mmc->internal_clock) {
65 reg = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
66 reg |= OMAP2_MMCSDIO1ADPCLKISEL;
67 omap_ctrl_writel(reg, OMAP2_CONTROL_DEVCONF0);
68 }
69
70 reg = omap_ctrl_readl(control_pbias_offset);
71 if (cpu_is_omap3630()) {
72 /* Set MMC I/O to 52MHz */
73 prog_io = omap_ctrl_readl(OMAP343X_CONTROL_PROG_IO1);
74 prog_io |= OMAP3630_PRG_SDMMC1_SPEEDCTRL;
75 omap_ctrl_writel(prog_io, OMAP343X_CONTROL_PROG_IO1);
76 } else {
77 reg |= OMAP2_PBIASSPEEDCTRL0;
78 }
79 reg &= ~OMAP2_PBIASLITEPWRDNZ0;
80 omap_ctrl_writel(reg, control_pbias_offset);
81 } else {
82 reg = omap_ctrl_readl(control_pbias_offset);
83 reg &= ~OMAP2_PBIASLITEPWRDNZ0;
84 omap_ctrl_writel(reg, control_pbias_offset);
85 }
86}
87
88static void omap_hsmmc1_after_set_reg(struct device *dev, int power_on, int vdd)
89{
90 u32 reg;
91
92 /* 100ms delay required for PBIAS configuration */
93 msleep(100);
94
95 if (power_on) {
96 reg = omap_ctrl_readl(control_pbias_offset);
97 reg |= (OMAP2_PBIASLITEPWRDNZ0 | OMAP2_PBIASSPEEDCTRL0);
98 if ((1 << vdd) <= MMC_VDD_165_195)
99 reg &= ~OMAP2_PBIASLITEVMODE0;
100 else
101 reg |= OMAP2_PBIASLITEVMODE0;
102 omap_ctrl_writel(reg, control_pbias_offset);
103 } else {
104 reg = omap_ctrl_readl(control_pbias_offset);
105 reg |= (OMAP2_PBIASSPEEDCTRL0 | OMAP2_PBIASLITEPWRDNZ0 |
106 OMAP2_PBIASLITEVMODE0);
107 omap_ctrl_writel(reg, control_pbias_offset);
108 }
109}
110
111static void hsmmc2_select_input_clk_src(struct omap_hsmmc_platform_data *mmc)
112{
113 u32 reg;
114
115 reg = omap_ctrl_readl(control_devconf1_offset);
116 if (mmc->internal_clock)
117 reg |= OMAP2_MMCSDIO2ADPCLKISEL;
118 else
119 reg &= ~OMAP2_MMCSDIO2ADPCLKISEL;
120 omap_ctrl_writel(reg, control_devconf1_offset);
121}
122
123static void hsmmc2_before_set_reg(struct device *dev, int power_on, int vdd)
124{
125 struct omap_hsmmc_platform_data *mmc = dev->platform_data;
126
127 if (mmc->remux)
128 mmc->remux(dev, power_on);
129
130 if (power_on)
131 hsmmc2_select_input_clk_src(mmc);
132}
133
134static int am35x_hsmmc2_set_power(struct device *dev, int power_on, int vdd)
135{
136 struct omap_hsmmc_platform_data *mmc = dev->platform_data;
137
138 if (power_on)
139 hsmmc2_select_input_clk_src(mmc);
140
141 return 0;
142}
143
144static int nop_mmc_set_power(struct device *dev, int power_on, int vdd)
145{
146 return 0;
147}
148
149static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c, 35static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
150 struct omap_hsmmc_platform_data *mmc) 36 struct omap_hsmmc_platform_data *mmc)
151{ 37{
@@ -157,101 +43,11 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
157 return -ENOMEM; 43 return -ENOMEM;
158 } 44 }
159 45
160 if (c->name) 46 snprintf(hc_name, (HSMMC_NAME_LEN + 1), "mmc%islot%i", c->mmc, 1);
161 strncpy(hc_name, c->name, HSMMC_NAME_LEN);
162 else
163 snprintf(hc_name, (HSMMC_NAME_LEN + 1), "mmc%islot%i",
164 c->mmc, 1);
165 mmc->name = hc_name; 47 mmc->name = hc_name;
166 mmc->caps = c->caps; 48 mmc->caps = c->caps;
167 mmc->internal_clock = !c->ext_clock;
168 mmc->reg_offset = 0; 49 mmc->reg_offset = 0;
169 50
170 if (c->cover_only) {
171 /* detect if mobile phone cover removed */
172 mmc->gpio_cd = -EINVAL;
173 mmc->gpio_cod = c->gpio_cd;
174 } else {
175 /* card detect pin on the mmc socket itself */
176 mmc->gpio_cd = c->gpio_cd;
177 mmc->gpio_cod = -EINVAL;
178 }
179 mmc->gpio_wp = c->gpio_wp;
180
181 mmc->remux = c->remux;
182 mmc->init_card = c->init_card;
183
184 if (c->nonremovable)
185 mmc->nonremovable = 1;
186
187 /*
188 * NOTE: MMC slots should have a Vcc regulator set up.
189 * This may be from a TWL4030-family chip, another
190 * controllable regulator, or a fixed supply.
191 *
192 * temporary HACK: ocr_mask instead of fixed supply
193 */
194 if (soc_is_am35xx())
195 mmc->ocr_mask = MMC_VDD_165_195 |
196 MMC_VDD_26_27 |
197 MMC_VDD_27_28 |
198 MMC_VDD_29_30 |
199 MMC_VDD_30_31 |
200 MMC_VDD_31_32;
201 else
202 mmc->ocr_mask = c->ocr_mask;
203
204 if (!soc_is_am35xx())
205 mmc->features |= HSMMC_HAS_PBIAS;
206
207 switch (c->mmc) {
208 case 1:
209 if (mmc->features & HSMMC_HAS_PBIAS) {
210 /* on-chip level shifting via PBIAS0/PBIAS1 */
211 mmc->before_set_reg =
212 omap_hsmmc1_before_set_reg;
213 mmc->after_set_reg =
214 omap_hsmmc1_after_set_reg;
215 }
216
217 if (soc_is_am35xx())
218 mmc->set_power = nop_mmc_set_power;
219
220 /* OMAP3630 HSMMC1 supports only 4-bit */
221 if (cpu_is_omap3630() &&
222 (c->caps & MMC_CAP_8_BIT_DATA)) {
223 c->caps &= ~MMC_CAP_8_BIT_DATA;
224 c->caps |= MMC_CAP_4_BIT_DATA;
225 mmc->caps = c->caps;
226 }
227 break;
228 case 2:
229 if (soc_is_am35xx())
230 mmc->set_power = am35x_hsmmc2_set_power;
231
232 if (c->ext_clock)
233 c->transceiver = 1;
234 if (c->transceiver && (c->caps & MMC_CAP_8_BIT_DATA)) {
235 c->caps &= ~MMC_CAP_8_BIT_DATA;
236 c->caps |= MMC_CAP_4_BIT_DATA;
237 }
238 if (mmc->features & HSMMC_HAS_PBIAS) {
239 /* off-chip level shifting, or none */
240 mmc->before_set_reg = hsmmc2_before_set_reg;
241 mmc->after_set_reg = NULL;
242 }
243 break;
244 case 3:
245 case 4:
246 case 5:
247 mmc->before_set_reg = NULL;
248 mmc->after_set_reg = NULL;
249 break;
250 default:
251 pr_err("MMC%d configuration not supported!\n", c->mmc);
252 kfree(hc_name);
253 return -ENODEV;
254 }
255 return 0; 51 return 0;
256} 52}
257 53
@@ -260,7 +56,6 @@ static int omap_hsmmc_done;
260void omap_hsmmc_late_init(struct omap2_hsmmc_info *c) 56void omap_hsmmc_late_init(struct omap2_hsmmc_info *c)
261{ 57{
262 struct platform_device *pdev; 58 struct platform_device *pdev;
263 struct omap_hsmmc_platform_data *mmc_pdata;
264 int res; 59 int res;
265 60
266 if (omap_hsmmc_done != 1) 61 if (omap_hsmmc_done != 1)
@@ -269,32 +64,12 @@ void omap_hsmmc_late_init(struct omap2_hsmmc_info *c)
269 omap_hsmmc_done++; 64 omap_hsmmc_done++;
270 65
271 for (; c->mmc; c++) { 66 for (; c->mmc; c++) {
272 if (!c->deferred)
273 continue;
274
275 pdev = c->pdev; 67 pdev = c->pdev;
276 if (!pdev) 68 if (!pdev)
277 continue; 69 continue;
278
279 mmc_pdata = pdev->dev.platform_data;
280 if (!mmc_pdata)
281 continue;
282
283 if (c->cover_only) {
284 /* detect if mobile phone cover removed */
285 mmc_pdata->gpio_cd = -EINVAL;
286 mmc_pdata->gpio_cod = c->gpio_cd;
287 } else {
288 /* card detect pin on the mmc socket itself */
289 mmc_pdata->gpio_cd = c->gpio_cd;
290 mmc_pdata->gpio_cod = -EINVAL;
291 }
292 mmc_pdata->gpio_wp = c->gpio_wp;
293
294 res = omap_device_register(pdev); 70 res = omap_device_register(pdev);
295 if (res) 71 if (res)
296 pr_err("Could not late init MMC %s\n", 72 pr_err("Could not late init MMC\n");
297 c->name);
298 } 73 }
299} 74}
300 75
@@ -336,13 +111,6 @@ static void __init omap_hsmmc_init_one(struct omap2_hsmmc_info *hsmmcinfo,
336 if (oh->dev_attr != NULL) { 111 if (oh->dev_attr != NULL) {
337 mmc_dev_attr = oh->dev_attr; 112 mmc_dev_attr = oh->dev_attr;
338 mmc_data->controller_flags = mmc_dev_attr->flags; 113 mmc_data->controller_flags = mmc_dev_attr->flags;
339 /*
340 * erratum 2.1.1.128 doesn't apply if board has
341 * a transceiver is attached
342 */
343 if (hsmmcinfo->transceiver)
344 mmc_data->controller_flags &=
345 ~OMAP_HSMMC_BROKEN_MULTIBLOCK_READ;
346 } 114 }
347 115
348 pdev = platform_device_alloc(name, ctrl_nr - 1); 116 pdev = platform_device_alloc(name, ctrl_nr - 1);
@@ -367,9 +135,6 @@ static void __init omap_hsmmc_init_one(struct omap2_hsmmc_info *hsmmcinfo,
367 135
368 hsmmcinfo->pdev = pdev; 136 hsmmcinfo->pdev = pdev;
369 137
370 if (hsmmcinfo->deferred)
371 goto free_mmc;
372
373 res = omap_device_register(pdev); 138 res = omap_device_register(pdev);
374 if (res) { 139 if (res) {
375 pr_err("Could not register od for %s\n", name); 140 pr_err("Could not register od for %s\n", name);
diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h
index 69b619ddc765..af9af5094ec3 100644
--- a/arch/arm/mach-omap2/hsmmc.h
+++ b/arch/arm/mach-omap2/hsmmc.h
@@ -12,18 +12,9 @@ struct omap2_hsmmc_info {
12 u8 mmc; /* controller 1/2/3 */ 12 u8 mmc; /* controller 1/2/3 */
13 u32 caps; /* 4/8 wires and any additional host 13 u32 caps; /* 4/8 wires and any additional host
14 * capabilities OR'd (ref. linux/mmc/host.h) */ 14 * capabilities OR'd (ref. linux/mmc/host.h) */
15 bool transceiver; /* MMC-2 option */
16 bool ext_clock; /* use external pin for input clock */
17 bool cover_only; /* No card detect - just cover switch */
18 bool nonremovable; /* Nonremovable e.g. eMMC */
19 bool deferred; /* mmc needs a deferred probe */
20 int gpio_cd; /* or -EINVAL */ 15 int gpio_cd; /* or -EINVAL */
21 int gpio_wp; /* or -EINVAL */ 16 int gpio_wp; /* or -EINVAL */
22 char *name; /* or NULL for default */
23 struct platform_device *pdev; /* mmc controller instance */ 17 struct platform_device *pdev; /* mmc controller instance */
24 int ocr_mask; /* temporary HACK */
25 /* Remux (pad configuration) when powering on/off */
26 void (*remux)(struct device *dev, int power_on);
27 /* init some special card */ 18 /* init some special card */
28 void (*init_card)(struct mmc_card *card); 19 void (*init_card)(struct mmc_card *card);
29}; 20};
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 28083ef72819..71a34e8c345a 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -133,6 +133,7 @@ static irqreturn_t db8500_pmu_handler(int irq, void *dev, irq_handler_t handler)
133 133
134static struct arm_pmu_platdata db8500_pmu_platdata = { 134static struct arm_pmu_platdata db8500_pmu_platdata = {
135 .handle_irq = db8500_pmu_handler, 135 .handle_irq = db8500_pmu_handler,
136 .irq_flags = IRQF_NOBALANCING | IRQF_NO_THREAD,
136}; 137};
137 138
138static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = { 139static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index 90ee354d803e..6db5fc26d154 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -40,9 +40,21 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
40 40
41{ 41{
42 const struct dma_map_ops *ops = &dma_noop_ops; 42 const struct dma_map_ops *ops = &dma_noop_ops;
43 void *ret;
43 44
44 /* 45 /*
45 * We are here because: 46 * Try generic allocator first if we are advertised that
47 * consistency is not required.
48 */
49
50 if (attrs & DMA_ATTR_NON_CONSISTENT)
51 return ops->alloc(dev, size, dma_handle, gfp, attrs);
52
53 ret = dma_alloc_from_global_coherent(size, dma_handle);
54
55 /*
56 * dma_alloc_from_global_coherent() may fail because:
57 *
46 * - no consistent DMA region has been defined, so we can't 58 * - no consistent DMA region has been defined, so we can't
47 * continue. 59 * continue.
48 * - there is no space left in consistent DMA region, so we 60 * - there is no space left in consistent DMA region, so we
@@ -50,11 +62,8 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
50 * advertised that consistency is not required. 62 * advertised that consistency is not required.
51 */ 63 */
52 64
53 if (attrs & DMA_ATTR_NON_CONSISTENT) 65 WARN_ON_ONCE(ret == NULL);
54 return ops->alloc(dev, size, dma_handle, gfp, attrs); 66 return ret;
55
56 WARN_ON_ONCE(1);
57 return NULL;
58} 67}
59 68
60static void arm_nommu_dma_free(struct device *dev, size_t size, 69static void arm_nommu_dma_free(struct device *dev, size_t size,
@@ -63,14 +72,31 @@ static void arm_nommu_dma_free(struct device *dev, size_t size,
63{ 72{
64 const struct dma_map_ops *ops = &dma_noop_ops; 73 const struct dma_map_ops *ops = &dma_noop_ops;
65 74
66 if (attrs & DMA_ATTR_NON_CONSISTENT) 75 if (attrs & DMA_ATTR_NON_CONSISTENT) {
67 ops->free(dev, size, cpu_addr, dma_addr, attrs); 76 ops->free(dev, size, cpu_addr, dma_addr, attrs);
68 else 77 } else {
69 WARN_ON_ONCE(1); 78 int ret = dma_release_from_global_coherent(get_order(size),
79 cpu_addr);
80
81 WARN_ON_ONCE(ret == 0);
82 }
70 83
71 return; 84 return;
72} 85}
73 86
87static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
88 void *cpu_addr, dma_addr_t dma_addr, size_t size,
89 unsigned long attrs)
90{
91 int ret;
92
93 if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
94 return ret;
95
96 return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
97}
98
99
74static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size, 100static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
75 enum dma_data_direction dir) 101 enum dma_data_direction dir)
76{ 102{
@@ -173,6 +199,7 @@ static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist
173const struct dma_map_ops arm_nommu_dma_ops = { 199const struct dma_map_ops arm_nommu_dma_ops = {
174 .alloc = arm_nommu_dma_alloc, 200 .alloc = arm_nommu_dma_alloc,
175 .free = arm_nommu_dma_free, 201 .free = arm_nommu_dma_free,
202 .mmap = arm_nommu_dma_mmap,
176 .map_page = arm_nommu_dma_map_page, 203 .map_page = arm_nommu_dma_map_page,
177 .unmap_page = arm_nommu_dma_unmap_page, 204 .unmap_page = arm_nommu_dma_unmap_page,
178 .map_sg = arm_nommu_dma_map_sg, 205 .map_sg = arm_nommu_dma_map_sg,
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e7380bafbfa6..fcf1473d6fed 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -851,7 +851,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
851 unsigned long pfn = dma_to_pfn(dev, dma_addr); 851 unsigned long pfn = dma_to_pfn(dev, dma_addr);
852 unsigned long off = vma->vm_pgoff; 852 unsigned long off = vma->vm_pgoff;
853 853
854 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) 854 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
855 return ret; 855 return ret;
856 856
857 if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { 857 if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 99fa69c9c3cf..9ef0797380cb 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
435 " sub x30, x30, %[ret]\n" 435 " sub x30, x30, %[ret]\n"
436 " cbnz x30, 1b\n" 436 " cbnz x30, 1b\n"
437 "2:") 437 "2:")
438 : [ret] "+&r" (x0), [v] "+Q" (v->counter) 438 : [ret] "+r" (x0), [v] "+Q" (v->counter)
439 : 439 :
440 : __LL_SC_CLOBBERS, "cc", "memory"); 440 : __LL_SC_CLOBBERS, "cc", "memory");
441 441
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 16e44fa9b3b6..248339e4aaf5 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -492,7 +492,7 @@ asm(
492 * the "%x0" template means XZR. 492 * the "%x0" template means XZR.
493 */ 493 */
494#define write_sysreg(v, r) do { \ 494#define write_sysreg(v, r) do { \
495 u64 __val = (u64)v; \ 495 u64 __val = (u64)(v); \
496 asm volatile("msr " __stringify(r) ", %x0" \ 496 asm volatile("msr " __stringify(r) ", %x0" \
497 : : "rZ" (__val)); \ 497 : : "rZ" (__val)); \
498} while (0) 498} while (0)
@@ -508,7 +508,7 @@ asm(
508}) 508})
509 509
510#define write_sysreg_s(v, r) do { \ 510#define write_sysreg_s(v, r) do { \
511 u64 __val = (u64)v; \ 511 u64 __val = (u64)(v); \
512 asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \ 512 asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
513} while (0) 513} while (0)
514 514
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 8f0a1de11e4a..fab46a0ea223 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -69,7 +69,7 @@ static inline void set_fs(mm_segment_t fs)
69 */ 69 */
70#define __range_ok(addr, size) \ 70#define __range_ok(addr, size) \
71({ \ 71({ \
72 unsigned long __addr = (unsigned long __force)(addr); \ 72 unsigned long __addr = (unsigned long)(addr); \
73 unsigned long flag, roksum; \ 73 unsigned long flag, roksum; \
74 __chk_user_ptr(addr); \ 74 __chk_user_ptr(addr); \
75 asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \ 75 asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index e137ceaf5016..d16978213c5b 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -82,8 +82,8 @@ static const char *__init cpu_read_enable_method(int cpu)
82 * Don't warn spuriously. 82 * Don't warn spuriously.
83 */ 83 */
84 if (cpu != 0) 84 if (cpu != 0)
85 pr_err("%s: missing enable-method property\n", 85 pr_err("%pOF: missing enable-method property\n",
86 dn->full_name); 86 dn);
87 } 87 }
88 } else { 88 } else {
89 enable_method = acpi_get_enable_method(cpu); 89 enable_method = acpi_get_enable_method(cpu);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 321119881abf..dc66e6ec3a99 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -469,7 +469,7 @@ static u64 __init of_get_cpu_mpidr(struct device_node *dn)
469 */ 469 */
470 cell = of_get_property(dn, "reg", NULL); 470 cell = of_get_property(dn, "reg", NULL);
471 if (!cell) { 471 if (!cell) {
472 pr_err("%s: missing reg property\n", dn->full_name); 472 pr_err("%pOF: missing reg property\n", dn);
473 return INVALID_HWID; 473 return INVALID_HWID;
474 } 474 }
475 475
@@ -478,7 +478,7 @@ static u64 __init of_get_cpu_mpidr(struct device_node *dn)
478 * Non affinity bits must be set to 0 in the DT 478 * Non affinity bits must be set to 0 in the DT
479 */ 479 */
480 if (hwid & ~MPIDR_HWID_BITMASK) { 480 if (hwid & ~MPIDR_HWID_BITMASK) {
481 pr_err("%s: invalid reg property\n", dn->full_name); 481 pr_err("%pOF: invalid reg property\n", dn);
482 return INVALID_HWID; 482 return INVALID_HWID;
483 } 483 }
484 return hwid; 484 return hwid;
@@ -627,8 +627,8 @@ static void __init of_parse_and_init_cpus(void)
627 goto next; 627 goto next;
628 628
629 if (is_mpidr_duplicate(cpu_count, hwid)) { 629 if (is_mpidr_duplicate(cpu_count, hwid)) {
630 pr_err("%s: duplicate cpu reg properties in the DT\n", 630 pr_err("%pOF: duplicate cpu reg properties in the DT\n",
631 dn->full_name); 631 dn);
632 goto next; 632 goto next;
633 } 633 }
634 634
@@ -640,8 +640,8 @@ static void __init of_parse_and_init_cpus(void)
640 */ 640 */
641 if (hwid == cpu_logical_map(0)) { 641 if (hwid == cpu_logical_map(0)) {
642 if (bootcpu_valid) { 642 if (bootcpu_valid) {
643 pr_err("%s: duplicate boot cpu reg property in DT\n", 643 pr_err("%pOF: duplicate boot cpu reg property in DT\n",
644 dn->full_name); 644 dn);
645 goto next; 645 goto next;
646 } 646 }
647 647
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 79244c75eaec..8d48b233e6ce 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -45,7 +45,7 @@ static int __init get_cpu_for_node(struct device_node *node)
45 } 45 }
46 } 46 }
47 47
48 pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name); 48 pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
49 49
50 of_node_put(cpu_node); 50 of_node_put(cpu_node);
51 return -1; 51 return -1;
@@ -71,8 +71,8 @@ static int __init parse_core(struct device_node *core, int cluster_id,
71 cpu_topology[cpu].core_id = core_id; 71 cpu_topology[cpu].core_id = core_id;
72 cpu_topology[cpu].thread_id = i; 72 cpu_topology[cpu].thread_id = i;
73 } else { 73 } else {
74 pr_err("%s: Can't get CPU for thread\n", 74 pr_err("%pOF: Can't get CPU for thread\n",
75 t->full_name); 75 t);
76 of_node_put(t); 76 of_node_put(t);
77 return -EINVAL; 77 return -EINVAL;
78 } 78 }
@@ -84,15 +84,15 @@ static int __init parse_core(struct device_node *core, int cluster_id,
84 cpu = get_cpu_for_node(core); 84 cpu = get_cpu_for_node(core);
85 if (cpu >= 0) { 85 if (cpu >= 0) {
86 if (!leaf) { 86 if (!leaf) {
87 pr_err("%s: Core has both threads and CPU\n", 87 pr_err("%pOF: Core has both threads and CPU\n",
88 core->full_name); 88 core);
89 return -EINVAL; 89 return -EINVAL;
90 } 90 }
91 91
92 cpu_topology[cpu].cluster_id = cluster_id; 92 cpu_topology[cpu].cluster_id = cluster_id;
93 cpu_topology[cpu].core_id = core_id; 93 cpu_topology[cpu].core_id = core_id;
94 } else if (leaf) { 94 } else if (leaf) {
95 pr_err("%s: Can't get CPU for leaf core\n", core->full_name); 95 pr_err("%pOF: Can't get CPU for leaf core\n", core);
96 return -EINVAL; 96 return -EINVAL;
97 } 97 }
98 98
@@ -137,8 +137,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
137 has_cores = true; 137 has_cores = true;
138 138
139 if (depth == 0) { 139 if (depth == 0) {
140 pr_err("%s: cpu-map children should be clusters\n", 140 pr_err("%pOF: cpu-map children should be clusters\n",
141 c->full_name); 141 c);
142 of_node_put(c); 142 of_node_put(c);
143 return -EINVAL; 143 return -EINVAL;
144 } 144 }
@@ -146,8 +146,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
146 if (leaf) { 146 if (leaf) {
147 ret = parse_core(c, cluster_id, core_id++); 147 ret = parse_core(c, cluster_id, core_id++);
148 } else { 148 } else {
149 pr_err("%s: Non-leaf cluster with core %s\n", 149 pr_err("%pOF: Non-leaf cluster with core %s\n",
150 cluster->full_name, name); 150 cluster, name);
151 ret = -EINVAL; 151 ret = -EINVAL;
152 } 152 }
153 153
@@ -159,7 +159,7 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
159 } while (c); 159 } while (c);
160 160
161 if (leaf && !has_cores) 161 if (leaf && !has_cores)
162 pr_warn("%s: empty cluster\n", cluster->full_name); 162 pr_warn("%pOF: empty cluster\n", cluster);
163 163
164 if (leaf) 164 if (leaf)
165 cluster_id++; 165 cluster_id++;
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index c7c7088097be..d48f47080213 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -274,10 +274,12 @@ static DEFINE_RAW_SPINLOCK(die_lock);
274void die(const char *str, struct pt_regs *regs, int err) 274void die(const char *str, struct pt_regs *regs, int err)
275{ 275{
276 int ret; 276 int ret;
277 unsigned long flags;
278
279 raw_spin_lock_irqsave(&die_lock, flags);
277 280
278 oops_enter(); 281 oops_enter();
279 282
280 raw_spin_lock_irq(&die_lock);
281 console_verbose(); 283 console_verbose();
282 bust_spinlocks(1); 284 bust_spinlocks(1);
283 ret = __die(str, err, regs); 285 ret = __die(str, err, regs);
@@ -287,13 +289,15 @@ void die(const char *str, struct pt_regs *regs, int err)
287 289
288 bust_spinlocks(0); 290 bust_spinlocks(0);
289 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 291 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
290 raw_spin_unlock_irq(&die_lock);
291 oops_exit(); 292 oops_exit();
292 293
293 if (in_interrupt()) 294 if (in_interrupt())
294 panic("Fatal exception in interrupt"); 295 panic("Fatal exception in interrupt");
295 if (panic_on_oops) 296 if (panic_on_oops)
296 panic("Fatal exception"); 297 panic("Fatal exception");
298
299 raw_spin_unlock_irqrestore(&die_lock, flags);
300
297 if (ret != NOTIFY_STOP) 301 if (ret != NOTIFY_STOP)
298 do_exit(SIGSEGV); 302 do_exit(SIGSEGV);
299} 303}
diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S
index c3cd65e31814..076c43715e64 100644
--- a/arch/arm64/lib/copy_page.S
+++ b/arch/arm64/lib/copy_page.S
@@ -30,9 +30,10 @@
30 */ 30 */
31ENTRY(copy_page) 31ENTRY(copy_page)
32alternative_if ARM64_HAS_NO_HW_PREFETCH 32alternative_if ARM64_HAS_NO_HW_PREFETCH
33 # Prefetch two cache lines ahead. 33 // Prefetch three cache lines ahead.
34 prfm pldl1strm, [x1, #128] 34 prfm pldl1strm, [x1, #128]
35 prfm pldl1strm, [x1, #256] 35 prfm pldl1strm, [x1, #256]
36 prfm pldl1strm, [x1, #384]
36alternative_else_nop_endif 37alternative_else_nop_endif
37 38
38 ldp x2, x3, [x1] 39 ldp x2, x3, [x1]
@@ -50,7 +51,7 @@ alternative_else_nop_endif
50 subs x18, x18, #128 51 subs x18, x18, #128
51 52
52alternative_if ARM64_HAS_NO_HW_PREFETCH 53alternative_if ARM64_HAS_NO_HW_PREFETCH
53 prfm pldl1strm, [x1, #384] 54 prfm pldl1strm, [x1, #384]
54alternative_else_nop_endif 55alternative_else_nop_endif
55 56
56 stnp x2, x3, [x0] 57 stnp x2, x3, [x0]
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index e90cd1db42a8..f27d4dd04384 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -329,7 +329,7 @@ static int __swiotlb_mmap(struct device *dev,
329 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, 329 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
330 is_device_dma_coherent(dev)); 330 is_device_dma_coherent(dev));
331 331
332 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) 332 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
333 return ret; 333 return ret;
334 334
335 return __swiotlb_mmap_pfn(vma, pfn, size); 335 return __swiotlb_mmap_pfn(vma, pfn, size);
@@ -706,7 +706,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
706 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, 706 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
707 is_device_dma_coherent(dev)); 707 is_device_dma_coherent(dev));
708 708
709 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) 709 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
710 return ret; 710 return ret;
711 711
712 if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { 712 if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 23c2d89a362e..f1eb15e0e864 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -496,7 +496,7 @@ void mark_rodata_ro(void)
496 496
497static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end, 497static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
498 pgprot_t prot, struct vm_struct *vma, 498 pgprot_t prot, struct vm_struct *vma,
499 int flags) 499 int flags, unsigned long vm_flags)
500{ 500{
501 phys_addr_t pa_start = __pa_symbol(va_start); 501 phys_addr_t pa_start = __pa_symbol(va_start);
502 unsigned long size = va_end - va_start; 502 unsigned long size = va_end - va_start;
@@ -507,10 +507,13 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
507 __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot, 507 __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
508 early_pgtable_alloc, flags); 508 early_pgtable_alloc, flags);
509 509
510 if (!(vm_flags & VM_NO_GUARD))
511 size += PAGE_SIZE;
512
510 vma->addr = va_start; 513 vma->addr = va_start;
511 vma->phys_addr = pa_start; 514 vma->phys_addr = pa_start;
512 vma->size = size; 515 vma->size = size;
513 vma->flags = VM_MAP; 516 vma->flags = VM_MAP | vm_flags;
514 vma->caller = __builtin_return_address(0); 517 vma->caller = __builtin_return_address(0);
515 518
516 vm_area_add_early(vma); 519 vm_area_add_early(vma);
@@ -541,14 +544,15 @@ static void __init map_kernel(pgd_t *pgd)
541 * Only rodata will be remapped with different permissions later on, 544 * Only rodata will be remapped with different permissions later on,
542 * all other segments are allowed to use contiguous mappings. 545 * all other segments are allowed to use contiguous mappings.
543 */ 546 */
544 map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0); 547 map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0,
548 VM_NO_GUARD);
545 map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL, 549 map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
546 &vmlinux_rodata, NO_CONT_MAPPINGS); 550 &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
547 map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot, 551 map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
548 &vmlinux_inittext, 0); 552 &vmlinux_inittext, 0, VM_NO_GUARD);
549 map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL, 553 map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
550 &vmlinux_initdata, 0); 554 &vmlinux_initdata, 0, VM_NO_GUARD);
551 map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0); 555 map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
552 556
553 if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) { 557 if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
554 /* 558 /*
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index b388a99fea7b..dad128ba98bf 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -208,8 +208,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
208 } 208 }
209 209
210 node_set(nid, numa_nodes_parsed); 210 node_set(nid, numa_nodes_parsed);
211 pr_info("Adding memblock [0x%llx - 0x%llx] on node %d\n",
212 start, (end - 1), nid);
213 return ret; 211 return ret;
214} 212}
215 213
@@ -223,10 +221,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
223 void *nd; 221 void *nd;
224 int tnid; 222 int tnid;
225 223
226 if (start_pfn < end_pfn) 224 if (start_pfn >= end_pfn)
227 pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
228 start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
229 else
230 pr_info("Initmem setup node %d [<memory-less node>]\n", nid); 225 pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
231 226
232 nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); 227 nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index e08598c70b3e..8e78251eccc2 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -232,7 +232,7 @@ static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
232 else 232 else
233 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 233 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
234 234
235 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) 235 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
236 return ret; 236 return ret;
237 237
238 if (off < count && user_count <= (count - off)) { 238 if (off < count && user_count <= (count - off)) {
diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig
index 143d02652792..ccc109761f44 100644
--- a/arch/parisc/configs/712_defconfig
+++ b/arch/parisc/configs/712_defconfig
@@ -1,11 +1,9 @@
1CONFIG_EXPERIMENTAL=y
2# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
4CONFIG_POSIX_MQUEUE=y 3CONFIG_POSIX_MQUEUE=y
5CONFIG_IKCONFIG=y 4CONFIG_IKCONFIG=y
6CONFIG_IKCONFIG_PROC=y 5CONFIG_IKCONFIG_PROC=y
7CONFIG_LOG_BUF_SHIFT=16 6CONFIG_LOG_BUF_SHIFT=16
8CONFIG_SYSFS_DEPRECATED_V2=y
9CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
10CONFIG_KALLSYMS_ALL=y 8CONFIG_KALLSYMS_ALL=y
11CONFIG_SLAB=y 9CONFIG_SLAB=y
@@ -14,7 +12,6 @@ CONFIG_OPROFILE=m
14CONFIG_MODULES=y 12CONFIG_MODULES=y
15CONFIG_MODULE_UNLOAD=y 13CONFIG_MODULE_UNLOAD=y
16CONFIG_MODULE_FORCE_UNLOAD=y 14CONFIG_MODULE_FORCE_UNLOAD=y
17# CONFIG_BLK_DEV_BSG is not set
18CONFIG_PA7100LC=y 15CONFIG_PA7100LC=y
19CONFIG_PREEMPT_VOLUNTARY=y 16CONFIG_PREEMPT_VOLUNTARY=y
20CONFIG_GSC_LASI=y 17CONFIG_GSC_LASI=y
@@ -32,11 +29,9 @@ CONFIG_IP_PNP_DHCP=y
32CONFIG_IP_PNP_BOOTP=y 29CONFIG_IP_PNP_BOOTP=y
33CONFIG_INET_AH=m 30CONFIG_INET_AH=m
34CONFIG_INET_ESP=m 31CONFIG_INET_ESP=m
35# CONFIG_INET_LRO is not set
36CONFIG_INET_DIAG=m 32CONFIG_INET_DIAG=m
37# CONFIG_IPV6 is not set 33# CONFIG_IPV6 is not set
38CONFIG_NETFILTER=y 34CONFIG_NETFILTER=y
39CONFIG_IP_NF_QUEUE=m
40CONFIG_LLC2=m 35CONFIG_LLC2=m
41CONFIG_NET_PKTGEN=m 36CONFIG_NET_PKTGEN=m
42CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 37CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -65,21 +60,20 @@ CONFIG_MD_LINEAR=m
65CONFIG_MD_RAID0=m 60CONFIG_MD_RAID0=m
66CONFIG_MD_RAID1=m 61CONFIG_MD_RAID1=m
67CONFIG_NETDEVICES=y 62CONFIG_NETDEVICES=y
68CONFIG_DUMMY=m
69CONFIG_BONDING=m 63CONFIG_BONDING=m
64CONFIG_DUMMY=m
70CONFIG_TUN=m 65CONFIG_TUN=m
71CONFIG_NET_ETHERNET=y
72CONFIG_MII=m
73CONFIG_LASI_82596=y 66CONFIG_LASI_82596=y
74CONFIG_PPP=m 67CONFIG_PPP=m
75CONFIG_PPP_ASYNC=m
76CONFIG_PPP_SYNC_TTY=m
77CONFIG_PPP_DEFLATE=m
78CONFIG_PPP_BSDCOMP=m 68CONFIG_PPP_BSDCOMP=m
69CONFIG_PPP_DEFLATE=m
79CONFIG_PPP_MPPE=m 70CONFIG_PPP_MPPE=m
80CONFIG_PPPOE=m 71CONFIG_PPPOE=m
72CONFIG_PPP_ASYNC=m
73CONFIG_PPP_SYNC_TTY=m
81# CONFIG_KEYBOARD_HIL_OLD is not set 74# CONFIG_KEYBOARD_HIL_OLD is not set
82CONFIG_MOUSE_SERIAL=m 75CONFIG_MOUSE_SERIAL=m
76CONFIG_LEGACY_PTY_COUNT=64
83CONFIG_SERIAL_8250=y 77CONFIG_SERIAL_8250=y
84CONFIG_SERIAL_8250_CONSOLE=y 78CONFIG_SERIAL_8250_CONSOLE=y
85CONFIG_SERIAL_8250_NR_UARTS=17 79CONFIG_SERIAL_8250_NR_UARTS=17
@@ -88,22 +82,17 @@ CONFIG_SERIAL_8250_MANY_PORTS=y
88CONFIG_SERIAL_8250_SHARE_IRQ=y 82CONFIG_SERIAL_8250_SHARE_IRQ=y
89# CONFIG_SERIAL_MUX is not set 83# CONFIG_SERIAL_MUX is not set
90CONFIG_PDC_CONSOLE=y 84CONFIG_PDC_CONSOLE=y
91CONFIG_LEGACY_PTY_COUNT=64
92CONFIG_PRINTER=m 85CONFIG_PRINTER=m
93CONFIG_PPDEV=m 86CONFIG_PPDEV=m
94# CONFIG_HW_RANDOM is not set 87# CONFIG_HW_RANDOM is not set
95CONFIG_RAW_DRIVER=y 88CONFIG_RAW_DRIVER=y
96# CONFIG_HWMON is not set 89# CONFIG_HWMON is not set
97CONFIG_VIDEO_OUTPUT_CONTROL=m
98CONFIG_FB=y 90CONFIG_FB=y
99CONFIG_FB_MODE_HELPERS=y 91CONFIG_FB_MODE_HELPERS=y
100CONFIG_FB_TILEBLITTING=y 92CONFIG_FB_TILEBLITTING=y
101CONFIG_DUMMY_CONSOLE_COLUMNS=128 93CONFIG_DUMMY_CONSOLE_COLUMNS=128
102CONFIG_DUMMY_CONSOLE_ROWS=48 94CONFIG_DUMMY_CONSOLE_ROWS=48
103CONFIG_FRAMEBUFFER_CONSOLE=y 95CONFIG_FRAMEBUFFER_CONSOLE=y
104CONFIG_FONTS=y
105CONFIG_FONT_8x8=y
106CONFIG_FONT_8x16=y
107CONFIG_LOGO=y 96CONFIG_LOGO=y
108# CONFIG_LOGO_LINUX_MONO is not set 97# CONFIG_LOGO_LINUX_MONO is not set
109# CONFIG_LOGO_LINUX_VGA16 is not set 98# CONFIG_LOGO_LINUX_VGA16 is not set
@@ -111,13 +100,9 @@ CONFIG_LOGO=y
111CONFIG_SOUND=y 100CONFIG_SOUND=y
112CONFIG_SND=y 101CONFIG_SND=y
113CONFIG_SND_SEQUENCER=y 102CONFIG_SND_SEQUENCER=y
114CONFIG_SND_MIXER_OSS=y
115CONFIG_SND_PCM_OSS=y
116CONFIG_SND_SEQUENCER_OSS=y
117CONFIG_SND_HARMONY=y 103CONFIG_SND_HARMONY=y
118CONFIG_EXT2_FS=y 104CONFIG_EXT2_FS=y
119CONFIG_EXT3_FS=y 105CONFIG_EXT3_FS=y
120# CONFIG_EXT3_FS_XATTR is not set
121CONFIG_JFS_FS=m 106CONFIG_JFS_FS=m
122CONFIG_XFS_FS=m 107CONFIG_XFS_FS=m
123CONFIG_AUTOFS4_FS=y 108CONFIG_AUTOFS4_FS=y
@@ -130,14 +115,10 @@ CONFIG_PROC_KCORE=y
130CONFIG_TMPFS=y 115CONFIG_TMPFS=y
131CONFIG_UFS_FS=m 116CONFIG_UFS_FS=m
132CONFIG_NFS_FS=y 117CONFIG_NFS_FS=y
133CONFIG_NFS_V3=y
134CONFIG_NFS_V4=y 118CONFIG_NFS_V4=y
135CONFIG_ROOT_NFS=y 119CONFIG_ROOT_NFS=y
136CONFIG_NFSD=m 120CONFIG_NFSD=m
137CONFIG_NFSD_V4=y 121CONFIG_NFSD_V4=y
138CONFIG_RPCSEC_GSS_SPKM3=m
139CONFIG_SMB_FS=m
140CONFIG_SMB_NLS_DEFAULT=y
141CONFIG_CIFS=m 122CONFIG_CIFS=m
142CONFIG_NLS_CODEPAGE_437=m 123CONFIG_NLS_CODEPAGE_437=m
143CONFIG_NLS_CODEPAGE_737=m 124CONFIG_NLS_CODEPAGE_737=m
@@ -177,21 +158,16 @@ CONFIG_NLS_ISO8859_15=m
177CONFIG_NLS_KOI8_R=m 158CONFIG_NLS_KOI8_R=m
178CONFIG_NLS_KOI8_U=m 159CONFIG_NLS_KOI8_U=m
179CONFIG_NLS_UTF8=m 160CONFIG_NLS_UTF8=m
180CONFIG_MAGIC_SYSRQ=y
181CONFIG_DEBUG_FS=y 161CONFIG_DEBUG_FS=y
162CONFIG_MAGIC_SYSRQ=y
182CONFIG_DEBUG_KERNEL=y 163CONFIG_DEBUG_KERNEL=y
183CONFIG_DEBUG_MUTEXES=y 164CONFIG_DEBUG_MUTEXES=y
184# CONFIG_RCU_CPU_STALL_DETECTOR is not set
185CONFIG_CRYPTO_NULL=m
186CONFIG_CRYPTO_TEST=m 165CONFIG_CRYPTO_TEST=m
187CONFIG_CRYPTO_HMAC=y 166CONFIG_CRYPTO_HMAC=y
188CONFIG_CRYPTO_MD4=m
189CONFIG_CRYPTO_MICHAEL_MIC=m 167CONFIG_CRYPTO_MICHAEL_MIC=m
190CONFIG_CRYPTO_SHA256=m
191CONFIG_CRYPTO_SHA512=m 168CONFIG_CRYPTO_SHA512=m
192CONFIG_CRYPTO_TGR192=m 169CONFIG_CRYPTO_TGR192=m
193CONFIG_CRYPTO_WP512=m 170CONFIG_CRYPTO_WP512=m
194CONFIG_CRYPTO_AES=m
195CONFIG_CRYPTO_ANUBIS=m 171CONFIG_CRYPTO_ANUBIS=m
196CONFIG_CRYPTO_BLOWFISH=m 172CONFIG_CRYPTO_BLOWFISH=m
197CONFIG_CRYPTO_CAST6=m 173CONFIG_CRYPTO_CAST6=m
@@ -200,6 +176,7 @@ CONFIG_CRYPTO_SERPENT=m
200CONFIG_CRYPTO_TEA=m 176CONFIG_CRYPTO_TEA=m
201CONFIG_CRYPTO_TWOFISH=m 177CONFIG_CRYPTO_TWOFISH=m
202CONFIG_CRYPTO_DEFLATE=m 178CONFIG_CRYPTO_DEFLATE=m
203# CONFIG_CRYPTO_ANSI_CPRNG is not set
204# CONFIG_CRYPTO_HW is not set 179# CONFIG_CRYPTO_HW is not set
205CONFIG_LIBCRC32C=m 180CONFIG_FONTS=y
181CONFIG_FONT_8x8=y
182CONFIG_FONT_8x16=y
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig
index 1a4f776b49b8..5acb93dcaabf 100644
--- a/arch/parisc/configs/a500_defconfig
+++ b/arch/parisc/configs/a500_defconfig
@@ -1,13 +1,10 @@
1CONFIG_EXPERIMENTAL=y
2# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
4CONFIG_POSIX_MQUEUE=y 3CONFIG_POSIX_MQUEUE=y
5CONFIG_IKCONFIG=y 4CONFIG_IKCONFIG=y
6CONFIG_IKCONFIG_PROC=y 5CONFIG_IKCONFIG_PROC=y
7CONFIG_LOG_BUF_SHIFT=16 6CONFIG_LOG_BUF_SHIFT=16
8CONFIG_SYSFS_DEPRECATED_V2=y
9CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
10# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
11CONFIG_EXPERT=y 8CONFIG_EXPERT=y
12CONFIG_KALLSYMS_ALL=y 9CONFIG_KALLSYMS_ALL=y
13CONFIG_SLAB=y 10CONFIG_SLAB=y
@@ -16,7 +13,6 @@ CONFIG_OPROFILE=m
16CONFIG_MODULES=y 13CONFIG_MODULES=y
17CONFIG_MODULE_UNLOAD=y 14CONFIG_MODULE_UNLOAD=y
18CONFIG_MODULE_FORCE_UNLOAD=y 15CONFIG_MODULE_FORCE_UNLOAD=y
19# CONFIG_BLK_DEV_BSG is not set
20CONFIG_PA8X00=y 16CONFIG_PA8X00=y
21CONFIG_64BIT=y 17CONFIG_64BIT=y
22CONFIG_SMP=y 18CONFIG_SMP=y
@@ -43,21 +39,17 @@ CONFIG_IP_PNP_DHCP=y
43CONFIG_IP_PNP_BOOTP=y 39CONFIG_IP_PNP_BOOTP=y
44CONFIG_INET_AH=m 40CONFIG_INET_AH=m
45CONFIG_INET_ESP=m 41CONFIG_INET_ESP=m
46# CONFIG_INET_LRO is not set
47CONFIG_INET6_AH=m 42CONFIG_INET6_AH=m
48CONFIG_INET6_ESP=m 43CONFIG_INET6_ESP=m
49CONFIG_INET6_IPCOMP=m 44CONFIG_INET6_IPCOMP=m
50CONFIG_IPV6_TUNNEL=m 45CONFIG_IPV6_TUNNEL=m
51CONFIG_NETFILTER=y 46CONFIG_NETFILTER=y
52# CONFIG_NETFILTER_XT_MATCH_DCCP is not set 47# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
53CONFIG_IP_NF_QUEUE=m
54CONFIG_IP_NF_IPTABLES=m 48CONFIG_IP_NF_IPTABLES=m
55CONFIG_IP_NF_MATCH_ECN=m 49CONFIG_IP_NF_MATCH_ECN=m
56CONFIG_IP_NF_MATCH_TTL=m 50CONFIG_IP_NF_MATCH_TTL=m
57CONFIG_IP_NF_FILTER=m 51CONFIG_IP_NF_FILTER=m
58CONFIG_IP_NF_TARGET_REJECT=m 52CONFIG_IP_NF_TARGET_REJECT=m
59CONFIG_IP_NF_TARGET_LOG=m
60CONFIG_IP_NF_TARGET_ULOG=m
61CONFIG_IP_NF_MANGLE=m 53CONFIG_IP_NF_MANGLE=m
62CONFIG_IP_NF_TARGET_ECN=m 54CONFIG_IP_NF_TARGET_ECN=m
63CONFIG_IP_NF_RAW=m 55CONFIG_IP_NF_RAW=m
@@ -70,7 +62,6 @@ CONFIG_IP6_NF_MATCH_OPTS=m
70CONFIG_IP6_NF_MATCH_HL=m 62CONFIG_IP6_NF_MATCH_HL=m
71CONFIG_IP6_NF_MATCH_IPV6HEADER=m 63CONFIG_IP6_NF_MATCH_IPV6HEADER=m
72CONFIG_IP6_NF_MATCH_RT=m 64CONFIG_IP6_NF_MATCH_RT=m
73CONFIG_IP6_NF_TARGET_LOG=m
74CONFIG_IP6_NF_FILTER=m 65CONFIG_IP6_NF_FILTER=m
75CONFIG_IP6_NF_TARGET_REJECT=m 66CONFIG_IP6_NF_TARGET_REJECT=m
76CONFIG_IP6_NF_MANGLE=m 67CONFIG_IP6_NF_MANGLE=m
@@ -94,7 +85,6 @@ CONFIG_BLK_DEV_SD=y
94CONFIG_CHR_DEV_ST=y 85CONFIG_CHR_DEV_ST=y
95CONFIG_BLK_DEV_SR=y 86CONFIG_BLK_DEV_SR=y
96CONFIG_CHR_DEV_SG=y 87CONFIG_CHR_DEV_SG=y
97CONFIG_SCSI_MULTI_LUN=y
98CONFIG_SCSI_ISCSI_ATTRS=m 88CONFIG_SCSI_ISCSI_ATTRS=m
99CONFIG_SCSI_SYM53C8XX_2=y 89CONFIG_SCSI_SYM53C8XX_2=y
100CONFIG_SCSI_QLOGIC_1280=m 90CONFIG_SCSI_QLOGIC_1280=m
@@ -106,43 +96,38 @@ CONFIG_MD_RAID0=y
106CONFIG_MD_RAID1=y 96CONFIG_MD_RAID1=y
107CONFIG_FUSION=y 97CONFIG_FUSION=y
108CONFIG_FUSION_SPI=m 98CONFIG_FUSION_SPI=m
109CONFIG_FUSION_FC=m
110CONFIG_FUSION_CTL=m 99CONFIG_FUSION_CTL=m
111CONFIG_NETDEVICES=y 100CONFIG_NETDEVICES=y
112CONFIG_DUMMY=m
113CONFIG_BONDING=m 101CONFIG_BONDING=m
102CONFIG_DUMMY=m
114CONFIG_TUN=m 103CONFIG_TUN=m
115CONFIG_NET_ETHERNET=y 104CONFIG_PCMCIA_3C574=m
116CONFIG_NET_VENDOR_3COM=y 105CONFIG_PCMCIA_3C589=m
117CONFIG_VORTEX=m 106CONFIG_VORTEX=m
118CONFIG_TYPHOON=m 107CONFIG_TYPHOON=m
108CONFIG_ACENIC=m
109CONFIG_ACENIC_OMIT_TIGON_I=y
110CONFIG_PCNET32=m
111CONFIG_TIGON3=m
119CONFIG_NET_TULIP=y 112CONFIG_NET_TULIP=y
120CONFIG_DE2104X=m 113CONFIG_DE2104X=m
121CONFIG_TULIP=y 114CONFIG_TULIP=y
122CONFIG_TULIP_MMIO=y 115CONFIG_TULIP_MMIO=y
123CONFIG_PCMCIA_XIRCOM=m 116CONFIG_PCMCIA_XIRCOM=m
124CONFIG_HP100=m 117CONFIG_HP100=m
125CONFIG_NET_PCI=y
126CONFIG_PCNET32=m
127CONFIG_E100=m 118CONFIG_E100=m
128CONFIG_ACENIC=m
129CONFIG_ACENIC_OMIT_TIGON_I=y
130CONFIG_E1000=m 119CONFIG_E1000=m
131CONFIG_TIGON3=m
132CONFIG_NET_PCMCIA=y
133CONFIG_PCMCIA_3C589=m
134CONFIG_PCMCIA_3C574=m
135CONFIG_PCMCIA_SMC91C92=m 120CONFIG_PCMCIA_SMC91C92=m
136CONFIG_PCMCIA_XIRC2PS=m 121CONFIG_PCMCIA_XIRC2PS=m
137CONFIG_PPP=m 122CONFIG_PPP=m
123CONFIG_PPP_BSDCOMP=m
124CONFIG_PPP_DEFLATE=m
138CONFIG_PPP_ASYNC=m 125CONFIG_PPP_ASYNC=m
139CONFIG_PPP_SYNC_TTY=m 126CONFIG_PPP_SYNC_TTY=m
140CONFIG_PPP_DEFLATE=m
141CONFIG_PPP_BSDCOMP=m
142# CONFIG_INPUT_MOUSEDEV is not set
143# CONFIG_INPUT_KEYBOARD is not set 127# CONFIG_INPUT_KEYBOARD is not set
144# CONFIG_INPUT_MOUSE is not set 128# CONFIG_INPUT_MOUSE is not set
145# CONFIG_SERIO is not set 129# CONFIG_SERIO is not set
130# CONFIG_LEGACY_PTYS is not set
146CONFIG_SERIAL_8250=y 131CONFIG_SERIAL_8250=y
147CONFIG_SERIAL_8250_CONSOLE=y 132CONFIG_SERIAL_8250_CONSOLE=y
148CONFIG_SERIAL_8250_CS=m 133CONFIG_SERIAL_8250_CS=m
@@ -151,7 +136,6 @@ CONFIG_SERIAL_8250_EXTENDED=y
151CONFIG_SERIAL_8250_MANY_PORTS=y 136CONFIG_SERIAL_8250_MANY_PORTS=y
152CONFIG_SERIAL_8250_SHARE_IRQ=y 137CONFIG_SERIAL_8250_SHARE_IRQ=y
153CONFIG_PDC_CONSOLE=y 138CONFIG_PDC_CONSOLE=y
154# CONFIG_LEGACY_PTYS is not set
155# CONFIG_HW_RANDOM is not set 139# CONFIG_HW_RANDOM is not set
156CONFIG_RAW_DRIVER=y 140CONFIG_RAW_DRIVER=y
157# CONFIG_HWMON is not set 141# CONFIG_HWMON is not set
@@ -160,7 +144,6 @@ CONFIG_AGP_PARISC=y
160# CONFIG_STI_CONSOLE is not set 144# CONFIG_STI_CONSOLE is not set
161CONFIG_EXT2_FS=y 145CONFIG_EXT2_FS=y
162CONFIG_EXT3_FS=y 146CONFIG_EXT3_FS=y
163# CONFIG_EXT3_FS_XATTR is not set
164CONFIG_JFS_FS=m 147CONFIG_JFS_FS=m
165CONFIG_XFS_FS=m 148CONFIG_XFS_FS=m
166CONFIG_AUTOFS4_FS=y 149CONFIG_AUTOFS4_FS=y
@@ -173,13 +156,9 @@ CONFIG_PROC_KCORE=y
173CONFIG_TMPFS=y 156CONFIG_TMPFS=y
174CONFIG_UFS_FS=m 157CONFIG_UFS_FS=m
175CONFIG_NFS_FS=m 158CONFIG_NFS_FS=m
176CONFIG_NFS_V3=y 159CONFIG_NFS_V4=m
177CONFIG_NFS_V4=y
178CONFIG_NFSD=m 160CONFIG_NFSD=m
179CONFIG_NFSD_V4=y 161CONFIG_NFSD_V4=y
180CONFIG_RPCSEC_GSS_SPKM3=m
181CONFIG_SMB_FS=m
182CONFIG_SMB_NLS_DEFAULT=y
183CONFIG_CIFS=m 162CONFIG_CIFS=m
184CONFIG_NLS_CODEPAGE_437=m 163CONFIG_NLS_CODEPAGE_437=m
185CONFIG_NLS_CODEPAGE_850=m 164CONFIG_NLS_CODEPAGE_850=m
@@ -187,17 +166,12 @@ CONFIG_NLS_ASCII=m
187CONFIG_NLS_ISO8859_1=m 166CONFIG_NLS_ISO8859_1=m
188CONFIG_NLS_ISO8859_15=m 167CONFIG_NLS_ISO8859_15=m
189CONFIG_NLS_UTF8=m 168CONFIG_NLS_UTF8=m
190CONFIG_MAGIC_SYSRQ=y
191CONFIG_DEBUG_FS=y 169CONFIG_DEBUG_FS=y
192CONFIG_HEADERS_CHECK=y 170CONFIG_HEADERS_CHECK=y
193CONFIG_DEBUG_KERNEL=y 171CONFIG_MAGIC_SYSRQ=y
194# CONFIG_DEBUG_BUGVERBOSE is not set 172# CONFIG_DEBUG_BUGVERBOSE is not set
195# CONFIG_RCU_CPU_STALL_DETECTOR is not set
196CONFIG_CRYPTO_NULL=m
197CONFIG_CRYPTO_TEST=m 173CONFIG_CRYPTO_TEST=m
198CONFIG_CRYPTO_HMAC=y 174CONFIG_CRYPTO_HMAC=y
199CONFIG_CRYPTO_MD5=y 175CONFIG_CRYPTO_MD5=y
200CONFIG_CRYPTO_BLOWFISH=m 176CONFIG_CRYPTO_BLOWFISH=m
201# CONFIG_CRYPTO_ANSI_CPRNG is not set
202# CONFIG_CRYPTO_HW is not set 177# CONFIG_CRYPTO_HW is not set
203CONFIG_LIBCRC32C=m
diff --git a/arch/parisc/configs/b180_defconfig b/arch/parisc/configs/b180_defconfig
index f1a0c25bef8d..83ffd161aec5 100644
--- a/arch/parisc/configs/b180_defconfig
+++ b/arch/parisc/configs/b180_defconfig
@@ -3,7 +3,6 @@ CONFIG_SYSVIPC=y
3CONFIG_IKCONFIG=y 3CONFIG_IKCONFIG=y
4CONFIG_IKCONFIG_PROC=y 4CONFIG_IKCONFIG_PROC=y
5CONFIG_LOG_BUF_SHIFT=16 5CONFIG_LOG_BUF_SHIFT=16
6CONFIG_SYSFS_DEPRECATED_V2=y
7CONFIG_BLK_DEV_INITRD=y 6CONFIG_BLK_DEV_INITRD=y
8CONFIG_SLAB=y 7CONFIG_SLAB=y
9CONFIG_MODULES=y 8CONFIG_MODULES=y
@@ -25,8 +24,6 @@ CONFIG_INET=y
25CONFIG_IP_MULTICAST=y 24CONFIG_IP_MULTICAST=y
26CONFIG_IP_PNP=y 25CONFIG_IP_PNP=y
27CONFIG_IP_PNP_BOOTP=y 26CONFIG_IP_PNP_BOOTP=y
28# CONFIG_INET_LRO is not set
29CONFIG_IPV6=y
30CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 27CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
31CONFIG_DEVTMPFS=y 28CONFIG_DEVTMPFS=y
32CONFIG_DEVTMPFS_MOUNT=y 29CONFIG_DEVTMPFS_MOUNT=y
@@ -53,10 +50,9 @@ CONFIG_MD_LINEAR=y
53CONFIG_MD_RAID0=y 50CONFIG_MD_RAID0=y
54CONFIG_MD_RAID1=y 51CONFIG_MD_RAID1=y
55CONFIG_NETDEVICES=y 52CONFIG_NETDEVICES=y
56CONFIG_NET_ETHERNET=y
57CONFIG_LASI_82596=y
58CONFIG_NET_TULIP=y 53CONFIG_NET_TULIP=y
59CONFIG_TULIP=y 54CONFIG_TULIP=y
55CONFIG_LASI_82596=y
60CONFIG_PPP=y 56CONFIG_PPP=y
61CONFIG_INPUT_EVDEV=y 57CONFIG_INPUT_EVDEV=y
62# CONFIG_KEYBOARD_HIL_OLD is not set 58# CONFIG_KEYBOARD_HIL_OLD is not set
@@ -71,40 +67,31 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
71CONFIG_PRINTER=y 67CONFIG_PRINTER=y
72# CONFIG_HW_RANDOM is not set 68# CONFIG_HW_RANDOM is not set
73# CONFIG_HWMON is not set 69# CONFIG_HWMON is not set
74CONFIG_VIDEO_OUTPUT_CONTROL=m
75CONFIG_FB=y 70CONFIG_FB=y
76CONFIG_FRAMEBUFFER_CONSOLE=y 71CONFIG_FRAMEBUFFER_CONSOLE=y
77CONFIG_LOGO=y 72CONFIG_LOGO=y
78CONFIG_SOUND=y 73CONFIG_SOUND=y
79CONFIG_SND=y 74CONFIG_SND=y
80CONFIG_SND_SEQUENCER=y 75CONFIG_SND_SEQUENCER=y
81CONFIG_SND_MIXER_OSS=y
82CONFIG_SND_PCM_OSS=y
83CONFIG_SND_SEQUENCER_OSS=y
84CONFIG_SND_HARMONY=y 76CONFIG_SND_HARMONY=y
85CONFIG_EXT2_FS=y 77CONFIG_EXT2_FS=y
86CONFIG_EXT3_FS=y 78CONFIG_EXT3_FS=y
87# CONFIG_EXT3_FS_XATTR is not set
88CONFIG_AUTOFS4_FS=y 79CONFIG_AUTOFS4_FS=y
89CONFIG_ISO9660_FS=y 80CONFIG_ISO9660_FS=y
90CONFIG_JOLIET=y 81CONFIG_JOLIET=y
91CONFIG_PROC_KCORE=y 82CONFIG_PROC_KCORE=y
92CONFIG_TMPFS=y 83CONFIG_TMPFS=y
93CONFIG_NFS_FS=y 84CONFIG_NFS_FS=y
94CONFIG_NFS_V3=y
95CONFIG_ROOT_NFS=y 85CONFIG_ROOT_NFS=y
96CONFIG_NFSD=y 86CONFIG_NFSD=y
97CONFIG_NFSD_V3=y 87CONFIG_NFSD_V3=y
98CONFIG_SMB_FS=y
99CONFIG_NLS_CODEPAGE_437=m 88CONFIG_NLS_CODEPAGE_437=m
100CONFIG_NLS_CODEPAGE_850=m 89CONFIG_NLS_CODEPAGE_850=m
101CONFIG_NLS_ASCII=m 90CONFIG_NLS_ASCII=m
102CONFIG_NLS_ISO8859_1=m 91CONFIG_NLS_ISO8859_1=m
103CONFIG_NLS_ISO8859_15=m 92CONFIG_NLS_ISO8859_15=m
104CONFIG_NLS_UTF8=m 93CONFIG_NLS_UTF8=m
105CONFIG_MAGIC_SYSRQ=y
106CONFIG_HEADERS_CHECK=y 94CONFIG_HEADERS_CHECK=y
95CONFIG_MAGIC_SYSRQ=y
107CONFIG_DEBUG_KERNEL=y 96CONFIG_DEBUG_KERNEL=y
108# CONFIG_RCU_CPU_STALL_DETECTOR is not set
109CONFIG_SECURITY=y 97CONFIG_SECURITY=y
110# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
index 8e8f0e34f817..0764d3971cf6 100644
--- a/arch/parisc/configs/c3000_defconfig
+++ b/arch/parisc/configs/c3000_defconfig
@@ -1,12 +1,9 @@
1CONFIG_EXPERIMENTAL=y
2# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
4CONFIG_IKCONFIG=y 3CONFIG_IKCONFIG=y
5CONFIG_IKCONFIG_PROC=y 4CONFIG_IKCONFIG_PROC=y
6CONFIG_LOG_BUF_SHIFT=16 5CONFIG_LOG_BUF_SHIFT=16
7CONFIG_SYSFS_DEPRECATED_V2=y
8CONFIG_BLK_DEV_INITRD=y 6CONFIG_BLK_DEV_INITRD=y
9# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
10CONFIG_EXPERT=y 7CONFIG_EXPERT=y
11CONFIG_KALLSYMS_ALL=y 8CONFIG_KALLSYMS_ALL=y
12CONFIG_SLAB=y 9CONFIG_SLAB=y
@@ -15,7 +12,6 @@ CONFIG_OPROFILE=m
15CONFIG_MODULES=y 12CONFIG_MODULES=y
16CONFIG_MODULE_UNLOAD=y 13CONFIG_MODULE_UNLOAD=y
17CONFIG_MODULE_FORCE_UNLOAD=y 14CONFIG_MODULE_FORCE_UNLOAD=y
18# CONFIG_BLK_DEV_BSG is not set
19CONFIG_PA8X00=y 15CONFIG_PA8X00=y
20CONFIG_PREEMPT_VOLUNTARY=y 16CONFIG_PREEMPT_VOLUNTARY=y
21# CONFIG_GSC is not set 17# CONFIG_GSC is not set
@@ -31,13 +27,11 @@ CONFIG_INET=y
31CONFIG_IP_MULTICAST=y 27CONFIG_IP_MULTICAST=y
32CONFIG_IP_PNP=y 28CONFIG_IP_PNP=y
33CONFIG_IP_PNP_BOOTP=y 29CONFIG_IP_PNP_BOOTP=y
34# CONFIG_INET_LRO is not set
35# CONFIG_INET_DIAG is not set 30# CONFIG_INET_DIAG is not set
36CONFIG_INET6_IPCOMP=m 31CONFIG_INET6_IPCOMP=m
37CONFIG_IPV6_TUNNEL=m 32CONFIG_IPV6_TUNNEL=m
38CONFIG_NETFILTER=y 33CONFIG_NETFILTER=y
39CONFIG_NETFILTER_DEBUG=y 34CONFIG_NETFILTER_DEBUG=y
40CONFIG_IP_NF_QUEUE=m
41CONFIG_NET_PKTGEN=m 35CONFIG_NET_PKTGEN=m
42CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 36CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
43CONFIG_DEVTMPFS=y 37CONFIG_DEVTMPFS=y
@@ -50,13 +44,11 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
50CONFIG_IDE=y 44CONFIG_IDE=y
51CONFIG_BLK_DEV_IDECD=y 45CONFIG_BLK_DEV_IDECD=y
52CONFIG_BLK_DEV_NS87415=y 46CONFIG_BLK_DEV_NS87415=y
53CONFIG_PATA_SIL680=m
54CONFIG_SCSI=y 47CONFIG_SCSI=y
55CONFIG_BLK_DEV_SD=y 48CONFIG_BLK_DEV_SD=y
56CONFIG_CHR_DEV_ST=y 49CONFIG_CHR_DEV_ST=y
57CONFIG_BLK_DEV_SR=y 50CONFIG_BLK_DEV_SR=y
58CONFIG_CHR_DEV_SG=y 51CONFIG_CHR_DEV_SG=y
59CONFIG_SCSI_MULTI_LUN=y
60CONFIG_SCSI_ISCSI_ATTRS=m 52CONFIG_SCSI_ISCSI_ATTRS=m
61CONFIG_SCSI_SYM53C8XX_2=y 53CONFIG_SCSI_SYM53C8XX_2=y
62CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 54CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
@@ -76,28 +68,23 @@ CONFIG_FUSION=y
76CONFIG_FUSION_SPI=m 68CONFIG_FUSION_SPI=m
77CONFIG_FUSION_CTL=m 69CONFIG_FUSION_CTL=m
78CONFIG_NETDEVICES=y 70CONFIG_NETDEVICES=y
79CONFIG_DUMMY=m
80CONFIG_BONDING=m 71CONFIG_BONDING=m
72CONFIG_DUMMY=m
81CONFIG_TUN=m 73CONFIG_TUN=m
82CONFIG_NET_ETHERNET=y 74CONFIG_ACENIC=m
75CONFIG_TIGON3=m
83CONFIG_NET_TULIP=y 76CONFIG_NET_TULIP=y
84CONFIG_DE2104X=m 77CONFIG_DE2104X=m
85CONFIG_TULIP=y 78CONFIG_TULIP=y
86CONFIG_TULIP_MMIO=y 79CONFIG_TULIP_MMIO=y
87CONFIG_NET_PCI=y
88CONFIG_E100=m 80CONFIG_E100=m
89CONFIG_ACENIC=m
90CONFIG_E1000=m 81CONFIG_E1000=m
91CONFIG_TIGON3=m
92CONFIG_PPP=m 82CONFIG_PPP=m
93CONFIG_PPP_ASYNC=m
94CONFIG_PPP_SYNC_TTY=m
95CONFIG_PPP_DEFLATE=m
96CONFIG_PPP_BSDCOMP=m 83CONFIG_PPP_BSDCOMP=m
84CONFIG_PPP_DEFLATE=m
97CONFIG_PPPOE=m 85CONFIG_PPPOE=m
98# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 86CONFIG_PPP_ASYNC=m
99CONFIG_INPUT_MOUSEDEV_SCREEN_X=1600 87CONFIG_PPP_SYNC_TTY=m
100CONFIG_INPUT_MOUSEDEV_SCREEN_Y=1200
101# CONFIG_KEYBOARD_ATKBD is not set 88# CONFIG_KEYBOARD_ATKBD is not set
102# CONFIG_MOUSE_PS2 is not set 89# CONFIG_MOUSE_PS2 is not set
103CONFIG_SERIO=m 90CONFIG_SERIO=m
@@ -111,7 +98,6 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
111# CONFIG_HW_RANDOM is not set 98# CONFIG_HW_RANDOM is not set
112CONFIG_RAW_DRIVER=y 99CONFIG_RAW_DRIVER=y
113# CONFIG_HWMON is not set 100# CONFIG_HWMON is not set
114CONFIG_VIDEO_OUTPUT_CONTROL=m
115CONFIG_FB=y 101CONFIG_FB=y
116CONFIG_FRAMEBUFFER_CONSOLE=y 102CONFIG_FRAMEBUFFER_CONSOLE=y
117CONFIG_LOGO=y 103CONFIG_LOGO=y
@@ -121,9 +107,6 @@ CONFIG_LOGO=y
121CONFIG_SOUND=y 107CONFIG_SOUND=y
122CONFIG_SND=y 108CONFIG_SND=y
123CONFIG_SND_SEQUENCER=y 109CONFIG_SND_SEQUENCER=y
124CONFIG_SND_MIXER_OSS=y
125CONFIG_SND_PCM_OSS=y
126CONFIG_SND_SEQUENCER_OSS=y
127CONFIG_SND_AD1889=y 110CONFIG_SND_AD1889=y
128CONFIG_USB_HIDDEV=y 111CONFIG_USB_HIDDEV=y
129CONFIG_USB=y 112CONFIG_USB=y
@@ -139,7 +122,6 @@ CONFIG_USB_MICROTEK=m
139CONFIG_USB_LEGOTOWER=m 122CONFIG_USB_LEGOTOWER=m
140CONFIG_EXT2_FS=y 123CONFIG_EXT2_FS=y
141CONFIG_EXT3_FS=y 124CONFIG_EXT3_FS=y
142# CONFIG_EXT3_FS_XATTR is not set
143CONFIG_XFS_FS=m 125CONFIG_XFS_FS=m
144CONFIG_AUTOFS4_FS=y 126CONFIG_AUTOFS4_FS=y
145CONFIG_ISO9660_FS=y 127CONFIG_ISO9660_FS=y
@@ -149,7 +131,6 @@ CONFIG_VFAT_FS=m
149CONFIG_PROC_KCORE=y 131CONFIG_PROC_KCORE=y
150CONFIG_TMPFS=y 132CONFIG_TMPFS=y
151CONFIG_NFS_FS=y 133CONFIG_NFS_FS=y
152CONFIG_NFS_V3=y
153CONFIG_ROOT_NFS=y 134CONFIG_ROOT_NFS=y
154CONFIG_NFSD=y 135CONFIG_NFSD=y
155CONFIG_NFSD_V3=y 136CONFIG_NFSD_V3=y
@@ -159,18 +140,13 @@ CONFIG_NLS_ASCII=m
159CONFIG_NLS_ISO8859_1=m 140CONFIG_NLS_ISO8859_1=m
160CONFIG_NLS_ISO8859_15=m 141CONFIG_NLS_ISO8859_15=m
161CONFIG_NLS_UTF8=m 142CONFIG_NLS_UTF8=m
162CONFIG_MAGIC_SYSRQ=y
163CONFIG_DEBUG_FS=y 143CONFIG_DEBUG_FS=y
164CONFIG_HEADERS_CHECK=y 144CONFIG_HEADERS_CHECK=y
165CONFIG_DEBUG_KERNEL=y 145CONFIG_MAGIC_SYSRQ=y
166CONFIG_DEBUG_MUTEXES=y 146CONFIG_DEBUG_MUTEXES=y
167# CONFIG_DEBUG_BUGVERBOSE is not set 147# CONFIG_DEBUG_BUGVERBOSE is not set
168# CONFIG_RCU_CPU_STALL_DETECTOR is not set
169CONFIG_CRYPTO_NULL=m
170CONFIG_CRYPTO_TEST=m 148CONFIG_CRYPTO_TEST=m
171CONFIG_CRYPTO_MD5=m 149CONFIG_CRYPTO_MD5=m
172CONFIG_CRYPTO_BLOWFISH=m 150CONFIG_CRYPTO_BLOWFISH=m
173CONFIG_CRYPTO_DES=m 151CONFIG_CRYPTO_DES=m
174# CONFIG_CRYPTO_ANSI_CPRNG is not set
175# CONFIG_CRYPTO_HW is not set 152# CONFIG_CRYPTO_HW is not set
176CONFIG_LIBCRC32C=m
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index f6a4c016304b..088ab948a5ca 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -1,16 +1,13 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
3CONFIG_POSIX_MQUEUE=y 3CONFIG_POSIX_MQUEUE=y
4CONFIG_FHANDLE=y 4# CONFIG_CROSS_MEMORY_ATTACH is not set
5CONFIG_BSD_PROCESS_ACCT=y 5CONFIG_BSD_PROCESS_ACCT=y
6CONFIG_BSD_PROCESS_ACCT_V3=y 6CONFIG_BSD_PROCESS_ACCT_V3=y
7CONFIG_IKCONFIG=y 7CONFIG_IKCONFIG=y
8CONFIG_IKCONFIG_PROC=y 8CONFIG_IKCONFIG_PROC=y
9CONFIG_RELAY=y 9CONFIG_RELAY=y
10CONFIG_BLK_DEV_INITRD=y 10CONFIG_BLK_DEV_INITRD=y
11CONFIG_RD_BZIP2=y
12CONFIG_RD_LZMA=y
13CONFIG_RD_LZO=y
14CONFIG_EXPERT=y 11CONFIG_EXPERT=y
15CONFIG_SYSCTL_SYSCALL=y 12CONFIG_SYSCTL_SYSCALL=y
16CONFIG_SLAB=y 13CONFIG_SLAB=y
@@ -23,7 +20,6 @@ CONFIG_PA8X00=y
23CONFIG_64BIT=y 20CONFIG_64BIT=y
24CONFIG_SMP=y 21CONFIG_SMP=y
25CONFIG_PREEMPT=y 22CONFIG_PREEMPT=y
26# CONFIG_CROSS_MEMORY_ATTACH is not set
27CONFIG_IOMMU_CCIO=y 23CONFIG_IOMMU_CCIO=y
28CONFIG_PCI=y 24CONFIG_PCI=y
29CONFIG_PCI_LBA=y 25CONFIG_PCI_LBA=y
@@ -146,7 +142,6 @@ CONFIG_FB_FOREIGN_ENDIAN=y
146CONFIG_FB_MODE_HELPERS=y 142CONFIG_FB_MODE_HELPERS=y
147CONFIG_FB_TILEBLITTING=y 143CONFIG_FB_TILEBLITTING=y
148# CONFIG_FB_STI is not set 144# CONFIG_FB_STI is not set
149CONFIG_BACKLIGHT_LCD_SUPPORT=y
150# CONFIG_LCD_CLASS_DEVICE is not set 145# CONFIG_LCD_CLASS_DEVICE is not set
151# CONFIG_BACKLIGHT_GENERIC is not set 146# CONFIG_BACKLIGHT_GENERIC is not set
152CONFIG_FRAMEBUFFER_CONSOLE=y 147CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -157,12 +152,9 @@ CONFIG_LOGO=y
157# CONFIG_LOGO_LINUX_CLUT224 is not set 152# CONFIG_LOGO_LINUX_CLUT224 is not set
158CONFIG_SOUND=m 153CONFIG_SOUND=m
159CONFIG_SND=m 154CONFIG_SND=m
155CONFIG_SND_VERBOSE_PRINTK=y
160CONFIG_SND_SEQUENCER=m 156CONFIG_SND_SEQUENCER=m
161CONFIG_SND_SEQ_DUMMY=m 157CONFIG_SND_SEQ_DUMMY=m
162CONFIG_SND_MIXER_OSS=m
163CONFIG_SND_PCM_OSS=m
164CONFIG_SND_SEQUENCER_OSS=y
165CONFIG_SND_VERBOSE_PRINTK=y
166CONFIG_SND_AD1889=m 158CONFIG_SND_AD1889=m
167# CONFIG_SND_USB is not set 159# CONFIG_SND_USB is not set
168# CONFIG_SND_GSC is not set 160# CONFIG_SND_GSC is not set
@@ -174,8 +166,6 @@ CONFIG_EXT2_FS_XATTR=y
174CONFIG_EXT2_FS_POSIX_ACL=y 166CONFIG_EXT2_FS_POSIX_ACL=y
175CONFIG_EXT2_FS_SECURITY=y 167CONFIG_EXT2_FS_SECURITY=y
176CONFIG_EXT3_FS=y 168CONFIG_EXT3_FS=y
177# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
178CONFIG_EXT4_FS=m
179CONFIG_REISERFS_FS=m 169CONFIG_REISERFS_FS=m
180CONFIG_REISERFS_PROC_INFO=y 170CONFIG_REISERFS_PROC_INFO=y
181CONFIG_XFS_FS=m 171CONFIG_XFS_FS=m
@@ -238,11 +228,8 @@ CONFIG_DEBUG_SLAB=y
238CONFIG_DEBUG_SLAB_LEAK=y 228CONFIG_DEBUG_SLAB_LEAK=y
239CONFIG_DEBUG_MEMORY_INIT=y 229CONFIG_DEBUG_MEMORY_INIT=y
240CONFIG_DEBUG_STACKOVERFLOW=y 230CONFIG_DEBUG_STACKOVERFLOW=y
241CONFIG_LOCKUP_DETECTOR=y
242CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
243CONFIG_PANIC_ON_OOPS=y 231CONFIG_PANIC_ON_OOPS=y
244CONFIG_DEBUG_RT_MUTEXES=y 232CONFIG_DEBUG_RT_MUTEXES=y
245CONFIG_PROVE_RCU_DELAY=y
246CONFIG_DEBUG_BLOCK_EXT_DEVT=y 233CONFIG_DEBUG_BLOCK_EXT_DEVT=y
247CONFIG_LATENCYTOP=y 234CONFIG_LATENCYTOP=y
248CONFIG_KEYS=y 235CONFIG_KEYS=y
diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig
index 310b6657e4ac..52c9050a7c5c 100644
--- a/arch/parisc/configs/default_defconfig
+++ b/arch/parisc/configs/default_defconfig
@@ -1,11 +1,9 @@
1CONFIG_EXPERIMENTAL=y
2# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
4CONFIG_POSIX_MQUEUE=y 3CONFIG_POSIX_MQUEUE=y
5CONFIG_IKCONFIG=y 4CONFIG_IKCONFIG=y
6CONFIG_IKCONFIG_PROC=y 5CONFIG_IKCONFIG_PROC=y
7CONFIG_LOG_BUF_SHIFT=16 6CONFIG_LOG_BUF_SHIFT=16
8CONFIG_SYSFS_DEPRECATED_V2=y
9CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
10CONFIG_KALLSYMS_ALL=y 8CONFIG_KALLSYMS_ALL=y
11CONFIG_SLAB=y 9CONFIG_SLAB=y
@@ -41,9 +39,7 @@ CONFIG_IP_PNP_DHCP=y
41CONFIG_IP_PNP_BOOTP=y 39CONFIG_IP_PNP_BOOTP=y
42CONFIG_INET_AH=m 40CONFIG_INET_AH=m
43CONFIG_INET_ESP=m 41CONFIG_INET_ESP=m
44# CONFIG_INET_LRO is not set
45CONFIG_INET_DIAG=m 42CONFIG_INET_DIAG=m
46CONFIG_IPV6=y
47CONFIG_INET6_AH=y 43CONFIG_INET6_AH=y
48CONFIG_INET6_ESP=y 44CONFIG_INET6_ESP=y
49CONFIG_INET6_IPCOMP=y 45CONFIG_INET6_IPCOMP=y
@@ -82,26 +78,23 @@ CONFIG_MD_RAID1=y
82CONFIG_MD_RAID10=y 78CONFIG_MD_RAID10=y
83CONFIG_BLK_DEV_DM=y 79CONFIG_BLK_DEV_DM=y
84CONFIG_NETDEVICES=y 80CONFIG_NETDEVICES=y
85CONFIG_DUMMY=m
86CONFIG_BONDING=m 81CONFIG_BONDING=m
82CONFIG_DUMMY=m
87CONFIG_TUN=m 83CONFIG_TUN=m
88CONFIG_NET_ETHERNET=y
89CONFIG_MII=m
90CONFIG_LASI_82596=y
91CONFIG_NET_TULIP=y
92CONFIG_TULIP=y
93CONFIG_NET_PCI=y
94CONFIG_ACENIC=y 84CONFIG_ACENIC=y
95CONFIG_TIGON3=y 85CONFIG_TIGON3=y
96CONFIG_NET_PCMCIA=y 86CONFIG_NET_TULIP=y
87CONFIG_TULIP=y
88CONFIG_LASI_82596=y
97CONFIG_PPP=m 89CONFIG_PPP=m
98CONFIG_PPP_ASYNC=m
99CONFIG_PPP_SYNC_TTY=m
100CONFIG_PPP_DEFLATE=m
101CONFIG_PPP_BSDCOMP=m 90CONFIG_PPP_BSDCOMP=m
91CONFIG_PPP_DEFLATE=m
102CONFIG_PPPOE=m 92CONFIG_PPPOE=m
93CONFIG_PPP_ASYNC=m
94CONFIG_PPP_SYNC_TTY=m
103# CONFIG_KEYBOARD_HIL_OLD is not set 95# CONFIG_KEYBOARD_HIL_OLD is not set
104CONFIG_MOUSE_SERIAL=y 96CONFIG_MOUSE_SERIAL=y
97CONFIG_LEGACY_PTY_COUNT=64
105CONFIG_SERIAL_8250=y 98CONFIG_SERIAL_8250=y
106CONFIG_SERIAL_8250_CONSOLE=y 99CONFIG_SERIAL_8250_CONSOLE=y
107CONFIG_SERIAL_8250_CS=y 100CONFIG_SERIAL_8250_CS=y
@@ -109,31 +102,24 @@ CONFIG_SERIAL_8250_NR_UARTS=17
109CONFIG_SERIAL_8250_EXTENDED=y 102CONFIG_SERIAL_8250_EXTENDED=y
110CONFIG_SERIAL_8250_MANY_PORTS=y 103CONFIG_SERIAL_8250_MANY_PORTS=y
111CONFIG_SERIAL_8250_SHARE_IRQ=y 104CONFIG_SERIAL_8250_SHARE_IRQ=y
112CONFIG_LEGACY_PTY_COUNT=64
113CONFIG_PRINTER=m 105CONFIG_PRINTER=m
114CONFIG_PPDEV=m 106CONFIG_PPDEV=m
115# CONFIG_HW_RANDOM is not set 107# CONFIG_HW_RANDOM is not set
116# CONFIG_HWMON is not set 108# CONFIG_HWMON is not set
117CONFIG_VIDEO_OUTPUT_CONTROL=m
118CONFIG_FB=y 109CONFIG_FB=y
119CONFIG_FB_MODE_HELPERS=y 110CONFIG_FB_MODE_HELPERS=y
120CONFIG_FB_TILEBLITTING=y 111CONFIG_FB_TILEBLITTING=y
121CONFIG_DUMMY_CONSOLE_COLUMNS=128 112CONFIG_DUMMY_CONSOLE_COLUMNS=128
122CONFIG_DUMMY_CONSOLE_ROWS=48 113CONFIG_DUMMY_CONSOLE_ROWS=48
123CONFIG_FRAMEBUFFER_CONSOLE=y 114CONFIG_FRAMEBUFFER_CONSOLE=y
124CONFIG_FONTS=y
125CONFIG_FONT_8x16=y
126CONFIG_LOGO=y 115CONFIG_LOGO=y
127# CONFIG_LOGO_LINUX_MONO is not set 116# CONFIG_LOGO_LINUX_MONO is not set
128# CONFIG_LOGO_LINUX_VGA16 is not set 117# CONFIG_LOGO_LINUX_VGA16 is not set
129# CONFIG_LOGO_LINUX_CLUT224 is not set 118# CONFIG_LOGO_LINUX_CLUT224 is not set
130CONFIG_SOUND=y 119CONFIG_SOUND=y
131CONFIG_SND=y 120CONFIG_SND=y
132CONFIG_SND_SEQUENCER=y
133CONFIG_SND_MIXER_OSS=y
134CONFIG_SND_PCM_OSS=y
135CONFIG_SND_SEQUENCER_OSS=y
136CONFIG_SND_DYNAMIC_MINORS=y 121CONFIG_SND_DYNAMIC_MINORS=y
122CONFIG_SND_SEQUENCER=y
137CONFIG_SND_AD1889=y 123CONFIG_SND_AD1889=y
138CONFIG_SND_HARMONY=y 124CONFIG_SND_HARMONY=y
139CONFIG_HID_GYRATION=y 125CONFIG_HID_GYRATION=y
@@ -141,7 +127,6 @@ CONFIG_HID_NTRIG=y
141CONFIG_HID_PANTHERLORD=y 127CONFIG_HID_PANTHERLORD=y
142CONFIG_HID_PETALYNX=y 128CONFIG_HID_PETALYNX=y
143CONFIG_HID_SAMSUNG=y 129CONFIG_HID_SAMSUNG=y
144CONFIG_HID_SONY=y
145CONFIG_HID_SUNPLUS=y 130CONFIG_HID_SUNPLUS=y
146CONFIG_HID_TOPSEED=y 131CONFIG_HID_TOPSEED=y
147CONFIG_USB=y 132CONFIG_USB=y
@@ -150,21 +135,15 @@ CONFIG_USB_OHCI_HCD=y
150CONFIG_USB_UHCI_HCD=y 135CONFIG_USB_UHCI_HCD=y
151CONFIG_EXT2_FS=y 136CONFIG_EXT2_FS=y
152CONFIG_EXT3_FS=y 137CONFIG_EXT3_FS=y
153# CONFIG_EXT3_FS_XATTR is not set
154CONFIG_AUTOFS_FS=y
155CONFIG_ISO9660_FS=y 138CONFIG_ISO9660_FS=y
156CONFIG_JOLIET=y 139CONFIG_JOLIET=y
157CONFIG_VFAT_FS=y 140CONFIG_VFAT_FS=y
158CONFIG_PROC_KCORE=y 141CONFIG_PROC_KCORE=y
159CONFIG_TMPFS=y 142CONFIG_TMPFS=y
160CONFIG_NFS_FS=y 143CONFIG_NFS_FS=y
161CONFIG_NFS_V3=y
162CONFIG_ROOT_NFS=y 144CONFIG_ROOT_NFS=y
163CONFIG_NFSD=y 145CONFIG_NFSD=y
164CONFIG_NFSD_V4=y 146CONFIG_NFSD_V4=y
165CONFIG_RPCSEC_GSS_SPKM3=m
166CONFIG_SMB_FS=m
167CONFIG_SMB_NLS_DEFAULT=y
168CONFIG_CIFS=m 147CONFIG_CIFS=m
169CONFIG_NLS_CODEPAGE_437=y 148CONFIG_NLS_CODEPAGE_437=y
170CONFIG_NLS_CODEPAGE_737=m 149CONFIG_NLS_CODEPAGE_737=m
@@ -204,30 +183,24 @@ CONFIG_NLS_ISO8859_15=m
204CONFIG_NLS_KOI8_R=m 183CONFIG_NLS_KOI8_R=m
205CONFIG_NLS_KOI8_U=m 184CONFIG_NLS_KOI8_U=m
206CONFIG_NLS_UTF8=y 185CONFIG_NLS_UTF8=y
207CONFIG_MAGIC_SYSRQ=y
208CONFIG_DEBUG_FS=y 186CONFIG_DEBUG_FS=y
209CONFIG_HEADERS_CHECK=y 187CONFIG_HEADERS_CHECK=y
188CONFIG_MAGIC_SYSRQ=y
210CONFIG_DEBUG_KERNEL=y 189CONFIG_DEBUG_KERNEL=y
211CONFIG_DEBUG_MUTEXES=y 190CONFIG_DEBUG_MUTEXES=y
212# CONFIG_RCU_CPU_STALL_DETECTOR is not set
213CONFIG_KEYS=y 191CONFIG_KEYS=y
214CONFIG_CRYPTO_NULL=m
215CONFIG_CRYPTO_TEST=m 192CONFIG_CRYPTO_TEST=m
216CONFIG_CRYPTO_MD4=m
217CONFIG_CRYPTO_MICHAEL_MIC=m 193CONFIG_CRYPTO_MICHAEL_MIC=m
218CONFIG_CRYPTO_SHA256=m
219CONFIG_CRYPTO_SHA512=m 194CONFIG_CRYPTO_SHA512=m
220CONFIG_CRYPTO_TGR192=m 195CONFIG_CRYPTO_TGR192=m
221CONFIG_CRYPTO_WP512=m 196CONFIG_CRYPTO_WP512=m
222CONFIG_CRYPTO_AES=m
223CONFIG_CRYPTO_ANUBIS=m 197CONFIG_CRYPTO_ANUBIS=m
224CONFIG_CRYPTO_ARC4=m
225CONFIG_CRYPTO_BLOWFISH=m 198CONFIG_CRYPTO_BLOWFISH=m
226CONFIG_CRYPTO_CAST6=m 199CONFIG_CRYPTO_CAST6=m
227CONFIG_CRYPTO_KHAZAD=m 200CONFIG_CRYPTO_KHAZAD=m
228CONFIG_CRYPTO_SERPENT=m 201CONFIG_CRYPTO_SERPENT=m
229CONFIG_CRYPTO_TEA=m 202CONFIG_CRYPTO_TEA=m
230CONFIG_CRYPTO_TWOFISH=m 203CONFIG_CRYPTO_TWOFISH=m
231# CONFIG_CRYPTO_ANSI_CPRNG is not set
232# CONFIG_CRYPTO_HW is not set 204# CONFIG_CRYPTO_HW is not set
233CONFIG_LIBCRC32C=m 205CONFIG_LIBCRC32C=m
206CONFIG_FONTS=y
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index 8688ba7f5966..37ae4b57c001 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -2,15 +2,11 @@ CONFIG_LOCALVERSION="-32bit"
2# CONFIG_LOCALVERSION_AUTO is not set 2# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_SYSVIPC=y 3CONFIG_SYSVIPC=y
4CONFIG_POSIX_MQUEUE=y 4CONFIG_POSIX_MQUEUE=y
5CONFIG_FHANDLE=y
6CONFIG_BSD_PROCESS_ACCT=y 5CONFIG_BSD_PROCESS_ACCT=y
7CONFIG_IKCONFIG=y 6CONFIG_IKCONFIG=y
8CONFIG_IKCONFIG_PROC=y 7CONFIG_IKCONFIG_PROC=y
9CONFIG_LOG_BUF_SHIFT=16 8CONFIG_LOG_BUF_SHIFT=16
10CONFIG_BLK_DEV_INITRD=y 9CONFIG_BLK_DEV_INITRD=y
11CONFIG_RD_BZIP2=y
12CONFIG_RD_LZMA=y
13CONFIG_RD_LZO=y
14CONFIG_EXPERT=y 10CONFIG_EXPERT=y
15CONFIG_SYSCTL_SYSCALL=y 11CONFIG_SYSCTL_SYSCALL=y
16CONFIG_PERF_EVENTS=y 12CONFIG_PERF_EVENTS=y
@@ -49,7 +45,6 @@ CONFIG_INET_ESP=m
49# CONFIG_INET_XFRM_MODE_TRANSPORT is not set 45# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
50# CONFIG_INET_XFRM_MODE_TUNNEL is not set 46# CONFIG_INET_XFRM_MODE_TUNNEL is not set
51# CONFIG_INET_XFRM_MODE_BEET is not set 47# CONFIG_INET_XFRM_MODE_BEET is not set
52# CONFIG_INET_LRO is not set
53CONFIG_INET_DIAG=m 48CONFIG_INET_DIAG=m
54CONFIG_LLC2=m 49CONFIG_LLC2=m
55# CONFIG_WIRELESS is not set 50# CONFIG_WIRELESS is not set
@@ -149,10 +144,8 @@ CONFIG_PRINTER=m
149CONFIG_PPDEV=m 144CONFIG_PPDEV=m
150# CONFIG_HW_RANDOM is not set 145# CONFIG_HW_RANDOM is not set
151CONFIG_I2C=y 146CONFIG_I2C=y
152CONFIG_POWER_SUPPLY=y
153# CONFIG_HWMON is not set 147# CONFIG_HWMON is not set
154CONFIG_AGP=y 148CONFIG_AGP=y
155CONFIG_VIDEO_OUTPUT_CONTROL=y
156CONFIG_FB=y 149CONFIG_FB=y
157CONFIG_FB_FOREIGN_ENDIAN=y 150CONFIG_FB_FOREIGN_ENDIAN=y
158CONFIG_FB_MODE_HELPERS=y 151CONFIG_FB_MODE_HELPERS=y
@@ -169,11 +162,8 @@ CONFIG_LOGO=y
169# CONFIG_LOGO_LINUX_CLUT224 is not set 162# CONFIG_LOGO_LINUX_CLUT224 is not set
170CONFIG_SOUND=m 163CONFIG_SOUND=m
171CONFIG_SND=m 164CONFIG_SND=m
172CONFIG_SND_SEQUENCER=m
173CONFIG_SND_MIXER_OSS=m
174CONFIG_SND_PCM_OSS=m
175CONFIG_SND_SEQUENCER_OSS=y
176CONFIG_SND_DYNAMIC_MINORS=y 165CONFIG_SND_DYNAMIC_MINORS=y
166CONFIG_SND_SEQUENCER=m
177CONFIG_SND_AD1889=m 167CONFIG_SND_AD1889=m
178CONFIG_SND_HARMONY=m 168CONFIG_SND_HARMONY=m
179CONFIG_HIDRAW=y 169CONFIG_HIDRAW=y
@@ -223,12 +213,7 @@ CONFIG_EXT2_FS=y
223CONFIG_EXT2_FS_XATTR=y 213CONFIG_EXT2_FS_XATTR=y
224CONFIG_EXT2_FS_SECURITY=y 214CONFIG_EXT2_FS_SECURITY=y
225CONFIG_EXT3_FS=y 215CONFIG_EXT3_FS=y
226# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
227CONFIG_EXT3_FS_SECURITY=y 216CONFIG_EXT3_FS_SECURITY=y
228CONFIG_EXT4_FS=y
229CONFIG_XFS_FS=m
230CONFIG_XFS_QUOTA=y
231CONFIG_XFS_RT=y
232CONFIG_QUOTA=y 217CONFIG_QUOTA=y
233CONFIG_QUOTA_NETLINK_INTERFACE=y 218CONFIG_QUOTA_NETLINK_INTERFACE=y
234CONFIG_QFMT_V2=y 219CONFIG_QFMT_V2=y
@@ -293,15 +278,12 @@ CONFIG_DEBUG_MEMORY_INIT=y
293CONFIG_DEBUG_STACKOVERFLOW=y 278CONFIG_DEBUG_STACKOVERFLOW=y
294CONFIG_DEBUG_SHIRQ=y 279CONFIG_DEBUG_SHIRQ=y
295CONFIG_DETECT_HUNG_TASK=y 280CONFIG_DETECT_HUNG_TASK=y
296CONFIG_TIMER_STATS=y
297CONFIG_DEBUG_RT_MUTEXES=y 281CONFIG_DEBUG_RT_MUTEXES=y
298CONFIG_DEBUG_SPINLOCK=y 282CONFIG_DEBUG_SPINLOCK=y
299CONFIG_DEBUG_MUTEXES=y 283CONFIG_DEBUG_MUTEXES=y
300CONFIG_RCU_CPU_STALL_INFO=y
301CONFIG_LATENCYTOP=y 284CONFIG_LATENCYTOP=y
302CONFIG_LKDTM=m 285CONFIG_LKDTM=m
303CONFIG_KEYS=y 286CONFIG_KEYS=y
304CONFIG_CRYPTO_NULL=m
305CONFIG_CRYPTO_TEST=m 287CONFIG_CRYPTO_TEST=m
306CONFIG_CRYPTO_HMAC=y 288CONFIG_CRYPTO_HMAC=y
307CONFIG_CRYPTO_MD5=y 289CONFIG_CRYPTO_MD5=y
@@ -320,7 +302,6 @@ CONFIG_CRYPTO_SERPENT=m
320CONFIG_CRYPTO_TEA=m 302CONFIG_CRYPTO_TEA=m
321CONFIG_CRYPTO_TWOFISH=m 303CONFIG_CRYPTO_TWOFISH=m
322CONFIG_CRYPTO_DEFLATE=y 304CONFIG_CRYPTO_DEFLATE=y
323# CONFIG_CRYPTO_ANSI_CPRNG is not set
324CONFIG_CRC_CCITT=m 305CONFIG_CRC_CCITT=m
325CONFIG_CRC_T10DIF=y 306CONFIG_CRC_T10DIF=y
326CONFIG_FONTS=y 307CONFIG_FONTS=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index c564e6e1fa23..d39e7f821aba 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -8,10 +8,11 @@ CONFIG_TASKSTATS=y
8CONFIG_TASK_DELAY_ACCT=y 8CONFIG_TASK_DELAY_ACCT=y
9CONFIG_TASK_XACCT=y 9CONFIG_TASK_XACCT=y
10CONFIG_TASK_IO_ACCOUNTING=y 10CONFIG_TASK_IO_ACCOUNTING=y
11# CONFIG_UTS_NS is not set 11CONFIG_CGROUPS=y
12# CONFIG_IPC_NS is not set 12CONFIG_MEMCG=y
13# CONFIG_PID_NS is not set 13CONFIG_MEMCG_SWAP=y
14# CONFIG_NET_NS is not set 14CONFIG_CGROUP_PIDS=y
15CONFIG_CPUSETS=y
15CONFIG_RELAY=y 16CONFIG_RELAY=y
16CONFIG_BLK_DEV_INITRD=y 17CONFIG_BLK_DEV_INITRD=y
17CONFIG_CC_OPTIMIZE_FOR_SIZE=y 18CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -52,7 +53,6 @@ CONFIG_INET_ESP=m
52CONFIG_INET_XFRM_MODE_TRANSPORT=m 53CONFIG_INET_XFRM_MODE_TRANSPORT=m
53CONFIG_INET_XFRM_MODE_TUNNEL=m 54CONFIG_INET_XFRM_MODE_TUNNEL=m
54CONFIG_INET_XFRM_MODE_BEET=m 55CONFIG_INET_XFRM_MODE_BEET=m
55CONFIG_INET_LRO=m
56CONFIG_INET_DIAG=m 56CONFIG_INET_DIAG=m
57CONFIG_NETFILTER=y 57CONFIG_NETFILTER=y
58# CONFIG_NETFILTER_ADVANCED is not set 58# CONFIG_NETFILTER_ADVANCED is not set
@@ -84,7 +84,6 @@ CONFIG_PATA_SIL680=y
84CONFIG_ATA_GENERIC=y 84CONFIG_ATA_GENERIC=y
85CONFIG_MD=y 85CONFIG_MD=y
86CONFIG_MD_LINEAR=m 86CONFIG_MD_LINEAR=m
87CONFIG_MD_RAID0=m
88CONFIG_BLK_DEV_DM=m 87CONFIG_BLK_DEV_DM=m
89CONFIG_DM_RAID=m 88CONFIG_DM_RAID=m
90CONFIG_DM_UEVENT=y 89CONFIG_DM_UEVENT=y
@@ -138,21 +137,21 @@ CONFIG_QLGE=m
138# CONFIG_NET_VENDOR_TI is not set 137# CONFIG_NET_VENDOR_TI is not set
139# CONFIG_NET_VENDOR_VIA is not set 138# CONFIG_NET_VENDOR_VIA is not set
140# CONFIG_NET_VENDOR_WIZNET is not set 139# CONFIG_NET_VENDOR_WIZNET is not set
140CONFIG_MDIO_BITBANG=m
141CONFIG_PHYLIB=y 141CONFIG_PHYLIB=y
142CONFIG_MARVELL_PHY=m
143CONFIG_DAVICOM_PHY=m
144CONFIG_QSEMI_PHY=m
145CONFIG_LXT_PHY=m
146CONFIG_CICADA_PHY=m
147CONFIG_VITESSE_PHY=m
148CONFIG_SMSC_PHY=m
149CONFIG_BROADCOM_PHY=m 142CONFIG_BROADCOM_PHY=m
143CONFIG_CICADA_PHY=m
144CONFIG_DAVICOM_PHY=m
150CONFIG_ICPLUS_PHY=m 145CONFIG_ICPLUS_PHY=m
151CONFIG_REALTEK_PHY=m 146CONFIG_LSI_ET1011C_PHY=m
147CONFIG_LXT_PHY=m
148CONFIG_MARVELL_PHY=m
152CONFIG_NATIONAL_PHY=m 149CONFIG_NATIONAL_PHY=m
150CONFIG_QSEMI_PHY=m
151CONFIG_REALTEK_PHY=m
152CONFIG_SMSC_PHY=m
153CONFIG_STE10XP=m 153CONFIG_STE10XP=m
154CONFIG_LSI_ET1011C_PHY=m 154CONFIG_VITESSE_PHY=m
155CONFIG_MDIO_BITBANG=m
156CONFIG_SLIP=m 155CONFIG_SLIP=m
157CONFIG_SLIP_COMPRESSED=y 156CONFIG_SLIP_COMPRESSED=y
158CONFIG_SLIP_SMART=y 157CONFIG_SLIP_SMART=y
@@ -166,10 +165,8 @@ CONFIG_INPUT_MISC=y
166CONFIG_SERIO_SERPORT=m 165CONFIG_SERIO_SERPORT=m
167# CONFIG_HP_SDC is not set 166# CONFIG_HP_SDC is not set
168CONFIG_SERIO_RAW=m 167CONFIG_SERIO_RAW=m
169CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
170# CONFIG_LEGACY_PTYS is not set 168# CONFIG_LEGACY_PTYS is not set
171CONFIG_NOZOMI=m 169CONFIG_NOZOMI=m
172# CONFIG_DEVKMEM is not set
173CONFIG_SERIAL_8250=y 170CONFIG_SERIAL_8250=y
174# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set 171# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
175CONFIG_SERIAL_8250_CONSOLE=y 172CONFIG_SERIAL_8250_CONSOLE=y
@@ -207,10 +204,8 @@ CONFIG_AGP=y
207CONFIG_AGP_PARISC=y 204CONFIG_AGP_PARISC=y
208CONFIG_DRM=y 205CONFIG_DRM=y
209CONFIG_DRM_RADEON=y 206CONFIG_DRM_RADEON=y
210CONFIG_DRM_RADEON_UMS=y
211CONFIG_FIRMWARE_EDID=y 207CONFIG_FIRMWARE_EDID=y
212CONFIG_FB_MODE_HELPERS=y 208CONFIG_FB_MODE_HELPERS=y
213CONFIG_BACKLIGHT_LCD_SUPPORT=y
214# CONFIG_BACKLIGHT_GENERIC is not set 209# CONFIG_BACKLIGHT_GENERIC is not set
215CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y 210CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
216CONFIG_LOGO=y 211CONFIG_LOGO=y
@@ -246,8 +241,6 @@ CONFIG_EXT2_FS_XATTR=y
246CONFIG_EXT2_FS_SECURITY=y 241CONFIG_EXT2_FS_SECURITY=y
247CONFIG_EXT3_FS=y 242CONFIG_EXT3_FS=y
248CONFIG_EXT3_FS_SECURITY=y 243CONFIG_EXT3_FS_SECURITY=y
249CONFIG_EXT4_FS=y
250CONFIG_EXT4_FS_SECURITY=y
251CONFIG_XFS_FS=m 244CONFIG_XFS_FS=m
252CONFIG_BTRFS_FS=m 245CONFIG_BTRFS_FS=m
253CONFIG_QUOTA=y 246CONFIG_QUOTA=y
@@ -286,27 +279,16 @@ CONFIG_DEBUG_FS=y
286CONFIG_MAGIC_SYSRQ=y 279CONFIG_MAGIC_SYSRQ=y
287CONFIG_DEBUG_KERNEL=y 280CONFIG_DEBUG_KERNEL=y
288CONFIG_DEBUG_STACKOVERFLOW=y 281CONFIG_DEBUG_STACKOVERFLOW=y
289CONFIG_LOCKUP_DETECTOR=y
290CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
291CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
292# CONFIG_SCHED_DEBUG is not set 282# CONFIG_SCHED_DEBUG is not set
293CONFIG_TIMER_STATS=y
294CONFIG_CRYPTO_MANAGER=y 283CONFIG_CRYPTO_MANAGER=y
295CONFIG_CRYPTO_ECB=m 284CONFIG_CRYPTO_ECB=m
296CONFIG_CRYPTO_PCBC=m 285CONFIG_CRYPTO_PCBC=m
297CONFIG_CRYPTO_MD4=m 286CONFIG_CRYPTO_MD4=m
298CONFIG_CRYPTO_MD5=y 287CONFIG_CRYPTO_MD5=y
299CONFIG_CRYPTO_MICHAEL_MIC=m 288CONFIG_CRYPTO_MICHAEL_MIC=m
300CONFIG_CRYPTO_SHA256=m
301CONFIG_CRYPTO_ARC4=m 289CONFIG_CRYPTO_ARC4=m
302CONFIG_CRYPTO_FCRYPT=m 290CONFIG_CRYPTO_FCRYPT=m
303CONFIG_CRYPTO_DEFLATE=m 291CONFIG_CRYPTO_DEFLATE=m
304# CONFIG_CRYPTO_HW is not set 292# CONFIG_CRYPTO_HW is not set
305CONFIG_CRC_CCITT=m 293CONFIG_CRC_CCITT=m
306CONFIG_LIBCRC32C=y 294CONFIG_LIBCRC32C=y
307CONFIG_XZ_DEC_X86=y
308CONFIG_XZ_DEC_POWERPC=y
309CONFIG_XZ_DEC_IA64=y
310CONFIG_XZ_DEC_ARM=y
311CONFIG_XZ_DEC_ARMTHUMB=y
312CONFIG_XZ_DEC_SPARC=y
diff --git a/arch/parisc/include/asm/pdcpat.h b/arch/parisc/include/asm/pdcpat.h
index 32e105fb8adb..e3c0586260d8 100644
--- a/arch/parisc/include/asm/pdcpat.h
+++ b/arch/parisc/include/asm/pdcpat.h
@@ -150,7 +150,7 @@
150#define PDC_PAT_MEM_SETGM 9L /* Set Good Memory value */ 150#define PDC_PAT_MEM_SETGM 9L /* Set Good Memory value */
151#define PDC_PAT_MEM_ADD_PAGE 10L /* ADDs a page to the cell */ 151#define PDC_PAT_MEM_ADD_PAGE 10L /* ADDs a page to the cell */
152#define PDC_PAT_MEM_ADDRESS 11L /* Get Physical Location From */ 152#define PDC_PAT_MEM_ADDRESS 11L /* Get Physical Location From */
153 /* Memory Address */ 153 /* Memory Address */
154#define PDC_PAT_MEM_GET_TXT_SIZE 12L /* Get Formatted Text Size */ 154#define PDC_PAT_MEM_GET_TXT_SIZE 12L /* Get Formatted Text Size */
155#define PDC_PAT_MEM_GET_PD_TXT 13L /* Get PD Formatted Text */ 155#define PDC_PAT_MEM_GET_PD_TXT 13L /* Get PD Formatted Text */
156#define PDC_PAT_MEM_GET_CELL_TXT 14L /* Get Cell Formatted Text */ 156#define PDC_PAT_MEM_GET_CELL_TXT 14L /* Get Cell Formatted Text */
@@ -228,6 +228,17 @@ struct pdc_pat_mem_read_pd_retinfo { /* PDC_PAT_MEM/PDC_PAT_MEM_PD_READ */
228 unsigned long pdt_entries; 228 unsigned long pdt_entries;
229}; 229};
230 230
231struct pdc_pat_mem_phys_mem_location { /* PDC_PAT_MEM/PDC_PAT_MEM_ADDRESS */
232 u64 cabinet:8;
233 u64 ign1:8;
234 u64 ign2:8;
235 u64 cell_slot:8;
236 u64 ign3:8;
237 u64 dimm_slot:8; /* DIMM slot, e.g. 0x1A, 0x2B, show user hex value! */
238 u64 ign4:8;
239 u64 source:4; /* for mem: always 0x07 */
240 u64 source_detail:4; /* for mem: always 0x04 (SIMM or DIMM) */
241};
231 242
232struct pdc_pat_pd_addr_map_entry { 243struct pdc_pat_pd_addr_map_entry {
233 unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */ 244 unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */
@@ -319,6 +330,9 @@ extern int pdc_pat_mem_read_cell_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
319extern int pdc_pat_mem_read_pd_pdt(struct pdc_pat_mem_read_pd_retinfo *pret, 330extern int pdc_pat_mem_read_pd_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
320 unsigned long *pdt_entries_ptr, unsigned long count, 331 unsigned long *pdt_entries_ptr, unsigned long count,
321 unsigned long offset); 332 unsigned long offset);
333extern int pdc_pat_mem_get_dimm_phys_location(
334 struct pdc_pat_mem_phys_mem_location *pret,
335 unsigned long phys_addr);
322 336
323#endif /* __ASSEMBLY__ */ 337#endif /* __ASSEMBLY__ */
324 338
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index c32a09095216..85a92db70afc 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -453,8 +453,8 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
453 before it can be accessed through the kernel mapping. */ 453 before it can be accessed through the kernel mapping. */
454 preempt_disable(); 454 preempt_disable();
455 flush_dcache_page_asm(__pa(vfrom), vaddr); 455 flush_dcache_page_asm(__pa(vfrom), vaddr);
456 preempt_enable();
457 copy_page_asm(vto, vfrom); 456 copy_page_asm(vto, vfrom);
457 preempt_enable();
458} 458}
459EXPORT_SYMBOL(copy_user_page); 459EXPORT_SYMBOL(copy_user_page);
460 460
@@ -539,6 +539,10 @@ void flush_cache_mm(struct mm_struct *mm)
539 struct vm_area_struct *vma; 539 struct vm_area_struct *vma;
540 pgd_t *pgd; 540 pgd_t *pgd;
541 541
542 /* Flush the TLB to avoid speculation if coherency is required. */
543 if (parisc_requires_coherency())
544 flush_tlb_all();
545
542 /* Flushing the whole cache on each cpu takes forever on 546 /* Flushing the whole cache on each cpu takes forever on
543 rp3440, etc. So, avoid it if the mm isn't too big. */ 547 rp3440, etc. So, avoid it if the mm isn't too big. */
544 if (mm_total_size(mm) >= parisc_cache_flush_threshold) { 548 if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
@@ -577,33 +581,22 @@ void flush_cache_mm(struct mm_struct *mm)
577void flush_cache_range(struct vm_area_struct *vma, 581void flush_cache_range(struct vm_area_struct *vma,
578 unsigned long start, unsigned long end) 582 unsigned long start, unsigned long end)
579{ 583{
580 unsigned long addr;
581 pgd_t *pgd;
582
583 BUG_ON(!vma->vm_mm->context); 584 BUG_ON(!vma->vm_mm->context);
584 585
586 /* Flush the TLB to avoid speculation if coherency is required. */
587 if (parisc_requires_coherency())
588 flush_tlb_range(vma, start, end);
589
585 if ((end - start) >= parisc_cache_flush_threshold) { 590 if ((end - start) >= parisc_cache_flush_threshold) {
586 flush_cache_all(); 591 flush_cache_all();
587 return; 592 return;
588 } 593 }
589 594
590 if (vma->vm_mm->context == mfsp(3)) { 595 BUG_ON(vma->vm_mm->context != mfsp(3));
591 flush_user_dcache_range_asm(start, end);
592 if (vma->vm_flags & VM_EXEC)
593 flush_user_icache_range_asm(start, end);
594 return;
595 }
596 596
597 pgd = vma->vm_mm->pgd; 597 flush_user_dcache_range_asm(start, end);
598 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { 598 if (vma->vm_flags & VM_EXEC)
599 unsigned long pfn; 599 flush_user_icache_range_asm(start, end);
600 pte_t *ptep = get_ptep(pgd, addr);
601 if (!ptep)
602 continue;
603 pfn = pte_pfn(*ptep);
604 if (pfn_valid(pfn))
605 __flush_cache_page(vma, addr, PFN_PHYS(pfn));
606 }
607} 600}
608 601
609void 602void
@@ -612,7 +605,8 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
612 BUG_ON(!vma->vm_mm->context); 605 BUG_ON(!vma->vm_mm->context);
613 606
614 if (pfn_valid(pfn)) { 607 if (pfn_valid(pfn)) {
615 flush_tlb_page(vma, vmaddr); 608 if (parisc_requires_coherency())
609 flush_tlb_page(vma, vmaddr);
616 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); 610 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
617 } 611 }
618} 612}
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 98190252c12f..f622a311d04a 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -1481,12 +1481,44 @@ int pdc_pat_mem_read_pd_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
1481 unsigned long offset) 1481 unsigned long offset)
1482{ 1482{
1483 int retval; 1483 int retval;
1484 unsigned long flags; 1484 unsigned long flags, entries;
1485 1485
1486 spin_lock_irqsave(&pdc_lock, flags); 1486 spin_lock_irqsave(&pdc_lock, flags);
1487 retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_PD_READ, 1487 retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_PD_READ,
1488 __pa(&pret), __pa(pdt_entries_ptr), 1488 __pa(&pdc_result), __pa(pdt_entries_ptr),
1489 count, offset); 1489 count, offset);
1490
1491 if (retval == PDC_OK) {
1492 entries = min(pdc_result[0], count);
1493 pret->actual_count_bytes = entries;
1494 pret->pdt_entries = entries / sizeof(unsigned long);
1495 }
1496
1497 spin_unlock_irqrestore(&pdc_lock, flags);
1498
1499 return retval;
1500}
1501
1502/**
1503 * pdc_pat_mem_get_dimm_phys_location - Get physical DIMM slot via PAT firmware
1504 * @pret: ptr to hold returned information
1505 * @phys_addr: physical address to examine
1506 *
1507 */
1508int pdc_pat_mem_get_dimm_phys_location(
1509 struct pdc_pat_mem_phys_mem_location *pret,
1510 unsigned long phys_addr)
1511{
1512 int retval;
1513 unsigned long flags;
1514
1515 spin_lock_irqsave(&pdc_lock, flags);
1516 retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_ADDRESS,
1517 __pa(&pdc_result), phys_addr);
1518
1519 if (retval == PDC_OK)
1520 memcpy(pret, &pdc_result, sizeof(*pret));
1521
1490 spin_unlock_irqrestore(&pdc_lock, flags); 1522 spin_unlock_irqrestore(&pdc_lock, flags);
1491 1523
1492 return retval; 1524 return retval;
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index ba5e1c7b1f17..5404e4086cb9 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -413,6 +413,10 @@ static inline void stack_overflow_check(struct pt_regs *regs)
413 if (regs->sr[7]) 413 if (regs->sr[7])
414 return; 414 return;
415 415
416 /* exit if already in panic */
417 if (sysctl_panic_on_stackoverflow < 0)
418 return;
419
416 /* calculate kernel stack usage */ 420 /* calculate kernel stack usage */
417 stack_usage = sp - stack_start; 421 stack_usage = sp - stack_start;
418#ifdef CONFIG_IRQSTACKS 422#ifdef CONFIG_IRQSTACKS
@@ -454,8 +458,10 @@ check_kernel_stack:
454#ifdef CONFIG_IRQSTACKS 458#ifdef CONFIG_IRQSTACKS
455panic_check: 459panic_check:
456#endif 460#endif
457 if (sysctl_panic_on_stackoverflow) 461 if (sysctl_panic_on_stackoverflow) {
462 sysctl_panic_on_stackoverflow = -1; /* disable further checks */
458 panic("low stack detected by irq handler - check messages\n"); 463 panic("low stack detected by irq handler - check messages\n");
464 }
459#endif 465#endif
460} 466}
461 467
diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c
index f3a797e670b0..d02874ecb94d 100644
--- a/arch/parisc/kernel/pdt.c
+++ b/arch/parisc/kernel/pdt.c
@@ -112,10 +112,12 @@ void __init pdc_pdt_init(void)
112#ifdef CONFIG_64BIT 112#ifdef CONFIG_64BIT
113 struct pdc_pat_mem_read_pd_retinfo pat_pret; 113 struct pdc_pat_mem_read_pd_retinfo pat_pret;
114 114
115 /* try old obsolete PAT firmware function first */
116 pdt_type = PDT_PAT_OLD;
115 ret = pdc_pat_mem_read_cell_pdt(&pat_pret, pdt_entry, 117 ret = pdc_pat_mem_read_cell_pdt(&pat_pret, pdt_entry,
116 MAX_PDT_ENTRIES); 118 MAX_PDT_ENTRIES);
117 if (ret != PDC_OK) { 119 if (ret != PDC_OK) {
118 pdt_type = PDT_PAT_OLD; 120 pdt_type = PDT_PAT_NEW;
119 ret = pdc_pat_mem_read_pd_pdt(&pat_pret, pdt_entry, 121 ret = pdc_pat_mem_read_pd_pdt(&pat_pret, pdt_entry,
120 MAX_PDT_TABLE_SIZE, 0); 122 MAX_PDT_TABLE_SIZE, 0);
121 } 123 }
@@ -131,11 +133,20 @@ void __init pdc_pdt_init(void)
131 } 133 }
132 134
133 for (i = 0; i < pdt_status.pdt_entries; i++) { 135 for (i = 0; i < pdt_status.pdt_entries; i++) {
134 if (i < 20) 136 struct pdc_pat_mem_phys_mem_location loc;
135 pr_warn("PDT: BAD PAGE #%d at 0x%08lx (error_type = %lu)\n", 137
136 i, 138 /* get DIMM slot number */
137 pdt_entry[i] & PAGE_MASK, 139 loc.dimm_slot = 0xff;
138 pdt_entry[i] & 1); 140#ifdef CONFIG_64BIT
141 pdc_pat_mem_get_dimm_phys_location(&loc, pdt_entry[i]);
142#endif
143
144 pr_warn("PDT: BAD PAGE #%d at 0x%08lx, "
145 "DIMM slot %02x (error_type = %lu)\n",
146 i,
147 pdt_entry[i] & PAGE_MASK,
148 loc.dimm_slot,
149 pdt_entry[i] & 1);
139 150
140 /* mark memory page bad */ 151 /* mark memory page bad */
141 memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE); 152 memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE);
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index b64d7d21646e..a45a67d526f8 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -53,6 +53,7 @@
53#include <linux/uaccess.h> 53#include <linux/uaccess.h>
54#include <linux/rcupdate.h> 54#include <linux/rcupdate.h>
55#include <linux/random.h> 55#include <linux/random.h>
56#include <linux/nmi.h>
56 57
57#include <asm/io.h> 58#include <asm/io.h>
58#include <asm/asm-offsets.h> 59#include <asm/asm-offsets.h>
@@ -145,6 +146,7 @@ void machine_power_off(void)
145 146
146 /* prevent soft lockup/stalled CPU messages for endless loop. */ 147 /* prevent soft lockup/stalled CPU messages for endless loop. */
147 rcu_sysrq_start(); 148 rcu_sysrq_start();
149 lockup_detector_suspend();
148 for (;;); 150 for (;;);
149} 151}
150 152
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 3d6ef1b29c6a..ffe2cbf52d1a 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -78,6 +78,8 @@ SECTIONS
78 *(.text.sys_exit) 78 *(.text.sys_exit)
79 *(.text.do_sigaltstack) 79 *(.text.do_sigaltstack)
80 *(.text.do_fork) 80 *(.text.do_fork)
81 *(.text.div)
82 *($$*) /* millicode routines */
81 *(.text.*) 83 *(.text.*)
82 *(.fixup) 84 *(.fixup)
83 *(.lock.text) /* out-of-line lock text */ 85 *(.lock.text) /* out-of-line lock text */
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 8d4ed73d5490..e2b3e7a00c9e 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -59,6 +59,19 @@ machine-$(CONFIG_PPC64) += 64
59machine-$(CONFIG_CPU_LITTLE_ENDIAN) += le 59machine-$(CONFIG_CPU_LITTLE_ENDIAN) += le
60UTS_MACHINE := $(subst $(space),,$(machine-y)) 60UTS_MACHINE := $(subst $(space),,$(machine-y))
61 61
62# XXX This needs to be before we override LD below
63ifdef CONFIG_PPC32
64KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
65else
66ifeq ($(call ld-ifversion, -ge, 225000000, y),y)
67# Have the linker provide sfpr if possible.
68# There is a corresponding test in arch/powerpc/lib/Makefile
69KBUILD_LDFLAGS_MODULE += --save-restore-funcs
70else
71KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
72endif
73endif
74
62ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) 75ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
63override LD += -EL 76override LD += -EL
64LDEMULATION := lppc 77LDEMULATION := lppc
@@ -190,18 +203,6 @@ else
190CHECKFLAGS += -D__LITTLE_ENDIAN__ 203CHECKFLAGS += -D__LITTLE_ENDIAN__
191endif 204endif
192 205
193ifdef CONFIG_PPC32
194KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
195else
196ifeq ($(call ld-ifversion, -ge, 225000000, y),y)
197# Have the linker provide sfpr if possible.
198# There is a corresponding test in arch/powerpc/lib/Makefile
199KBUILD_LDFLAGS_MODULE += --save-restore-funcs
200else
201KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
202endif
203endif
204
205ifeq ($(CONFIG_476FPE_ERR46),y) 206ifeq ($(CONFIG_476FPE_ERR46),y)
206 KBUILD_LDFLAGS_MODULE += --ppc476-workaround \ 207 KBUILD_LDFLAGS_MODULE += --ppc476-workaround \
207 -T $(srctree)/arch/powerpc/platforms/44x/ppc476_modules.lds 208 -T $(srctree)/arch/powerpc/platforms/44x/ppc476_modules.lds
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 77529a3e3811..5b4023c616f7 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -59,13 +59,14 @@ extern struct patb_entry *partition_tb;
59#define PRTS_MASK 0x1f /* process table size field */ 59#define PRTS_MASK 0x1f /* process table size field */
60#define PRTB_MASK 0x0ffffffffffff000UL 60#define PRTB_MASK 0x0ffffffffffff000UL
61 61
62/* 62/* Number of supported PID bits */
63 * Limit process table to PAGE_SIZE table. This 63extern unsigned int mmu_pid_bits;
64 * also limit the max pid we can support. 64
65 * MAX_USER_CONTEXT * 16 bytes of space. 65/* Base PID to allocate from */
66 */ 66extern unsigned int mmu_base_pid;
67#define PRTB_SIZE_SHIFT (CONTEXT_BITS + 4) 67
68#define PRTB_ENTRIES (1ul << CONTEXT_BITS) 68#define PRTB_SIZE_SHIFT (mmu_pid_bits + 4)
69#define PRTB_ENTRIES (1ul << mmu_pid_bits)
69 70
70/* 71/*
71 * Power9 currently only support 64K partition table size. 72 * Power9 currently only support 64K partition table size.
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index da7e9432fa8f..0c76675394c5 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -45,7 +45,7 @@ extern void set_context(unsigned long id, pgd_t *pgd);
45 45
46#ifdef CONFIG_PPC_BOOK3S_64 46#ifdef CONFIG_PPC_BOOK3S_64
47extern void radix__switch_mmu_context(struct mm_struct *prev, 47extern void radix__switch_mmu_context(struct mm_struct *prev,
48 struct mm_struct *next); 48 struct mm_struct *next);
49static inline void switch_mmu_context(struct mm_struct *prev, 49static inline void switch_mmu_context(struct mm_struct *prev,
50 struct mm_struct *next, 50 struct mm_struct *next,
51 struct task_struct *tsk) 51 struct task_struct *tsk)
@@ -67,6 +67,12 @@ extern void __destroy_context(unsigned long context_id);
67extern void mmu_context_init(void); 67extern void mmu_context_init(void);
68#endif 68#endif
69 69
70#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
71extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
72#else
73static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
74#endif
75
70extern void switch_cop(struct mm_struct *next); 76extern void switch_cop(struct mm_struct *next);
71extern int use_cop(unsigned long acop, struct mm_struct *mm); 77extern int use_cop(unsigned long acop, struct mm_struct *mm);
72extern void drop_cop(unsigned long acop, struct mm_struct *mm); 78extern void drop_cop(unsigned long acop, struct mm_struct *mm);
@@ -79,9 +85,13 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
79 struct mm_struct *next, 85 struct mm_struct *next,
80 struct task_struct *tsk) 86 struct task_struct *tsk)
81{ 87{
88 bool new_on_cpu = false;
89
82 /* Mark this context has been used on the new CPU */ 90 /* Mark this context has been used on the new CPU */
83 if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) 91 if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
84 cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); 92 cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
93 new_on_cpu = true;
94 }
85 95
86 /* 32-bit keeps track of the current PGDIR in the thread struct */ 96 /* 32-bit keeps track of the current PGDIR in the thread struct */
87#ifdef CONFIG_PPC32 97#ifdef CONFIG_PPC32
@@ -109,6 +119,10 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
109 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 119 if (cpu_has_feature(CPU_FTR_ALTIVEC))
110 asm volatile ("dssall"); 120 asm volatile ("dssall");
111#endif /* CONFIG_ALTIVEC */ 121#endif /* CONFIG_ALTIVEC */
122
123 if (new_on_cpu)
124 radix_kvm_prefetch_workaround(next);
125
112 /* 126 /*
113 * The actual HW switching method differs between the various 127 * The actual HW switching method differs between the various
114 * sub architectures. Out of line for now 128 * sub architectures. Out of line for now
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 8cb0190e2a73..b42812e014c0 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -164,8 +164,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
164 goto out; 164 goto out;
165 } 165 }
166 166
167 if (kvm->arch.hpt.virt) 167 if (kvm->arch.hpt.virt) {
168 kvmppc_free_hpt(&kvm->arch.hpt); 168 kvmppc_free_hpt(&kvm->arch.hpt);
169 kvmppc_rmap_reset(kvm);
170 }
169 171
170 err = kvmppc_allocate_hpt(&info, order); 172 err = kvmppc_allocate_hpt(&info, order);
171 if (err < 0) 173 if (err < 0)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0b436df746fc..359c79cdf0cc 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3211,6 +3211,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
3211 run->fail_entry.hardware_entry_failure_reason = 0; 3211 run->fail_entry.hardware_entry_failure_reason = 0;
3212 return -EINVAL; 3212 return -EINVAL;
3213 } 3213 }
3214 /* Enable TM so we can read the TM SPRs */
3215 mtmsr(mfmsr() | MSR_TM);
3214 current->thread.tm_tfhar = mfspr(SPRN_TFHAR); 3216 current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
3215 current->thread.tm_tfiar = mfspr(SPRN_TFIAR); 3217 current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
3216 current->thread.tm_texasr = mfspr(SPRN_TEXASR); 3218 current->thread.tm_texasr = mfspr(SPRN_TEXASR);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index cb44065e2946..c52184a8efdf 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1443,12 +1443,14 @@ mc_cont:
1443 ori r6,r6,1 1443 ori r6,r6,1
1444 mtspr SPRN_CTRLT,r6 1444 mtspr SPRN_CTRLT,r6
14454: 14454:
1446 /* Read the guest SLB and save it away */ 1446 /* Check if we are running hash or radix and store it in cr2 */
1447 ld r5, VCPU_KVM(r9) 1447 ld r5, VCPU_KVM(r9)
1448 lbz r0, KVM_RADIX(r5) 1448 lbz r0, KVM_RADIX(r5)
1449 cmpwi r0, 0 1449 cmpwi cr2,r0,0
1450
1451 /* Read the guest SLB and save it away */
1450 li r5, 0 1452 li r5, 0
1451 bne 3f /* for radix, save 0 entries */ 1453 bne cr2, 3f /* for radix, save 0 entries */
1452 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ 1454 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
1453 mtctr r0 1455 mtctr r0
1454 li r6,0 1456 li r6,0
@@ -1712,11 +1714,6 @@ BEGIN_FTR_SECTION_NESTED(96)
1712END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) 1714END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
1713END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1715END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
171422: 171622:
1715 /* Clear out SLB */
1716 li r5,0
1717 slbmte r5,r5
1718 slbia
1719 ptesync
1720 1717
1721 /* Restore host values of some registers */ 1718 /* Restore host values of some registers */
1722BEGIN_FTR_SECTION 1719BEGIN_FTR_SECTION
@@ -1737,10 +1734,56 @@ BEGIN_FTR_SECTION
1737 mtspr SPRN_PID, r7 1734 mtspr SPRN_PID, r7
1738 mtspr SPRN_IAMR, r8 1735 mtspr SPRN_IAMR, r8
1739END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1736END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1737
1738#ifdef CONFIG_PPC_RADIX_MMU
1739 /*
1740 * Are we running hash or radix ?
1741 */
1742 beq cr2,3f
1743
1744 /* Radix: Handle the case where the guest used an illegal PID */
1745 LOAD_REG_ADDR(r4, mmu_base_pid)
1746 lwz r3, VCPU_GUEST_PID(r9)
1747 lwz r5, 0(r4)
1748 cmpw cr0,r3,r5
1749 blt 2f
1750
1751 /*
1752 * Illegal PID, the HW might have prefetched and cached in the TLB
1753 * some translations for the LPID 0 / guest PID combination which
1754 * Linux doesn't know about, so we need to flush that PID out of
1755 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
1756 * the right context.
1757 */
1758 li r0,0
1759 mtspr SPRN_LPID,r0
1760 isync
1761
1762 /* Then do a congruence class local flush */
1763 ld r6,VCPU_KVM(r9)
1764 lwz r0,KVM_TLB_SETS(r6)
1765 mtctr r0
1766 li r7,0x400 /* IS field = 0b01 */
1767 ptesync
1768 sldi r0,r3,32 /* RS has PID */
17691: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
1770 addi r7,r7,0x1000
1771 bdnz 1b
1772 ptesync
1773
17742: /* Flush the ERAT on radix P9 DD1 guest exit */
1740BEGIN_FTR_SECTION 1775BEGIN_FTR_SECTION
1741 PPC_INVALIDATE_ERAT 1776 PPC_INVALIDATE_ERAT
1742END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1) 1777END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
1778 b 4f
1779#endif /* CONFIG_PPC_RADIX_MMU */
1743 1780
1781 /* Hash: clear out SLB */
17823: li r5,0
1783 slbmte r5,r5
1784 slbia
1785 ptesync
17864:
1744 /* 1787 /*
1745 * POWER7/POWER8 guest -> host partition switch code. 1788 * POWER7/POWER8 guest -> host partition switch code.
1746 * We don't have to lock against tlbies but we do 1789 * We don't have to lock against tlbies but we do
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index abed1fe6992f..a75f63833284 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -126,9 +126,10 @@ static int hash__init_new_context(struct mm_struct *mm)
126static int radix__init_new_context(struct mm_struct *mm) 126static int radix__init_new_context(struct mm_struct *mm)
127{ 127{
128 unsigned long rts_field; 128 unsigned long rts_field;
129 int index; 129 int index, max_id;
130 130
131 index = alloc_context_id(1, PRTB_ENTRIES - 1); 131 max_id = (1 << mmu_pid_bits) - 1;
132 index = alloc_context_id(mmu_base_pid, max_id);
132 if (index < 0) 133 if (index < 0)
133 return index; 134 return index;
134 135
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 5cc50d47ce3f..671a45d86c18 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -25,6 +25,9 @@
25 25
26#include <trace/events/thp.h> 26#include <trace/events/thp.h>
27 27
28unsigned int mmu_pid_bits;
29unsigned int mmu_base_pid;
30
28static int native_register_process_table(unsigned long base, unsigned long pg_sz, 31static int native_register_process_table(unsigned long base, unsigned long pg_sz,
29 unsigned long table_size) 32 unsigned long table_size)
30{ 33{
@@ -261,11 +264,34 @@ static void __init radix_init_pgtable(void)
261 for_each_memblock(memory, reg) 264 for_each_memblock(memory, reg)
262 WARN_ON(create_physical_mapping(reg->base, 265 WARN_ON(create_physical_mapping(reg->base,
263 reg->base + reg->size)); 266 reg->base + reg->size));
267
268 /* Find out how many PID bits are supported */
269 if (cpu_has_feature(CPU_FTR_HVMODE)) {
270 if (!mmu_pid_bits)
271 mmu_pid_bits = 20;
272#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
273 /*
274 * When KVM is possible, we only use the top half of the
275 * PID space to avoid collisions between host and guest PIDs
276 * which can cause problems due to prefetch when exiting the
277 * guest with AIL=3
278 */
279 mmu_base_pid = 1 << (mmu_pid_bits - 1);
280#else
281 mmu_base_pid = 1;
282#endif
283 } else {
284 /* The guest uses the bottom half of the PID space */
285 if (!mmu_pid_bits)
286 mmu_pid_bits = 19;
287 mmu_base_pid = 1;
288 }
289
264 /* 290 /*
265 * Allocate Partition table and process table for the 291 * Allocate Partition table and process table for the
266 * host. 292 * host.
267 */ 293 */
268 BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 36), "Process table size too large."); 294 BUG_ON(PRTB_SIZE_SHIFT > 36);
269 process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT); 295 process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
270 /* 296 /*
271 * Fill in the process table. 297 * Fill in the process table.
@@ -339,6 +365,12 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
339 if (type == NULL || strcmp(type, "cpu") != 0) 365 if (type == NULL || strcmp(type, "cpu") != 0)
340 return 0; 366 return 0;
341 367
368 /* Find MMU PID size */
369 prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
370 if (prop && size == 4)
371 mmu_pid_bits = be32_to_cpup(prop);
372
373 /* Grab page size encodings */
342 prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size); 374 prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
343 if (!prop) 375 if (!prop)
344 return 0; 376 return 0;
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index e94fbd4c8845..781532d7bc4d 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -36,7 +36,7 @@ void subpage_prot_free(struct mm_struct *mm)
36 } 36 }
37 } 37 }
38 addr = 0; 38 addr = 0;
39 for (i = 0; i < 2; ++i) { 39 for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) {
40 p = spt->protptrs[i]; 40 p = spt->protptrs[i];
41 if (!p) 41 if (!p)
42 continue; 42 continue;
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 744e0164ecf5..16ae1bbe13f0 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -12,12 +12,12 @@
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/hugetlb.h> 13#include <linux/hugetlb.h>
14#include <linux/memblock.h> 14#include <linux/memblock.h>
15#include <asm/ppc-opcode.h>
16 15
16#include <asm/ppc-opcode.h>
17#include <asm/tlb.h> 17#include <asm/tlb.h>
18#include <asm/tlbflush.h> 18#include <asm/tlbflush.h>
19#include <asm/trace.h> 19#include <asm/trace.h>
20 20#include <asm/cputhreads.h>
21 21
22#define RIC_FLUSH_TLB 0 22#define RIC_FLUSH_TLB 0
23#define RIC_FLUSH_PWC 1 23#define RIC_FLUSH_PWC 1
@@ -454,3 +454,44 @@ void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
454 else 454 else
455 radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize); 455 radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
456} 456}
457
458#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
459extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
460{
461 unsigned int pid = mm->context.id;
462
463 if (unlikely(pid == MMU_NO_CONTEXT))
464 return;
465
466 /*
467 * If this context hasn't run on that CPU before and KVM is
468 * around, there's a slim chance that the guest on another
469 * CPU just brought in obsolete translation into the TLB of
470 * this CPU due to a bad prefetch using the guest PID on
471 * the way into the hypervisor.
472 *
473 * We work around this here. If KVM is possible, we check if
474 * any sibling thread is in KVM. If it is, the window may exist
475 * and thus we flush that PID from the core.
476 *
477 * A potential future improvement would be to mark which PIDs
478 * have never been used on the system and avoid it if the PID
479 * is new and the process has no other cpumask bit set.
480 */
481 if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
482 int cpu = smp_processor_id();
483 int sib = cpu_first_thread_sibling(cpu);
484 bool flush = false;
485
486 for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
487 if (sib == cpu)
488 continue;
489 if (paca[sib].kvm_hstate.kvm_vcpu)
490 flush = true;
491 }
492 if (flush)
493 _tlbiel_pid(pid, RIC_FLUSH_ALL);
494 }
495}
496EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
497#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index e5bf1e84047f..011ef2180fe6 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -82,7 +82,6 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
82 82
83 of_detach_node(np); 83 of_detach_node(np);
84 of_node_put(parent); 84 of_node_put(parent);
85 of_node_put(np); /* Must decrement the refcount */
86 return 0; 85 return 0;
87} 86}
88 87
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 0c82f7903fc7..c1bf75ffb875 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -998,7 +998,7 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
998 psw_bits(regs.psw).ia = sfr->basic.ia; 998 psw_bits(regs.psw).ia = sfr->basic.ia;
999 psw_bits(regs.psw).dat = sfr->basic.T; 999 psw_bits(regs.psw).dat = sfr->basic.T;
1000 psw_bits(regs.psw).wait = sfr->basic.W; 1000 psw_bits(regs.psw).wait = sfr->basic.W;
1001 psw_bits(regs.psw).per = sfr->basic.P; 1001 psw_bits(regs.psw).pstate = sfr->basic.P;
1002 psw_bits(regs.psw).as = sfr->basic.AS; 1002 psw_bits(regs.psw).as = sfr->basic.AS;
1003 1003
1004 /* 1004 /*
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3f2884e99ed4..af09d3437631 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1324,7 +1324,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1324{ 1324{
1325 uint8_t *keys; 1325 uint8_t *keys;
1326 uint64_t hva; 1326 uint64_t hva;
1327 int i, r = 0; 1327 int srcu_idx, i, r = 0;
1328 1328
1329 if (args->flags != 0) 1329 if (args->flags != 0)
1330 return -EINVAL; 1330 return -EINVAL;
@@ -1342,6 +1342,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1342 return -ENOMEM; 1342 return -ENOMEM;
1343 1343
1344 down_read(&current->mm->mmap_sem); 1344 down_read(&current->mm->mmap_sem);
1345 srcu_idx = srcu_read_lock(&kvm->srcu);
1345 for (i = 0; i < args->count; i++) { 1346 for (i = 0; i < args->count; i++) {
1346 hva = gfn_to_hva(kvm, args->start_gfn + i); 1347 hva = gfn_to_hva(kvm, args->start_gfn + i);
1347 if (kvm_is_error_hva(hva)) { 1348 if (kvm_is_error_hva(hva)) {
@@ -1353,6 +1354,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1353 if (r) 1354 if (r)
1354 break; 1355 break;
1355 } 1356 }
1357 srcu_read_unlock(&kvm->srcu, srcu_idx);
1356 up_read(&current->mm->mmap_sem); 1358 up_read(&current->mm->mmap_sem);
1357 1359
1358 if (!r) { 1360 if (!r) {
@@ -1370,7 +1372,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1370{ 1372{
1371 uint8_t *keys; 1373 uint8_t *keys;
1372 uint64_t hva; 1374 uint64_t hva;
1373 int i, r = 0; 1375 int srcu_idx, i, r = 0;
1374 1376
1375 if (args->flags != 0) 1377 if (args->flags != 0)
1376 return -EINVAL; 1378 return -EINVAL;
@@ -1396,6 +1398,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1396 goto out; 1398 goto out;
1397 1399
1398 down_read(&current->mm->mmap_sem); 1400 down_read(&current->mm->mmap_sem);
1401 srcu_idx = srcu_read_lock(&kvm->srcu);
1399 for (i = 0; i < args->count; i++) { 1402 for (i = 0; i < args->count; i++) {
1400 hva = gfn_to_hva(kvm, args->start_gfn + i); 1403 hva = gfn_to_hva(kvm, args->start_gfn + i);
1401 if (kvm_is_error_hva(hva)) { 1404 if (kvm_is_error_hva(hva)) {
@@ -1413,6 +1416,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1413 if (r) 1416 if (r)
1414 break; 1417 break;
1415 } 1418 }
1419 srcu_read_unlock(&kvm->srcu, srcu_idx);
1416 up_read(&current->mm->mmap_sem); 1420 up_read(&current->mm->mmap_sem);
1417out: 1421out:
1418 kvfree(keys); 1422 kvfree(keys);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index d4d409ba206b..4a1f7366b17a 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -591,11 +591,11 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
591 unsigned long ptev; 591 unsigned long ptev;
592 pgste_t pgste; 592 pgste_t pgste;
593 593
594 /* Clear storage key */ 594 /* Clear storage key ACC and F, but set R/C */
595 preempt_disable(); 595 preempt_disable();
596 pgste = pgste_get_lock(ptep); 596 pgste = pgste_get_lock(ptep);
597 pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT | 597 pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
598 PGSTE_GR_BIT | PGSTE_GC_BIT); 598 pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
599 ptev = pte_val(*ptep); 599 ptev = pte_val(*ptep);
600 if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE)) 600 if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
601 page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1); 601 page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 2c860ad4fe06..8a958274b54c 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -34,6 +34,7 @@ KBUILD_CFLAGS += $(cflags-y)
34KBUILD_CFLAGS += -mno-mmx -mno-sse 34KBUILD_CFLAGS += -mno-mmx -mno-sse
35KBUILD_CFLAGS += $(call cc-option,-ffreestanding) 35KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
36KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) 36KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
37KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
37 38
38KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ 39KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
39GCOV_PROFILE := n 40GCOV_PROFILE := n
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 630e3664906b..16f49123d747 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -16,6 +16,15 @@
16#include "ctype.h" 16#include "ctype.h"
17#include "string.h" 17#include "string.h"
18 18
19/*
20 * Undef these macros so that the functions that we provide
21 * here will have the correct names regardless of how string.h
22 * may have chosen to #define them.
23 */
24#undef memcpy
25#undef memset
26#undef memcmp
27
19int memcmp(const void *s1, const void *s2, size_t len) 28int memcmp(const void *s1, const void *s2, size_t len)
20{ 29{
21 bool diff; 30 bool diff;
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index a9a8027a6c0e..d271fb79248f 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -705,6 +705,7 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR x86_platform_ipi smp_x86_platform_ipi
705#ifdef CONFIG_HAVE_KVM 705#ifdef CONFIG_HAVE_KVM
706apicinterrupt3 POSTED_INTR_VECTOR kvm_posted_intr_ipi smp_kvm_posted_intr_ipi 706apicinterrupt3 POSTED_INTR_VECTOR kvm_posted_intr_ipi smp_kvm_posted_intr_ipi
707apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR kvm_posted_intr_wakeup_ipi smp_kvm_posted_intr_wakeup_ipi 707apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR kvm_posted_intr_wakeup_ipi smp_kvm_posted_intr_wakeup_ipi
708apicinterrupt3 POSTED_INTR_NESTED_VECTOR kvm_posted_intr_nested_ipi smp_kvm_posted_intr_nested_ipi
708#endif 709#endif
709 710
710#ifdef CONFIG_X86_MCE_THRESHOLD 711#ifdef CONFIG_X86_MCE_THRESHOLD
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index dae2fedc1601..4f9127644b80 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -316,7 +316,7 @@
316#define SKX_UPI_PCI_PMON_CTL0 0x350 316#define SKX_UPI_PCI_PMON_CTL0 0x350
317#define SKX_UPI_PCI_PMON_CTR0 0x318 317#define SKX_UPI_PCI_PMON_CTR0 0x318
318#define SKX_UPI_PCI_PMON_BOX_CTL 0x378 318#define SKX_UPI_PCI_PMON_BOX_CTL 0x378
319#define SKX_PMON_CTL_UMASK_EXT 0xff 319#define SKX_UPI_CTL_UMASK_EXT 0xffefff
320 320
321/* SKX M2M */ 321/* SKX M2M */
322#define SKX_M2M_PCI_PMON_CTL0 0x228 322#define SKX_M2M_PCI_PMON_CTL0 0x228
@@ -328,7 +328,7 @@ DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
328DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); 328DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
329DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7"); 329DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
330DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); 330DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
331DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-39"); 331DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
332DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16"); 332DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
333DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); 333DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
334DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19"); 334DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
@@ -351,7 +351,6 @@ DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
351DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8"); 351DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
352DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8"); 352DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
353DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12"); 353DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
354DEFINE_UNCORE_FORMAT_ATTR(filter_link4, filter_link, "config1:9-12");
355DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17"); 354DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
356DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47"); 355DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
357DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22"); 356DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
@@ -3302,7 +3301,6 @@ static struct attribute *skx_uncore_cha_formats_attr[] = {
3302 &format_attr_inv.attr, 3301 &format_attr_inv.attr,
3303 &format_attr_thresh8.attr, 3302 &format_attr_thresh8.attr,
3304 &format_attr_filter_tid4.attr, 3303 &format_attr_filter_tid4.attr,
3305 &format_attr_filter_link4.attr,
3306 &format_attr_filter_state5.attr, 3304 &format_attr_filter_state5.attr,
3307 &format_attr_filter_rem.attr, 3305 &format_attr_filter_rem.attr,
3308 &format_attr_filter_loc.attr, 3306 &format_attr_filter_loc.attr,
@@ -3312,7 +3310,6 @@ static struct attribute *skx_uncore_cha_formats_attr[] = {
3312 &format_attr_filter_opc_0.attr, 3310 &format_attr_filter_opc_0.attr,
3313 &format_attr_filter_opc_1.attr, 3311 &format_attr_filter_opc_1.attr,
3314 &format_attr_filter_nc.attr, 3312 &format_attr_filter_nc.attr,
3315 &format_attr_filter_c6.attr,
3316 &format_attr_filter_isoc.attr, 3313 &format_attr_filter_isoc.attr,
3317 NULL, 3314 NULL,
3318}; 3315};
@@ -3333,8 +3330,11 @@ static struct extra_reg skx_uncore_cha_extra_regs[] = {
3333 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), 3330 SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3334 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), 3331 SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3335 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), 3332 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3336 SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4), 3333 SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3337 SNBEP_CBO_EVENT_EXTRA_REG(0x8134, 0xffff, 0x4), 3334 SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3335 SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3336 SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3337 EVENT_EXTRA_END
3338}; 3338};
3339 3339
3340static u64 skx_cha_filter_mask(int fields) 3340static u64 skx_cha_filter_mask(int fields)
@@ -3347,6 +3347,17 @@ static u64 skx_cha_filter_mask(int fields)
3347 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK; 3347 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3348 if (fields & 0x4) 3348 if (fields & 0x4)
3349 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE; 3349 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3350 if (fields & 0x8) {
3351 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3352 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3353 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3354 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3355 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3356 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3357 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3358 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3359 mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3360 }
3350 return mask; 3361 return mask;
3351} 3362}
3352 3363
@@ -3492,6 +3503,26 @@ static struct intel_uncore_type skx_uncore_irp = {
3492 .format_group = &skx_uncore_format_group, 3503 .format_group = &skx_uncore_format_group,
3493}; 3504};
3494 3505
3506static struct attribute *skx_uncore_pcu_formats_attr[] = {
3507 &format_attr_event.attr,
3508 &format_attr_umask.attr,
3509 &format_attr_edge.attr,
3510 &format_attr_inv.attr,
3511 &format_attr_thresh8.attr,
3512 &format_attr_occ_invert.attr,
3513 &format_attr_occ_edge_det.attr,
3514 &format_attr_filter_band0.attr,
3515 &format_attr_filter_band1.attr,
3516 &format_attr_filter_band2.attr,
3517 &format_attr_filter_band3.attr,
3518 NULL,
3519};
3520
3521static struct attribute_group skx_uncore_pcu_format_group = {
3522 .name = "format",
3523 .attrs = skx_uncore_pcu_formats_attr,
3524};
3525
3495static struct intel_uncore_ops skx_uncore_pcu_ops = { 3526static struct intel_uncore_ops skx_uncore_pcu_ops = {
3496 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), 3527 IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
3497 .hw_config = hswep_pcu_hw_config, 3528 .hw_config = hswep_pcu_hw_config,
@@ -3510,7 +3541,7 @@ static struct intel_uncore_type skx_uncore_pcu = {
3510 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL, 3541 .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
3511 .num_shared_regs = 1, 3542 .num_shared_regs = 1,
3512 .ops = &skx_uncore_pcu_ops, 3543 .ops = &skx_uncore_pcu_ops,
3513 .format_group = &snbep_uncore_pcu_format_group, 3544 .format_group = &skx_uncore_pcu_format_group,
3514}; 3545};
3515 3546
3516static struct intel_uncore_type *skx_msr_uncores[] = { 3547static struct intel_uncore_type *skx_msr_uncores[] = {
@@ -3603,8 +3634,8 @@ static struct intel_uncore_type skx_uncore_upi = {
3603 .perf_ctr_bits = 48, 3634 .perf_ctr_bits = 48,
3604 .perf_ctr = SKX_UPI_PCI_PMON_CTR0, 3635 .perf_ctr = SKX_UPI_PCI_PMON_CTR0,
3605 .event_ctl = SKX_UPI_PCI_PMON_CTL0, 3636 .event_ctl = SKX_UPI_PCI_PMON_CTL0,
3606 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, 3637 .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
3607 .event_mask_ext = SKX_PMON_CTL_UMASK_EXT, 3638 .event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
3608 .box_ctl = SKX_UPI_PCI_PMON_BOX_CTL, 3639 .box_ctl = SKX_UPI_PCI_PMON_BOX_CTL,
3609 .ops = &skx_upi_uncore_pci_ops, 3640 .ops = &skx_upi_uncore_pci_ops,
3610 .format_group = &skx_upi_uncore_format_group, 3641 .format_group = &skx_upi_uncore_format_group,
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index df002992d8fd..07b06955a05d 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -25,6 +25,8 @@ BUILD_INTERRUPT3(kvm_posted_intr_ipi, POSTED_INTR_VECTOR,
25 smp_kvm_posted_intr_ipi) 25 smp_kvm_posted_intr_ipi)
26BUILD_INTERRUPT3(kvm_posted_intr_wakeup_ipi, POSTED_INTR_WAKEUP_VECTOR, 26BUILD_INTERRUPT3(kvm_posted_intr_wakeup_ipi, POSTED_INTR_WAKEUP_VECTOR,
27 smp_kvm_posted_intr_wakeup_ipi) 27 smp_kvm_posted_intr_wakeup_ipi)
28BUILD_INTERRUPT3(kvm_posted_intr_nested_ipi, POSTED_INTR_NESTED_VECTOR,
29 smp_kvm_posted_intr_nested_ipi)
28#endif 30#endif
29 31
30/* 32/*
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 9b76cd331990..ad1ed531febc 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -15,6 +15,7 @@ typedef struct {
15#ifdef CONFIG_HAVE_KVM 15#ifdef CONFIG_HAVE_KVM
16 unsigned int kvm_posted_intr_ipis; 16 unsigned int kvm_posted_intr_ipis;
17 unsigned int kvm_posted_intr_wakeup_ipis; 17 unsigned int kvm_posted_intr_wakeup_ipis;
18 unsigned int kvm_posted_intr_nested_ipis;
18#endif 19#endif
19 unsigned int x86_platform_ipis; /* arch dependent */ 20 unsigned int x86_platform_ipis; /* arch dependent */
20 unsigned int apic_perf_irqs; 21 unsigned int apic_perf_irqs;
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index b90e1053049b..d6dbafbd4207 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -30,6 +30,7 @@ extern asmlinkage void apic_timer_interrupt(void);
30extern asmlinkage void x86_platform_ipi(void); 30extern asmlinkage void x86_platform_ipi(void);
31extern asmlinkage void kvm_posted_intr_ipi(void); 31extern asmlinkage void kvm_posted_intr_ipi(void);
32extern asmlinkage void kvm_posted_intr_wakeup_ipi(void); 32extern asmlinkage void kvm_posted_intr_wakeup_ipi(void);
33extern asmlinkage void kvm_posted_intr_nested_ipi(void);
33extern asmlinkage void error_interrupt(void); 34extern asmlinkage void error_interrupt(void);
34extern asmlinkage void irq_work_interrupt(void); 35extern asmlinkage void irq_work_interrupt(void);
35 36
@@ -62,6 +63,7 @@ extern void trace_call_function_single_interrupt(void);
62#define trace_reboot_interrupt reboot_interrupt 63#define trace_reboot_interrupt reboot_interrupt
63#define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi 64#define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi
64#define trace_kvm_posted_intr_wakeup_ipi kvm_posted_intr_wakeup_ipi 65#define trace_kvm_posted_intr_wakeup_ipi kvm_posted_intr_wakeup_ipi
66#define trace_kvm_posted_intr_nested_ipi kvm_posted_intr_nested_ipi
65#endif /* CONFIG_TRACING */ 67#endif /* CONFIG_TRACING */
66 68
67#ifdef CONFIG_X86_LOCAL_APIC 69#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 6ca9fd6234e1..aaf8d28b5d00 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -83,7 +83,6 @@
83 */ 83 */
84#define X86_PLATFORM_IPI_VECTOR 0xf7 84#define X86_PLATFORM_IPI_VECTOR 0xf7
85 85
86#define POSTED_INTR_WAKEUP_VECTOR 0xf1
87/* 86/*
88 * IRQ work vector: 87 * IRQ work vector:
89 */ 88 */
@@ -98,6 +97,8 @@
98/* Vector for KVM to deliver posted interrupt IPI */ 97/* Vector for KVM to deliver posted interrupt IPI */
99#ifdef CONFIG_HAVE_KVM 98#ifdef CONFIG_HAVE_KVM
100#define POSTED_INTR_VECTOR 0xf2 99#define POSTED_INTR_VECTOR 0xf2
100#define POSTED_INTR_WAKEUP_VECTOR 0xf1
101#define POSTED_INTR_NESTED_VECTOR 0xf0
101#endif 102#endif
102 103
103/* 104/*
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 4aa03c5a14c9..4ed0aba8dbc8 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -155,6 +155,12 @@ int arch_show_interrupts(struct seq_file *p, int prec)
155 seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis); 155 seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
156 seq_puts(p, " Posted-interrupt notification event\n"); 156 seq_puts(p, " Posted-interrupt notification event\n");
157 157
158 seq_printf(p, "%*s: ", prec, "NPI");
159 for_each_online_cpu(j)
160 seq_printf(p, "%10u ",
161 irq_stats(j)->kvm_posted_intr_nested_ipis);
162 seq_puts(p, " Nested posted-interrupt event\n");
163
158 seq_printf(p, "%*s: ", prec, "PIW"); 164 seq_printf(p, "%*s: ", prec, "PIW");
159 for_each_online_cpu(j) 165 for_each_online_cpu(j)
160 seq_printf(p, "%10u ", 166 seq_printf(p, "%10u ",
@@ -313,6 +319,19 @@ __visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
313 exiting_irq(); 319 exiting_irq();
314 set_irq_regs(old_regs); 320 set_irq_regs(old_regs);
315} 321}
322
323/*
324 * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
325 */
326__visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
327{
328 struct pt_regs *old_regs = set_irq_regs(regs);
329
330 entering_ack_irq();
331 inc_irq_stat(kvm_posted_intr_nested_ipis);
332 exiting_irq();
333 set_irq_regs(old_regs);
334}
316#endif 335#endif
317 336
318__visible void __irq_entry smp_trace_x86_platform_ipi(struct pt_regs *regs) 337__visible void __irq_entry smp_trace_x86_platform_ipi(struct pt_regs *regs)
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 7468c6987547..c7fd18526c3e 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -150,6 +150,8 @@ static void __init apic_intr_init(void)
150 alloc_intr_gate(POSTED_INTR_VECTOR, kvm_posted_intr_ipi); 150 alloc_intr_gate(POSTED_INTR_VECTOR, kvm_posted_intr_ipi);
151 /* IPI for KVM to deliver interrupt to wake up tasks */ 151 /* IPI for KVM to deliver interrupt to wake up tasks */
152 alloc_intr_gate(POSTED_INTR_WAKEUP_VECTOR, kvm_posted_intr_wakeup_ipi); 152 alloc_intr_gate(POSTED_INTR_WAKEUP_VECTOR, kvm_posted_intr_wakeup_ipi);
153 /* IPI for KVM to deliver nested posted interrupt */
154 alloc_intr_gate(POSTED_INTR_NESTED_VECTOR, kvm_posted_intr_nested_ipi);
153#endif 155#endif
154 156
155 /* IPI vectors for APIC spurious and error interrupts */ 157 /* IPI vectors for APIC spurious and error interrupts */
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 6b877807598b..f0153714ddac 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -457,6 +457,8 @@ static int arch_copy_kprobe(struct kprobe *p)
457 457
458int arch_prepare_kprobe(struct kprobe *p) 458int arch_prepare_kprobe(struct kprobe *p)
459{ 459{
460 int ret;
461
460 if (alternatives_text_reserved(p->addr, p->addr)) 462 if (alternatives_text_reserved(p->addr, p->addr))
461 return -EINVAL; 463 return -EINVAL;
462 464
@@ -467,7 +469,13 @@ int arch_prepare_kprobe(struct kprobe *p)
467 if (!p->ainsn.insn) 469 if (!p->ainsn.insn)
468 return -ENOMEM; 470 return -ENOMEM;
469 471
470 return arch_copy_kprobe(p); 472 ret = arch_copy_kprobe(p);
473 if (ret) {
474 free_insn_slot(p->ainsn.insn, 0);
475 p->ainsn.insn = NULL;
476 }
477
478 return ret;
471} 479}
472 480
473void arch_arm_kprobe(struct kprobe *p) 481void arch_arm_kprobe(struct kprobe *p)
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 67393fc88353..a56bf6051f4e 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -471,12 +471,12 @@ static int __init reboot_init(void)
471 471
472 /* 472 /*
473 * The DMI quirks table takes precedence. If no quirks entry 473 * The DMI quirks table takes precedence. If no quirks entry
474 * matches and the ACPI Hardware Reduced bit is set, force EFI 474 * matches and the ACPI Hardware Reduced bit is set and EFI
475 * reboot. 475 * runtime services are enabled, force EFI reboot.
476 */ 476 */
477 rv = dmi_check_system(reboot_dmi_table); 477 rv = dmi_check_system(reboot_dmi_table);
478 478
479 if (!rv && efi_reboot_required()) 479 if (!rv && efi_reboot_required() && !efi_runtime_disabled())
480 reboot_type = BOOT_EFI; 480 reboot_type = BOOT_EFI;
481 481
482 return 0; 482 return 0;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 2819d4c123eb..589dcc117086 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1495,11 +1495,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
1495 1495
1496static void cancel_hv_timer(struct kvm_lapic *apic) 1496static void cancel_hv_timer(struct kvm_lapic *apic)
1497{ 1497{
1498 WARN_ON(preemptible());
1498 WARN_ON(!apic->lapic_timer.hv_timer_in_use); 1499 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1499 preempt_disable();
1500 kvm_x86_ops->cancel_hv_timer(apic->vcpu); 1500 kvm_x86_ops->cancel_hv_timer(apic->vcpu);
1501 apic->lapic_timer.hv_timer_in_use = false; 1501 apic->lapic_timer.hv_timer_in_use = false;
1502 preempt_enable();
1503} 1502}
1504 1503
1505static bool start_hv_timer(struct kvm_lapic *apic) 1504static bool start_hv_timer(struct kvm_lapic *apic)
@@ -1507,6 +1506,7 @@ static bool start_hv_timer(struct kvm_lapic *apic)
1507 struct kvm_timer *ktimer = &apic->lapic_timer; 1506 struct kvm_timer *ktimer = &apic->lapic_timer;
1508 int r; 1507 int r;
1509 1508
1509 WARN_ON(preemptible());
1510 if (!kvm_x86_ops->set_hv_timer) 1510 if (!kvm_x86_ops->set_hv_timer)
1511 return false; 1511 return false;
1512 1512
@@ -1538,6 +1538,8 @@ static bool start_hv_timer(struct kvm_lapic *apic)
1538static void start_sw_timer(struct kvm_lapic *apic) 1538static void start_sw_timer(struct kvm_lapic *apic)
1539{ 1539{
1540 struct kvm_timer *ktimer = &apic->lapic_timer; 1540 struct kvm_timer *ktimer = &apic->lapic_timer;
1541
1542 WARN_ON(preemptible());
1541 if (apic->lapic_timer.hv_timer_in_use) 1543 if (apic->lapic_timer.hv_timer_in_use)
1542 cancel_hv_timer(apic); 1544 cancel_hv_timer(apic);
1543 if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending)) 1545 if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
@@ -1552,15 +1554,20 @@ static void start_sw_timer(struct kvm_lapic *apic)
1552 1554
1553static void restart_apic_timer(struct kvm_lapic *apic) 1555static void restart_apic_timer(struct kvm_lapic *apic)
1554{ 1556{
1557 preempt_disable();
1555 if (!start_hv_timer(apic)) 1558 if (!start_hv_timer(apic))
1556 start_sw_timer(apic); 1559 start_sw_timer(apic);
1560 preempt_enable();
1557} 1561}
1558 1562
1559void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu) 1563void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
1560{ 1564{
1561 struct kvm_lapic *apic = vcpu->arch.apic; 1565 struct kvm_lapic *apic = vcpu->arch.apic;
1562 1566
1563 WARN_ON(!apic->lapic_timer.hv_timer_in_use); 1567 preempt_disable();
1568 /* If the preempt notifier has already run, it also called apic_timer_expired */
1569 if (!apic->lapic_timer.hv_timer_in_use)
1570 goto out;
1564 WARN_ON(swait_active(&vcpu->wq)); 1571 WARN_ON(swait_active(&vcpu->wq));
1565 cancel_hv_timer(apic); 1572 cancel_hv_timer(apic);
1566 apic_timer_expired(apic); 1573 apic_timer_expired(apic);
@@ -1569,6 +1576,8 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
1569 advance_periodic_target_expiration(apic); 1576 advance_periodic_target_expiration(apic);
1570 restart_apic_timer(apic); 1577 restart_apic_timer(apic);
1571 } 1578 }
1579out:
1580 preempt_enable();
1572} 1581}
1573EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer); 1582EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
1574 1583
@@ -1582,9 +1591,11 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
1582{ 1591{
1583 struct kvm_lapic *apic = vcpu->arch.apic; 1592 struct kvm_lapic *apic = vcpu->arch.apic;
1584 1593
1594 preempt_disable();
1585 /* Possibly the TSC deadline timer is not enabled yet */ 1595 /* Possibly the TSC deadline timer is not enabled yet */
1586 if (apic->lapic_timer.hv_timer_in_use) 1596 if (apic->lapic_timer.hv_timer_in_use)
1587 start_sw_timer(apic); 1597 start_sw_timer(apic);
1598 preempt_enable();
1588} 1599}
1589EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer); 1600EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
1590 1601
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 29fd8af5c347..39a6222bf968 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -563,7 +563,6 @@ struct vcpu_vmx {
563 struct kvm_vcpu vcpu; 563 struct kvm_vcpu vcpu;
564 unsigned long host_rsp; 564 unsigned long host_rsp;
565 u8 fail; 565 u8 fail;
566 bool nmi_known_unmasked;
567 u32 exit_intr_info; 566 u32 exit_intr_info;
568 u32 idt_vectoring_info; 567 u32 idt_vectoring_info;
569 ulong rflags; 568 ulong rflags;
@@ -4988,9 +4987,12 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
4988 } 4987 }
4989} 4988}
4990 4989
4991static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu) 4990static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
4991 bool nested)
4992{ 4992{
4993#ifdef CONFIG_SMP 4993#ifdef CONFIG_SMP
4994 int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR;
4995
4994 if (vcpu->mode == IN_GUEST_MODE) { 4996 if (vcpu->mode == IN_GUEST_MODE) {
4995 struct vcpu_vmx *vmx = to_vmx(vcpu); 4997 struct vcpu_vmx *vmx = to_vmx(vcpu);
4996 4998
@@ -5008,8 +5010,7 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
5008 */ 5010 */
5009 WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc)); 5011 WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc));
5010 5012
5011 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), 5013 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
5012 POSTED_INTR_VECTOR);
5013 return true; 5014 return true;
5014 } 5015 }
5015#endif 5016#endif
@@ -5024,7 +5025,7 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
5024 if (is_guest_mode(vcpu) && 5025 if (is_guest_mode(vcpu) &&
5025 vector == vmx->nested.posted_intr_nv) { 5026 vector == vmx->nested.posted_intr_nv) {
5026 /* the PIR and ON have been set by L1. */ 5027 /* the PIR and ON have been set by L1. */
5027 kvm_vcpu_trigger_posted_interrupt(vcpu); 5028 kvm_vcpu_trigger_posted_interrupt(vcpu, true);
5028 /* 5029 /*
5029 * If a posted intr is not recognized by hardware, 5030 * If a posted intr is not recognized by hardware,
5030 * we will accomplish it in the next vmentry. 5031 * we will accomplish it in the next vmentry.
@@ -5058,7 +5059,7 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
5058 if (pi_test_and_set_on(&vmx->pi_desc)) 5059 if (pi_test_and_set_on(&vmx->pi_desc))
5059 return; 5060 return;
5060 5061
5061 if (!kvm_vcpu_trigger_posted_interrupt(vcpu)) 5062 if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
5062 kvm_vcpu_kick(vcpu); 5063 kvm_vcpu_kick(vcpu);
5063} 5064}
5064 5065
@@ -10041,6 +10042,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
10041 vmcs12->vm_entry_instruction_len); 10042 vmcs12->vm_entry_instruction_len);
10042 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 10043 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
10043 vmcs12->guest_interruptibility_info); 10044 vmcs12->guest_interruptibility_info);
10045 vmx->loaded_vmcs->nmi_known_unmasked =
10046 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
10044 } else { 10047 } else {
10045 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 10048 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
10046 } 10049 }
@@ -10065,13 +10068,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
10065 10068
10066 /* Posted interrupts setting is only taken from vmcs12. */ 10069 /* Posted interrupts setting is only taken from vmcs12. */
10067 if (nested_cpu_has_posted_intr(vmcs12)) { 10070 if (nested_cpu_has_posted_intr(vmcs12)) {
10068 /*
10069 * Note that we use L0's vector here and in
10070 * vmx_deliver_nested_posted_interrupt.
10071 */
10072 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; 10071 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
10073 vmx->nested.pi_pending = false; 10072 vmx->nested.pi_pending = false;
10074 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); 10073 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
10075 } else { 10074 } else {
10076 exec_control &= ~PIN_BASED_POSTED_INTR; 10075 exec_control &= ~PIN_BASED_POSTED_INTR;
10077 } 10076 }
@@ -10942,7 +10941,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
10942 */ 10941 */
10943 vmx_flush_tlb(vcpu); 10942 vmx_flush_tlb(vcpu);
10944 } 10943 }
10945 10944 /* Restore posted intr vector. */
10945 if (nested_cpu_has_posted_intr(vmcs12))
10946 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
10946 10947
10947 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); 10948 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
10948 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); 10949 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 82a63c59f77b..6c97c82814c4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -597,8 +597,8 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu)
597 (unsigned long *)&vcpu->arch.regs_avail)) 597 (unsigned long *)&vcpu->arch.regs_avail))
598 return true; 598 return true;
599 599
600 gfn = (kvm_read_cr3(vcpu) & ~31ul) >> PAGE_SHIFT; 600 gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
601 offset = (kvm_read_cr3(vcpu) & ~31ul) & (PAGE_SIZE - 1); 601 offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1);
602 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), 602 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
603 PFERR_USER_MASK | PFERR_WRITE_MASK); 603 PFERR_USER_MASK | PFERR_WRITE_MASK);
604 if (r < 0) 604 if (r < 0)
diff --git a/block/blk-core.c b/block/blk-core.c
index 970b9c9638c5..dbecbf4a64e0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3421,6 +3421,10 @@ EXPORT_SYMBOL(blk_finish_plug);
3421 */ 3421 */
3422void blk_pm_runtime_init(struct request_queue *q, struct device *dev) 3422void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
3423{ 3423{
3424 /* not support for RQF_PM and ->rpm_status in blk-mq yet */
3425 if (q->mq_ops)
3426 return;
3427
3424 q->dev = dev; 3428 q->dev = dev;
3425 q->rpm_status = RPM_ACTIVE; 3429 q->rpm_status = RPM_ACTIVE;
3426 pm_runtime_set_autosuspend_delay(q->dev, -1); 3430 pm_runtime_set_autosuspend_delay(q->dev, -1);
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 4891f042a22f..9f8cffc8a701 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -17,9 +17,9 @@
17static int cpu_to_queue_index(unsigned int nr_queues, const int cpu) 17static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)
18{ 18{
19 /* 19 /*
20 * Non online CPU will be mapped to queue index 0. 20 * Non present CPU will be mapped to queue index 0.
21 */ 21 */
22 if (!cpu_online(cpu)) 22 if (!cpu_present(cpu))
23 return 0; 23 return 0;
24 return cpu % nr_queues; 24 return cpu % nr_queues;
25} 25}
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 6f8f6b86bfe2..0cf5fefdb859 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -248,6 +248,9 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
248 u8 *ihash = ohash + crypto_ahash_digestsize(auth); 248 u8 *ihash = ohash + crypto_ahash_digestsize(auth);
249 u32 tmp[2]; 249 u32 tmp[2];
250 250
251 if (!authsize)
252 goto decrypt;
253
251 /* Move high-order bits of sequence number back. */ 254 /* Move high-order bits of sequence number back. */
252 scatterwalk_map_and_copy(tmp, dst, 4, 4, 0); 255 scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
253 scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0); 256 scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
@@ -256,6 +259,8 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
256 if (crypto_memneq(ihash, ohash, authsize)) 259 if (crypto_memneq(ihash, ohash, authsize))
257 return -EBADMSG; 260 return -EBADMSG;
258 261
262decrypt:
263
259 sg_init_table(areq_ctx->dst, 2); 264 sg_init_table(areq_ctx->dst, 2);
260 dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen); 265 dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
261 266
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index fc6c416f8724..d5999eb41c00 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -180,8 +180,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
180 { "APMC0D0F", APD_ADDR(xgene_i2c_desc) }, 180 { "APMC0D0F", APD_ADDR(xgene_i2c_desc) },
181 { "BRCM900D", APD_ADDR(vulcan_spi_desc) }, 181 { "BRCM900D", APD_ADDR(vulcan_spi_desc) },
182 { "CAV900D", APD_ADDR(vulcan_spi_desc) }, 182 { "CAV900D", APD_ADDR(vulcan_spi_desc) },
183 { "HISI0A21", APD_ADDR(hip07_i2c_desc) }, 183 { "HISI02A1", APD_ADDR(hip07_i2c_desc) },
184 { "HISI0A22", APD_ADDR(hip08_i2c_desc) }, 184 { "HISI02A2", APD_ADDR(hip08_i2c_desc) },
185#endif 185#endif
186 { } 186 { }
187}; 187};
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index e51a1e98e62f..f88caf5aab76 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -85,6 +85,7 @@ static const struct lpss_device_desc lpss_dma_desc = {
85}; 85};
86 86
87struct lpss_private_data { 87struct lpss_private_data {
88 struct acpi_device *adev;
88 void __iomem *mmio_base; 89 void __iomem *mmio_base;
89 resource_size_t mmio_size; 90 resource_size_t mmio_size;
90 unsigned int fixed_clk_rate; 91 unsigned int fixed_clk_rate;
@@ -155,6 +156,12 @@ static struct pwm_lookup byt_pwm_lookup[] = {
155 156
156static void byt_pwm_setup(struct lpss_private_data *pdata) 157static void byt_pwm_setup(struct lpss_private_data *pdata)
157{ 158{
159 struct acpi_device *adev = pdata->adev;
160
161 /* Only call pwm_add_table for the first PWM controller */
162 if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
163 return;
164
158 if (!acpi_dev_present("INT33FD", NULL, -1)) 165 if (!acpi_dev_present("INT33FD", NULL, -1))
159 pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup)); 166 pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
160} 167}
@@ -180,6 +187,12 @@ static struct pwm_lookup bsw_pwm_lookup[] = {
180 187
181static void bsw_pwm_setup(struct lpss_private_data *pdata) 188static void bsw_pwm_setup(struct lpss_private_data *pdata)
182{ 189{
190 struct acpi_device *adev = pdata->adev;
191
192 /* Only call pwm_add_table for the first PWM controller */
193 if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
194 return;
195
183 pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup)); 196 pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
184} 197}
185 198
@@ -456,6 +469,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
456 goto err_out; 469 goto err_out;
457 } 470 }
458 471
472 pdata->adev = adev;
459 pdata->dev_desc = dev_desc; 473 pdata->dev_desc = dev_desc;
460 474
461 if (dev_desc->setup) 475 if (dev_desc->setup)
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 8c4e0a18460a..bf22c29d2517 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -86,7 +86,12 @@ void __init acpi_watchdog_init(void)
86 86
87 found = false; 87 found = false;
88 resource_list_for_each_entry(rentry, &resource_list) { 88 resource_list_for_each_entry(rentry, &resource_list) {
89 if (resource_contains(rentry->res, &res)) { 89 if (rentry->res->flags == res.flags &&
90 resource_overlaps(rentry->res, &res)) {
91 if (res.start < rentry->res->start)
92 rentry->res->start = res.start;
93 if (res.end > rentry->res->end)
94 rentry->res->end = res.end;
90 found = true; 95 found = true;
91 break; 96 break;
92 } 97 }
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index ddb01e9fa5b2..62068a5e814f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -151,6 +151,10 @@ static bool ec_freeze_events __read_mostly = false;
151module_param(ec_freeze_events, bool, 0644); 151module_param(ec_freeze_events, bool, 0644);
152MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume"); 152MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
153 153
154static bool ec_no_wakeup __read_mostly;
155module_param(ec_no_wakeup, bool, 0644);
156MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
157
154struct acpi_ec_query_handler { 158struct acpi_ec_query_handler {
155 struct list_head node; 159 struct list_head node;
156 acpi_ec_query_func func; 160 acpi_ec_query_func func;
@@ -535,6 +539,14 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
535 spin_unlock_irqrestore(&ec->lock, flags); 539 spin_unlock_irqrestore(&ec->lock, flags);
536 __acpi_ec_flush_event(ec); 540 __acpi_ec_flush_event(ec);
537} 541}
542
543void acpi_ec_flush_work(void)
544{
545 if (first_ec)
546 __acpi_ec_flush_event(first_ec);
547
548 flush_scheduled_work();
549}
538#endif /* CONFIG_PM_SLEEP */ 550#endif /* CONFIG_PM_SLEEP */
539 551
540static bool acpi_ec_guard_event(struct acpi_ec *ec) 552static bool acpi_ec_guard_event(struct acpi_ec *ec)
@@ -1880,6 +1892,32 @@ static int acpi_ec_suspend(struct device *dev)
1880 return 0; 1892 return 0;
1881} 1893}
1882 1894
1895static int acpi_ec_suspend_noirq(struct device *dev)
1896{
1897 struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
1898
1899 /*
1900 * The SCI handler doesn't run at this point, so the GPE can be
1901 * masked at the low level without side effects.
1902 */
1903 if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
1904 ec->reference_count >= 1)
1905 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
1906
1907 return 0;
1908}
1909
1910static int acpi_ec_resume_noirq(struct device *dev)
1911{
1912 struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
1913
1914 if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
1915 ec->reference_count >= 1)
1916 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
1917
1918 return 0;
1919}
1920
1883static int acpi_ec_resume(struct device *dev) 1921static int acpi_ec_resume(struct device *dev)
1884{ 1922{
1885 struct acpi_ec *ec = 1923 struct acpi_ec *ec =
@@ -1891,6 +1929,7 @@ static int acpi_ec_resume(struct device *dev)
1891#endif 1929#endif
1892 1930
1893static const struct dev_pm_ops acpi_ec_pm = { 1931static const struct dev_pm_ops acpi_ec_pm = {
1932 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
1894 SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume) 1933 SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
1895}; 1934};
1896 1935
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 9531d3276f65..58dd7ab3c653 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -193,6 +193,10 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
193 void *data); 193 void *data);
194void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit); 194void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
195 195
196#ifdef CONFIG_PM_SLEEP
197void acpi_ec_flush_work(void);
198#endif
199
196 200
197/*-------------------------------------------------------------------------- 201/*--------------------------------------------------------------------------
198 Suspend/Resume 202 Suspend/Resume
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index edb0c79f7c64..917f1cc0fda4 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -443,7 +443,7 @@ int __init acpi_numa_init(void)
443 * So go over all cpu entries in SRAT to get apicid to node mapping. 443 * So go over all cpu entries in SRAT to get apicid to node mapping.
444 */ 444 */
445 445
446 /* SRAT: Static Resource Affinity Table */ 446 /* SRAT: System Resource Affinity Table */
447 if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) { 447 if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
448 struct acpi_subtable_proc srat_proc[3]; 448 struct acpi_subtable_proc srat_proc[3];
449 449
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index be17664736b2..fa8243c5c062 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -777,11 +777,11 @@ static void acpi_freeze_sync(void)
777 /* 777 /*
778 * Process all pending events in case there are any wakeup ones. 778 * Process all pending events in case there are any wakeup ones.
779 * 779 *
780 * The EC driver uses the system workqueue, so that one needs to be 780 * The EC driver uses the system workqueue and an additional special
781 * flushed too. 781 * one, so those need to be flushed too.
782 */ 782 */
783 acpi_ec_flush_work();
783 acpi_os_wait_events_complete(); 784 acpi_os_wait_events_complete();
784 flush_scheduled_work();
785 s2idle_wakeup = false; 785 s2idle_wakeup = false;
786} 786}
787 787
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 2ae24c28e70c..1c152aed6b82 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de
25{ 25{
26 if (dev && dev->dma_mem) 26 if (dev && dev->dma_mem)
27 return dev->dma_mem; 27 return dev->dma_mem;
28 return dma_coherent_default_memory; 28 return NULL;
29} 29}
30 30
31static inline dma_addr_t dma_get_device_base(struct device *dev, 31static inline dma_addr_t dma_get_device_base(struct device *dev,
@@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
165} 165}
166EXPORT_SYMBOL(dma_mark_declared_memory_occupied); 166EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
167 167
168/** 168static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
169 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area 169 ssize_t size, dma_addr_t *dma_handle)
170 *
171 * @dev: device from which we allocate memory
172 * @size: size of requested memory area
173 * @dma_handle: This will be filled with the correct dma handle
174 * @ret: This pointer will be filled with the virtual address
175 * to allocated area.
176 *
177 * This function should be only called from per-arch dma_alloc_coherent()
178 * to support allocation from per-device coherent memory pools.
179 *
180 * Returns 0 if dma_alloc_coherent should continue with allocating from
181 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
182 */
183int dma_alloc_from_coherent(struct device *dev, ssize_t size,
184 dma_addr_t *dma_handle, void **ret)
185{ 170{
186 struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
187 int order = get_order(size); 171 int order = get_order(size);
188 unsigned long flags; 172 unsigned long flags;
189 int pageno; 173 int pageno;
190 int dma_memory_map; 174 int dma_memory_map;
175 void *ret;
191 176
192 if (!mem)
193 return 0;
194
195 *ret = NULL;
196 spin_lock_irqsave(&mem->spinlock, flags); 177 spin_lock_irqsave(&mem->spinlock, flags);
197 178
198 if (unlikely(size > (mem->size << PAGE_SHIFT))) 179 if (unlikely(size > (mem->size << PAGE_SHIFT)))
@@ -203,21 +184,50 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
203 goto err; 184 goto err;
204 185
205 /* 186 /*
206 * Memory was found in the per-device area. 187 * Memory was found in the coherent area.
207 */ 188 */
208 *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT); 189 *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
209 *ret = mem->virt_base + (pageno << PAGE_SHIFT); 190 ret = mem->virt_base + (pageno << PAGE_SHIFT);
210 dma_memory_map = (mem->flags & DMA_MEMORY_MAP); 191 dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
211 spin_unlock_irqrestore(&mem->spinlock, flags); 192 spin_unlock_irqrestore(&mem->spinlock, flags);
212 if (dma_memory_map) 193 if (dma_memory_map)
213 memset(*ret, 0, size); 194 memset(ret, 0, size);
214 else 195 else
215 memset_io(*ret, 0, size); 196 memset_io(ret, 0, size);
216 197
217 return 1; 198 return ret;
218 199
219err: 200err:
220 spin_unlock_irqrestore(&mem->spinlock, flags); 201 spin_unlock_irqrestore(&mem->spinlock, flags);
202 return NULL;
203}
204
205/**
206 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
207 * @dev: device from which we allocate memory
208 * @size: size of requested memory area
209 * @dma_handle: This will be filled with the correct dma handle
210 * @ret: This pointer will be filled with the virtual address
211 * to allocated area.
212 *
213 * This function should be only called from per-arch dma_alloc_coherent()
214 * to support allocation from per-device coherent memory pools.
215 *
216 * Returns 0 if dma_alloc_coherent should continue with allocating from
217 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
218 */
219int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
220 dma_addr_t *dma_handle, void **ret)
221{
222 struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
223
224 if (!mem)
225 return 0;
226
227 *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
228 if (*ret)
229 return 1;
230
221 /* 231 /*
222 * In the case where the allocation can not be satisfied from the 232 * In the case where the allocation can not be satisfied from the
223 * per-device area, try to fall back to generic memory if the 233 * per-device area, try to fall back to generic memory if the
@@ -225,25 +235,20 @@ err:
225 */ 235 */
226 return mem->flags & DMA_MEMORY_EXCLUSIVE; 236 return mem->flags & DMA_MEMORY_EXCLUSIVE;
227} 237}
228EXPORT_SYMBOL(dma_alloc_from_coherent); 238EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
229 239
230/** 240void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
231 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
232 * @dev: device from which the memory was allocated
233 * @order: the order of pages allocated
234 * @vaddr: virtual address of allocated pages
235 *
236 * This checks whether the memory was allocated from the per-device
237 * coherent memory pool and if so, releases that memory.
238 *
239 * Returns 1 if we correctly released the memory, or 0 if
240 * dma_release_coherent() should proceed with releasing memory from
241 * generic pools.
242 */
243int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
244{ 241{
245 struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); 242 if (!dma_coherent_default_memory)
243 return NULL;
244
245 return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
246 dma_handle);
247}
246 248
249static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
250 int order, void *vaddr)
251{
247 if (mem && vaddr >= mem->virt_base && vaddr < 252 if (mem && vaddr >= mem->virt_base && vaddr <
248 (mem->virt_base + (mem->size << PAGE_SHIFT))) { 253 (mem->virt_base + (mem->size << PAGE_SHIFT))) {
249 int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; 254 int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
@@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
256 } 261 }
257 return 0; 262 return 0;
258} 263}
259EXPORT_SYMBOL(dma_release_from_coherent);
260 264
261/** 265/**
262 * dma_mmap_from_coherent() - try to mmap the memory allocated from 266 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
263 * per-device coherent memory pool to userspace
264 * @dev: device from which the memory was allocated 267 * @dev: device from which the memory was allocated
265 * @vma: vm_area for the userspace memory 268 * @order: the order of pages allocated
266 * @vaddr: cpu address returned by dma_alloc_from_coherent 269 * @vaddr: virtual address of allocated pages
267 * @size: size of the memory buffer allocated by dma_alloc_from_coherent
268 * @ret: result from remap_pfn_range()
269 * 270 *
270 * This checks whether the memory was allocated from the per-device 271 * This checks whether the memory was allocated from the per-device
271 * coherent memory pool and if so, maps that memory to the provided vma. 272 * coherent memory pool and if so, releases that memory.
272 * 273 *
273 * Returns 1 if we correctly mapped the memory, or 0 if the caller should 274 * Returns 1 if we correctly released the memory, or 0 if the caller should
274 * proceed with mapping memory from generic pools. 275 * proceed with releasing memory from generic pools.
275 */ 276 */
276int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, 277int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
277 void *vaddr, size_t size, int *ret)
278{ 278{
279 struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); 279 struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
280 280
281 return __dma_release_from_coherent(mem, order, vaddr);
282}
283EXPORT_SYMBOL(dma_release_from_dev_coherent);
284
285int dma_release_from_global_coherent(int order, void *vaddr)
286{
287 if (!dma_coherent_default_memory)
288 return 0;
289
290 return __dma_release_from_coherent(dma_coherent_default_memory, order,
291 vaddr);
292}
293
294static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
295 struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
296{
281 if (mem && vaddr >= mem->virt_base && vaddr + size <= 297 if (mem && vaddr >= mem->virt_base && vaddr + size <=
282 (mem->virt_base + (mem->size << PAGE_SHIFT))) { 298 (mem->virt_base + (mem->size << PAGE_SHIFT))) {
283 unsigned long off = vma->vm_pgoff; 299 unsigned long off = vma->vm_pgoff;
@@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
296 } 312 }
297 return 0; 313 return 0;
298} 314}
299EXPORT_SYMBOL(dma_mmap_from_coherent); 315
316/**
317 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
318 * @dev: device from which the memory was allocated
319 * @vma: vm_area for the userspace memory
320 * @vaddr: cpu address returned by dma_alloc_from_dev_coherent
321 * @size: size of the memory buffer allocated
322 * @ret: result from remap_pfn_range()
323 *
324 * This checks whether the memory was allocated from the per-device
325 * coherent memory pool and if so, maps that memory to the provided vma.
326 *
327 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
328 * proceed with mapping memory from generic pools.
329 */
330int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
331 void *vaddr, size_t size, int *ret)
332{
333 struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
334
335 return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
336}
337EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
338
339int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
340 size_t size, int *ret)
341{
342 if (!dma_coherent_default_memory)
343 return 0;
344
345 return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
346 vaddr, size, ret);
347}
300 348
301/* 349/*
302 * Support for reserved memory regions defined in device tree 350 * Support for reserved memory regions defined in device tree
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 5096755d185e..b555ff9dd8fc 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -235,7 +235,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
235 235
236 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 236 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
237 237
238 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) 238 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
239 return ret; 239 return ret;
240 240
241 if (off < count && user_count <= (count - off)) { 241 if (off < count && user_count <= (count - off)) {
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 87a0a29f6e7e..5bdf923294a5 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -908,7 +908,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
908 continue; 908 continue;
909 } 909 }
910 sk_set_memalloc(sock->sk); 910 sk_set_memalloc(sock->sk);
911 sock->sk->sk_sndtimeo = nbd->tag_set.timeout; 911 if (nbd->tag_set.timeout)
912 sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
912 atomic_inc(&config->recv_threads); 913 atomic_inc(&config->recv_threads);
913 refcount_inc(&nbd->config_refs); 914 refcount_inc(&nbd->config_refs);
914 old = nsock->sock; 915 old = nsock->sock;
@@ -922,6 +923,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
922 mutex_unlock(&nsock->tx_lock); 923 mutex_unlock(&nsock->tx_lock);
923 sockfd_put(old); 924 sockfd_put(old);
924 925
926 clear_bit(NBD_DISCONNECTED, &config->runtime_flags);
927
925 /* We take the tx_mutex in an error path in the recv_work, so we 928 /* We take the tx_mutex in an error path in the recv_work, so we
926 * need to queue_work outside of the tx_mutex. 929 * need to queue_work outside of the tx_mutex.
927 */ 930 */
@@ -978,11 +981,15 @@ static void send_disconnects(struct nbd_device *nbd)
978 int i, ret; 981 int i, ret;
979 982
980 for (i = 0; i < config->num_connections; i++) { 983 for (i = 0; i < config->num_connections; i++) {
984 struct nbd_sock *nsock = config->socks[i];
985
981 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 986 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
987 mutex_lock(&nsock->tx_lock);
982 ret = sock_xmit(nbd, i, 1, &from, 0, NULL); 988 ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
983 if (ret <= 0) 989 if (ret <= 0)
984 dev_err(disk_to_dev(nbd->disk), 990 dev_err(disk_to_dev(nbd->disk),
985 "Send disconnect failed %d\n", ret); 991 "Send disconnect failed %d\n", ret);
992 mutex_unlock(&nsock->tx_lock);
986 } 993 }
987} 994}
988 995
@@ -991,9 +998,8 @@ static int nbd_disconnect(struct nbd_device *nbd)
991 struct nbd_config *config = nbd->config; 998 struct nbd_config *config = nbd->config;
992 999
993 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); 1000 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
994 if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED, 1001 set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
995 &config->runtime_flags)) 1002 send_disconnects(nbd);
996 send_disconnects(nbd);
997 return 0; 1003 return 0;
998} 1004}
999 1005
@@ -1074,7 +1080,9 @@ static int nbd_start_device(struct nbd_device *nbd)
1074 return -ENOMEM; 1080 return -ENOMEM;
1075 } 1081 }
1076 sk_set_memalloc(config->socks[i]->sock->sk); 1082 sk_set_memalloc(config->socks[i]->sock->sk);
1077 config->socks[i]->sock->sk->sk_sndtimeo = nbd->tag_set.timeout; 1083 if (nbd->tag_set.timeout)
1084 config->socks[i]->sock->sk->sk_sndtimeo =
1085 nbd->tag_set.timeout;
1078 atomic_inc(&config->recv_threads); 1086 atomic_inc(&config->recv_threads);
1079 refcount_inc(&nbd->config_refs); 1087 refcount_inc(&nbd->config_refs);
1080 INIT_WORK(&args->work, recv_work); 1088 INIT_WORK(&args->work, recv_work);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4e02aa5fdac0..1498b899a593 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -541,12 +541,9 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
541 int i; 541 int i;
542 542
543 BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); 543 BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
544 for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; ) 544 i = sysfs_match_string(virtblk_cache_types, buf);
545 if (sysfs_streq(buf, virtblk_cache_types[i]))
546 break;
547
548 if (i < 0) 545 if (i < 0)
549 return -EINVAL; 546 return i;
550 547
551 virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i); 548 virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
552 virtblk_update_cache_mode(vdev); 549 virtblk_update_cache_mode(vdev);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c852ed3c01d5..98e34e4c62b8 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -111,7 +111,7 @@ struct blk_shadow {
111}; 111};
112 112
113struct blkif_req { 113struct blkif_req {
114 int error; 114 blk_status_t error;
115}; 115};
116 116
117static inline struct blkif_req *blkif_req(struct request *rq) 117static inline struct blkif_req *blkif_req(struct request *rq)
@@ -708,6 +708,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
708 * existing persistent grants, or if we have to get new grants, 708 * existing persistent grants, or if we have to get new grants,
709 * as there are not sufficiently many free. 709 * as there are not sufficiently many free.
710 */ 710 */
711 bool new_persistent_gnts = false;
711 struct scatterlist *sg; 712 struct scatterlist *sg;
712 int num_sg, max_grefs, num_grant; 713 int num_sg, max_grefs, num_grant;
713 714
@@ -719,19 +720,21 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
719 */ 720 */
720 max_grefs += INDIRECT_GREFS(max_grefs); 721 max_grefs += INDIRECT_GREFS(max_grefs);
721 722
722 /* 723 /* Check if we have enough persistent grants to allocate a requests */
723 * We have to reserve 'max_grefs' grants because persistent 724 if (rinfo->persistent_gnts_c < max_grefs) {
724 * grants are shared by all rings. 725 new_persistent_gnts = true;
725 */ 726
726 if (max_grefs > 0) 727 if (gnttab_alloc_grant_references(
727 if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) { 728 max_grefs - rinfo->persistent_gnts_c,
729 &setup.gref_head) < 0) {
728 gnttab_request_free_callback( 730 gnttab_request_free_callback(
729 &rinfo->callback, 731 &rinfo->callback,
730 blkif_restart_queue_callback, 732 blkif_restart_queue_callback,
731 rinfo, 733 rinfo,
732 max_grefs); 734 max_grefs - rinfo->persistent_gnts_c);
733 return 1; 735 return 1;
734 } 736 }
737 }
735 738
736 /* Fill out a communications ring structure. */ 739 /* Fill out a communications ring structure. */
737 id = blkif_ring_get_request(rinfo, req, &ring_req); 740 id = blkif_ring_get_request(rinfo, req, &ring_req);
@@ -832,7 +835,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
832 if (unlikely(require_extra_req)) 835 if (unlikely(require_extra_req))
833 rinfo->shadow[extra_id].req = *extra_ring_req; 836 rinfo->shadow[extra_id].req = *extra_ring_req;
834 837
835 if (max_grefs > 0) 838 if (new_persistent_gnts)
836 gnttab_free_grant_references(setup.gref_head); 839 gnttab_free_grant_references(setup.gref_head);
837 840
838 return 0; 841 return 0;
@@ -906,8 +909,8 @@ out_err:
906 return BLK_STS_IOERR; 909 return BLK_STS_IOERR;
907 910
908out_busy: 911out_busy:
909 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
910 blk_mq_stop_hw_queue(hctx); 912 blk_mq_stop_hw_queue(hctx);
913 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
911 return BLK_STS_RESOURCE; 914 return BLK_STS_RESOURCE;
912} 915}
913 916
@@ -1616,7 +1619,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
1616 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { 1619 if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
1617 printk(KERN_WARNING "blkfront: %s: %s op failed\n", 1620 printk(KERN_WARNING "blkfront: %s: %s op failed\n",
1618 info->gd->disk_name, op_name(bret->operation)); 1621 info->gd->disk_name, op_name(bret->operation));
1619 blkif_req(req)->error = -EOPNOTSUPP; 1622 blkif_req(req)->error = BLK_STS_NOTSUPP;
1620 } 1623 }
1621 if (unlikely(bret->status == BLKIF_RSP_ERROR && 1624 if (unlikely(bret->status == BLKIF_RSP_ERROR &&
1622 rinfo->shadow[id].req.u.rw.nr_segments == 0)) { 1625 rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 193204dfbf3a..4b75084fabad 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -655,7 +655,7 @@ source "drivers/crypto/virtio/Kconfig"
655config CRYPTO_DEV_BCM_SPU 655config CRYPTO_DEV_BCM_SPU
656 tristate "Broadcom symmetric crypto/hash acceleration support" 656 tristate "Broadcom symmetric crypto/hash acceleration support"
657 depends on ARCH_BCM_IPROC 657 depends on ARCH_BCM_IPROC
658 depends on BCM_PDC_MBOX 658 depends on MAILBOX
659 default m 659 default m
660 select CRYPTO_DES 660 select CRYPTO_DES
661 select CRYPTO_MD5 661 select CRYPTO_MD5
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index ef04c9748317..bf7ac621c591 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -302,6 +302,7 @@ spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode,
302 break; 302 break;
303 case HASH_ALG_SHA3_512: 303 case HASH_ALG_SHA3_512:
304 *spu2_type = SPU2_HASH_TYPE_SHA3_512; 304 *spu2_type = SPU2_HASH_TYPE_SHA3_512;
305 break;
305 case HASH_ALG_LAST: 306 case HASH_ALG_LAST:
306 default: 307 default:
307 err = -EINVAL; 308 err = -EINVAL;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index ae44a464cd2d..9ccefb9b7232 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -18,8 +18,9 @@
18#define SE_GROUP 0 18#define SE_GROUP 0
19 19
20#define DRIVER_VERSION "1.0" 20#define DRIVER_VERSION "1.0"
21#define FW_DIR "cavium/"
21/* SE microcode */ 22/* SE microcode */
22#define SE_FW "cnn55xx_se.fw" 23#define SE_FW FW_DIR "cnn55xx_se.fw"
23 24
24static const char nitrox_driver_name[] = "CNN55XX"; 25static const char nitrox_driver_name[] = "CNN55XX";
25 26
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index e7f87ac12685..1fabd4aee81b 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -773,7 +773,6 @@ static int safexcel_probe(struct platform_device *pdev)
773 struct device *dev = &pdev->dev; 773 struct device *dev = &pdev->dev;
774 struct resource *res; 774 struct resource *res;
775 struct safexcel_crypto_priv *priv; 775 struct safexcel_crypto_priv *priv;
776 u64 dma_mask;
777 int i, ret; 776 int i, ret;
778 777
779 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 778 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -802,9 +801,7 @@ static int safexcel_probe(struct platform_device *pdev)
802 return -EPROBE_DEFER; 801 return -EPROBE_DEFER;
803 } 802 }
804 803
805 if (of_property_read_u64(dev->of_node, "dma-mask", &dma_mask)) 804 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
806 dma_mask = DMA_BIT_MASK(64);
807 ret = dma_set_mask_and_coherent(dev, dma_mask);
808 if (ret) 805 if (ret)
809 goto err_clk; 806 goto err_clk;
810 807
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index ce9e563e6e1d..938eb4868f7f 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -278,6 +278,12 @@ void dax_write_cache(struct dax_device *dax_dev, bool wc)
278} 278}
279EXPORT_SYMBOL_GPL(dax_write_cache); 279EXPORT_SYMBOL_GPL(dax_write_cache);
280 280
281bool dax_write_cache_enabled(struct dax_device *dax_dev)
282{
283 return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
284}
285EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
286
281bool dax_alive(struct dax_device *dax_dev) 287bool dax_alive(struct dax_device *dax_dev)
282{ 288{
283 lockdep_assert_held(&dax_srcu); 289 lockdep_assert_held(&dax_srcu);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index f621ee115c98..5e771bc11b00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -198,12 +198,16 @@ amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
198 result = idr_find(&fpriv->bo_list_handles, id); 198 result = idr_find(&fpriv->bo_list_handles, id);
199 199
200 if (result) { 200 if (result) {
201 if (kref_get_unless_zero(&result->refcount)) 201 if (kref_get_unless_zero(&result->refcount)) {
202 rcu_read_unlock();
202 mutex_lock(&result->lock); 203 mutex_lock(&result->lock);
203 else 204 } else {
205 rcu_read_unlock();
204 result = NULL; 206 result = NULL;
207 }
208 } else {
209 rcu_read_unlock();
205 } 210 }
206 rcu_read_unlock();
207 211
208 return result; 212 return result;
209} 213}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 3a0b69b09ed6..c9b9c88231aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1475,21 +1475,23 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
1475 1475
1476static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance) 1476static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
1477{ 1477{
1478 u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); 1478 u32 data;
1479 1479
1480 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { 1480 if (instance == 0xffffffff)
1481 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); 1481 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1482 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); 1482 else
1483 } else if (se_num == 0xffffffff) { 1483 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1484 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); 1484
1485 if (se_num == 0xffffffff)
1485 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); 1486 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
1486 } else if (sh_num == 0xffffffff) { 1487 else
1487 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
1488 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 1488 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1489 } else { 1489
1490 if (sh_num == 0xffffffff)
1491 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
1492 else
1490 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); 1493 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
1491 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 1494
1492 }
1493 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); 1495 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
1494} 1496}
1495 1497
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index d6f097f44b6c..197174e562d2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -2128,15 +2128,9 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
2128 pp_table->AvfsGbCksOff.m2_shift = 12; 2128 pp_table->AvfsGbCksOff.m2_shift = 12;
2129 pp_table->AvfsGbCksOff.b_shift = 0; 2129 pp_table->AvfsGbCksOff.b_shift = 0;
2130 2130
2131 for (i = 0; i < dep_table->count; i++) { 2131 for (i = 0; i < dep_table->count; i++)
2132 if (dep_table->entries[i].sclk_offset == 0) 2132 pp_table->StaticVoltageOffsetVid[i] =
2133 pp_table->StaticVoltageOffsetVid[i] = 248; 2133 convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
2134 else
2135 pp_table->StaticVoltageOffsetVid[i] =
2136 (uint8_t)(dep_table->entries[i].sclk_offset *
2137 VOLTAGE_VID_OFFSET_SCALE2 /
2138 VOLTAGE_VID_OFFSET_SCALE1);
2139 }
2140 2134
2141 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != 2135 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2142 data->disp_clk_quad_eqn_a) && 2136 data->disp_clk_quad_eqn_a) &&
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 213fb837e1c4..08af8d6b844b 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -544,7 +544,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
544 DP_DETAILED_CAP_INFO_AVAILABLE; 544 DP_DETAILED_CAP_INFO_AVAILABLE;
545 int clk; 545 int clk;
546 int bpc; 546 int bpc;
547 char id[6]; 547 char id[7];
548 int len; 548 int len;
549 uint8_t rev[2]; 549 uint8_t rev[2];
550 int type = port_cap[0] & DP_DS_PORT_TYPE_MASK; 550 int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
@@ -583,6 +583,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
583 seq_puts(m, "\t\tType: N/A\n"); 583 seq_puts(m, "\t\tType: N/A\n");
584 } 584 }
585 585
586 memset(id, 0, sizeof(id));
586 drm_dp_downstream_id(aux, id); 587 drm_dp_downstream_id(aux, id);
587 seq_printf(m, "\t\tID: %s\n", id); 588 seq_printf(m, "\t\tID: %s\n", id);
588 589
@@ -591,7 +592,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
591 seq_printf(m, "\t\tHW: %d.%d\n", 592 seq_printf(m, "\t\tHW: %d.%d\n",
592 (rev[0] & 0xf0) >> 4, rev[0] & 0xf); 593 (rev[0] & 0xf0) >> 4, rev[0] & 0xf);
593 594
594 len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2); 595 len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2);
595 if (len > 0) 596 if (len > 0)
596 seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]); 597 seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
597 598
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 1d185347c64c..305dc3d4ff77 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -75,6 +75,7 @@ config DRM_EXYNOS_DP
75config DRM_EXYNOS_HDMI 75config DRM_EXYNOS_HDMI
76 bool "HDMI" 76 bool "HDMI"
77 depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON 77 depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON
78 select CEC_CORE if CEC_NOTIFIER
78 help 79 help
79 Choose this option if you want to use Exynos HDMI for DRM. 80 Choose this option if you want to use Exynos HDMI for DRM.
80 81
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 35a8dfc93836..242bd50faa26 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -453,7 +453,6 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
453 struct component_match *match; 453 struct component_match *match;
454 454
455 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 455 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
456 exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
457 456
458 match = exynos_drm_match_add(&pdev->dev); 457 match = exynos_drm_match_add(&pdev->dev);
459 if (IS_ERR(match)) 458 if (IS_ERR(match))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index a11b79596e2f..b6a46d9a016e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1651,8 +1651,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
1651 return ret; 1651 return ret;
1652 1652
1653 dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0); 1653 dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0);
1654 if (!dsi->bridge_node)
1655 return -EINVAL;
1656 1654
1657 return 0; 1655 return 0;
1658} 1656}
@@ -1687,9 +1685,11 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
1687 return ret; 1685 return ret;
1688 } 1686 }
1689 1687
1690 bridge = of_drm_find_bridge(dsi->bridge_node); 1688 if (dsi->bridge_node) {
1691 if (bridge) 1689 bridge = of_drm_find_bridge(dsi->bridge_node);
1692 drm_bridge_attach(encoder, bridge, NULL); 1690 if (bridge)
1691 drm_bridge_attach(encoder, bridge, NULL);
1692 }
1693 1693
1694 return mipi_dsi_host_register(&dsi->dsi_host); 1694 return mipi_dsi_host_register(&dsi->dsi_host);
1695} 1695}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index e45720543a45..16bbee897e0d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -340,16 +340,10 @@ static int exynos_mic_bind(struct device *dev, struct device *master,
340 void *data) 340 void *data)
341{ 341{
342 struct exynos_mic *mic = dev_get_drvdata(dev); 342 struct exynos_mic *mic = dev_get_drvdata(dev);
343 int ret;
344 343
345 mic->bridge.funcs = &mic_bridge_funcs;
346 mic->bridge.of_node = dev->of_node;
347 mic->bridge.driver_private = mic; 344 mic->bridge.driver_private = mic;
348 ret = drm_bridge_add(&mic->bridge);
349 if (ret)
350 DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
351 345
352 return ret; 346 return 0;
353} 347}
354 348
355static void exynos_mic_unbind(struct device *dev, struct device *master, 349static void exynos_mic_unbind(struct device *dev, struct device *master,
@@ -365,8 +359,6 @@ static void exynos_mic_unbind(struct device *dev, struct device *master,
365 359
366already_disabled: 360already_disabled:
367 mutex_unlock(&mic_mutex); 361 mutex_unlock(&mic_mutex);
368
369 drm_bridge_remove(&mic->bridge);
370} 362}
371 363
372static const struct component_ops exynos_mic_component_ops = { 364static const struct component_ops exynos_mic_component_ops = {
@@ -461,6 +453,15 @@ static int exynos_mic_probe(struct platform_device *pdev)
461 453
462 platform_set_drvdata(pdev, mic); 454 platform_set_drvdata(pdev, mic);
463 455
456 mic->bridge.funcs = &mic_bridge_funcs;
457 mic->bridge.of_node = dev->of_node;
458
459 ret = drm_bridge_add(&mic->bridge);
460 if (ret) {
461 DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
462 return ret;
463 }
464
464 pm_runtime_enable(dev); 465 pm_runtime_enable(dev);
465 466
466 ret = component_add(dev, &exynos_mic_component_ops); 467 ret = component_add(dev, &exynos_mic_component_ops);
@@ -479,8 +480,13 @@ err:
479 480
480static int exynos_mic_remove(struct platform_device *pdev) 481static int exynos_mic_remove(struct platform_device *pdev)
481{ 482{
483 struct exynos_mic *mic = platform_get_drvdata(pdev);
484
482 component_del(&pdev->dev, &exynos_mic_component_ops); 485 component_del(&pdev->dev, &exynos_mic_component_ops);
483 pm_runtime_disable(&pdev->dev); 486 pm_runtime_disable(&pdev->dev);
487
488 drm_bridge_remove(&mic->bridge);
489
484 return 0; 490 return 0;
485} 491}
486 492
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 06bfbe400cf1..d3b69d66736f 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1501,8 +1501,6 @@ static void hdmi_disable(struct drm_encoder *encoder)
1501 */ 1501 */
1502 cancel_delayed_work(&hdata->hotplug_work); 1502 cancel_delayed_work(&hdata->hotplug_work);
1503 cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID); 1503 cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
1504
1505 hdmiphy_disable(hdata);
1506} 1504}
1507 1505
1508static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = { 1506static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
@@ -1676,7 +1674,7 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
1676 return hdmi_bridge_init(hdata); 1674 return hdmi_bridge_init(hdata);
1677} 1675}
1678 1676
1679static struct of_device_id hdmi_match_types[] = { 1677static const struct of_device_id hdmi_match_types[] = {
1680 { 1678 {
1681 .compatible = "samsung,exynos4210-hdmi", 1679 .compatible = "samsung,exynos4210-hdmi",
1682 .data = &exynos4210_hdmi_driver_data, 1680 .data = &exynos4210_hdmi_driver_data,
@@ -1934,8 +1932,7 @@ static int hdmi_remove(struct platform_device *pdev)
1934 return 0; 1932 return 0;
1935} 1933}
1936 1934
1937#ifdef CONFIG_PM 1935static int __maybe_unused exynos_hdmi_suspend(struct device *dev)
1938static int exynos_hdmi_suspend(struct device *dev)
1939{ 1936{
1940 struct hdmi_context *hdata = dev_get_drvdata(dev); 1937 struct hdmi_context *hdata = dev_get_drvdata(dev);
1941 1938
@@ -1944,7 +1941,7 @@ static int exynos_hdmi_suspend(struct device *dev)
1944 return 0; 1941 return 0;
1945} 1942}
1946 1943
1947static int exynos_hdmi_resume(struct device *dev) 1944static int __maybe_unused exynos_hdmi_resume(struct device *dev)
1948{ 1945{
1949 struct hdmi_context *hdata = dev_get_drvdata(dev); 1946 struct hdmi_context *hdata = dev_get_drvdata(dev);
1950 int ret; 1947 int ret;
@@ -1955,7 +1952,6 @@ static int exynos_hdmi_resume(struct device *dev)
1955 1952
1956 return 0; 1953 return 0;
1957} 1954}
1958#endif
1959 1955
1960static const struct dev_pm_ops exynos_hdmi_pm_ops = { 1956static const struct dev_pm_ops exynos_hdmi_pm_ops = {
1961 SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL) 1957 SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 6bed4f3ffcd6..a998a8dd783c 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1094,28 +1094,28 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
1094 .atomic_check = mixer_atomic_check, 1094 .atomic_check = mixer_atomic_check,
1095}; 1095};
1096 1096
1097static struct mixer_drv_data exynos5420_mxr_drv_data = { 1097static const struct mixer_drv_data exynos5420_mxr_drv_data = {
1098 .version = MXR_VER_128_0_0_184, 1098 .version = MXR_VER_128_0_0_184,
1099 .is_vp_enabled = 0, 1099 .is_vp_enabled = 0,
1100}; 1100};
1101 1101
1102static struct mixer_drv_data exynos5250_mxr_drv_data = { 1102static const struct mixer_drv_data exynos5250_mxr_drv_data = {
1103 .version = MXR_VER_16_0_33_0, 1103 .version = MXR_VER_16_0_33_0,
1104 .is_vp_enabled = 0, 1104 .is_vp_enabled = 0,
1105}; 1105};
1106 1106
1107static struct mixer_drv_data exynos4212_mxr_drv_data = { 1107static const struct mixer_drv_data exynos4212_mxr_drv_data = {
1108 .version = MXR_VER_0_0_0_16, 1108 .version = MXR_VER_0_0_0_16,
1109 .is_vp_enabled = 1, 1109 .is_vp_enabled = 1,
1110}; 1110};
1111 1111
1112static struct mixer_drv_data exynos4210_mxr_drv_data = { 1112static const struct mixer_drv_data exynos4210_mxr_drv_data = {
1113 .version = MXR_VER_0_0_0_16, 1113 .version = MXR_VER_0_0_0_16,
1114 .is_vp_enabled = 1, 1114 .is_vp_enabled = 1,
1115 .has_sclk = 1, 1115 .has_sclk = 1,
1116}; 1116};
1117 1117
1118static struct of_device_id mixer_match_types[] = { 1118static const struct of_device_id mixer_match_types[] = {
1119 { 1119 {
1120 .compatible = "samsung,exynos4210-mixer", 1120 .compatible = "samsung,exynos4210-mixer",
1121 .data = &exynos4210_mxr_drv_data, 1121 .data = &exynos4210_mxr_drv_data,
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 2deb05f618fb..7cb0818a13de 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -323,27 +323,27 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
323{ 323{
324 struct intel_gvt_irq *irq = &gvt->irq; 324 struct intel_gvt_irq *irq = &gvt->irq;
325 struct intel_vgpu *vgpu; 325 struct intel_vgpu *vgpu;
326 bool have_enabled_pipe = false;
327 int pipe, id; 326 int pipe, id;
328 327
329 if (WARN_ON(!mutex_is_locked(&gvt->lock))) 328 if (WARN_ON(!mutex_is_locked(&gvt->lock)))
330 return; 329 return;
331 330
332 hrtimer_cancel(&irq->vblank_timer.timer);
333
334 for_each_active_vgpu(gvt, vgpu, id) { 331 for_each_active_vgpu(gvt, vgpu, id) {
335 for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) { 332 for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
336 have_enabled_pipe = 333 if (pipe_is_enabled(vgpu, pipe))
337 pipe_is_enabled(vgpu, pipe); 334 goto out;
338 if (have_enabled_pipe)
339 break;
340 } 335 }
341 } 336 }
342 337
343 if (have_enabled_pipe) 338 /* all the pipes are disabled */
344 hrtimer_start(&irq->vblank_timer.timer, 339 hrtimer_cancel(&irq->vblank_timer.timer);
345 ktime_add_ns(ktime_get(), irq->vblank_timer.period), 340 return;
346 HRTIMER_MODE_ABS); 341
342out:
343 hrtimer_start(&irq->vblank_timer.timer,
344 ktime_add_ns(ktime_get(), irq->vblank_timer.period),
345 HRTIMER_MODE_ABS);
346
347} 347}
348 348
349static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe) 349static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index 152f16c11878..348b29a845c9 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -114,7 +114,7 @@ i915_clflush_notify(struct i915_sw_fence *fence,
114 return NOTIFY_DONE; 114 return NOTIFY_DONE;
115} 115}
116 116
117void i915_gem_clflush_object(struct drm_i915_gem_object *obj, 117bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
118 unsigned int flags) 118 unsigned int flags)
119{ 119{
120 struct clflush *clflush; 120 struct clflush *clflush;
@@ -128,7 +128,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
128 */ 128 */
129 if (!i915_gem_object_has_struct_page(obj)) { 129 if (!i915_gem_object_has_struct_page(obj)) {
130 obj->cache_dirty = false; 130 obj->cache_dirty = false;
131 return; 131 return false;
132 } 132 }
133 133
134 /* If the GPU is snooping the contents of the CPU cache, 134 /* If the GPU is snooping the contents of the CPU cache,
@@ -140,7 +140,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
140 * tracking. 140 * tracking.
141 */ 141 */
142 if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent) 142 if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
143 return; 143 return false;
144 144
145 trace_i915_gem_object_clflush(obj); 145 trace_i915_gem_object_clflush(obj);
146 146
@@ -179,4 +179,5 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
179 } 179 }
180 180
181 obj->cache_dirty = false; 181 obj->cache_dirty = false;
182 return true;
182} 183}
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.h b/drivers/gpu/drm/i915/i915_gem_clflush.h
index 2455a7820937..f390247561b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.h
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.h
@@ -28,7 +28,7 @@
28struct drm_i915_private; 28struct drm_i915_private;
29struct drm_i915_gem_object; 29struct drm_i915_gem_object;
30 30
31void i915_gem_clflush_object(struct drm_i915_gem_object *obj, 31bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
32 unsigned int flags); 32 unsigned int flags);
33#define I915_CLFLUSH_FORCE BIT(0) 33#define I915_CLFLUSH_FORCE BIT(0)
34#define I915_CLFLUSH_SYNC BIT(1) 34#define I915_CLFLUSH_SYNC BIT(1)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 054b2e54cdaf..e9503f6d1100 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -560,9 +560,6 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
560 eb->args->flags |= __EXEC_HAS_RELOC; 560 eb->args->flags |= __EXEC_HAS_RELOC;
561 } 561 }
562 562
563 entry->flags |= __EXEC_OBJECT_HAS_PIN;
564 GEM_BUG_ON(eb_vma_misplaced(entry, vma));
565
566 if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) { 563 if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
567 err = i915_vma_get_fence(vma); 564 err = i915_vma_get_fence(vma);
568 if (unlikely(err)) { 565 if (unlikely(err)) {
@@ -574,6 +571,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
574 entry->flags |= __EXEC_OBJECT_HAS_FENCE; 571 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
575 } 572 }
576 573
574 entry->flags |= __EXEC_OBJECT_HAS_PIN;
575 GEM_BUG_ON(eb_vma_misplaced(entry, vma));
576
577 return 0; 577 return 0;
578} 578}
579 579
@@ -1458,7 +1458,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
1458 * to read. However, if the array is not writable the user loses 1458 * to read. However, if the array is not writable the user loses
1459 * the updated relocation values. 1459 * the updated relocation values.
1460 */ 1460 */
1461 if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(urelocs)))) 1461 if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
1462 return -EFAULT; 1462 return -EFAULT;
1463 1463
1464 do { 1464 do {
@@ -1775,7 +1775,7 @@ out:
1775 } 1775 }
1776 } 1776 }
1777 1777
1778 return err ?: have_copy; 1778 return err;
1779} 1779}
1780 1780
1781static int eb_relocate(struct i915_execbuffer *eb) 1781static int eb_relocate(struct i915_execbuffer *eb)
@@ -1825,7 +1825,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
1825 int err; 1825 int err;
1826 1826
1827 for (i = 0; i < count; i++) { 1827 for (i = 0; i < count; i++) {
1828 const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; 1828 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
1829 struct i915_vma *vma = exec_to_vma(entry); 1829 struct i915_vma *vma = exec_to_vma(entry);
1830 struct drm_i915_gem_object *obj = vma->obj; 1830 struct drm_i915_gem_object *obj = vma->obj;
1831 1831
@@ -1841,12 +1841,14 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
1841 eb->request->capture_list = capture; 1841 eb->request->capture_list = capture;
1842 } 1842 }
1843 1843
1844 if (unlikely(obj->cache_dirty && !obj->cache_coherent)) {
1845 if (i915_gem_clflush_object(obj, 0))
1846 entry->flags &= ~EXEC_OBJECT_ASYNC;
1847 }
1848
1844 if (entry->flags & EXEC_OBJECT_ASYNC) 1849 if (entry->flags & EXEC_OBJECT_ASYNC)
1845 goto skip_flushes; 1850 goto skip_flushes;
1846 1851
1847 if (unlikely(obj->cache_dirty && !obj->cache_coherent))
1848 i915_gem_clflush_object(obj, 0);
1849
1850 err = i915_gem_request_await_object 1852 err = i915_gem_request_await_object
1851 (eb->request, obj, entry->flags & EXEC_OBJECT_WRITE); 1853 (eb->request, obj, entry->flags & EXEC_OBJECT_WRITE);
1852 if (err) 1854 if (err)
@@ -2209,7 +2211,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
2209 goto err_unlock; 2211 goto err_unlock;
2210 2212
2211 err = eb_relocate(&eb); 2213 err = eb_relocate(&eb);
2212 if (err) 2214 if (err) {
2213 /* 2215 /*
2214 * If the user expects the execobject.offset and 2216 * If the user expects the execobject.offset and
2215 * reloc.presumed_offset to be an exact match, 2217 * reloc.presumed_offset to be an exact match,
@@ -2218,8 +2220,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
2218 * relocation. 2220 * relocation.
2219 */ 2221 */
2220 args->flags &= ~__EXEC_HAS_RELOC; 2222 args->flags &= ~__EXEC_HAS_RELOC;
2221 if (err < 0)
2222 goto err_vma; 2223 goto err_vma;
2224 }
2223 2225
2224 if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) { 2226 if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) {
2225 DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); 2227 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 4a673fc1a432..20cf272c97b1 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -284,12 +284,12 @@ static inline void __i915_vma_pin(struct i915_vma *vma)
284 284
285static inline void __i915_vma_unpin(struct i915_vma *vma) 285static inline void __i915_vma_unpin(struct i915_vma *vma)
286{ 286{
287 GEM_BUG_ON(!i915_vma_is_pinned(vma));
288 vma->flags--; 287 vma->flags--;
289} 288}
290 289
291static inline void i915_vma_unpin(struct i915_vma *vma) 290static inline void i915_vma_unpin(struct i915_vma *vma)
292{ 291{
292 GEM_BUG_ON(!i915_vma_is_pinned(vma));
293 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 293 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
294 __i915_vma_unpin(vma); 294 __i915_vma_unpin(vma);
295} 295}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 80e96f1f49d2..9edeaaef77ad 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1896,8 +1896,8 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
1896 val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln)); 1896 val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
1897 val &= ~LOADGEN_SELECT; 1897 val &= ~LOADGEN_SELECT;
1898 1898
1899 if (((rate < 600000) && (width == 4) && (ln >= 1)) || 1899 if ((rate <= 600000 && width == 4 && ln >= 1) ||
1900 ((rate < 600000) && (width < 4) && ((ln == 1) || (ln == 2)))) { 1900 (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
1901 val |= LOADGEN_SELECT; 1901 val |= LOADGEN_SELECT;
1902 } 1902 }
1903 I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val); 1903 I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dec9e58545a1..9471c88d449e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3427,26 +3427,6 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
3427 intel_finish_page_flip_cs(dev_priv, crtc->pipe); 3427 intel_finish_page_flip_cs(dev_priv, crtc->pipe);
3428} 3428}
3429 3429
3430static void intel_update_primary_planes(struct drm_device *dev)
3431{
3432 struct drm_crtc *crtc;
3433
3434 for_each_crtc(dev, crtc) {
3435 struct intel_plane *plane = to_intel_plane(crtc->primary);
3436 struct intel_plane_state *plane_state =
3437 to_intel_plane_state(plane->base.state);
3438
3439 if (plane_state->base.visible) {
3440 trace_intel_update_plane(&plane->base,
3441 to_intel_crtc(crtc));
3442
3443 plane->update_plane(plane,
3444 to_intel_crtc_state(crtc->state),
3445 plane_state);
3446 }
3447 }
3448}
3449
3450static int 3430static int
3451__intel_display_resume(struct drm_device *dev, 3431__intel_display_resume(struct drm_device *dev,
3452 struct drm_atomic_state *state, 3432 struct drm_atomic_state *state,
@@ -3499,6 +3479,12 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
3499 struct drm_atomic_state *state; 3479 struct drm_atomic_state *state;
3500 int ret; 3480 int ret;
3501 3481
3482
3483 /* reset doesn't touch the display */
3484 if (!i915.force_reset_modeset_test &&
3485 !gpu_reset_clobbers_display(dev_priv))
3486 return;
3487
3502 /* 3488 /*
3503 * Need mode_config.mutex so that we don't 3489 * Need mode_config.mutex so that we don't
3504 * trample ongoing ->detect() and whatnot. 3490 * trample ongoing ->detect() and whatnot.
@@ -3512,12 +3498,6 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
3512 3498
3513 drm_modeset_backoff(ctx); 3499 drm_modeset_backoff(ctx);
3514 } 3500 }
3515
3516 /* reset doesn't touch the display, but flips might get nuked anyway, */
3517 if (!i915.force_reset_modeset_test &&
3518 !gpu_reset_clobbers_display(dev_priv))
3519 return;
3520
3521 /* 3501 /*
3522 * Disabling the crtcs gracefully seems nicer. Also the 3502 * Disabling the crtcs gracefully seems nicer. Also the
3523 * g33 docs say we should at least disable all the planes. 3503 * g33 docs say we should at least disable all the planes.
@@ -3547,6 +3527,14 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
3547 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 3527 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
3548 int ret; 3528 int ret;
3549 3529
3530 /* reset doesn't touch the display */
3531 if (!i915.force_reset_modeset_test &&
3532 !gpu_reset_clobbers_display(dev_priv))
3533 return;
3534
3535 if (!state)
3536 goto unlock;
3537
3550 /* 3538 /*
3551 * Flips in the rings will be nuked by the reset, 3539 * Flips in the rings will be nuked by the reset,
3552 * so complete all pending flips so that user space 3540 * so complete all pending flips so that user space
@@ -3558,22 +3546,10 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
3558 3546
3559 /* reset doesn't touch the display */ 3547 /* reset doesn't touch the display */
3560 if (!gpu_reset_clobbers_display(dev_priv)) { 3548 if (!gpu_reset_clobbers_display(dev_priv)) {
3561 if (!state) { 3549 /* for testing only restore the display */
3562 /* 3550 ret = __intel_display_resume(dev, state, ctx);
3563 * Flips in the rings have been nuked by the reset,
3564 * so update the base address of all primary
3565 * planes to the the last fb to make sure we're
3566 * showing the correct fb after a reset.
3567 *
3568 * FIXME: Atomic will make this obsolete since we won't schedule
3569 * CS-based flips (which might get lost in gpu resets) any more.
3570 */
3571 intel_update_primary_planes(dev);
3572 } else {
3573 ret = __intel_display_resume(dev, state, ctx);
3574 if (ret) 3551 if (ret)
3575 DRM_ERROR("Restoring old state failed with %i\n", ret); 3552 DRM_ERROR("Restoring old state failed with %i\n", ret);
3576 }
3577 } else { 3553 } else {
3578 /* 3554 /*
3579 * The display has been reset as well, 3555 * The display has been reset as well,
@@ -3597,8 +3573,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
3597 intel_hpd_init(dev_priv); 3573 intel_hpd_init(dev_priv);
3598 } 3574 }
3599 3575
3600 if (state) 3576 drm_atomic_state_put(state);
3601 drm_atomic_state_put(state); 3577unlock:
3602 drm_modeset_drop_locks(ctx); 3578 drm_modeset_drop_locks(ctx);
3603 drm_modeset_acquire_fini(ctx); 3579 drm_modeset_acquire_fini(ctx);
3604 mutex_unlock(&dev->mode_config.mutex); 3580 mutex_unlock(&dev->mode_config.mutex);
@@ -9117,6 +9093,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9117 u64 power_domain_mask; 9093 u64 power_domain_mask;
9118 bool active; 9094 bool active;
9119 9095
9096 if (INTEL_GEN(dev_priv) >= 9) {
9097 intel_crtc_init_scalers(crtc, pipe_config);
9098
9099 pipe_config->scaler_state.scaler_id = -1;
9100 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
9101 }
9102
9120 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 9103 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9121 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 9104 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9122 return false; 9105 return false;
@@ -9145,13 +9128,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9145 pipe_config->gamma_mode = 9128 pipe_config->gamma_mode =
9146 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; 9129 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
9147 9130
9148 if (INTEL_GEN(dev_priv) >= 9) {
9149 intel_crtc_init_scalers(crtc, pipe_config);
9150
9151 pipe_config->scaler_state.scaler_id = -1;
9152 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
9153 }
9154
9155 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 9131 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
9156 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { 9132 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
9157 power_domain_mask |= BIT_ULL(power_domain); 9133 power_domain_mask |= BIT_ULL(power_domain);
@@ -9540,7 +9516,16 @@ static void i9xx_update_cursor(struct intel_plane *plane,
9540 * On some platforms writing CURCNTR first will also 9516 * On some platforms writing CURCNTR first will also
9541 * cause CURPOS to be armed by the CURBASE write. 9517 * cause CURPOS to be armed by the CURBASE write.
9542 * Without the CURCNTR write the CURPOS write would 9518 * Without the CURCNTR write the CURPOS write would
9543 * arm itself. 9519 * arm itself. Thus we always start the full update
9520 * with a CURCNTR write.
9521 *
9522 * On other platforms CURPOS always requires the
 9523 * CURBASE write to arm the update. Additionally
9524 * a write to any of the cursor register will cancel
9525 * an already armed cursor update. Thus leaving out
9526 * the CURBASE write after CURPOS could lead to a
9527 * cursor that doesn't appear to move, or even change
9528 * shape. Thus we always write CURBASE.
9544 * 9529 *
9545 * CURCNTR and CUR_FBC_CTL are always 9530 * CURCNTR and CUR_FBC_CTL are always
9546 * armed by the CURBASE write only. 9531 * armed by the CURBASE write only.
@@ -9559,6 +9544,7 @@ static void i9xx_update_cursor(struct intel_plane *plane,
9559 plane->cursor.cntl = cntl; 9544 plane->cursor.cntl = cntl;
9560 } else { 9545 } else {
9561 I915_WRITE_FW(CURPOS(pipe), pos); 9546 I915_WRITE_FW(CURPOS(pipe), pos);
9547 I915_WRITE_FW(CURBASE(pipe), base);
9562 } 9548 }
9563 9549
9564 POSTING_READ_FW(CURBASE(pipe)); 9550 POSTING_READ_FW(CURBASE(pipe));
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 52d5b82790d9..c17ed0e62b67 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -45,7 +45,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
45 return true; 45 return true;
46 if (IS_SKYLAKE(dev_priv)) 46 if (IS_SKYLAKE(dev_priv))
47 return true; 47 return true;
48 if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D) 48 if (IS_KABYLAKE(dev_priv))
49 return true; 49 return true;
50 return false; 50 return false;
51} 51}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 48ea0fca1f72..40b224b44d1b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4463,8 +4463,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
4463 if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && 4463 if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
4464 (plane_bytes_per_line / 512 < 1)) 4464 (plane_bytes_per_line / 512 < 1))
4465 selected_result = method2; 4465 selected_result = method2;
4466 else if ((ddb_allocation && ddb_allocation / 4466 else if (ddb_allocation >=
4467 fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1) 4467 fixed_16_16_to_u32_round_up(plane_blocks_per_line))
4468 selected_result = min_fixed_16_16(method1, method2); 4468 selected_result = min_fixed_16_16(method1, method2);
4469 else if (latency >= linetime_us) 4469 else if (latency >= linetime_us)
4470 selected_result = min_fixed_16_16(method1, method2); 4470 selected_result = min_fixed_16_16(method1, method2);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 627e2aa09766..8cdec455cf7d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -206,7 +206,7 @@ struct drm_i915_private *mock_gem_device(void)
206 mkwrite_device_info(i915)->ring_mask = BIT(0); 206 mkwrite_device_info(i915)->ring_mask = BIT(0);
207 i915->engine[RCS] = mock_engine(i915, "mock"); 207 i915->engine[RCS] = mock_engine(i915, "mock");
208 if (!i915->engine[RCS]) 208 if (!i915->engine[RCS])
209 goto err_dependencies; 209 goto err_priorities;
210 210
211 i915->kernel_context = mock_context(i915, NULL); 211 i915->kernel_context = mock_context(i915, NULL);
212 if (!i915->kernel_context) 212 if (!i915->kernel_context)
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 147b22163f9f..dab78c660dd6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1158,8 +1158,6 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
1158 return -ENODEV; 1158 return -ENODEV;
1159 if (WARN_ON(msg->size > 16)) 1159 if (WARN_ON(msg->size > 16))
1160 return -E2BIG; 1160 return -E2BIG;
1161 if (msg->size == 0)
1162 return msg->size;
1163 1161
1164 ret = nvkm_i2c_aux_acquire(aux); 1162 ret = nvkm_i2c_aux_acquire(aux);
1165 if (ret) 1163 if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 8d1df5678eaa..f362c9fa8b3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -409,7 +409,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
409 struct nouveau_display *disp = nouveau_display(dev); 409 struct nouveau_display *disp = nouveau_display(dev);
410 struct nouveau_drm *drm = nouveau_drm(dev); 410 struct nouveau_drm *drm = nouveau_drm(dev);
411 struct drm_connector *connector; 411 struct drm_connector *connector;
412 struct drm_crtc *crtc;
413 412
414 if (!suspend) { 413 if (!suspend) {
415 if (drm_drv_uses_atomic_modeset(dev)) 414 if (drm_drv_uses_atomic_modeset(dev))
@@ -418,10 +417,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
418 drm_crtc_force_disable_all(dev); 417 drm_crtc_force_disable_all(dev);
419 } 418 }
420 419
421 /* Make sure that drm and hw vblank irqs get properly disabled. */
422 drm_for_each_crtc(crtc, dev)
423 drm_crtc_vblank_off(crtc);
424
425 /* disable flip completion events */ 420 /* disable flip completion events */
426 nvif_notify_put(&drm->flip); 421 nvif_notify_put(&drm->flip);
427 422
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index e3132a2ce34d..2bc0dc985214 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -3674,15 +3674,24 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
3674 drm_mode_connector_attach_encoder(connector, encoder); 3674 drm_mode_connector_attach_encoder(connector, encoder);
3675 3675
3676 if (dcbe->type == DCB_OUTPUT_DP) { 3676 if (dcbe->type == DCB_OUTPUT_DP) {
3677 struct nv50_disp *disp = nv50_disp(encoder->dev);
3677 struct nvkm_i2c_aux *aux = 3678 struct nvkm_i2c_aux *aux =
3678 nvkm_i2c_aux_find(i2c, dcbe->i2c_index); 3679 nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
3679 if (aux) { 3680 if (aux) {
3680 nv_encoder->i2c = &nv_connector->aux.ddc; 3681 if (disp->disp->oclass < GF110_DISP) {
3682 /* HW has no support for address-only
3683 * transactions, so we're required to
3684 * use custom I2C-over-AUX code.
3685 */
3686 nv_encoder->i2c = &aux->i2c;
3687 } else {
3688 nv_encoder->i2c = &nv_connector->aux.ddc;
3689 }
3681 nv_encoder->aux = aux; 3690 nv_encoder->aux = aux;
3682 } 3691 }
3683 3692
3684 /*TODO: Use DP Info Table to check for support. */ 3693 /*TODO: Use DP Info Table to check for support. */
3685 if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) { 3694 if (disp->disp->oclass >= GF110_DISP) {
3686 ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16, 3695 ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
3687 nv_connector->base.base.id, 3696 nv_connector->base.base.id,
3688 &nv_encoder->dp.mstm); 3697 &nv_encoder->dp.mstm);
@@ -3931,6 +3940,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
3931 3940
3932 NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name, 3941 NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
3933 asyh->clr.mask, asyh->set.mask); 3942 asyh->clr.mask, asyh->set.mask);
3943 if (crtc_state->active && !asyh->state.active)
3944 drm_crtc_vblank_off(crtc);
3934 3945
3935 if (asyh->clr.mask) { 3946 if (asyh->clr.mask) {
3936 nv50_head_flush_clr(head, asyh, atom->flush_disable); 3947 nv50_head_flush_clr(head, asyh, atom->flush_disable);
@@ -4016,11 +4027,13 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
4016 nv50_head_flush_set(head, asyh); 4027 nv50_head_flush_set(head, asyh);
4017 interlock_core = 1; 4028 interlock_core = 1;
4018 } 4029 }
4019 }
4020 4030
4021 for_each_crtc_in_state(state, crtc, crtc_state, i) { 4031 if (asyh->state.active) {
4022 if (crtc->state->event) 4032 if (!crtc_state->active)
4023 drm_crtc_vblank_get(crtc); 4033 drm_crtc_vblank_on(crtc);
4034 if (asyh->state.event)
4035 drm_crtc_vblank_get(crtc);
4036 }
4024 } 4037 }
4025 4038
4026 /* Update plane(s). */ 4039 /* Update plane(s). */
@@ -4067,12 +4080,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
4067 if (crtc->state->event) { 4080 if (crtc->state->event) {
4068 unsigned long flags; 4081 unsigned long flags;
4069 /* Get correct count/ts if racing with vblank irq */ 4082 /* Get correct count/ts if racing with vblank irq */
4070 drm_accurate_vblank_count(crtc); 4083 if (crtc->state->active)
4084 drm_accurate_vblank_count(crtc);
4071 spin_lock_irqsave(&crtc->dev->event_lock, flags); 4085 spin_lock_irqsave(&crtc->dev->event_lock, flags);
4072 drm_crtc_send_vblank_event(crtc, crtc->state->event); 4086 drm_crtc_send_vblank_event(crtc, crtc->state->event);
4073 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 4087 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4074 crtc->state->event = NULL; 4088 crtc->state->event = NULL;
4075 drm_crtc_vblank_put(crtc); 4089 if (crtc->state->active)
4090 drm_crtc_vblank_put(crtc);
4076 } 4091 }
4077 } 4092 }
4078 4093
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index a24312fb0228..a1e8bf48b778 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -22,6 +22,7 @@ struct nvkm_ior {
22 unsigned proto_evo:4; 22 unsigned proto_evo:4;
23 enum nvkm_ior_proto { 23 enum nvkm_ior_proto {
24 CRT, 24 CRT,
25 TV,
25 TMDS, 26 TMDS,
26 LVDS, 27 LVDS,
27 DP, 28 DP,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index 19c635663399..6ea19466f436 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -22,7 +22,7 @@ struct nv50_disp {
22 u8 type[3]; 22 u8 type[3];
23 } pior; 23 } pior;
24 24
25 struct nv50_disp_chan *chan[17]; 25 struct nv50_disp_chan *chan[21];
26}; 26};
27 27
28void nv50_disp_super_1(struct nv50_disp *); 28void nv50_disp_super_1(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index 85aff85394ac..be9e7f8c3b23 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -62,6 +62,7 @@ nvkm_outp_xlat(struct nvkm_outp *outp, enum nvkm_ior_type *type)
62 case 0: 62 case 0:
63 switch (outp->info.type) { 63 switch (outp->info.type) {
64 case DCB_OUTPUT_ANALOG: *type = DAC; return CRT; 64 case DCB_OUTPUT_ANALOG: *type = DAC; return CRT;
65 case DCB_OUTPUT_TV : *type = DAC; return TV;
65 case DCB_OUTPUT_TMDS : *type = SOR; return TMDS; 66 case DCB_OUTPUT_TMDS : *type = SOR; return TMDS;
66 case DCB_OUTPUT_LVDS : *type = SOR; return LVDS; 67 case DCB_OUTPUT_LVDS : *type = SOR; return LVDS;
67 case DCB_OUTPUT_DP : *type = SOR; return DP; 68 case DCB_OUTPUT_DP : *type = SOR; return DP;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index c794b2c2d21e..6d8f21290aa2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -129,7 +129,7 @@ gf100_bar_init(struct nvkm_bar *base)
129 129
130 if (bar->bar[0].mem) { 130 if (bar->bar[0].mem) {
131 addr = nvkm_memory_addr(bar->bar[0].mem) >> 12; 131 addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
132 nvkm_wr32(device, 0x001714, 0xc0000000 | addr); 132 nvkm_wr32(device, 0x001714, 0x80000000 | addr);
133 } 133 }
134 134
135 return 0; 135 return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index 48f01e40b8fc..b768e66a472b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -25,6 +25,7 @@ nvkm-y += nvkm/subdev/i2c/bit.o
25 25
26nvkm-y += nvkm/subdev/i2c/aux.o 26nvkm-y += nvkm/subdev/i2c/aux.o
27nvkm-y += nvkm/subdev/i2c/auxg94.o 27nvkm-y += nvkm/subdev/i2c/auxg94.o
28nvkm-y += nvkm/subdev/i2c/auxgf119.o
28nvkm-y += nvkm/subdev/i2c/auxgm200.o 29nvkm-y += nvkm/subdev/i2c/auxgm200.o
29 30
30nvkm-y += nvkm/subdev/i2c/anx9805.o 31nvkm-y += nvkm/subdev/i2c/anx9805.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index d172e42dd228..4c1f547da463 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -117,6 +117,10 @@ int
117nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type, 117nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
118 u32 addr, u8 *data, u8 *size) 118 u32 addr, u8 *data, u8 *size)
119{ 119{
120 if (!*size && !aux->func->address_only) {
121 AUX_ERR(aux, "address-only transaction dropped");
122 return -ENOSYS;
123 }
120 return aux->func->xfer(aux, retry, type, addr, data, size); 124 return aux->func->xfer(aux, retry, type, addr, data, size);
121} 125}
122 126
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 27a4a39c87f0..9587ab456d9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -3,6 +3,7 @@
3#include "pad.h" 3#include "pad.h"
4 4
5struct nvkm_i2c_aux_func { 5struct nvkm_i2c_aux_func {
6 bool address_only;
6 int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type, 7 int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
7 u32 addr, u8 *data, u8 *size); 8 u32 addr, u8 *data, u8 *size);
8 int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw, 9 int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw,
@@ -17,7 +18,12 @@ void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
17int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type, 18int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
18 u32 addr, u8 *data, u8 *size); 19 u32 addr, u8 *data, u8 *size);
19 20
21int g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
22 int, u8, struct nvkm_i2c_aux **);
23
20int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); 24int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
25int g94_i2c_aux_xfer(struct nvkm_i2c_aux *, bool, u8, u32, u8 *, u8 *);
26int gf119_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
21int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); 27int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
22 28
23#define AUX_MSG(b,l,f,a...) do { \ 29#define AUX_MSG(b,l,f,a...) do { \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index ab8cb196c34e..c8ab1b5741a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -72,7 +72,7 @@ g94_i2c_aux_init(struct g94_i2c_aux *aux)
72 return 0; 72 return 0;
73} 73}
74 74
75static int 75int
76g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, 76g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
77 u8 type, u32 addr, u8 *data, u8 *size) 77 u8 type, u32 addr, u8 *data, u8 *size)
78{ 78{
@@ -105,9 +105,9 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
105 } 105 }
106 106
107 ctrl = nvkm_rd32(device, 0x00e4e4 + base); 107 ctrl = nvkm_rd32(device, 0x00e4e4 + base);
108 ctrl &= ~0x0001f0ff; 108 ctrl &= ~0x0001f1ff;
109 ctrl |= type << 12; 109 ctrl |= type << 12;
110 ctrl |= *size - 1; 110 ctrl |= (*size ? (*size - 1) : 0x00000100);
111 nvkm_wr32(device, 0x00e4e0 + base, addr); 111 nvkm_wr32(device, 0x00e4e0 + base, addr);
112 112
113 /* (maybe) retry transaction a number of times on failure... */ 113 /* (maybe) retry transaction a number of times on failure... */
@@ -160,14 +160,10 @@ out:
160 return ret < 0 ? ret : (stat & 0x000f0000) >> 16; 160 return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
161} 161}
162 162
163static const struct nvkm_i2c_aux_func
164g94_i2c_aux_func = {
165 .xfer = g94_i2c_aux_xfer,
166};
167
168int 163int
169g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, 164g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *func,
170 struct nvkm_i2c_aux **paux) 165 struct nvkm_i2c_pad *pad, int index, u8 drive,
166 struct nvkm_i2c_aux **paux)
171{ 167{
172 struct g94_i2c_aux *aux; 168 struct g94_i2c_aux *aux;
173 169
@@ -175,8 +171,20 @@ g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
175 return -ENOMEM; 171 return -ENOMEM;
176 *paux = &aux->base; 172 *paux = &aux->base;
177 173
178 nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base); 174 nvkm_i2c_aux_ctor(func, pad, index, &aux->base);
179 aux->ch = drive; 175 aux->ch = drive;
180 aux->base.intr = 1 << aux->ch; 176 aux->base.intr = 1 << aux->ch;
181 return 0; 177 return 0;
182} 178}
179
180static const struct nvkm_i2c_aux_func
181g94_i2c_aux = {
182 .xfer = g94_i2c_aux_xfer,
183};
184
185int
186g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
187 struct nvkm_i2c_aux **paux)
188{
189 return g94_i2c_aux_new_(&g94_i2c_aux, pad, index, drive, paux);
190}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
new file mode 100644
index 000000000000..dab40cd8fe3a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
@@ -0,0 +1,35 @@
1/*
2 * Copyright 2017 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "aux.h"
23
24static const struct nvkm_i2c_aux_func
25gf119_i2c_aux = {
26 .address_only = true,
27 .xfer = g94_i2c_aux_xfer,
28};
29
30int
31gf119_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
32 struct nvkm_i2c_aux **paux)
33{
34 return g94_i2c_aux_new_(&gf119_i2c_aux, pad, index, drive, paux);
35}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index ee091fa79628..7ef60895f43a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -105,9 +105,9 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
105 } 105 }
106 106
107 ctrl = nvkm_rd32(device, 0x00d954 + base); 107 ctrl = nvkm_rd32(device, 0x00d954 + base);
108 ctrl &= ~0x0001f0ff; 108 ctrl &= ~0x0001f1ff;
109 ctrl |= type << 12; 109 ctrl |= type << 12;
110 ctrl |= *size - 1; 110 ctrl |= (*size ? (*size - 1) : 0x00000100);
111 nvkm_wr32(device, 0x00d950 + base, addr); 111 nvkm_wr32(device, 0x00d950 + base, addr);
112 112
113 /* (maybe) retry transaction a number of times on failure... */ 113 /* (maybe) retry transaction a number of times on failure... */
@@ -162,6 +162,7 @@ out:
162 162
163static const struct nvkm_i2c_aux_func 163static const struct nvkm_i2c_aux_func
164gm200_i2c_aux_func = { 164gm200_i2c_aux_func = {
165 .address_only = true,
165 .xfer = gm200_i2c_aux_xfer, 166 .xfer = gm200_i2c_aux_xfer,
166}; 167};
167 168
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
index d53212f1aa52..3bc4d0310076 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
@@ -28,7 +28,7 @@
28static const struct nvkm_i2c_pad_func 28static const struct nvkm_i2c_pad_func
29gf119_i2c_pad_s_func = { 29gf119_i2c_pad_s_func = {
30 .bus_new_4 = gf119_i2c_bus_new, 30 .bus_new_4 = gf119_i2c_bus_new,
31 .aux_new_6 = g94_i2c_aux_new, 31 .aux_new_6 = gf119_i2c_aux_new,
32 .mode = g94_i2c_pad_mode, 32 .mode = g94_i2c_pad_mode,
33}; 33};
34 34
@@ -41,7 +41,7 @@ gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
41static const struct nvkm_i2c_pad_func 41static const struct nvkm_i2c_pad_func
42gf119_i2c_pad_x_func = { 42gf119_i2c_pad_x_func = {
43 .bus_new_4 = gf119_i2c_bus_new, 43 .bus_new_4 = gf119_i2c_bus_new,
44 .aux_new_6 = g94_i2c_aux_new, 44 .aux_new_6 = gf119_i2c_aux_new,
45}; 45};
46 46
47int 47int
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 50c41c0a50ef..dcc539ba85d6 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -5,6 +5,10 @@ config DRM_ROCKCHIP
5 select DRM_KMS_HELPER 5 select DRM_KMS_HELPER
6 select DRM_PANEL 6 select DRM_PANEL
7 select VIDEOMODE_HELPERS 7 select VIDEOMODE_HELPERS
8 select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
9 select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
10 select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
11 select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
8 help 12 help
9 Choose this option if you have a Rockchip soc chipset. 13 Choose this option if you have a Rockchip soc chipset.
10 This driver provides kernel mode setting and buffer 14 This driver provides kernel mode setting and buffer
@@ -12,10 +16,10 @@ config DRM_ROCKCHIP
12 2D or 3D acceleration; acceleration is performed by other 16 2D or 3D acceleration; acceleration is performed by other
13 IP found on the SoC. 17 IP found on the SoC.
14 18
19if DRM_ROCKCHIP
20
15config ROCKCHIP_ANALOGIX_DP 21config ROCKCHIP_ANALOGIX_DP
16 bool "Rockchip specific extensions for Analogix DP driver" 22 bool "Rockchip specific extensions for Analogix DP driver"
17 depends on DRM_ROCKCHIP
18 select DRM_ANALOGIX_DP
19 help 23 help
20 This selects support for Rockchip SoC specific extensions 24 This selects support for Rockchip SoC specific extensions
21 for the Analogix Core DP driver. If you want to enable DP 25 for the Analogix Core DP driver. If you want to enable DP
@@ -23,9 +27,7 @@ config ROCKCHIP_ANALOGIX_DP
23 27
24config ROCKCHIP_CDN_DP 28config ROCKCHIP_CDN_DP
25 bool "Rockchip cdn DP" 29 bool "Rockchip cdn DP"
26 depends on DRM_ROCKCHIP 30 depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
27 depends on EXTCON
28 select SND_SOC_HDMI_CODEC if SND_SOC
29 help 31 help
30 This selects support for Rockchip SoC specific extensions 32 This selects support for Rockchip SoC specific extensions
31 for the cdn DP driver. If you want to enable Dp on 33 for the cdn DP driver. If you want to enable Dp on
@@ -34,8 +36,6 @@ config ROCKCHIP_CDN_DP
34 36
35config ROCKCHIP_DW_HDMI 37config ROCKCHIP_DW_HDMI
36 bool "Rockchip specific extensions for Synopsys DW HDMI" 38 bool "Rockchip specific extensions for Synopsys DW HDMI"
37 depends on DRM_ROCKCHIP
38 select DRM_DW_HDMI
39 help 39 help
40 This selects support for Rockchip SoC specific extensions 40 This selects support for Rockchip SoC specific extensions
41 for the Synopsys DesignWare HDMI driver. If you want to 41 for the Synopsys DesignWare HDMI driver. If you want to
@@ -44,8 +44,6 @@ config ROCKCHIP_DW_HDMI
44 44
45config ROCKCHIP_DW_MIPI_DSI 45config ROCKCHIP_DW_MIPI_DSI
46 bool "Rockchip specific extensions for Synopsys DW MIPI DSI" 46 bool "Rockchip specific extensions for Synopsys DW MIPI DSI"
47 depends on DRM_ROCKCHIP
48 select DRM_MIPI_DSI
49 help 47 help
50 This selects support for Rockchip SoC specific extensions 48 This selects support for Rockchip SoC specific extensions
51 for the Synopsys DesignWare HDMI driver. If you want to 49 for the Synopsys DesignWare HDMI driver. If you want to
@@ -54,8 +52,9 @@ config ROCKCHIP_DW_MIPI_DSI
54 52
55config ROCKCHIP_INNO_HDMI 53config ROCKCHIP_INNO_HDMI
56 bool "Rockchip specific extensions for Innosilicon HDMI" 54 bool "Rockchip specific extensions for Innosilicon HDMI"
57 depends on DRM_ROCKCHIP
58 help 55 help
59 This selects support for Rockchip SoC specific extensions 56 This selects support for Rockchip SoC specific extensions
60 for the Innosilicon HDMI driver. If you want to enable 57 for the Innosilicon HDMI driver. If you want to enable
61 HDMI on RK3036 based SoC, you should select this option. 58 HDMI on RK3036 based SoC, you should select this option.
59
60endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 35bf781e418e..c7056322211c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -30,49 +30,49 @@
30#include <drm/ttm/ttm_placement.h> 30#include <drm/ttm/ttm_placement.h>
31#include <drm/ttm/ttm_page_alloc.h> 31#include <drm/ttm/ttm_page_alloc.h>
32 32
33static struct ttm_place vram_placement_flags = { 33static const struct ttm_place vram_placement_flags = {
34 .fpfn = 0, 34 .fpfn = 0,
35 .lpfn = 0, 35 .lpfn = 0,
36 .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED 36 .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
37}; 37};
38 38
39static struct ttm_place vram_ne_placement_flags = { 39static const struct ttm_place vram_ne_placement_flags = {
40 .fpfn = 0, 40 .fpfn = 0,
41 .lpfn = 0, 41 .lpfn = 0,
42 .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT 42 .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
43}; 43};
44 44
45static struct ttm_place sys_placement_flags = { 45static const struct ttm_place sys_placement_flags = {
46 .fpfn = 0, 46 .fpfn = 0,
47 .lpfn = 0, 47 .lpfn = 0,
48 .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED 48 .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
49}; 49};
50 50
51static struct ttm_place sys_ne_placement_flags = { 51static const struct ttm_place sys_ne_placement_flags = {
52 .fpfn = 0, 52 .fpfn = 0,
53 .lpfn = 0, 53 .lpfn = 0,
54 .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT 54 .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
55}; 55};
56 56
57static struct ttm_place gmr_placement_flags = { 57static const struct ttm_place gmr_placement_flags = {
58 .fpfn = 0, 58 .fpfn = 0,
59 .lpfn = 0, 59 .lpfn = 0,
60 .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED 60 .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
61}; 61};
62 62
63static struct ttm_place gmr_ne_placement_flags = { 63static const struct ttm_place gmr_ne_placement_flags = {
64 .fpfn = 0, 64 .fpfn = 0,
65 .lpfn = 0, 65 .lpfn = 0,
66 .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT 66 .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
67}; 67};
68 68
69static struct ttm_place mob_placement_flags = { 69static const struct ttm_place mob_placement_flags = {
70 .fpfn = 0, 70 .fpfn = 0,
71 .lpfn = 0, 71 .lpfn = 0,
72 .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED 72 .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
73}; 73};
74 74
75static struct ttm_place mob_ne_placement_flags = { 75static const struct ttm_place mob_ne_placement_flags = {
76 .fpfn = 0, 76 .fpfn = 0,
77 .lpfn = 0, 77 .lpfn = 0,
78 .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT 78 .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
@@ -85,7 +85,7 @@ struct ttm_placement vmw_vram_placement = {
85 .busy_placement = &vram_placement_flags 85 .busy_placement = &vram_placement_flags
86}; 86};
87 87
88static struct ttm_place vram_gmr_placement_flags[] = { 88static const struct ttm_place vram_gmr_placement_flags[] = {
89 { 89 {
90 .fpfn = 0, 90 .fpfn = 0,
91 .lpfn = 0, 91 .lpfn = 0,
@@ -97,7 +97,7 @@ static struct ttm_place vram_gmr_placement_flags[] = {
97 } 97 }
98}; 98};
99 99
100static struct ttm_place gmr_vram_placement_flags[] = { 100static const struct ttm_place gmr_vram_placement_flags[] = {
101 { 101 {
102 .fpfn = 0, 102 .fpfn = 0,
103 .lpfn = 0, 103 .lpfn = 0,
@@ -116,7 +116,7 @@ struct ttm_placement vmw_vram_gmr_placement = {
116 .busy_placement = &gmr_placement_flags 116 .busy_placement = &gmr_placement_flags
117}; 117};
118 118
119static struct ttm_place vram_gmr_ne_placement_flags[] = { 119static const struct ttm_place vram_gmr_ne_placement_flags[] = {
120 { 120 {
121 .fpfn = 0, 121 .fpfn = 0,
122 .lpfn = 0, 122 .lpfn = 0,
@@ -165,7 +165,7 @@ struct ttm_placement vmw_sys_ne_placement = {
165 .busy_placement = &sys_ne_placement_flags 165 .busy_placement = &sys_ne_placement_flags
166}; 166};
167 167
168static struct ttm_place evictable_placement_flags[] = { 168static const struct ttm_place evictable_placement_flags[] = {
169 { 169 {
170 .fpfn = 0, 170 .fpfn = 0,
171 .lpfn = 0, 171 .lpfn = 0,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 99a7f4ab7d97..86178796de6c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -779,8 +779,8 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
779 if (ret) 779 if (ret)
780 return ret; 780 return ret;
781 781
782 header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL, 782 header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
783 &header->handle); 783 &header->handle);
784 if (!header->cb_header) { 784 if (!header->cb_header) {
785 ret = -ENOMEM; 785 ret = -ENOMEM;
786 goto out_no_cb_header; 786 goto out_no_cb_header;
@@ -790,7 +790,6 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
790 cb_hdr = header->cb_header; 790 cb_hdr = header->cb_header;
791 offset = header->node.start << PAGE_SHIFT; 791 offset = header->node.start << PAGE_SHIFT;
792 header->cmd = man->map + offset; 792 header->cmd = man->map + offset;
793 memset(cb_hdr, 0, sizeof(*cb_hdr));
794 if (man->using_mob) { 793 if (man->using_mob) {
795 cb_hdr->flags = SVGA_CB_FLAG_MOB; 794 cb_hdr->flags = SVGA_CB_FLAG_MOB;
796 cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start; 795 cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
@@ -827,8 +826,8 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
827 if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE)) 826 if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
828 return -ENOMEM; 827 return -ENOMEM;
829 828
830 dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL, 829 dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
831 &header->handle); 830 &header->handle);
832 if (!dheader) 831 if (!dheader)
833 return -ENOMEM; 832 return -ENOMEM;
834 833
@@ -837,7 +836,6 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
837 cb_hdr = &dheader->cb_header; 836 cb_hdr = &dheader->cb_header;
838 header->cb_header = cb_hdr; 837 header->cb_header = cb_hdr;
839 header->cmd = dheader->cmd; 838 header->cmd = dheader->cmd;
840 memset(dheader, 0, sizeof(*dheader));
841 cb_hdr->status = SVGA_CB_STATUS_NONE; 839 cb_hdr->status = SVGA_CB_STATUS_NONE;
842 cb_hdr->flags = SVGA_CB_FLAG_NONE; 840 cb_hdr->flags = SVGA_CB_FLAG_NONE;
843 cb_hdr->ptr.pa = (u64)header->handle + 841 cb_hdr->ptr.pa = (u64)header->handle +
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 1f013d45c9e9..36c7b6c839c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -205,7 +205,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
205 int ret; 205 int ret;
206 206
207 cres = kzalloc(sizeof(*cres), GFP_KERNEL); 207 cres = kzalloc(sizeof(*cres), GFP_KERNEL);
208 if (unlikely(cres == NULL)) 208 if (unlikely(!cres))
209 return -ENOMEM; 209 return -ENOMEM;
210 210
211 cres->hash.key = user_key | (res_type << 24); 211 cres->hash.key = user_key | (res_type << 24);
@@ -291,7 +291,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
291 int ret; 291 int ret;
292 292
293 man = kzalloc(sizeof(*man), GFP_KERNEL); 293 man = kzalloc(sizeof(*man), GFP_KERNEL);
294 if (man == NULL) 294 if (!man)
295 return ERR_PTR(-ENOMEM); 295 return ERR_PTR(-ENOMEM);
296 296
297 man->dev_priv = dev_priv; 297 man->dev_priv = dev_priv;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index bcc6d4136c87..4212b3e673bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -210,8 +210,8 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
210 for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { 210 for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
211 uctx->cotables[i] = vmw_cotable_alloc(dev_priv, 211 uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
212 &uctx->res, i); 212 &uctx->res, i);
213 if (unlikely(uctx->cotables[i] == NULL)) { 213 if (unlikely(IS_ERR(uctx->cotables[i]))) {
214 ret = -ENOMEM; 214 ret = PTR_ERR(uctx->cotables[i]);
215 goto out_cotables; 215 goto out_cotables;
216 } 216 }
217 } 217 }
@@ -777,7 +777,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
777 } 777 }
778 778
779 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 779 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
780 if (unlikely(ctx == NULL)) { 780 if (unlikely(!ctx)) {
781 ttm_mem_global_free(vmw_mem_glob(dev_priv), 781 ttm_mem_global_free(vmw_mem_glob(dev_priv),
782 vmw_user_context_size); 782 vmw_user_context_size);
783 ret = -ENOMEM; 783 ret = -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 6c026d75c180..d87861bbe971 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -584,7 +584,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
584 return ERR_PTR(ret); 584 return ERR_PTR(ret);
585 585
586 vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL); 586 vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
587 if (unlikely(vcotbl == NULL)) { 587 if (unlikely(!vcotbl)) {
588 ret = -ENOMEM; 588 ret = -ENOMEM;
589 goto out_no_alloc; 589 goto out_no_alloc;
590 } 590 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4a641555b960..4436d53ae16c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -227,7 +227,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
227 DRM_AUTH | DRM_RENDER_ALLOW), 227 DRM_AUTH | DRM_RENDER_ALLOW),
228}; 228};
229 229
230static struct pci_device_id vmw_pci_id_list[] = { 230static const struct pci_device_id vmw_pci_id_list[] = {
231 {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, 231 {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
232 {0, 0, 0} 232 {0, 0, 0}
233}; 233};
@@ -630,7 +630,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
630 char host_log[100] = {0}; 630 char host_log[100] = {0};
631 631
632 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 632 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
633 if (unlikely(dev_priv == NULL)) { 633 if (unlikely(!dev_priv)) {
634 DRM_ERROR("Failed allocating a device private struct.\n"); 634 DRM_ERROR("Failed allocating a device private struct.\n");
635 return -ENOMEM; 635 return -ENOMEM;
636 } 636 }
@@ -1035,7 +1035,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1035 int ret = -ENOMEM; 1035 int ret = -ENOMEM;
1036 1036
1037 vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); 1037 vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
1038 if (unlikely(vmw_fp == NULL)) 1038 if (unlikely(!vmw_fp))
1039 return ret; 1039 return ret;
1040 1040
1041 vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); 1041 vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
@@ -1196,7 +1196,7 @@ static int vmw_master_create(struct drm_device *dev,
1196 struct vmw_master *vmaster; 1196 struct vmw_master *vmaster;
1197 1197
1198 vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); 1198 vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
1199 if (unlikely(vmaster == NULL)) 1199 if (unlikely(!vmaster))
1200 return -ENOMEM; 1200 return -ENOMEM;
1201 1201
1202 vmw_master_init(vmaster); 1202 vmw_master_init(vmaster);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c7b53d987f06..2cfb3c93f42a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -264,7 +264,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
264 } 264 }
265 265
266 node = kzalloc(sizeof(*node), GFP_KERNEL); 266 node = kzalloc(sizeof(*node), GFP_KERNEL);
267 if (unlikely(node == NULL)) { 267 if (unlikely(!node)) {
268 DRM_ERROR("Failed to allocate a resource validation " 268 DRM_ERROR("Failed to allocate a resource validation "
269 "entry.\n"); 269 "entry.\n");
270 return -ENOMEM; 270 return -ENOMEM;
@@ -452,7 +452,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
452 struct vmw_resource_relocation *rel; 452 struct vmw_resource_relocation *rel;
453 453
454 rel = kmalloc(sizeof(*rel), GFP_KERNEL); 454 rel = kmalloc(sizeof(*rel), GFP_KERNEL);
455 if (unlikely(rel == NULL)) { 455 if (unlikely(!rel)) {
456 DRM_ERROR("Failed to allocate a resource relocation.\n"); 456 DRM_ERROR("Failed to allocate a resource relocation.\n");
457 return -ENOMEM; 457 return -ENOMEM;
458 } 458 }
@@ -519,7 +519,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv,
519 struct vmw_sw_context *sw_context, 519 struct vmw_sw_context *sw_context,
520 SVGA3dCmdHeader *header) 520 SVGA3dCmdHeader *header)
521{ 521{
522 return capable(CAP_SYS_ADMIN) ? : -EINVAL; 522 return -EINVAL;
523} 523}
524 524
525static int vmw_cmd_ok(struct vmw_private *dev_priv, 525static int vmw_cmd_ok(struct vmw_private *dev_priv,
@@ -2584,7 +2584,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2584 2584
2585/** 2585/**
2586 * vmw_cmd_dx_ia_set_vertex_buffers - Validate an 2586 * vmw_cmd_dx_ia_set_vertex_buffers - Validate an
2587 * SVGA_3D_CMD_DX_IA_SET_VERTEX_BUFFERS command. 2587 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
2588 * 2588 *
2589 * @dev_priv: Pointer to a device private struct. 2589 * @dev_priv: Pointer to a device private struct.
2590 * @sw_context: The software context being used for this batch. 2590 * @sw_context: The software context being used for this batch.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 6b2708b4eafe..b8bc5bc7de7e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -284,7 +284,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
284{ 284{
285 struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL); 285 struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
286 286
287 if (unlikely(fman == NULL)) 287 if (unlikely(!fman))
288 return NULL; 288 return NULL;
289 289
290 fman->dev_priv = dev_priv; 290 fman->dev_priv = dev_priv;
@@ -541,7 +541,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
541 int ret; 541 int ret;
542 542
543 fence = kzalloc(sizeof(*fence), GFP_KERNEL); 543 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
544 if (unlikely(fence == NULL)) 544 if (unlikely(!fence))
545 return -ENOMEM; 545 return -ENOMEM;
546 546
547 ret = vmw_fence_obj_init(fman, fence, seqno, 547 ret = vmw_fence_obj_init(fman, fence, seqno,
@@ -606,7 +606,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
606 return ret; 606 return ret;
607 607
608 ufence = kzalloc(sizeof(*ufence), GFP_KERNEL); 608 ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
609 if (unlikely(ufence == NULL)) { 609 if (unlikely(!ufence)) {
610 ret = -ENOMEM; 610 ret = -ENOMEM;
611 goto out_no_object; 611 goto out_no_object;
612 } 612 }
@@ -966,7 +966,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
966 struct vmw_fence_manager *fman = fman_from_fence(fence); 966 struct vmw_fence_manager *fman = fman_from_fence(fence);
967 967
968 eaction = kzalloc(sizeof(*eaction), GFP_KERNEL); 968 eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
969 if (unlikely(eaction == NULL)) 969 if (unlikely(!eaction))
970 return -ENOMEM; 970 return -ENOMEM;
971 971
972 eaction->event = event; 972 eaction->event = event;
@@ -1002,7 +1002,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
1002 int ret; 1002 int ret;
1003 1003
1004 event = kzalloc(sizeof(*event), GFP_KERNEL); 1004 event = kzalloc(sizeof(*event), GFP_KERNEL);
1005 if (unlikely(event == NULL)) { 1005 if (unlikely(!event)) {
1006 DRM_ERROR("Failed to allocate an event.\n"); 1006 DRM_ERROR("Failed to allocate an event.\n");
1007 ret = -ENOMEM; 1007 ret = -ENOMEM;
1008 goto out_no_space; 1008 goto out_no_space;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index c1900f4390a4..d2b03d4a3c86 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -121,7 +121,7 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
121 struct vmwgfx_gmrid_man *gman = 121 struct vmwgfx_gmrid_man *gman =
122 kzalloc(sizeof(*gman), GFP_KERNEL); 122 kzalloc(sizeof(*gman), GFP_KERNEL);
123 123
124 if (unlikely(gman == NULL)) 124 if (unlikely(!gman))
125 return -ENOMEM; 125 return -ENOMEM;
126 126
127 spin_lock_init(&gman->lock); 127 spin_lock_init(&gman->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 3d94ea67a825..61e06f0e8cd3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
384 384
385 hotspot_x = du->hotspot_x; 385 hotspot_x = du->hotspot_x;
386 hotspot_y = du->hotspot_y; 386 hotspot_y = du->hotspot_y;
387
388 if (plane->fb) {
389 hotspot_x += plane->fb->hot_x;
390 hotspot_y += plane->fb->hot_y;
391 }
392
387 du->cursor_surface = vps->surf; 393 du->cursor_surface = vps->surf;
388 du->cursor_dmabuf = vps->dmabuf; 394 du->cursor_dmabuf = vps->dmabuf;
389 395
@@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
411 vmw_cursor_update_position(dev_priv, true, 417 vmw_cursor_update_position(dev_priv, true,
412 du->cursor_x + hotspot_x, 418 du->cursor_x + hotspot_x,
413 du->cursor_y + hotspot_y); 419 du->cursor_y + hotspot_y);
420
421 du->core_hotspot_x = hotspot_x - du->hotspot_x;
422 du->core_hotspot_y = hotspot_y - du->hotspot_y;
414 } else { 423 } else {
415 DRM_ERROR("Failed to update cursor image\n"); 424 DRM_ERROR("Failed to update cursor image\n");
416 } 425 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 941bcfd131ff..b17f08fc50d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -320,14 +320,14 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
320 320
321 if (dev_priv->has_dx) { 321 if (dev_priv->has_dx) {
322 *otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL); 322 *otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
323 if (*otables == NULL) 323 if (!(*otables))
324 return -ENOMEM; 324 return -ENOMEM;
325 325
326 dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables); 326 dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
327 } else { 327 } else {
328 *otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables), 328 *otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
329 GFP_KERNEL); 329 GFP_KERNEL);
330 if (*otables == NULL) 330 if (!(*otables))
331 return -ENOMEM; 331 return -ENOMEM;
332 332
333 dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables); 333 dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
@@ -407,7 +407,7 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages)
407{ 407{
408 struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL); 408 struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);
409 409
410 if (unlikely(mob == NULL)) 410 if (unlikely(!mob))
411 return NULL; 411 return NULL;
412 412
413 mob->num_pages = vmw_mob_calculate_pt_pages(data_pages); 413 mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 6063c9636d4a..97000996b8dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -244,7 +244,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
244 244
245 reply_len = ebx; 245 reply_len = ebx;
246 reply = kzalloc(reply_len + 1, GFP_KERNEL); 246 reply = kzalloc(reply_len + 1, GFP_KERNEL);
247 if (reply == NULL) { 247 if (!reply) {
248 DRM_ERROR("Cannot allocate memory for reply\n"); 248 DRM_ERROR("Cannot allocate memory for reply\n");
249 return -ENOMEM; 249 return -ENOMEM;
250 } 250 }
@@ -340,7 +340,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
340 340
341 msg_len = strlen(guest_info_param) + strlen("info-get ") + 1; 341 msg_len = strlen(guest_info_param) + strlen("info-get ") + 1;
342 msg = kzalloc(msg_len, GFP_KERNEL); 342 msg = kzalloc(msg_len, GFP_KERNEL);
343 if (msg == NULL) { 343 if (!msg) {
344 DRM_ERROR("Cannot allocate memory to get %s", guest_info_param); 344 DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
345 return -ENOMEM; 345 return -ENOMEM;
346 } 346 }
@@ -400,7 +400,7 @@ int vmw_host_log(const char *log)
400 400
401 msg_len = strlen(log) + strlen("log ") + 1; 401 msg_len = strlen(log) + strlen("log ") + 1;
402 msg = kzalloc(msg_len, GFP_KERNEL); 402 msg = kzalloc(msg_len, GFP_KERNEL);
403 if (msg == NULL) { 403 if (!msg) {
404 DRM_ERROR("Cannot allocate memory for log message\n"); 404 DRM_ERROR("Cannot allocate memory for log message\n");
405 return -ENOMEM; 405 return -ENOMEM;
406 } 406 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 7d591f653dfa..a96f90f017d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -446,7 +446,7 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
446 int ret; 446 int ret;
447 447
448 user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); 448 user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
449 if (unlikely(user_bo == NULL)) { 449 if (unlikely(!user_bo)) {
450 DRM_ERROR("Failed to allocate a buffer.\n"); 450 DRM_ERROR("Failed to allocate a buffer.\n");
451 return -ENOMEM; 451 return -ENOMEM;
452 } 452 }
@@ -836,7 +836,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
836 } 836 }
837 837
838 backup = kzalloc(sizeof(*backup), GFP_KERNEL); 838 backup = kzalloc(sizeof(*backup), GFP_KERNEL);
839 if (unlikely(backup == NULL)) 839 if (unlikely(!backup))
840 return -ENOMEM; 840 return -ENOMEM;
841 841
842 ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size, 842 ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 68f135c5b0d8..9b832f136813 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -751,7 +751,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
751 } 751 }
752 752
753 ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); 753 ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
754 if (unlikely(ushader == NULL)) { 754 if (unlikely(!ushader)) {
755 ttm_mem_global_free(vmw_mem_glob(dev_priv), 755 ttm_mem_global_free(vmw_mem_glob(dev_priv),
756 vmw_user_shader_size); 756 vmw_user_shader_size);
757 ret = -ENOMEM; 757 ret = -ENOMEM;
@@ -821,7 +821,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
821 } 821 }
822 822
823 shader = kzalloc(sizeof(*shader), GFP_KERNEL); 823 shader = kzalloc(sizeof(*shader), GFP_KERNEL);
824 if (unlikely(shader == NULL)) { 824 if (unlikely(!shader)) {
825 ttm_mem_global_free(vmw_mem_glob(dev_priv), 825 ttm_mem_global_free(vmw_mem_glob(dev_priv),
826 vmw_shader_size); 826 vmw_shader_size);
827 ret = -ENOMEM; 827 ret = -ENOMEM;
@@ -981,7 +981,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
981 981
982 /* Allocate and pin a DMA buffer */ 982 /* Allocate and pin a DMA buffer */
983 buf = kzalloc(sizeof(*buf), GFP_KERNEL); 983 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
984 if (unlikely(buf == NULL)) 984 if (unlikely(!buf))
985 return -ENOMEM; 985 return -ENOMEM;
986 986
987 ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement, 987 ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 50be1f034f9e..5284e8d2f7ba 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1640,8 +1640,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
1640 * something arbitrarily large and we will reject any layout 1640 * something arbitrarily large and we will reject any layout
1641 * that doesn't fit prim_bb_mem later 1641 * that doesn't fit prim_bb_mem later
1642 */ 1642 */
1643 dev->mode_config.max_width = 16384; 1643 dev->mode_config.max_width = 8192;
1644 dev->mode_config.max_height = 16384; 1644 dev->mode_config.max_height = 8192;
1645 } 1645 }
1646 1646
1647 vmw_kms_create_implicit_placement_property(dev_priv, false); 1647 vmw_kms_create_implicit_placement_property(dev_priv, false);
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 2c58a390123a..778272514164 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -186,8 +186,13 @@ static int host1x_probe(struct platform_device *pdev)
186 return -ENOMEM; 186 return -ENOMEM;
187 187
188 err = iommu_attach_device(host->domain, &pdev->dev); 188 err = iommu_attach_device(host->domain, &pdev->dev);
189 if (err) 189 if (err == -ENODEV) {
190 iommu_domain_free(host->domain);
191 host->domain = NULL;
192 goto skip_iommu;
193 } else if (err) {
190 goto fail_free_domain; 194 goto fail_free_domain;
195 }
191 196
192 geometry = &host->domain->geometry; 197 geometry = &host->domain->geometry;
193 198
@@ -198,6 +203,7 @@ static int host1x_probe(struct platform_device *pdev)
198 host->iova_end = geometry->aperture_end; 203 host->iova_end = geometry->aperture_end;
199 } 204 }
200 205
206skip_iommu:
201 err = host1x_channel_list_init(&host->channel_list, 207 err = host1x_channel_list_init(&host->channel_list,
202 host->info->nb_channels); 208 host->info->nb_channels);
203 if (err) { 209 if (err) {
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 6fd01a692197..9017dcc14502 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2216,6 +2216,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
2216#if IS_ENABLED(CONFIG_HID_ORTEK) 2216#if IS_ENABLED(CONFIG_HID_ORTEK)
2217 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, 2217 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
2218 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, 2218 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
2219 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
2219 { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, 2220 { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
2220#endif 2221#endif
2221#if IS_ENABLED(CONFIG_HID_PANTHERLORD) 2222#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 3d911bfd91cf..c9ba4c6db74c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -824,6 +824,7 @@
824#define USB_VENDOR_ID_ORTEK 0x05a4 824#define USB_VENDOR_ID_ORTEK 0x05a4
825#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700 825#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700
826#define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 826#define USB_DEVICE_ID_ORTEK_WKB2000 0x2000
827#define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003
827 828
828#define USB_VENDOR_ID_PLANTRONICS 0x047f 829#define USB_VENDOR_ID_PLANTRONICS 0x047f
829 830
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
index 6620f15fec22..8783a064cdcf 100644
--- a/drivers/hid/hid-ortek.c
+++ b/drivers/hid/hid-ortek.c
@@ -5,6 +5,7 @@
5 * 5 *
6 * Ortek PKB-1700 6 * Ortek PKB-1700
7 * Ortek WKB-2000 7 * Ortek WKB-2000
8 * iHome IMAC-A210S
8 * Skycable wireless presenter 9 * Skycable wireless presenter
9 * 10 *
10 * Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com> 11 * Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
@@ -28,10 +29,10 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
28 unsigned int *rsize) 29 unsigned int *rsize)
29{ 30{
30 if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) { 31 if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) {
31 hid_info(hdev, "Fixing up logical minimum in report descriptor (Ortek)\n"); 32 hid_info(hdev, "Fixing up logical maximum in report descriptor (Ortek)\n");
32 rdesc[55] = 0x92; 33 rdesc[55] = 0x92;
33 } else if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) { 34 } else if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) {
34 hid_info(hdev, "Fixing up logical minimum in report descriptor (Skycable)\n"); 35 hid_info(hdev, "Fixing up logical maximum in report descriptor (Skycable)\n");
35 rdesc[53] = 0x65; 36 rdesc[53] = 0x65;
36 } 37 }
37 return rdesc; 38 return rdesc;
@@ -40,6 +41,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
40static const struct hid_device_id ortek_devices[] = { 41static const struct hid_device_id ortek_devices[] = {
41 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, 42 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
42 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, 43 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
44 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
43 { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, 45 { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
44 { } 46 { }
45}; 47};
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 76013eb5cb7f..c008847e0b20 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -680,18 +680,21 @@ static int usbhid_open(struct hid_device *hid)
680 struct usbhid_device *usbhid = hid->driver_data; 680 struct usbhid_device *usbhid = hid->driver_data;
681 int res; 681 int res;
682 682
683 set_bit(HID_OPENED, &usbhid->iofl);
684
683 if (hid->quirks & HID_QUIRK_ALWAYS_POLL) 685 if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
684 return 0; 686 return 0;
685 687
686 res = usb_autopm_get_interface(usbhid->intf); 688 res = usb_autopm_get_interface(usbhid->intf);
687 /* the device must be awake to reliably request remote wakeup */ 689 /* the device must be awake to reliably request remote wakeup */
688 if (res < 0) 690 if (res < 0) {
691 clear_bit(HID_OPENED, &usbhid->iofl);
689 return -EIO; 692 return -EIO;
693 }
690 694
691 usbhid->intf->needs_remote_wakeup = 1; 695 usbhid->intf->needs_remote_wakeup = 1;
692 696
693 set_bit(HID_RESUME_RUNNING, &usbhid->iofl); 697 set_bit(HID_RESUME_RUNNING, &usbhid->iofl);
694 set_bit(HID_OPENED, &usbhid->iofl);
695 set_bit(HID_IN_POLLING, &usbhid->iofl); 698 set_bit(HID_IN_POLLING, &usbhid->iofl);
696 699
697 res = hid_start_in(hid); 700 res = hid_start_in(hid);
@@ -727,19 +730,20 @@ static void usbhid_close(struct hid_device *hid)
727{ 730{
728 struct usbhid_device *usbhid = hid->driver_data; 731 struct usbhid_device *usbhid = hid->driver_data;
729 732
730 if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
731 return;
732
733 /* 733 /*
734 * Make sure we don't restart data acquisition due to 734 * Make sure we don't restart data acquisition due to
735 * a resumption we no longer care about by avoiding racing 735 * a resumption we no longer care about by avoiding racing
736 * with hid_start_in(). 736 * with hid_start_in().
737 */ 737 */
738 spin_lock_irq(&usbhid->lock); 738 spin_lock_irq(&usbhid->lock);
739 clear_bit(HID_IN_POLLING, &usbhid->iofl);
740 clear_bit(HID_OPENED, &usbhid->iofl); 739 clear_bit(HID_OPENED, &usbhid->iofl);
740 if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL))
741 clear_bit(HID_IN_POLLING, &usbhid->iofl);
741 spin_unlock_irq(&usbhid->lock); 742 spin_unlock_irq(&usbhid->lock);
742 743
744 if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
745 return;
746
743 hid_cancel_delayed_stuff(usbhid); 747 hid_cancel_delayed_stuff(usbhid);
744 usb_kill_urb(usbhid->urbin); 748 usb_kill_urb(usbhid->urbin);
745 usbhid->intf->needs_remote_wakeup = 0; 749 usbhid->intf->needs_remote_wakeup = 0;
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 5ecc154f6831..9bc32578a766 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -657,7 +657,7 @@ try:
657 * be directed to disk. 657 * be directed to disk.
658 */ 658 */
659int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, 659int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
660 struct ppa_addr ppa, int bio_iter) 660 struct ppa_addr ppa, int bio_iter, bool advanced_bio)
661{ 661{
662 struct pblk *pblk = container_of(rb, struct pblk, rwb); 662 struct pblk *pblk = container_of(rb, struct pblk, rwb);
663 struct pblk_rb_entry *entry; 663 struct pblk_rb_entry *entry;
@@ -694,7 +694,7 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
694 * filled with data from the cache). If part of the data resides on the 694 * filled with data from the cache). If part of the data resides on the
695 * media, we will read later on 695 * media, we will read later on
696 */ 696 */
697 if (unlikely(!bio->bi_iter.bi_idx)) 697 if (unlikely(!advanced_bio))
698 bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE); 698 bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);
699 699
700 data = bio_data(bio); 700 data = bio_data(bio);
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 4e5c48f3de62..d682e89e6493 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -26,7 +26,7 @@
26 */ 26 */
27static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio, 27static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
28 sector_t lba, struct ppa_addr ppa, 28 sector_t lba, struct ppa_addr ppa,
29 int bio_iter) 29 int bio_iter, bool advanced_bio)
30{ 30{
31#ifdef CONFIG_NVM_DEBUG 31#ifdef CONFIG_NVM_DEBUG
32 /* Callers must ensure that the ppa points to a cache address */ 32 /* Callers must ensure that the ppa points to a cache address */
@@ -34,7 +34,8 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
34 BUG_ON(!pblk_addr_in_cache(ppa)); 34 BUG_ON(!pblk_addr_in_cache(ppa));
35#endif 35#endif
36 36
37 return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, bio_iter); 37 return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
38 bio_iter, advanced_bio);
38} 39}
39 40
40static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, 41static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -44,7 +45,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
44 struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS]; 45 struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
45 sector_t blba = pblk_get_lba(bio); 46 sector_t blba = pblk_get_lba(bio);
46 int nr_secs = rqd->nr_ppas; 47 int nr_secs = rqd->nr_ppas;
47 int advanced_bio = 0; 48 bool advanced_bio = false;
48 int i, j = 0; 49 int i, j = 0;
49 50
50 /* logic error: lba out-of-bounds. Ignore read request */ 51 /* logic error: lba out-of-bounds. Ignore read request */
@@ -62,19 +63,26 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
62retry: 63retry:
63 if (pblk_ppa_empty(p)) { 64 if (pblk_ppa_empty(p)) {
64 WARN_ON(test_and_set_bit(i, read_bitmap)); 65 WARN_ON(test_and_set_bit(i, read_bitmap));
65 continue; 66
67 if (unlikely(!advanced_bio)) {
68 bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
69 advanced_bio = true;
70 }
71
72 goto next;
66 } 73 }
67 74
68 /* Try to read from write buffer. The address is later checked 75 /* Try to read from write buffer. The address is later checked
69 * on the write buffer to prevent retrieving overwritten data. 76 * on the write buffer to prevent retrieving overwritten data.
70 */ 77 */
71 if (pblk_addr_in_cache(p)) { 78 if (pblk_addr_in_cache(p)) {
72 if (!pblk_read_from_cache(pblk, bio, lba, p, i)) { 79 if (!pblk_read_from_cache(pblk, bio, lba, p, i,
80 advanced_bio)) {
73 pblk_lookup_l2p_seq(pblk, &p, lba, 1); 81 pblk_lookup_l2p_seq(pblk, &p, lba, 1);
74 goto retry; 82 goto retry;
75 } 83 }
76 WARN_ON(test_and_set_bit(i, read_bitmap)); 84 WARN_ON(test_and_set_bit(i, read_bitmap));
77 advanced_bio = 1; 85 advanced_bio = true;
78#ifdef CONFIG_NVM_DEBUG 86#ifdef CONFIG_NVM_DEBUG
79 atomic_long_inc(&pblk->cache_reads); 87 atomic_long_inc(&pblk->cache_reads);
80#endif 88#endif
@@ -83,6 +91,7 @@ retry:
83 rqd->ppa_list[j++] = p; 91 rqd->ppa_list[j++] = p;
84 } 92 }
85 93
94next:
86 if (advanced_bio) 95 if (advanced_bio)
87 bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE); 96 bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
88 } 97 }
@@ -282,7 +291,7 @@ retry:
282 * write buffer to prevent retrieving overwritten data. 291 * write buffer to prevent retrieving overwritten data.
283 */ 292 */
284 if (pblk_addr_in_cache(ppa)) { 293 if (pblk_addr_in_cache(ppa)) {
285 if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0)) { 294 if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
286 pblk_lookup_l2p_seq(pblk, &ppa, lba, 1); 295 pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
287 goto retry; 296 goto retry;
288 } 297 }
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 0c5692cc2f60..67e623bd5c2d 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -670,7 +670,7 @@ unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
670 struct list_head *list, 670 struct list_head *list,
671 unsigned int max); 671 unsigned int max);
672int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, 672int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
673 struct ppa_addr ppa, int bio_iter); 673 struct ppa_addr ppa, int bio_iter, bool advanced_bio);
674unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries); 674unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
675 675
676unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags); 676unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 850ff6c67994..44f4a8ac95bd 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1258,8 +1258,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1258 */ 1258 */
1259int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) 1259int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1260{ 1260{
1261 blk_status_t a; 1261 int a, f;
1262 int f;
1263 unsigned long buffers_processed = 0; 1262 unsigned long buffers_processed = 0;
1264 struct dm_buffer *b, *tmp; 1263 struct dm_buffer *b, *tmp;
1265 1264
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 1b224aa9cf15..3acce09bba35 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1587,16 +1587,18 @@ retry:
1587 if (likely(ic->mode == 'J')) { 1587 if (likely(ic->mode == 'J')) {
1588 if (dio->write) { 1588 if (dio->write) {
1589 unsigned next_entry, i, pos; 1589 unsigned next_entry, i, pos;
1590 unsigned ws, we; 1590 unsigned ws, we, range_sectors;
1591 1591
1592 dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors); 1592 dio->range.n_sectors = min(dio->range.n_sectors,
1593 ic->free_sectors << ic->sb->log2_sectors_per_block);
1593 if (unlikely(!dio->range.n_sectors)) 1594 if (unlikely(!dio->range.n_sectors))
1594 goto sleep; 1595 goto sleep;
1595 ic->free_sectors -= dio->range.n_sectors; 1596 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
1597 ic->free_sectors -= range_sectors;
1596 journal_section = ic->free_section; 1598 journal_section = ic->free_section;
1597 journal_entry = ic->free_section_entry; 1599 journal_entry = ic->free_section_entry;
1598 1600
1599 next_entry = ic->free_section_entry + dio->range.n_sectors; 1601 next_entry = ic->free_section_entry + range_sectors;
1600 ic->free_section_entry = next_entry % ic->journal_section_entries; 1602 ic->free_section_entry = next_entry % ic->journal_section_entries;
1601 ic->free_section += next_entry / ic->journal_section_entries; 1603 ic->free_section += next_entry / ic->journal_section_entries;
1602 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries; 1604 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
@@ -1727,6 +1729,8 @@ static void pad_uncommitted(struct dm_integrity_c *ic)
1727 wraparound_section(ic, &ic->free_section); 1729 wraparound_section(ic, &ic->free_section);
1728 ic->n_uncommitted_sections++; 1730 ic->n_uncommitted_sections++;
1729 } 1731 }
1732 WARN_ON(ic->journal_sections * ic->journal_section_entries !=
1733 (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors);
1730} 1734}
1731 1735
1732static void integrity_commit(struct work_struct *w) 1736static void integrity_commit(struct work_struct *w)
@@ -1821,6 +1825,9 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
1821{ 1825{
1822 unsigned i, j, n; 1826 unsigned i, j, n;
1823 struct journal_completion comp; 1827 struct journal_completion comp;
1828 struct blk_plug plug;
1829
1830 blk_start_plug(&plug);
1824 1831
1825 comp.ic = ic; 1832 comp.ic = ic;
1826 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 1833 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
@@ -1945,6 +1952,8 @@ skip_io:
1945 1952
1946 dm_bufio_write_dirty_buffers_async(ic->bufio); 1953 dm_bufio_write_dirty_buffers_async(ic->bufio);
1947 1954
1955 blk_finish_plug(&plug);
1956
1948 complete_journal_op(&comp); 1957 complete_journal_op(&comp);
1949 wait_for_completion_io(&comp.comp); 1958 wait_for_completion_io(&comp.comp);
1950 1959
@@ -3019,6 +3028,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3019 ti->error = "Block size doesn't match the information in superblock"; 3028 ti->error = "Block size doesn't match the information in superblock";
3020 goto bad; 3029 goto bad;
3021 } 3030 }
3031 if (!le32_to_cpu(ic->sb->journal_sections)) {
3032 r = -EINVAL;
3033 ti->error = "Corrupted superblock, journal_sections is 0";
3034 goto bad;
3035 }
3022 /* make sure that ti->max_io_len doesn't overflow */ 3036 /* make sure that ti->max_io_len doesn't overflow */
3023 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS || 3037 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
3024 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) { 3038 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2e10c2f13a34..5bfe285ea9d1 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -208,6 +208,7 @@ struct raid_dev {
208#define RT_FLAG_RS_BITMAP_LOADED 2 208#define RT_FLAG_RS_BITMAP_LOADED 2
209#define RT_FLAG_UPDATE_SBS 3 209#define RT_FLAG_UPDATE_SBS 3
210#define RT_FLAG_RESHAPE_RS 4 210#define RT_FLAG_RESHAPE_RS 4
211#define RT_FLAG_RS_SUSPENDED 5
211 212
212/* Array elements of 64 bit needed for rebuild/failed disk bits */ 213/* Array elements of 64 bit needed for rebuild/failed disk bits */
213#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) 214#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -564,9 +565,10 @@ static const char *raid10_md_layout_to_format(int layout)
564 if (__raid10_near_copies(layout) > 1) 565 if (__raid10_near_copies(layout) > 1)
565 return "near"; 566 return "near";
566 567
567 WARN_ON(__raid10_far_copies(layout) < 2); 568 if (__raid10_far_copies(layout) > 1)
569 return "far";
568 570
569 return "far"; 571 return "unknown";
570} 572}
571 573
572/* Return md raid10 algorithm for @name */ 574/* Return md raid10 algorithm for @name */
@@ -2540,11 +2542,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
2540 if (!freshest) 2542 if (!freshest)
2541 return 0; 2543 return 0;
2542 2544
2543 if (validate_raid_redundancy(rs)) {
2544 rs->ti->error = "Insufficient redundancy to activate array";
2545 return -EINVAL;
2546 }
2547
2548 /* 2545 /*
2549 * Validation of the freshest device provides the source of 2546 * Validation of the freshest device provides the source of
2550 * validation for the remaining devices. 2547 * validation for the remaining devices.
@@ -2553,6 +2550,11 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
2553 if (super_validate(rs, freshest)) 2550 if (super_validate(rs, freshest))
2554 return -EINVAL; 2551 return -EINVAL;
2555 2552
2553 if (validate_raid_redundancy(rs)) {
2554 rs->ti->error = "Insufficient redundancy to activate array";
2555 return -EINVAL;
2556 }
2557
2556 rdev_for_each(rdev, mddev) 2558 rdev_for_each(rdev, mddev)
2557 if (!test_bit(Journal, &rdev->flags) && 2559 if (!test_bit(Journal, &rdev->flags) &&
2558 rdev != freshest && 2560 rdev != freshest &&
@@ -3168,6 +3170,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3168 } 3170 }
3169 3171
3170 mddev_suspend(&rs->md); 3172 mddev_suspend(&rs->md);
3173 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
3171 3174
3172 /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */ 3175 /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
3173 if (rs_is_raid456(rs)) { 3176 if (rs_is_raid456(rs)) {
@@ -3625,7 +3628,7 @@ static void raid_postsuspend(struct dm_target *ti)
3625{ 3628{
3626 struct raid_set *rs = ti->private; 3629 struct raid_set *rs = ti->private;
3627 3630
3628 if (!rs->md.suspended) 3631 if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
3629 mddev_suspend(&rs->md); 3632 mddev_suspend(&rs->md);
3630 3633
3631 rs->md.ro = 1; 3634 rs->md.ro = 1;
@@ -3759,7 +3762,7 @@ static int rs_start_reshape(struct raid_set *rs)
3759 return r; 3762 return r;
3760 3763
3761 /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */ 3764 /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
3762 if (mddev->suspended) 3765 if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
3763 mddev_resume(mddev); 3766 mddev_resume(mddev);
3764 3767
3765 /* 3768 /*
@@ -3786,8 +3789,8 @@ static int rs_start_reshape(struct raid_set *rs)
3786 } 3789 }
3787 3790
3788 /* Suspend because a resume will happen in raid_resume() */ 3791 /* Suspend because a resume will happen in raid_resume() */
3789 if (!mddev->suspended) 3792 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
3790 mddev_suspend(mddev); 3793 mddev_suspend(mddev);
3791 3794
3792 /* 3795 /*
3793 * Now reshape got set up, update superblocks to 3796 * Now reshape got set up, update superblocks to
@@ -3883,13 +3886,13 @@ static void raid_resume(struct dm_target *ti)
3883 if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS)) 3886 if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
3884 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 3887 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3885 3888
3886 if (mddev->suspended) 3889 if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
3887 mddev_resume(mddev); 3890 mddev_resume(mddev);
3888} 3891}
3889 3892
3890static struct target_type raid_target = { 3893static struct target_type raid_target = {
3891 .name = "raid", 3894 .name = "raid",
3892 .version = {1, 11, 1}, 3895 .version = {1, 12, 1},
3893 .module = THIS_MODULE, 3896 .module = THIS_MODULE,
3894 .ctr = raid_ctr, 3897 .ctr = raid_ctr,
3895 .dtr = raid_dtr, 3898 .dtr = raid_dtr,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a39bcd9b982a..28a4071cdf85 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -20,6 +20,7 @@
20#include <linux/atomic.h> 20#include <linux/atomic.h>
21#include <linux/blk-mq.h> 21#include <linux/blk-mq.h>
22#include <linux/mount.h> 22#include <linux/mount.h>
23#include <linux/dax.h>
23 24
24#define DM_MSG_PREFIX "table" 25#define DM_MSG_PREFIX "table"
25 26
@@ -1630,6 +1631,37 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
1630 return false; 1631 return false;
1631} 1632}
1632 1633
1634static int device_dax_write_cache_enabled(struct dm_target *ti,
1635 struct dm_dev *dev, sector_t start,
1636 sector_t len, void *data)
1637{
1638 struct dax_device *dax_dev = dev->dax_dev;
1639
1640 if (!dax_dev)
1641 return false;
1642
1643 if (dax_write_cache_enabled(dax_dev))
1644 return true;
1645 return false;
1646}
1647
1648static int dm_table_supports_dax_write_cache(struct dm_table *t)
1649{
1650 struct dm_target *ti;
1651 unsigned i;
1652
1653 for (i = 0; i < dm_table_get_num_targets(t); i++) {
1654 ti = dm_table_get_target(t, i);
1655
1656 if (ti->type->iterate_devices &&
1657 ti->type->iterate_devices(ti,
1658 device_dax_write_cache_enabled, NULL))
1659 return true;
1660 }
1661
1662 return false;
1663}
1664
1633static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, 1665static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
1634 sector_t start, sector_t len, void *data) 1666 sector_t start, sector_t len, void *data)
1635{ 1667{
@@ -1785,6 +1817,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1785 } 1817 }
1786 blk_queue_write_cache(q, wc, fua); 1818 blk_queue_write_cache(q, wc, fua);
1787 1819
1820 if (dm_table_supports_dax_write_cache(t))
1821 dax_write_cache(t->md->dax_dev, true);
1822
1788 /* Ensure that all underlying devices are non-rotational. */ 1823 /* Ensure that all underlying devices are non-rotational. */
1789 if (dm_table_all_devices_attribute(t, device_is_nonrot)) 1824 if (dm_table_all_devices_attribute(t, device_is_nonrot))
1790 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); 1825 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 504ba3fa328b..e13f90832b6b 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -308,19 +308,14 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
308{ 308{
309 unsigned n; 309 unsigned n;
310 310
311 if (!fio->rs) { 311 if (!fio->rs)
312 fio->rs = mempool_alloc(v->fec->rs_pool, 0); 312 fio->rs = mempool_alloc(v->fec->rs_pool, GFP_NOIO);
313 if (unlikely(!fio->rs)) {
314 DMERR("failed to allocate RS");
315 return -ENOMEM;
316 }
317 }
318 313
319 fec_for_each_prealloc_buffer(n) { 314 fec_for_each_prealloc_buffer(n) {
320 if (fio->bufs[n]) 315 if (fio->bufs[n])
321 continue; 316 continue;
322 317
323 fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOIO); 318 fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOWAIT);
324 if (unlikely(!fio->bufs[n])) { 319 if (unlikely(!fio->bufs[n])) {
325 DMERR("failed to allocate FEC buffer"); 320 DMERR("failed to allocate FEC buffer");
326 return -ENOMEM; 321 return -ENOMEM;
@@ -332,22 +327,16 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
332 if (fio->bufs[n]) 327 if (fio->bufs[n])
333 continue; 328 continue;
334 329
335 fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOIO); 330 fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOWAIT);
336 /* we can manage with even one buffer if necessary */ 331 /* we can manage with even one buffer if necessary */
337 if (unlikely(!fio->bufs[n])) 332 if (unlikely(!fio->bufs[n]))
338 break; 333 break;
339 } 334 }
340 fio->nbufs = n; 335 fio->nbufs = n;
341 336
342 if (!fio->output) { 337 if (!fio->output)
343 fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO); 338 fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO);
344 339
345 if (!fio->output) {
346 DMERR("failed to allocate FEC page");
347 return -ENOMEM;
348 }
349 }
350
351 return 0; 340 return 0;
352} 341}
353 342
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 884ff7c170a0..a4fa2ada6883 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -624,7 +624,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
624 624
625 ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page); 625 ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
626 if (ret == 0) 626 if (ret == 0)
627 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL); 627 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
628 628
629 return ret; 629 return ret;
630} 630}
@@ -658,7 +658,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
658 658
659 /* Flush drive cache (this will also sync data) */ 659 /* Flush drive cache (this will also sync data) */
660 if (ret == 0) 660 if (ret == 0)
661 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL); 661 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
662 662
663 return ret; 663 return ret;
664} 664}
@@ -722,7 +722,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
722 722
723 /* If there are no dirty metadata blocks, just flush the device cache */ 723 /* If there are no dirty metadata blocks, just flush the device cache */
724 if (list_empty(&write_list)) { 724 if (list_empty(&write_list)) {
725 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL); 725 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
726 goto out; 726 goto out;
727 } 727 }
728 728
@@ -927,7 +927,7 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
927 (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift); 927 (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
928 } 928 }
929 929
930 page = alloc_page(GFP_KERNEL); 930 page = alloc_page(GFP_NOIO);
931 if (!page) 931 if (!page)
932 return -ENOMEM; 932 return -ENOMEM;
933 933
@@ -1183,7 +1183,7 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1183 1183
1184 /* Get zone information from disk */ 1184 /* Get zone information from disk */
1185 ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), 1185 ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
1186 &blkz, &nr_blkz, GFP_KERNEL); 1186 &blkz, &nr_blkz, GFP_NOIO);
1187 if (ret) { 1187 if (ret) {
1188 dmz_dev_err(zmd->dev, "Get zone %u report failed", 1188 dmz_dev_err(zmd->dev, "Get zone %u report failed",
1189 dmz_id(zmd, zone)); 1189 dmz_id(zmd, zone));
@@ -1257,7 +1257,7 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1257 1257
1258 ret = blkdev_reset_zones(dev->bdev, 1258 ret = blkdev_reset_zones(dev->bdev,
1259 dmz_start_sect(zmd, zone), 1259 dmz_start_sect(zmd, zone),
1260 dev->zone_nr_sectors, GFP_KERNEL); 1260 dev->zone_nr_sectors, GFP_NOIO);
1261 if (ret) { 1261 if (ret) {
1262 dmz_dev_err(dev, "Reset zone %u failed %d", 1262 dmz_dev_err(dev, "Reset zone %u failed %d",
1263 dmz_id(zmd, zone), ret); 1263 dmz_id(zmd, zone), ret);
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index 05c0a126f5c8..44a119e12f1a 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -75,7 +75,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
75 nr_blocks = block - wp_block; 75 nr_blocks = block - wp_block;
76 ret = blkdev_issue_zeroout(zrc->dev->bdev, 76 ret = blkdev_issue_zeroout(zrc->dev->bdev,
77 dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block), 77 dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
78 dmz_blk2sect(nr_blocks), GFP_NOFS, false); 78 dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
79 if (ret) { 79 if (ret) {
80 dmz_dev_err(zrc->dev, 80 dmz_dev_err(zrc->dev,
81 "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d", 81 "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 2b538fa817f4..b08bbbd4d902 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -541,7 +541,7 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
541 int ret; 541 int ret;
542 542
543 /* Create a new chunk work */ 543 /* Create a new chunk work */
544 cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOFS); 544 cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
545 if (!cw) 545 if (!cw)
546 goto out; 546 goto out;
547 547
@@ -588,7 +588,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
588 588
589 bio->bi_bdev = dev->bdev; 589 bio->bi_bdev = dev->bdev;
590 590
591 if (!nr_sectors && (bio_op(bio) != REQ_OP_FLUSH) && (bio_op(bio) != REQ_OP_WRITE)) 591 if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
592 return DM_MAPIO_REMAPPED; 592 return DM_MAPIO_REMAPPED;
593 593
594 /* The BIO should be block aligned */ 594 /* The BIO should be block aligned */
@@ -603,7 +603,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
603 bioctx->status = BLK_STS_OK; 603 bioctx->status = BLK_STS_OK;
604 604
605 /* Set the BIO pending in the flush list */ 605 /* Set the BIO pending in the flush list */
606 if (bio_op(bio) == REQ_OP_FLUSH || (!nr_sectors && bio_op(bio) == REQ_OP_WRITE)) { 606 if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
607 spin_lock(&dmz->flush_lock); 607 spin_lock(&dmz->flush_lock);
608 bio_list_add(&dmz->flush_list, bio); 608 bio_list_add(&dmz->flush_list, bio);
609 spin_unlock(&dmz->flush_lock); 609 spin_unlock(&dmz->flush_lock);
@@ -785,7 +785,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
785 785
786 /* Chunk BIO work */ 786 /* Chunk BIO work */
787 mutex_init(&dmz->chunk_lock); 787 mutex_init(&dmz->chunk_lock);
788 INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOFS); 788 INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
789 dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 789 dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
790 0, dev->name); 790 0, dev->name);
791 if (!dmz->chunk_wq) { 791 if (!dmz->chunk_wq) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8cdca0296749..c99634612fc4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2287,7 +2287,7 @@ static void export_array(struct mddev *mddev)
2287 2287
2288static bool set_in_sync(struct mddev *mddev) 2288static bool set_in_sync(struct mddev *mddev)
2289{ 2289{
2290 WARN_ON_ONCE(!spin_is_locked(&mddev->lock)); 2290 WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock));
2291 if (!mddev->in_sync) { 2291 if (!mddev->in_sync) {
2292 mddev->sync_checkers++; 2292 mddev->sync_checkers++;
2293 spin_unlock(&mddev->lock); 2293 spin_unlock(&mddev->lock);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index b50eb4ac1b82..09db03455801 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -731,58 +731,4 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio
731 !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors) 731 !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
732 mddev->queue->limits.max_write_zeroes_sectors = 0; 732 mddev->queue->limits.max_write_zeroes_sectors = 0;
733} 733}
734
735/* Maximum size of each resync request */
736#define RESYNC_BLOCK_SIZE (64*1024)
737#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
738
739/* for managing resync I/O pages */
740struct resync_pages {
741 unsigned idx; /* for get/put page from the pool */
742 void *raid_bio;
743 struct page *pages[RESYNC_PAGES];
744};
745
746static inline int resync_alloc_pages(struct resync_pages *rp,
747 gfp_t gfp_flags)
748{
749 int i;
750
751 for (i = 0; i < RESYNC_PAGES; i++) {
752 rp->pages[i] = alloc_page(gfp_flags);
753 if (!rp->pages[i])
754 goto out_free;
755 }
756
757 return 0;
758
759out_free:
760 while (--i >= 0)
761 put_page(rp->pages[i]);
762 return -ENOMEM;
763}
764
765static inline void resync_free_pages(struct resync_pages *rp)
766{
767 int i;
768
769 for (i = 0; i < RESYNC_PAGES; i++)
770 put_page(rp->pages[i]);
771}
772
773static inline void resync_get_all_pages(struct resync_pages *rp)
774{
775 int i;
776
777 for (i = 0; i < RESYNC_PAGES; i++)
778 get_page(rp->pages[i]);
779}
780
781static inline struct page *resync_fetch_page(struct resync_pages *rp,
782 unsigned idx)
783{
784 if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
785 return NULL;
786 return rp->pages[idx];
787}
788#endif /* _MD_MD_H */ 734#endif /* _MD_MD_H */
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
new file mode 100644
index 000000000000..9f2670b45f31
--- /dev/null
+++ b/drivers/md/raid1-10.c
@@ -0,0 +1,81 @@
1/* Maximum size of each resync request */
2#define RESYNC_BLOCK_SIZE (64*1024)
3#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
4
5/* for managing resync I/O pages */
6struct resync_pages {
7 void *raid_bio;
8 struct page *pages[RESYNC_PAGES];
9};
10
11static inline int resync_alloc_pages(struct resync_pages *rp,
12 gfp_t gfp_flags)
13{
14 int i;
15
16 for (i = 0; i < RESYNC_PAGES; i++) {
17 rp->pages[i] = alloc_page(gfp_flags);
18 if (!rp->pages[i])
19 goto out_free;
20 }
21
22 return 0;
23
24out_free:
25 while (--i >= 0)
26 put_page(rp->pages[i]);
27 return -ENOMEM;
28}
29
30static inline void resync_free_pages(struct resync_pages *rp)
31{
32 int i;
33
34 for (i = 0; i < RESYNC_PAGES; i++)
35 put_page(rp->pages[i]);
36}
37
38static inline void resync_get_all_pages(struct resync_pages *rp)
39{
40 int i;
41
42 for (i = 0; i < RESYNC_PAGES; i++)
43 get_page(rp->pages[i]);
44}
45
46static inline struct page *resync_fetch_page(struct resync_pages *rp,
47 unsigned idx)
48{
49 if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
50 return NULL;
51 return rp->pages[idx];
52}
53
54/*
55 * 'strct resync_pages' stores actual pages used for doing the resync
56 * IO, and it is per-bio, so make .bi_private points to it.
57 */
58static inline struct resync_pages *get_resync_pages(struct bio *bio)
59{
60 return bio->bi_private;
61}
62
63/* generally called after bio_reset() for reseting bvec */
64static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
65 int size)
66{
67 int idx = 0;
68
69 /* initialize bvec table again */
70 do {
71 struct page *page = resync_fetch_page(rp, idx);
72 int len = min_t(int, size, PAGE_SIZE);
73
74 /*
75 * won't fail because the vec table is big
76 * enough to hold all these pages
77 */
78 bio_add_page(bio, page, len, 0);
79 size -= len;
80 } while (idx++ < RESYNC_PAGES && size > 0);
81}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3febfc8391fb..f50958ded9f0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -81,14 +81,7 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
81#define raid1_log(md, fmt, args...) \ 81#define raid1_log(md, fmt, args...) \
82 do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0) 82 do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
83 83
84/* 84#include "raid1-10.c"
85 * 'strct resync_pages' stores actual pages used for doing the resync
86 * IO, and it is per-bio, so make .bi_private points to it.
87 */
88static inline struct resync_pages *get_resync_pages(struct bio *bio)
89{
90 return bio->bi_private;
91}
92 85
93/* 86/*
94 * for resync bio, r1bio pointer can be retrieved from the per-bio 87 * for resync bio, r1bio pointer can be retrieved from the per-bio
@@ -170,7 +163,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
170 resync_get_all_pages(rp); 163 resync_get_all_pages(rp);
171 } 164 }
172 165
173 rp->idx = 0;
174 rp->raid_bio = r1_bio; 166 rp->raid_bio = r1_bio;
175 bio->bi_private = rp; 167 bio->bi_private = rp;
176 } 168 }
@@ -492,10 +484,6 @@ static void raid1_end_write_request(struct bio *bio)
492 } 484 }
493 485
494 if (behind) { 486 if (behind) {
495 /* we release behind master bio when all write are done */
496 if (r1_bio->behind_master_bio == bio)
497 to_put = NULL;
498
499 if (test_bit(WriteMostly, &rdev->flags)) 487 if (test_bit(WriteMostly, &rdev->flags))
500 atomic_dec(&r1_bio->behind_remaining); 488 atomic_dec(&r1_bio->behind_remaining);
501 489
@@ -802,8 +790,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
802 bio->bi_next = NULL; 790 bio->bi_next = NULL;
803 bio->bi_bdev = rdev->bdev; 791 bio->bi_bdev = rdev->bdev;
804 if (test_bit(Faulty, &rdev->flags)) { 792 if (test_bit(Faulty, &rdev->flags)) {
805 bio->bi_status = BLK_STS_IOERR; 793 bio_io_error(bio);
806 bio_endio(bio);
807 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 794 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
808 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 795 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
809 /* Just ignore it */ 796 /* Just ignore it */
@@ -1088,7 +1075,7 @@ static void unfreeze_array(struct r1conf *conf)
1088 wake_up(&conf->wait_barrier); 1075 wake_up(&conf->wait_barrier);
1089} 1076}
1090 1077
1091static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio, 1078static void alloc_behind_master_bio(struct r1bio *r1_bio,
1092 struct bio *bio) 1079 struct bio *bio)
1093{ 1080{
1094 int size = bio->bi_iter.bi_size; 1081 int size = bio->bi_iter.bi_size;
@@ -1098,11 +1085,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
1098 1085
1099 behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev); 1086 behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
1100 if (!behind_bio) 1087 if (!behind_bio)
1101 goto fail; 1088 return;
1102 1089
1103 /* discard op, we don't support writezero/writesame yet */ 1090 /* discard op, we don't support writezero/writesame yet */
1104 if (!bio_has_data(bio)) 1091 if (!bio_has_data(bio)) {
1092 behind_bio->bi_iter.bi_size = size;
1105 goto skip_copy; 1093 goto skip_copy;
1094 }
1106 1095
1107 while (i < vcnt && size) { 1096 while (i < vcnt && size) {
1108 struct page *page; 1097 struct page *page;
@@ -1123,14 +1112,13 @@ skip_copy:
1123 r1_bio->behind_master_bio = behind_bio;; 1112 r1_bio->behind_master_bio = behind_bio;;
1124 set_bit(R1BIO_BehindIO, &r1_bio->state); 1113 set_bit(R1BIO_BehindIO, &r1_bio->state);
1125 1114
1126 return behind_bio; 1115 return;
1127 1116
1128free_pages: 1117free_pages:
1129 pr_debug("%dB behind alloc failed, doing sync I/O\n", 1118 pr_debug("%dB behind alloc failed, doing sync I/O\n",
1130 bio->bi_iter.bi_size); 1119 bio->bi_iter.bi_size);
1131 bio_free_pages(behind_bio); 1120 bio_free_pages(behind_bio);
1132fail: 1121 bio_put(behind_bio);
1133 return behind_bio;
1134} 1122}
1135 1123
1136struct raid1_plug_cb { 1124struct raid1_plug_cb {
@@ -1483,7 +1471,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1483 (atomic_read(&bitmap->behind_writes) 1471 (atomic_read(&bitmap->behind_writes)
1484 < mddev->bitmap_info.max_write_behind) && 1472 < mddev->bitmap_info.max_write_behind) &&
1485 !waitqueue_active(&bitmap->behind_wait)) { 1473 !waitqueue_active(&bitmap->behind_wait)) {
1486 mbio = alloc_behind_master_bio(r1_bio, bio); 1474 alloc_behind_master_bio(r1_bio, bio);
1487 } 1475 }
1488 1476
1489 bitmap_startwrite(bitmap, r1_bio->sector, 1477 bitmap_startwrite(bitmap, r1_bio->sector,
@@ -1493,14 +1481,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1493 first_clone = 0; 1481 first_clone = 0;
1494 } 1482 }
1495 1483
1496 if (!mbio) { 1484 if (r1_bio->behind_master_bio)
1497 if (r1_bio->behind_master_bio) 1485 mbio = bio_clone_fast(r1_bio->behind_master_bio,
1498 mbio = bio_clone_fast(r1_bio->behind_master_bio, 1486 GFP_NOIO, mddev->bio_set);
1499 GFP_NOIO, 1487 else
1500 mddev->bio_set); 1488 mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
1501 else
1502 mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
1503 }
1504 1489
1505 if (r1_bio->behind_master_bio) { 1490 if (r1_bio->behind_master_bio) {
1506 if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags)) 1491 if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
@@ -2086,10 +2071,7 @@ static void process_checks(struct r1bio *r1_bio)
2086 /* Fix variable parts of all bios */ 2071 /* Fix variable parts of all bios */
2087 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); 2072 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
2088 for (i = 0; i < conf->raid_disks * 2; i++) { 2073 for (i = 0; i < conf->raid_disks * 2; i++) {
2089 int j;
2090 int size;
2091 blk_status_t status; 2074 blk_status_t status;
2092 struct bio_vec *bi;
2093 struct bio *b = r1_bio->bios[i]; 2075 struct bio *b = r1_bio->bios[i];
2094 struct resync_pages *rp = get_resync_pages(b); 2076 struct resync_pages *rp = get_resync_pages(b);
2095 if (b->bi_end_io != end_sync_read) 2077 if (b->bi_end_io != end_sync_read)
@@ -2098,8 +2080,6 @@ static void process_checks(struct r1bio *r1_bio)
2098 status = b->bi_status; 2080 status = b->bi_status;
2099 bio_reset(b); 2081 bio_reset(b);
2100 b->bi_status = status; 2082 b->bi_status = status;
2101 b->bi_vcnt = vcnt;
2102 b->bi_iter.bi_size = r1_bio->sectors << 9;
2103 b->bi_iter.bi_sector = r1_bio->sector + 2083 b->bi_iter.bi_sector = r1_bio->sector +
2104 conf->mirrors[i].rdev->data_offset; 2084 conf->mirrors[i].rdev->data_offset;
2105 b->bi_bdev = conf->mirrors[i].rdev->bdev; 2085 b->bi_bdev = conf->mirrors[i].rdev->bdev;
@@ -2107,15 +2087,8 @@ static void process_checks(struct r1bio *r1_bio)
2107 rp->raid_bio = r1_bio; 2087 rp->raid_bio = r1_bio;
2108 b->bi_private = rp; 2088 b->bi_private = rp;
2109 2089
2110 size = b->bi_iter.bi_size; 2090 /* initialize bvec table again */
2111 bio_for_each_segment_all(bi, b, j) { 2091 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
2112 bi->bv_offset = 0;
2113 if (size > PAGE_SIZE)
2114 bi->bv_len = PAGE_SIZE;
2115 else
2116 bi->bv_len = size;
2117 size -= PAGE_SIZE;
2118 }
2119 } 2092 }
2120 for (primary = 0; primary < conf->raid_disks * 2; primary++) 2093 for (primary = 0; primary < conf->raid_disks * 2; primary++)
2121 if (r1_bio->bios[primary]->bi_end_io == end_sync_read && 2094 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
@@ -2366,8 +2339,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
2366 wbio = bio_clone_fast(r1_bio->behind_master_bio, 2339 wbio = bio_clone_fast(r1_bio->behind_master_bio,
2367 GFP_NOIO, 2340 GFP_NOIO,
2368 mddev->bio_set); 2341 mddev->bio_set);
2369 /* We really need a _all clone */
2370 wbio->bi_iter = (struct bvec_iter){ 0 };
2371 } else { 2342 } else {
2372 wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO, 2343 wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
2373 mddev->bio_set); 2344 mddev->bio_set);
@@ -2619,6 +2590,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2619 int good_sectors = RESYNC_SECTORS; 2590 int good_sectors = RESYNC_SECTORS;
2620 int min_bad = 0; /* number of sectors that are bad in all devices */ 2591 int min_bad = 0; /* number of sectors that are bad in all devices */
2621 int idx = sector_to_idx(sector_nr); 2592 int idx = sector_to_idx(sector_nr);
2593 int page_idx = 0;
2622 2594
2623 if (!conf->r1buf_pool) 2595 if (!conf->r1buf_pool)
2624 if (init_resync(conf)) 2596 if (init_resync(conf))
@@ -2846,7 +2818,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2846 bio = r1_bio->bios[i]; 2818 bio = r1_bio->bios[i];
2847 rp = get_resync_pages(bio); 2819 rp = get_resync_pages(bio);
2848 if (bio->bi_end_io) { 2820 if (bio->bi_end_io) {
2849 page = resync_fetch_page(rp, rp->idx++); 2821 page = resync_fetch_page(rp, page_idx);
2850 2822
2851 /* 2823 /*
2852 * won't fail because the vec table is big 2824 * won't fail because the vec table is big
@@ -2858,7 +2830,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2858 nr_sectors += len>>9; 2830 nr_sectors += len>>9;
2859 sector_nr += len>>9; 2831 sector_nr += len>>9;
2860 sync_blocks -= (len>>9); 2832 sync_blocks -= (len>>9);
2861 } while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES); 2833 } while (++page_idx < RESYNC_PAGES);
2862 2834
2863 r1_bio->sectors = nr_sectors; 2835 r1_bio->sectors = nr_sectors;
2864 2836
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5026e7ad51d3..f55d4cc085f6 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -110,14 +110,7 @@ static void end_reshape(struct r10conf *conf);
110#define raid10_log(md, fmt, args...) \ 110#define raid10_log(md, fmt, args...) \
111 do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0) 111 do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
112 112
113/* 113#include "raid1-10.c"
114 * 'strct resync_pages' stores actual pages used for doing the resync
115 * IO, and it is per-bio, so make .bi_private points to it.
116 */
117static inline struct resync_pages *get_resync_pages(struct bio *bio)
118{
119 return bio->bi_private;
120}
121 114
122/* 115/*
123 * for resync bio, r10bio pointer can be retrieved from the per-bio 116 * for resync bio, r10bio pointer can be retrieved from the per-bio
@@ -221,7 +214,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
221 resync_get_all_pages(rp); 214 resync_get_all_pages(rp);
222 } 215 }
223 216
224 rp->idx = 0;
225 rp->raid_bio = r10_bio; 217 rp->raid_bio = r10_bio;
226 bio->bi_private = rp; 218 bio->bi_private = rp;
227 if (rbio) { 219 if (rbio) {
@@ -913,8 +905,7 @@ static void flush_pending_writes(struct r10conf *conf)
913 bio->bi_next = NULL; 905 bio->bi_next = NULL;
914 bio->bi_bdev = rdev->bdev; 906 bio->bi_bdev = rdev->bdev;
915 if (test_bit(Faulty, &rdev->flags)) { 907 if (test_bit(Faulty, &rdev->flags)) {
916 bio->bi_status = BLK_STS_IOERR; 908 bio_io_error(bio);
917 bio_endio(bio);
918 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 909 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
919 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 910 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
920 /* Just ignore it */ 911 /* Just ignore it */
@@ -1098,8 +1089,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1098 bio->bi_next = NULL; 1089 bio->bi_next = NULL;
1099 bio->bi_bdev = rdev->bdev; 1090 bio->bi_bdev = rdev->bdev;
1100 if (test_bit(Faulty, &rdev->flags)) { 1091 if (test_bit(Faulty, &rdev->flags)) {
1101 bio->bi_status = BLK_STS_IOERR; 1092 bio_io_error(bio);
1102 bio_endio(bio);
1103 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 1093 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
1104 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 1094 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
1105 /* Just ignore it */ 1095 /* Just ignore it */
@@ -2087,8 +2077,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2087 rp = get_resync_pages(tbio); 2077 rp = get_resync_pages(tbio);
2088 bio_reset(tbio); 2078 bio_reset(tbio);
2089 2079
2090 tbio->bi_vcnt = vcnt; 2080 md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
2091 tbio->bi_iter.bi_size = fbio->bi_iter.bi_size; 2081
2092 rp->raid_bio = r10_bio; 2082 rp->raid_bio = r10_bio;
2093 tbio->bi_private = rp; 2083 tbio->bi_private = rp;
2094 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; 2084 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
@@ -2853,6 +2843,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
2853 sector_t sectors_skipped = 0; 2843 sector_t sectors_skipped = 0;
2854 int chunks_skipped = 0; 2844 int chunks_skipped = 0;
2855 sector_t chunk_mask = conf->geo.chunk_mask; 2845 sector_t chunk_mask = conf->geo.chunk_mask;
2846 int page_idx = 0;
2856 2847
2857 if (!conf->r10buf_pool) 2848 if (!conf->r10buf_pool)
2858 if (init_resync(conf)) 2849 if (init_resync(conf))
@@ -3355,7 +3346,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3355 break; 3346 break;
3356 for (bio= biolist ; bio ; bio=bio->bi_next) { 3347 for (bio= biolist ; bio ; bio=bio->bi_next) {
3357 struct resync_pages *rp = get_resync_pages(bio); 3348 struct resync_pages *rp = get_resync_pages(bio);
3358 page = resync_fetch_page(rp, rp->idx++); 3349 page = resync_fetch_page(rp, page_idx);
3359 /* 3350 /*
3360 * won't fail because the vec table is big enough 3351 * won't fail because the vec table is big enough
3361 * to hold all these pages 3352 * to hold all these pages
@@ -3364,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3364 } 3355 }
3365 nr_sectors += len>>9; 3356 nr_sectors += len>>9;
3366 sector_nr += len>>9; 3357 sector_nr += len>>9;
3367 } while (get_resync_pages(biolist)->idx < RESYNC_PAGES); 3358 } while (++page_idx < RESYNC_PAGES);
3368 r10_bio->sectors = nr_sectors; 3359 r10_bio->sectors = nr_sectors;
3369 3360
3370 while (biolist) { 3361 while (biolist) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index aeeb8d6854e2..0fc2748aaf95 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3381,9 +3381,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3381 sh->dev[i].sector + STRIPE_SECTORS) { 3381 sh->dev[i].sector + STRIPE_SECTORS) {
3382 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 3382 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
3383 3383
3384 bi->bi_status = BLK_STS_IOERR;
3385 md_write_end(conf->mddev); 3384 md_write_end(conf->mddev);
3386 bio_endio(bi); 3385 bio_io_error(bi);
3387 bi = nextbi; 3386 bi = nextbi;
3388 } 3387 }
3389 if (bitmap_end) 3388 if (bitmap_end)
@@ -3403,9 +3402,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3403 sh->dev[i].sector + STRIPE_SECTORS) { 3402 sh->dev[i].sector + STRIPE_SECTORS) {
3404 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 3403 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
3405 3404
3406 bi->bi_status = BLK_STS_IOERR;
3407 md_write_end(conf->mddev); 3405 md_write_end(conf->mddev);
3408 bio_endio(bi); 3406 bio_io_error(bi);
3409 bi = bi2; 3407 bi = bi2;
3410 } 3408 }
3411 3409
@@ -3429,8 +3427,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3429 struct bio *nextbi = 3427 struct bio *nextbi =
3430 r5_next_bio(bi, sh->dev[i].sector); 3428 r5_next_bio(bi, sh->dev[i].sector);
3431 3429
3432 bi->bi_status = BLK_STS_IOERR; 3430 bio_io_error(bi);
3433 bio_endio(bi);
3434 bi = nextbi; 3431 bi = nextbi;
3435 } 3432 }
3436 } 3433 }
@@ -6237,6 +6234,8 @@ static void raid5_do_work(struct work_struct *work)
6237 pr_debug("%d stripes handled\n", handled); 6234 pr_debug("%d stripes handled\n", handled);
6238 6235
6239 spin_unlock_irq(&conf->device_lock); 6236 spin_unlock_irq(&conf->device_lock);
6237
6238 async_tx_issue_pending_all();
6240 blk_finish_plug(&plug); 6239 blk_finish_plug(&plug);
6241 6240
6242 pr_debug("--- raid5worker inactive\n"); 6241 pr_debug("--- raid5worker inactive\n");
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index a9dfb26972f2..250dc6ec4c82 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2957,7 +2957,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2957 } 2957 }
2958 2958
2959 /* find out number of slots supported */ 2959 /* find out number of slots supported */
2960 if (device_property_read_u32(dev, "num-slots", &pdata->num_slots)) 2960 if (!device_property_read_u32(dev, "num-slots", &pdata->num_slots))
2961 dev_info(dev, "'num-slots' was deprecated.\n"); 2961 dev_info(dev, "'num-slots' was deprecated.\n");
2962 2962
2963 if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth)) 2963 if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 7c12f3715676..04ff3c97a535 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -356,9 +356,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
356 struct mmc_host *mmc = host->mmc; 356 struct mmc_host *mmc = host->mmc;
357 int ret = 0; 357 int ret = 0;
358 358
359 if (mmc_pdata(host)->set_power)
360 return mmc_pdata(host)->set_power(host->dev, power_on, vdd);
361
362 /* 359 /*
363 * If we don't see a Vcc regulator, assume it's a fixed 360 * If we don't see a Vcc regulator, assume it's a fixed
364 * voltage always-on regulator. 361 * voltage always-on regulator.
@@ -366,9 +363,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
366 if (IS_ERR(mmc->supply.vmmc)) 363 if (IS_ERR(mmc->supply.vmmc))
367 return 0; 364 return 0;
368 365
369 if (mmc_pdata(host)->before_set_reg)
370 mmc_pdata(host)->before_set_reg(host->dev, power_on, vdd);
371
372 ret = omap_hsmmc_set_pbias(host, false, 0); 366 ret = omap_hsmmc_set_pbias(host, false, 0);
373 if (ret) 367 if (ret)
374 return ret; 368 return ret;
@@ -400,9 +394,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
400 return ret; 394 return ret;
401 } 395 }
402 396
403 if (mmc_pdata(host)->after_set_reg)
404 mmc_pdata(host)->after_set_reg(host->dev, power_on, vdd);
405
406 return 0; 397 return 0;
407 398
408err_set_voltage: 399err_set_voltage:
@@ -469,8 +460,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
469 int ret; 460 int ret;
470 struct mmc_host *mmc = host->mmc; 461 struct mmc_host *mmc = host->mmc;
471 462
472 if (mmc_pdata(host)->set_power)
473 return 0;
474 463
475 ret = mmc_regulator_get_supply(mmc); 464 ret = mmc_regulator_get_supply(mmc);
476 if (ret == -EPROBE_DEFER) 465 if (ret == -EPROBE_DEFER)
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index d6fa2214aaae..0fb4e4c119e1 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -793,8 +793,12 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
793 } 793 }
794 mmc_writel(host, REG_CLKCR, rval); 794 mmc_writel(host, REG_CLKCR, rval);
795 795
796 if (host->cfg->needs_new_timings) 796 if (host->cfg->needs_new_timings) {
797 mmc_writel(host, REG_SD_NTSR, SDXC_2X_TIMING_MODE); 797 /* Don't touch the delay bits */
798 rval = mmc_readl(host, REG_SD_NTSR);
799 rval |= SDXC_2X_TIMING_MODE;
800 mmc_writel(host, REG_SD_NTSR, rval);
801 }
798 802
799 ret = sunxi_mmc_clk_set_phase(host, ios, rate); 803 ret = sunxi_mmc_clk_set_phase(host, ios, rate);
800 if (ret) 804 if (ret)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 99a26a9efec1..f41ab0ea942a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2743,9 +2743,9 @@ module_init(virtio_net_driver_init);
2743 2743
2744static __exit void virtio_net_driver_exit(void) 2744static __exit void virtio_net_driver_exit(void)
2745{ 2745{
2746 unregister_virtio_driver(&virtio_net_driver);
2746 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); 2747 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
2747 cpuhp_remove_multi_state(virtionet_online); 2748 cpuhp_remove_multi_state(virtionet_online);
2748 unregister_virtio_driver(&virtio_net_driver);
2749} 2749}
2750module_exit(virtio_net_driver_exit); 2750module_exit(virtio_net_driver_exit);
2751 2751
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 3b77cfe5aa1e..c49f1f8b2e57 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1995,6 +1995,9 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
1995 int serial_len = sizeof(ctrl->serial); 1995 int serial_len = sizeof(ctrl->serial);
1996 int model_len = sizeof(ctrl->model); 1996 int model_len = sizeof(ctrl->model);
1997 1997
1998 if (!uuid_is_null(&ns->uuid))
1999 return sprintf(buf, "uuid.%pU\n", &ns->uuid);
2000
1998 if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) 2001 if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
1999 return sprintf(buf, "eui.%16phN\n", ns->nguid); 2002 return sprintf(buf, "eui.%16phN\n", ns->nguid);
2000 2003
@@ -2709,7 +2712,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
2709 mutex_lock(&ctrl->namespaces_mutex); 2712 mutex_lock(&ctrl->namespaces_mutex);
2710 2713
2711 /* Forcibly unquiesce queues to avoid blocking dispatch */ 2714 /* Forcibly unquiesce queues to avoid blocking dispatch */
2712 blk_mq_unquiesce_queue(ctrl->admin_q); 2715 if (ctrl->admin_q)
2716 blk_mq_unquiesce_queue(ctrl->admin_q);
2713 2717
2714 list_for_each_entry(ns, &ctrl->namespaces, list) { 2718 list_for_each_entry(ns, &ctrl->namespaces, list) {
2715 /* 2719 /*
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index d666ada39a9b..5c2a08ef08ba 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1888,7 +1888,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1888 * the target device is present 1888 * the target device is present
1889 */ 1889 */
1890 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 1890 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
1891 return BLK_STS_IOERR; 1891 goto busy;
1892 1892
1893 if (!nvme_fc_ctrl_get(ctrl)) 1893 if (!nvme_fc_ctrl_get(ctrl))
1894 return BLK_STS_IOERR; 1894 return BLK_STS_IOERR;
@@ -1958,22 +1958,25 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1958 queue->lldd_handle, &op->fcp_req); 1958 queue->lldd_handle, &op->fcp_req);
1959 1959
1960 if (ret) { 1960 if (ret) {
1961 if (op->rq) /* normal request */ 1961 if (!(op->flags & FCOP_FLAGS_AEN))
1962 nvme_fc_unmap_data(ctrl, op->rq, op); 1962 nvme_fc_unmap_data(ctrl, op->rq, op);
1963 /* else - aen. no cleanup needed */
1964 1963
1965 nvme_fc_ctrl_put(ctrl); 1964 nvme_fc_ctrl_put(ctrl);
1966 1965
1967 if (ret != -EBUSY) 1966 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
1967 ret != -EBUSY)
1968 return BLK_STS_IOERR; 1968 return BLK_STS_IOERR;
1969 1969
1970 if (op->rq) 1970 goto busy;
1971 blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
1972
1973 return BLK_STS_RESOURCE;
1974 } 1971 }
1975 1972
1976 return BLK_STS_OK; 1973 return BLK_STS_OK;
1974
1975busy:
1976 if (!(op->flags & FCOP_FLAGS_AEN) && queue->hctx)
1977 blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
1978
1979 return BLK_STS_RESOURCE;
1977} 1980}
1978 1981
1979static blk_status_t 1982static blk_status_t
@@ -2802,66 +2805,70 @@ out_fail:
2802 return ERR_PTR(ret); 2805 return ERR_PTR(ret);
2803} 2806}
2804 2807
2805enum {
2806 FCT_TRADDR_ERR = 0,
2807 FCT_TRADDR_WWNN = 1 << 0,
2808 FCT_TRADDR_WWPN = 1 << 1,
2809};
2810 2808
2811struct nvmet_fc_traddr { 2809struct nvmet_fc_traddr {
2812 u64 nn; 2810 u64 nn;
2813 u64 pn; 2811 u64 pn;
2814}; 2812};
2815 2813
2816static const match_table_t traddr_opt_tokens = {
2817 { FCT_TRADDR_WWNN, "nn-%s" },
2818 { FCT_TRADDR_WWPN, "pn-%s" },
2819 { FCT_TRADDR_ERR, NULL }
2820};
2821
2822static int 2814static int
2823nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf) 2815__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2824{ 2816{
2825 substring_t args[MAX_OPT_ARGS];
2826 char *options, *o, *p;
2827 int token, ret = 0;
2828 u64 token64; 2817 u64 token64;
2829 2818
2830 options = o = kstrdup(buf, GFP_KERNEL); 2819 if (match_u64(sstr, &token64))
2831 if (!options) 2820 return -EINVAL;
2832 return -ENOMEM; 2821 *val = token64;
2833 2822
2834 while ((p = strsep(&o, ":\n")) != NULL) { 2823 return 0;
2835 if (!*p) 2824}
2836 continue;
2837 2825
2838 token = match_token(p, traddr_opt_tokens, args); 2826/*
2839 switch (token) { 2827 * This routine validates and extracts the WWN's from the TRADDR string.
2840 case FCT_TRADDR_WWNN: 2828 * As kernel parsers need the 0x to determine number base, universally
2841 if (match_u64(args, &token64)) { 2829 * build string to parse with 0x prefix before parsing name strings.
2842 ret = -EINVAL; 2830 */
2843 goto out; 2831static int
2844 } 2832nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
2845 traddr->nn = token64; 2833{
2846 break; 2834 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
2847 case FCT_TRADDR_WWPN: 2835 substring_t wwn = { name, &name[sizeof(name)-1] };
2848 if (match_u64(args, &token64)) { 2836 int nnoffset, pnoffset;
2849 ret = -EINVAL; 2837
2850 goto out; 2838 /* validate it string one of the 2 allowed formats */
2851 } 2839 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
2852 traddr->pn = token64; 2840 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
2853 break; 2841 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
2854 default: 2842 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
2855 pr_warn("unknown traddr token or missing value '%s'\n", 2843 nnoffset = NVME_FC_TRADDR_OXNNLEN;
2856 p); 2844 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
2857 ret = -EINVAL; 2845 NVME_FC_TRADDR_OXNNLEN;
2858 goto out; 2846 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
2859 } 2847 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
2860 } 2848 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
2849 "pn-", NVME_FC_TRADDR_NNLEN))) {
2850 nnoffset = NVME_FC_TRADDR_NNLEN;
2851 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
2852 } else
2853 goto out_einval;
2861 2854
2862out: 2855 name[0] = '0';
2863 kfree(options); 2856 name[1] = 'x';
2864 return ret; 2857 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
2858
2859 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2860 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2861 goto out_einval;
2862
2863 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2864 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2865 goto out_einval;
2866
2867 return 0;
2868
2869out_einval:
2870 pr_warn("%s: bad traddr string\n", __func__);
2871 return -EINVAL;
2865} 2872}
2866 2873
2867static struct nvme_ctrl * 2874static struct nvme_ctrl *
@@ -2875,11 +2882,11 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
2875 unsigned long flags; 2882 unsigned long flags;
2876 int ret; 2883 int ret;
2877 2884
2878 ret = nvme_fc_parse_address(&raddr, opts->traddr); 2885 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
2879 if (ret || !raddr.nn || !raddr.pn) 2886 if (ret || !raddr.nn || !raddr.pn)
2880 return ERR_PTR(-EINVAL); 2887 return ERR_PTR(-EINVAL);
2881 2888
2882 ret = nvme_fc_parse_address(&laddr, opts->host_traddr); 2889 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
2883 if (ret || !laddr.nn || !laddr.pn) 2890 if (ret || !laddr.nn || !laddr.pn)
2884 return ERR_PTR(-EINVAL); 2891 return ERR_PTR(-EINVAL);
2885 2892
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8569ee771269..cd888a47d0fc 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1619,7 +1619,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
1619static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) 1619static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
1620{ 1620{
1621 struct nvme_host_mem_buf_desc *descs; 1621 struct nvme_host_mem_buf_desc *descs;
1622 u32 chunk_size, max_entries; 1622 u32 chunk_size, max_entries, len;
1623 int i = 0; 1623 int i = 0;
1624 void **bufs; 1624 void **bufs;
1625 u64 size = 0, tmp; 1625 u64 size = 0, tmp;
@@ -1638,10 +1638,10 @@ retry:
1638 if (!bufs) 1638 if (!bufs)
1639 goto out_free_descs; 1639 goto out_free_descs;
1640 1640
1641 for (size = 0; size < preferred; size += chunk_size) { 1641 for (size = 0; size < preferred; size += len) {
1642 u32 len = min_t(u64, chunk_size, preferred - size);
1643 dma_addr_t dma_addr; 1642 dma_addr_t dma_addr;
1644 1643
1644 len = min_t(u64, chunk_size, preferred - size);
1645 bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, 1645 bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
1646 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 1646 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
1647 if (!bufs[i]) 1647 if (!bufs[i])
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index d5801c150b1c..31ca55dfcb1d 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -2293,66 +2293,70 @@ nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2293} 2293}
2294EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); 2294EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
2295 2295
2296enum {
2297 FCT_TRADDR_ERR = 0,
2298 FCT_TRADDR_WWNN = 1 << 0,
2299 FCT_TRADDR_WWPN = 1 << 1,
2300};
2301 2296
2302struct nvmet_fc_traddr { 2297struct nvmet_fc_traddr {
2303 u64 nn; 2298 u64 nn;
2304 u64 pn; 2299 u64 pn;
2305}; 2300};
2306 2301
2307static const match_table_t traddr_opt_tokens = {
2308 { FCT_TRADDR_WWNN, "nn-%s" },
2309 { FCT_TRADDR_WWPN, "pn-%s" },
2310 { FCT_TRADDR_ERR, NULL }
2311};
2312
2313static int 2302static int
2314nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf) 2303__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2315{ 2304{
2316 substring_t args[MAX_OPT_ARGS];
2317 char *options, *o, *p;
2318 int token, ret = 0;
2319 u64 token64; 2305 u64 token64;
2320 2306
2321 options = o = kstrdup(buf, GFP_KERNEL); 2307 if (match_u64(sstr, &token64))
2322 if (!options) 2308 return -EINVAL;
2323 return -ENOMEM; 2309 *val = token64;
2324 2310
2325 while ((p = strsep(&o, ":\n")) != NULL) { 2311 return 0;
2326 if (!*p) 2312}
2327 continue;
2328 2313
2329 token = match_token(p, traddr_opt_tokens, args); 2314/*
2330 switch (token) { 2315 * This routine validates and extracts the WWN's from the TRADDR string.
2331 case FCT_TRADDR_WWNN: 2316 * As kernel parsers need the 0x to determine number base, universally
2332 if (match_u64(args, &token64)) { 2317 * build string to parse with 0x prefix before parsing name strings.
2333 ret = -EINVAL; 2318 */
2334 goto out; 2319static int
2335 } 2320nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
2336 traddr->nn = token64; 2321{
2337 break; 2322 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
2338 case FCT_TRADDR_WWPN: 2323 substring_t wwn = { name, &name[sizeof(name)-1] };
2339 if (match_u64(args, &token64)) { 2324 int nnoffset, pnoffset;
2340 ret = -EINVAL; 2325
2341 goto out; 2326 /* validate it string one of the 2 allowed formats */
2342 } 2327 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
2343 traddr->pn = token64; 2328 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
2344 break; 2329 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
2345 default: 2330 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
2346 pr_warn("unknown traddr token or missing value '%s'\n", 2331 nnoffset = NVME_FC_TRADDR_OXNNLEN;
2347 p); 2332 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
2348 ret = -EINVAL; 2333 NVME_FC_TRADDR_OXNNLEN;
2349 goto out; 2334 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
2350 } 2335 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
2351 } 2336 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
2337 "pn-", NVME_FC_TRADDR_NNLEN))) {
2338 nnoffset = NVME_FC_TRADDR_NNLEN;
2339 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
2340 } else
2341 goto out_einval;
2342
2343 name[0] = '0';
2344 name[1] = 'x';
2345 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
2346
2347 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2348 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2349 goto out_einval;
2350
2351 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2352 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2353 goto out_einval;
2352 2354
2353out: 2355 return 0;
2354 kfree(options); 2356
2355 return ret; 2357out_einval:
2358 pr_warn("%s: bad traddr string\n", __func__);
2359 return -EINVAL;
2356} 2360}
2357 2361
2358static int 2362static int
@@ -2370,7 +2374,8 @@ nvmet_fc_add_port(struct nvmet_port *port)
2370 2374
2371 /* map the traddr address info to a target port */ 2375 /* map the traddr address info to a target port */
2372 2376
2373 ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr); 2377 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
2378 sizeof(port->disc_addr.traddr));
2374 if (ret) 2379 if (ret)
2375 return ret; 2380 return ret;
2376 2381
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 6ce72aa65425..ab21c846eb27 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -476,7 +476,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
476 int i; 476 int i;
477 477
478 for (i = 0; i < nr_irqs; i++, res++) 478 for (i = 0; i < nr_irqs; i++, res++)
479 if (!of_irq_to_resource(dev, i, res)) 479 if (of_irq_to_resource(dev, i, res) <= 0)
480 break; 480 break;
481 481
482 return i; 482 return i;
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 055f83fddc18..7147aa53e9a2 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -954,7 +954,7 @@ static struct attribute *pdcs_subsys_attrs[] = {
954 NULL, 954 NULL,
955}; 955};
956 956
957static struct attribute_group pdcs_attr_group = { 957static const struct attribute_group pdcs_attr_group = {
958 .attrs = pdcs_subsys_attrs, 958 .attrs = pdcs_subsys_attrs,
959}; 959};
960 960
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index dc459eb1246b..1c5e0f333779 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -569,22 +569,41 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
569 if (irq != other_irq) { 569 if (irq != other_irq) {
570 pr_warn("mismatched PPIs detected.\n"); 570 pr_warn("mismatched PPIs detected.\n");
571 err = -EINVAL; 571 err = -EINVAL;
572 goto err_out;
572 } 573 }
573 } else { 574 } else {
574 err = request_irq(irq, handler, 575 struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
575 IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu", 576 unsigned long irq_flags;
577
578 err = irq_force_affinity(irq, cpumask_of(cpu));
579
580 if (err && num_possible_cpus() > 1) {
581 pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
582 irq, cpu);
583 goto err_out;
584 }
585
586 if (platdata && platdata->irq_flags) {
587 irq_flags = platdata->irq_flags;
588 } else {
589 irq_flags = IRQF_PERCPU |
590 IRQF_NOBALANCING |
591 IRQF_NO_THREAD;
592 }
593
594 err = request_irq(irq, handler, irq_flags, "arm-pmu",
576 per_cpu_ptr(&hw_events->percpu_pmu, cpu)); 595 per_cpu_ptr(&hw_events->percpu_pmu, cpu));
577 } 596 }
578 597
579 if (err) { 598 if (err)
580 pr_err("unable to request IRQ%d for ARM PMU counters\n", 599 goto err_out;
581 irq);
582 return err;
583 }
584 600
585 cpumask_set_cpu(cpu, &armpmu->active_irqs); 601 cpumask_set_cpu(cpu, &armpmu->active_irqs);
586
587 return 0; 602 return 0;
603
604err_out:
605 pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
606 return err;
588} 607}
589 608
590int armpmu_request_irqs(struct arm_pmu *armpmu) 609int armpmu_request_irqs(struct arm_pmu *armpmu)
@@ -628,12 +647,6 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
628 enable_percpu_irq(irq, IRQ_TYPE_NONE); 647 enable_percpu_irq(irq, IRQ_TYPE_NONE);
629 return 0; 648 return 0;
630 } 649 }
631
632 if (irq_force_affinity(irq, cpumask_of(cpu)) &&
633 num_possible_cpus() > 1) {
634 pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
635 irq, cpu);
636 }
637 } 650 }
638 651
639 return 0; 652 return 0;
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 69255f53057a..4eafa7a42e52 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -131,8 +131,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
131 } 131 }
132 132
133 if (!pmu_has_irq_affinity(pdev->dev.of_node)) { 133 if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
134 pr_warn("no interrupt-affinity property for %s, guessing.\n", 134 pr_warn("no interrupt-affinity property for %pOF, guessing.\n",
135 of_node_full_name(pdev->dev.of_node)); 135 pdev->dev.of_node);
136 } 136 }
137 137
138 /* 138 /*
@@ -211,7 +211,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
211 } 211 }
212 212
213 if (ret) { 213 if (ret) {
214 pr_info("%s: failed to probe PMU!\n", of_node_full_name(node)); 214 pr_info("%pOF: failed to probe PMU!\n", node);
215 goto out_free; 215 goto out_free;
216 } 216 }
217 217
@@ -228,8 +228,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
228out_free_irqs: 228out_free_irqs:
229 armpmu_free_irqs(pmu); 229 armpmu_free_irqs(pmu);
230out_free: 230out_free:
231 pr_info("%s: failed to register PMU devices!\n", 231 pr_info("%pOF: failed to register PMU devices!\n", node);
232 of_node_full_name(node));
233 armpmu_free(pmu); 232 armpmu_free(pmu);
234 return ret; 233 return ret;
235} 234}
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index c259848228b4..b242cce10468 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -546,6 +546,7 @@ static int l2_cache_event_init(struct perf_event *event)
546 } 546 }
547 547
548 if ((event != event->group_leader) && 548 if ((event != event->group_leader) &&
549 !is_software_event(event->group_leader) &&
549 (L2_EVT_GROUP(event->group_leader->attr.config) == 550 (L2_EVT_GROUP(event->group_leader->attr.config) ==
550 L2_EVT_GROUP(event->attr.config))) { 551 L2_EVT_GROUP(event->attr.config))) {
551 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, 552 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
@@ -558,6 +559,7 @@ static int l2_cache_event_init(struct perf_event *event)
558 list_for_each_entry(sibling, &event->group_leader->sibling_list, 559 list_for_each_entry(sibling, &event->group_leader->sibling_list,
559 group_entry) { 560 group_entry) {
560 if ((sibling != event) && 561 if ((sibling != event) &&
562 !is_software_event(sibling) &&
561 (L2_EVT_GROUP(sibling->attr.config) == 563 (L2_EVT_GROUP(sibling->attr.config) ==
562 L2_EVT_GROUP(event->attr.config))) { 564 L2_EVT_GROUP(event->attr.config))) {
563 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, 565 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 7e0d4f724dda..432fc40990bd 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -559,6 +559,7 @@ static void chp_process_crw(struct crw *crw0, struct crw *crw1,
559 chpid.id = crw0->rsid; 559 chpid.id = crw0->rsid;
560 switch (crw0->erc) { 560 switch (crw0->erc) {
561 case CRW_ERC_IPARM: /* Path has come. */ 561 case CRW_ERC_IPARM: /* Path has come. */
562 case CRW_ERC_INIT:
562 if (!chp_is_registered(chpid)) 563 if (!chp_is_registered(chpid))
563 chp_new(chpid); 564 chp_new(chpid);
564 chsc_chp_online(chpid); 565 chsc_chp_online(chpid);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 8914eab84337..4f7cdb28bd38 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -938,7 +938,7 @@ static struct scsi_host_template hpsa_driver_template = {
938#endif 938#endif
939 .sdev_attrs = hpsa_sdev_attrs, 939 .sdev_attrs = hpsa_sdev_attrs,
940 .shost_attrs = hpsa_shost_attrs, 940 .shost_attrs = hpsa_shost_attrs,
941 .max_sectors = 8192, 941 .max_sectors = 1024,
942 .no_write_same = 1, 942 .no_write_same = 1,
943}; 943};
944 944
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 1e82d4128a84..4fe606b000b4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -759,8 +759,11 @@ static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
759 return false; 759 return false;
760 return true; 760 return true;
761 case SG_DXFER_FROM_DEV: 761 case SG_DXFER_FROM_DEV:
762 if (hp->dxfer_len < 0) 762 /*
763 return false; 763 * for SG_DXFER_FROM_DEV we always set dxfer_len to > 0. dxferp
764 * can either be NULL or != NULL so there's no point in checking
765 * it either. So just return true.
766 */
764 return true; 767 return true;
765 case SG_DXFER_TO_DEV: 768 case SG_DXFER_TO_DEV:
766 case SG_DXFER_TO_FROM_DEV: 769 case SG_DXFER_TO_FROM_DEV:
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 07ec8a8877de..e164ffade38a 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -690,7 +690,7 @@ struct pqi_config_table_heartbeat {
690 690
691#define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0) 691#define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0)
692#define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32 692#define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32
693#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U) 693#define PQI_MAX_TRANSFER_SIZE (1024U * 1024U)
694#define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U) 694#define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U)
695 695
696#define RAID_MAP_MAX_ENTRIES 1024 696#define RAID_MAP_MAX_ENTRIES 1024
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 40219a706309..e9391bbd4036 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -30,7 +30,7 @@ static DEFINE_IDA(nvm_ida);
30 30
31struct nvm_auth_status { 31struct nvm_auth_status {
32 struct list_head list; 32 struct list_head list;
33 uuid_be uuid; 33 uuid_t uuid;
34 u32 status; 34 u32 status;
35}; 35};
36 36
@@ -47,7 +47,7 @@ static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
47 struct nvm_auth_status *st; 47 struct nvm_auth_status *st;
48 48
49 list_for_each_entry(st, &nvm_auth_status_cache, list) { 49 list_for_each_entry(st, &nvm_auth_status_cache, list) {
50 if (!uuid_be_cmp(st->uuid, *sw->uuid)) 50 if (uuid_equal(&st->uuid, sw->uuid))
51 return st; 51 return st;
52 } 52 }
53 53
@@ -1461,7 +1461,7 @@ struct tb_sw_lookup {
1461 struct tb *tb; 1461 struct tb *tb;
1462 u8 link; 1462 u8 link;
1463 u8 depth; 1463 u8 depth;
1464 const uuid_be *uuid; 1464 const uuid_t *uuid;
1465}; 1465};
1466 1466
1467static int tb_switch_match(struct device *dev, void *data) 1467static int tb_switch_match(struct device *dev, void *data)
@@ -1518,7 +1518,7 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
1518 * Returned switch has reference count increased so the caller needs to 1518 * Returned switch has reference count increased so the caller needs to
1519 * call tb_switch_put() when done with the switch. 1519 * call tb_switch_put() when done with the switch.
1520 */ 1520 */
1521struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid) 1521struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
1522{ 1522{
1523 struct tb_sw_lookup lookup; 1523 struct tb_sw_lookup lookup;
1524 struct device *dev; 1524 struct device *dev;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 3d9f64676e58..e0deee4f1eb0 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -101,7 +101,7 @@ struct tb_switch {
101 struct tb_dma_port *dma_port; 101 struct tb_dma_port *dma_port;
102 struct tb *tb; 102 struct tb *tb;
103 u64 uid; 103 u64 uid;
104 uuid_be *uuid; 104 uuid_t *uuid;
105 u16 vendor; 105 u16 vendor;
106 u16 device; 106 u16 device;
107 const char *vendor_name; 107 const char *vendor_name;
@@ -407,7 +407,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw);
407struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route); 407struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
408struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, 408struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
409 u8 depth); 409 u8 depth);
410struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid); 410struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
411 411
412static inline unsigned int tb_switch_phy_port_from_link(unsigned int link) 412static inline unsigned int tb_switch_phy_port_from_link(unsigned int link)
413{ 413{
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 85b6d33c0919..de6441e4a060 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -179,7 +179,7 @@ struct icm_fr_pkg_get_topology_response {
179 179
180struct icm_fr_event_device_connected { 180struct icm_fr_event_device_connected {
181 struct icm_pkg_header hdr; 181 struct icm_pkg_header hdr;
182 uuid_be ep_uuid; 182 uuid_t ep_uuid;
183 u8 connection_key; 183 u8 connection_key;
184 u8 connection_id; 184 u8 connection_id;
185 u16 link_info; 185 u16 link_info;
@@ -193,7 +193,7 @@ struct icm_fr_event_device_connected {
193 193
194struct icm_fr_pkg_approve_device { 194struct icm_fr_pkg_approve_device {
195 struct icm_pkg_header hdr; 195 struct icm_pkg_header hdr;
196 uuid_be ep_uuid; 196 uuid_t ep_uuid;
197 u8 connection_key; 197 u8 connection_key;
198 u8 connection_id; 198 u8 connection_id;
199 u16 reserved; 199 u16 reserved;
@@ -207,7 +207,7 @@ struct icm_fr_event_device_disconnected {
207 207
208struct icm_fr_pkg_add_device_key { 208struct icm_fr_pkg_add_device_key {
209 struct icm_pkg_header hdr; 209 struct icm_pkg_header hdr;
210 uuid_be ep_uuid; 210 uuid_t ep_uuid;
211 u8 connection_key; 211 u8 connection_key;
212 u8 connection_id; 212 u8 connection_id;
213 u16 reserved; 213 u16 reserved;
@@ -216,7 +216,7 @@ struct icm_fr_pkg_add_device_key {
216 216
217struct icm_fr_pkg_add_device_key_response { 217struct icm_fr_pkg_add_device_key_response {
218 struct icm_pkg_header hdr; 218 struct icm_pkg_header hdr;
219 uuid_be ep_uuid; 219 uuid_t ep_uuid;
220 u8 connection_key; 220 u8 connection_key;
221 u8 connection_id; 221 u8 connection_id;
222 u16 reserved; 222 u16 reserved;
@@ -224,7 +224,7 @@ struct icm_fr_pkg_add_device_key_response {
224 224
225struct icm_fr_pkg_challenge_device { 225struct icm_fr_pkg_challenge_device {
226 struct icm_pkg_header hdr; 226 struct icm_pkg_header hdr;
227 uuid_be ep_uuid; 227 uuid_t ep_uuid;
228 u8 connection_key; 228 u8 connection_key;
229 u8 connection_id; 229 u8 connection_id;
230 u16 reserved; 230 u16 reserved;
@@ -233,7 +233,7 @@ struct icm_fr_pkg_challenge_device {
233 233
234struct icm_fr_pkg_challenge_device_response { 234struct icm_fr_pkg_challenge_device_response {
235 struct icm_pkg_header hdr; 235 struct icm_pkg_header hdr;
236 uuid_be ep_uuid; 236 uuid_t ep_uuid;
237 u8 connection_key; 237 u8 connection_key;
238 u8 connection_id; 238 u8 connection_id;
239 u16 reserved; 239 u16 reserved;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 22caf808bfab..f0b3a0b9d42f 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -104,12 +104,6 @@ static u32 page_to_balloon_pfn(struct page *page)
104 return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE; 104 return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
105} 105}
106 106
107static struct page *balloon_pfn_to_page(u32 pfn)
108{
109 BUG_ON(pfn % VIRTIO_BALLOON_PAGES_PER_PAGE);
110 return pfn_to_page(pfn / VIRTIO_BALLOON_PAGES_PER_PAGE);
111}
112
113static void balloon_ack(struct virtqueue *vq) 107static void balloon_ack(struct virtqueue *vq)
114{ 108{
115 struct virtio_balloon *vb = vq->vdev->priv; 109 struct virtio_balloon *vb = vq->vdev->priv;
@@ -138,8 +132,10 @@ static void set_page_pfns(struct virtio_balloon *vb,
138{ 132{
139 unsigned int i; 133 unsigned int i;
140 134
141 /* Set balloon pfns pointing at this page. 135 /*
142 * Note that the first pfn points at start of the page. */ 136 * Set balloon pfns pointing at this page.
137 * Note that the first pfn points at start of the page.
138 */
143 for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++) 139 for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
144 pfns[i] = cpu_to_virtio32(vb->vdev, 140 pfns[i] = cpu_to_virtio32(vb->vdev,
145 page_to_balloon_pfn(page) + i); 141 page_to_balloon_pfn(page) + i);
@@ -182,18 +178,16 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
182 return num_allocated_pages; 178 return num_allocated_pages;
183} 179}
184 180
185static void release_pages_balloon(struct virtio_balloon *vb) 181static void release_pages_balloon(struct virtio_balloon *vb,
182 struct list_head *pages)
186{ 183{
187 unsigned int i; 184 struct page *page, *next;
188 struct page *page;
189 185
190 /* Find pfns pointing at start of each page, get pages and free them. */ 186 list_for_each_entry_safe(page, next, pages, lru) {
191 for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
192 page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev,
193 vb->pfns[i]));
194 if (!virtio_has_feature(vb->vdev, 187 if (!virtio_has_feature(vb->vdev,
195 VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) 188 VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
196 adjust_managed_page_count(page, 1); 189 adjust_managed_page_count(page, 1);
190 list_del(&page->lru);
197 put_page(page); /* balloon reference */ 191 put_page(page); /* balloon reference */
198 } 192 }
199} 193}
@@ -203,6 +197,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
203 unsigned num_freed_pages; 197 unsigned num_freed_pages;
204 struct page *page; 198 struct page *page;
205 struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info; 199 struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
200 LIST_HEAD(pages);
206 201
207 /* We can only do one array worth at a time. */ 202 /* We can only do one array worth at a time. */
208 num = min(num, ARRAY_SIZE(vb->pfns)); 203 num = min(num, ARRAY_SIZE(vb->pfns));
@@ -216,6 +211,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
216 if (!page) 211 if (!page)
217 break; 212 break;
218 set_page_pfns(vb, vb->pfns + vb->num_pfns, page); 213 set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
214 list_add(&page->lru, &pages);
219 vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE; 215 vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
220 } 216 }
221 217
@@ -227,7 +223,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
227 */ 223 */
228 if (vb->num_pfns != 0) 224 if (vb->num_pfns != 0)
229 tell_host(vb, vb->deflate_vq); 225 tell_host(vb, vb->deflate_vq);
230 release_pages_balloon(vb); 226 release_pages_balloon(vb, &pages);
231 mutex_unlock(&vb->balloon_lock); 227 mutex_unlock(&vb->balloon_lock);
232 return num_freed_pages; 228 return num_freed_pages;
233} 229}
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b241bfa529ce..bae1f5d36c26 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -343,14 +343,6 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
343 info->cpu = cpu; 343 info->cpu = cpu;
344} 344}
345 345
346static void xen_evtchn_mask_all(void)
347{
348 unsigned int evtchn;
349
350 for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
351 mask_evtchn(evtchn);
352}
353
354/** 346/**
355 * notify_remote_via_irq - send event to remote end of event channel via irq 347 * notify_remote_via_irq - send event to remote end of event channel via irq
356 * @irq: irq of event channel to send event to 348 * @irq: irq of event channel to send event to
@@ -1573,7 +1565,6 @@ void xen_irq_resume(void)
1573 struct irq_info *info; 1565 struct irq_info *info;
1574 1566
1575 /* New event-channel space is not 'live' yet. */ 1567 /* New event-channel space is not 'live' yet. */
1576 xen_evtchn_mask_all();
1577 xen_evtchn_resume(); 1568 xen_evtchn_resume();
1578 1569
1579 /* No IRQ <-> event-channel mappings. */ 1570 /* No IRQ <-> event-channel mappings. */
@@ -1681,6 +1672,7 @@ module_param(fifo_events, bool, 0);
1681void __init xen_init_IRQ(void) 1672void __init xen_init_IRQ(void)
1682{ 1673{
1683 int ret = -EINVAL; 1674 int ret = -EINVAL;
1675 unsigned int evtchn;
1684 1676
1685 if (fifo_events) 1677 if (fifo_events)
1686 ret = xen_evtchn_fifo_init(); 1678 ret = xen_evtchn_fifo_init();
@@ -1692,7 +1684,8 @@ void __init xen_init_IRQ(void)
1692 BUG_ON(!evtchn_to_irq); 1684 BUG_ON(!evtchn_to_irq);
1693 1685
1694 /* No event channels are 'live' right now. */ 1686 /* No event channels are 'live' right now. */
1695 xen_evtchn_mask_all(); 1687 for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
1688 mask_evtchn(evtchn);
1696 1689
1697 pirq_needs_eoi = pirq_needs_eoi_flag; 1690 pirq_needs_eoi = pirq_needs_eoi_flag;
1698 1691
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 66620713242a..a67e955cacd1 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -151,8 +151,8 @@ static unsigned long frontswap_inertia_counter;
151static void frontswap_selfshrink(void) 151static void frontswap_selfshrink(void)
152{ 152{
153 static unsigned long cur_frontswap_pages; 153 static unsigned long cur_frontswap_pages;
154 static unsigned long last_frontswap_pages; 154 unsigned long last_frontswap_pages;
155 static unsigned long tgt_frontswap_pages; 155 unsigned long tgt_frontswap_pages;
156 156
157 last_frontswap_pages = cur_frontswap_pages; 157 last_frontswap_pages = cur_frontswap_pages;
158 cur_frontswap_pages = frontswap_curr_pages(); 158 cur_frontswap_pages = frontswap_curr_pages();
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 967f069385d0..71ddfb4cf61c 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -87,7 +87,6 @@ static int __init xenfs_init(void)
87 if (xen_domain()) 87 if (xen_domain())
88 return register_filesystem(&xenfs_type); 88 return register_filesystem(&xenfs_type);
89 89
90 pr_info("not registering filesystem on non-xen platform\n");
91 return 0; 90 return 0;
92} 91}
93 92
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 375f8c728d91..e3b0b4196d3d 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4825,10 +4825,6 @@ skip_async:
4825 else 4825 else
4826 flush = BTRFS_RESERVE_NO_FLUSH; 4826 flush = BTRFS_RESERVE_NO_FLUSH;
4827 spin_lock(&space_info->lock); 4827 spin_lock(&space_info->lock);
4828 if (can_overcommit(fs_info, space_info, orig, flush, false)) {
4829 spin_unlock(&space_info->lock);
4830 break;
4831 }
4832 if (list_empty(&space_info->tickets) && 4828 if (list_empty(&space_info->tickets) &&
4833 list_empty(&space_info->priority_tickets)) { 4829 list_empty(&space_info->priority_tickets)) {
4834 spin_unlock(&space_info->lock); 4830 spin_unlock(&space_info->lock);
@@ -7589,6 +7585,10 @@ search:
7589 u64 offset; 7585 u64 offset;
7590 int cached; 7586 int cached;
7591 7587
7588 /* If the block group is read-only, we can skip it entirely. */
7589 if (unlikely(block_group->ro))
7590 continue;
7591
7592 btrfs_grab_block_group(block_group, delalloc); 7592 btrfs_grab_block_group(block_group, delalloc);
7593 search_start = block_group->key.objectid; 7593 search_start = block_group->key.objectid;
7594 7594
@@ -7624,8 +7624,6 @@ have_block_group:
7624 7624
7625 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) 7625 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7626 goto loop; 7626 goto loop;
7627 if (unlikely(block_group->ro))
7628 goto loop;
7629 7627
7630 /* 7628 /*
7631 * Ok we want to try and use the cluster allocator, so 7629 * Ok we want to try and use the cluster allocator, so
@@ -7839,6 +7837,7 @@ loop:
7839 failed_alloc = false; 7837 failed_alloc = false;
7840 BUG_ON(index != get_block_group_index(block_group)); 7838 BUG_ON(index != get_block_group_index(block_group));
7841 btrfs_release_block_group(block_group, delalloc); 7839 btrfs_release_block_group(block_group, delalloc);
7840 cond_resched();
7842 } 7841 }
7843 up_read(&space_info->groups_sem); 7842 up_read(&space_info->groups_sem);
7844 7843
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index f20ef211a73d..3a11ae63676e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2153,8 +2153,7 @@ process_leaf:
2153 u32 this_len = sizeof(*di) + name_len + data_len; 2153 u32 this_len = sizeof(*di) + name_len + data_len;
2154 char *name; 2154 char *name;
2155 2155
2156 ret = verify_dir_item(fs_info, path->nodes[0], 2156 ret = verify_dir_item(fs_info, path->nodes[0], i, di);
2157 path->slots[0], di);
2158 if (ret) { 2157 if (ret) {
2159 ret = -EIO; 2158 ret = -EIO;
2160 goto out; 2159 goto out;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 5eb7217738ed..e8b9a269fdde 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2702,7 +2702,7 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
2702 2702
2703 mutex_lock(&fs_info->chunk_mutex); 2703 mutex_lock(&fs_info->chunk_mutex);
2704 old_total = btrfs_super_total_bytes(super_copy); 2704 old_total = btrfs_super_total_bytes(super_copy);
2705 diff = new_size - device->total_bytes; 2705 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2706 2706
2707 if (new_size <= device->total_bytes || 2707 if (new_size <= device->total_bytes ||
2708 device->is_tgtdev_for_dev_replace) { 2708 device->is_tgtdev_for_dev_replace) {
@@ -4406,7 +4406,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4406 u64 diff; 4406 u64 diff;
4407 4407
4408 new_size = round_down(new_size, fs_info->sectorsize); 4408 new_size = round_down(new_size, fs_info->sectorsize);
4409 diff = old_size - new_size; 4409 diff = round_down(old_size - new_size, fs_info->sectorsize);
4410 4410
4411 if (device->is_tgtdev_for_dev_replace) 4411 if (device->is_tgtdev_for_dev_replace)
4412 return -EINVAL; 4412 return -EINVAL;
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 7bc186f4ed4d..2e71b6e7e646 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -77,13 +77,6 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type,
77 switch (type) { 77 switch (type) {
78 case ACL_TYPE_ACCESS: 78 case ACL_TYPE_ACCESS:
79 ea_name = XATTR_NAME_POSIX_ACL_ACCESS; 79 ea_name = XATTR_NAME_POSIX_ACL_ACCESS;
80 if (acl) {
81 rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
82 if (rc)
83 return rc;
84 inode->i_ctime = current_time(inode);
85 mark_inode_dirty(inode);
86 }
87 break; 80 break;
88 case ACL_TYPE_DEFAULT: 81 case ACL_TYPE_DEFAULT:
89 ea_name = XATTR_NAME_POSIX_ACL_DEFAULT; 82 ea_name = XATTR_NAME_POSIX_ACL_DEFAULT;
@@ -115,12 +108,27 @@ int jfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
115{ 108{
116 int rc; 109 int rc;
117 tid_t tid; 110 tid_t tid;
111 int update_mode = 0;
112 umode_t mode = inode->i_mode;
118 113
119 tid = txBegin(inode->i_sb, 0); 114 tid = txBegin(inode->i_sb, 0);
120 mutex_lock(&JFS_IP(inode)->commit_mutex); 115 mutex_lock(&JFS_IP(inode)->commit_mutex);
116 if (type == ACL_TYPE_ACCESS && acl) {
117 rc = posix_acl_update_mode(inode, &mode, &acl);
118 if (rc)
119 goto end_tx;
120 update_mode = 1;
121 }
121 rc = __jfs_set_acl(tid, inode, type, acl); 122 rc = __jfs_set_acl(tid, inode, type, acl);
122 if (!rc) 123 if (!rc) {
124 if (update_mode) {
125 inode->i_mode = mode;
126 inode->i_ctime = current_time(inode);
127 mark_inode_dirty(inode);
128 }
123 rc = txCommit(tid, 1, &inode, 0); 129 rc = txCommit(tid, 1, &inode, 0);
130 }
131end_tx:
124 txEnd(tid); 132 txEnd(tid);
125 mutex_unlock(&JFS_IP(inode)->commit_mutex); 133 mutex_unlock(&JFS_IP(inode)->commit_mutex);
126 return rc; 134 return rc;
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index bd9b641ada2c..7ddcb445a3d9 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -98,7 +98,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
98 goto out; 98 goto out;
99 } 99 }
100 100
101 VolumeSize = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits; 101 VolumeSize = i_size_read(sb->s_bdev->bd_inode) >> sb->s_blocksize_bits;
102 102
103 if (VolumeSize) { 103 if (VolumeSize) {
104 if (newLVSize > VolumeSize) { 104 if (newLVSize > VolumeSize) {
@@ -211,7 +211,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
211 txQuiesce(sb); 211 txQuiesce(sb);
212 212
213 /* Reset size of direct inode */ 213 /* Reset size of direct inode */
214 sbi->direct_inode->i_size = sb->s_bdev->bd_inode->i_size; 214 sbi->direct_inode->i_size = i_size_read(sb->s_bdev->bd_inode);
215 215
216 if (sbi->mntflag & JFS_INLINELOG) { 216 if (sbi->mntflag & JFS_INLINELOG) {
217 /* 217 /*
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index e8aad7d87b8c..78b41e1d5c67 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -313,7 +313,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
313 } 313 }
314 case Opt_resize_nosize: 314 case Opt_resize_nosize:
315 { 315 {
316 *newLVSize = sb->s_bdev->bd_inode->i_size >> 316 *newLVSize = i_size_read(sb->s_bdev->bd_inode) >>
317 sb->s_blocksize_bits; 317 sb->s_blocksize_bits;
318 if (*newLVSize == 0) 318 if (*newLVSize == 0)
319 pr_err("JFS: Cannot determine volume size\n"); 319 pr_err("JFS: Cannot determine volume size\n");
@@ -579,7 +579,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
579 goto out_unload; 579 goto out_unload;
580 } 580 }
581 inode->i_ino = 0; 581 inode->i_ino = 0;
582 inode->i_size = sb->s_bdev->bd_inode->i_size; 582 inode->i_size = i_size_read(sb->s_bdev->bd_inode);
583 inode->i_mapping->a_ops = &jfs_metapage_aops; 583 inode->i_mapping->a_ops = &jfs_metapage_aops;
584 hlist_add_fake(&inode->i_hash); 584 hlist_add_fake(&inode->i_hash);
585 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); 585 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 5713eb32a45e..af330c31f627 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -617,6 +617,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
617 if (result) 617 if (result)
618 goto out; 618 goto out;
619 } 619 }
620 if (iocb->ki_pos > i_size_read(inode))
621 nfs_revalidate_mapping(inode, file->f_mapping);
620 622
621 nfs_start_io_write(inode); 623 nfs_start_io_write(inode);
622 result = generic_write_checks(iocb, from); 624 result = generic_write_checks(iocb, from);
@@ -750,7 +752,7 @@ do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
750 */ 752 */
751 nfs_sync_mapping(filp->f_mapping); 753 nfs_sync_mapping(filp->f_mapping);
752 if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) 754 if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
753 nfs_zap_mapping(inode, filp->f_mapping); 755 nfs_zap_caches(inode);
754out: 756out:
755 return status; 757 return status;
756} 758}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index a0b4e1091340..18ca6879d8de 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2236,7 +2236,7 @@ static int nfs4_opendata_access(struct rpc_cred *cred,
2236 int openflags) 2236 int openflags)
2237{ 2237{
2238 struct nfs_access_entry cache; 2238 struct nfs_access_entry cache;
2239 u32 mask; 2239 u32 mask, flags;
2240 2240
2241 /* access call failed or for some reason the server doesn't 2241 /* access call failed or for some reason the server doesn't
2242 * support any access modes -- defer access call until later */ 2242 * support any access modes -- defer access call until later */
@@ -2250,16 +2250,20 @@ static int nfs4_opendata_access(struct rpc_cred *cred,
2250 */ 2250 */
2251 if (openflags & __FMODE_EXEC) { 2251 if (openflags & __FMODE_EXEC) {
2252 /* ONLY check for exec rights */ 2252 /* ONLY check for exec rights */
2253 mask = MAY_EXEC; 2253 if (S_ISDIR(state->inode->i_mode))
2254 mask = NFS4_ACCESS_LOOKUP;
2255 else
2256 mask = NFS4_ACCESS_EXECUTE;
2254 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2257 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2255 mask = MAY_READ; 2258 mask = NFS4_ACCESS_READ;
2256 2259
2257 cache.cred = cred; 2260 cache.cred = cred;
2258 cache.jiffies = jiffies; 2261 cache.jiffies = jiffies;
2259 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2262 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2260 nfs_access_add_cache(state->inode, &cache); 2263 nfs_access_add_cache(state->inode, &cache);
2261 2264
2262 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) 2265 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP;
2266 if ((mask & ~cache.mask & flags) == 0)
2263 return 0; 2267 return 0;
2264 2268
2265 return -EACCES; 2269 return -EACCES;
@@ -6492,7 +6496,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6492 set_current_state(TASK_INTERRUPTIBLE); 6496 set_current_state(TASK_INTERRUPTIBLE);
6493 spin_unlock_irqrestore(&q->lock, flags); 6497 spin_unlock_irqrestore(&q->lock, flags);
6494 6498
6495 freezable_schedule_timeout_interruptible(NFS4_LOCK_MAXTIMEOUT); 6499 freezable_schedule_timeout(NFS4_LOCK_MAXTIMEOUT);
6496 } 6500 }
6497 6501
6498 finish_wait(q, &wait); 6502 finish_wait(q, &wait);
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 0a9880777c9c..c09c16b1ad3b 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -5435,6 +5435,7 @@ __xfs_bunmapi(
5435 xfs_fsblock_t sum; 5435 xfs_fsblock_t sum;
5436 xfs_filblks_t len = *rlen; /* length to unmap in file */ 5436 xfs_filblks_t len = *rlen; /* length to unmap in file */
5437 xfs_fileoff_t max_len; 5437 xfs_fileoff_t max_len;
5438 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
5438 5439
5439 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_); 5440 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5440 5441
@@ -5534,6 +5535,17 @@ __xfs_bunmapi(
5534 */ 5535 */
5535 del = got; 5536 del = got;
5536 wasdel = isnullstartblock(del.br_startblock); 5537 wasdel = isnullstartblock(del.br_startblock);
5538
5539 /*
5540 * Make sure we don't touch multiple AGF headers out of order
5541 * in a single transaction, as that could cause AB-BA deadlocks.
5542 */
5543 if (!wasdel) {
5544 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
5545 if (prev_agno != NULLAGNUMBER && prev_agno > agno)
5546 break;
5547 prev_agno = agno;
5548 }
5537 if (got.br_startoff < start) { 5549 if (got.br_startoff < start) {
5538 del.br_startoff = start; 5550 del.br_startoff = start;
5539 del.br_blockcount -= start - got.br_startoff; 5551 del.br_blockcount -= start - got.br_startoff;
@@ -6499,6 +6511,15 @@ xfs_bmap_finish_one(
6499 xfs_fsblock_t firstfsb; 6511 xfs_fsblock_t firstfsb;
6500 int error = 0; 6512 int error = 0;
6501 6513
6514 /*
6515 * firstfsb is tied to the transaction lifetime and is used to
6516 * ensure correct AG locking order and schedule work item
6517 * continuations. XFS_BUI_MAX_FAST_EXTENTS (== 1) restricts us
6518 * to only making one bmap call per transaction, so it should
6519 * be safe to have it as a local variable here.
6520 */
6521 firstfsb = NULLFSBLOCK;
6522
6502 trace_xfs_bmap_deferred(tp->t_mountp, 6523 trace_xfs_bmap_deferred(tp->t_mountp,
6503 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type, 6524 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6504 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock), 6525 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 4da85fff69ad..e0bcc4a59efd 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -728,7 +728,8 @@ xfs_btree_firstrec(
728 * Get the block pointer for this level. 728 * Get the block pointer for this level.
729 */ 729 */
730 block = xfs_btree_get_block(cur, level, &bp); 730 block = xfs_btree_get_block(cur, level, &bp);
731 xfs_btree_check_block(cur, block, level, bp); 731 if (xfs_btree_check_block(cur, block, level, bp))
732 return 0;
732 /* 733 /*
733 * It's empty, there is no such record. 734 * It's empty, there is no such record.
734 */ 735 */
@@ -757,7 +758,8 @@ xfs_btree_lastrec(
757 * Get the block pointer for this level. 758 * Get the block pointer for this level.
758 */ 759 */
759 block = xfs_btree_get_block(cur, level, &bp); 760 block = xfs_btree_get_block(cur, level, &bp);
760 xfs_btree_check_block(cur, block, level, bp); 761 if (xfs_btree_check_block(cur, block, level, bp))
762 return 0;
761 /* 763 /*
762 * It's empty, there is no such record. 764 * It's empty, there is no such record.
763 */ 765 */
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
index d478065b9544..8727a43115ef 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -136,6 +136,8 @@ __xfs_dir3_data_check(
136 */ 136 */
137 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { 137 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
138 XFS_WANT_CORRUPTED_RETURN(mp, lastfree == 0); 138 XFS_WANT_CORRUPTED_RETURN(mp, lastfree == 0);
139 XFS_WANT_CORRUPTED_RETURN(mp, endp >=
140 p + be16_to_cpu(dup->length));
139 XFS_WANT_CORRUPTED_RETURN(mp, 141 XFS_WANT_CORRUPTED_RETURN(mp,
140 be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) == 142 be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) ==
141 (char *)dup - (char *)hdr); 143 (char *)dup - (char *)hdr);
@@ -164,6 +166,8 @@ __xfs_dir3_data_check(
164 XFS_WANT_CORRUPTED_RETURN(mp, dep->namelen != 0); 166 XFS_WANT_CORRUPTED_RETURN(mp, dep->namelen != 0);
165 XFS_WANT_CORRUPTED_RETURN(mp, 167 XFS_WANT_CORRUPTED_RETURN(mp,
166 !xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber))); 168 !xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)));
169 XFS_WANT_CORRUPTED_RETURN(mp, endp >=
170 p + ops->data_entsize(dep->namelen));
167 XFS_WANT_CORRUPTED_RETURN(mp, 171 XFS_WANT_CORRUPTED_RETURN(mp,
168 be16_to_cpu(*ops->data_entry_tag_p(dep)) == 172 be16_to_cpu(*ops->data_entry_tag_p(dep)) ==
169 (char *)dep - (char *)hdr); 173 (char *)dep - (char *)hdr);
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 900ea231f9a3..45b1c3b4e047 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1638,6 +1638,10 @@ xfs_refcount_recover_cow_leftovers(
1638 error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp); 1638 error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
1639 if (error) 1639 if (error)
1640 goto out_trans; 1640 goto out_trans;
1641 if (!agbp) {
1642 error = -ENOMEM;
1643 goto out_trans;
1644 }
1641 cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL); 1645 cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
1642 1646
1643 /* Find all the leftover CoW staging extents. */ 1647 /* Find all the leftover CoW staging extents. */
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 6ce948c436d5..15751dc2a27d 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -111,6 +111,9 @@ restart:
111 skipped = 0; 111 skipped = 0;
112 break; 112 break;
113 } 113 }
114 /* we're done if id overflows back to zero */
115 if (!next_index)
116 break;
114 } 117 }
115 118
116 if (skipped) { 119 if (skipped) {
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index ab2270a87196..f45fbf0db9bb 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -170,6 +170,8 @@ xfs_reflink_find_shared(
170 error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp); 170 error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
171 if (error) 171 if (error)
172 return error; 172 return error;
173 if (!agbp)
174 return -ENOMEM;
173 175
174 cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL); 176 cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
175 177
@@ -329,7 +331,7 @@ xfs_reflink_convert_cow_extent(
329 xfs_filblks_t count_fsb, 331 xfs_filblks_t count_fsb,
330 struct xfs_defer_ops *dfops) 332 struct xfs_defer_ops *dfops)
331{ 333{
332 xfs_fsblock_t first_block; 334 xfs_fsblock_t first_block = NULLFSBLOCK;
333 int nimaps = 1; 335 int nimaps = 1;
334 336
335 if (imap->br_state == XFS_EXT_NORM) 337 if (imap->br_state == XFS_EXT_NORM)
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
index d4b72944ccda..1e3a74f94131 100644
--- a/include/acpi/acpi_numa.h
+++ b/include/acpi/acpi_numa.h
@@ -3,6 +3,7 @@
3 3
4#ifdef CONFIG_ACPI_NUMA 4#ifdef CONFIG_ACPI_NUMA
5#include <linux/kernel.h> 5#include <linux/kernel.h>
6#include <linux/numa.h>
6 7
7/* Proximity bitmap length */ 8/* Proximity bitmap length */
8#if MAX_NUMNODES > 256 9#if MAX_NUMNODES > 256
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 794811875732..df97b7af7e2c 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -87,6 +87,7 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
87void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, 87void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
88 size_t size); 88 size_t size);
89void dax_write_cache(struct dax_device *dax_dev, bool wc); 89void dax_write_cache(struct dax_device *dax_dev, bool wc);
90bool dax_write_cache_enabled(struct dax_device *dax_dev);
90 91
91/* 92/*
92 * We use lowest available bit in exceptional entry for locking, one bit for 93 * We use lowest available bit in exceptional entry for locking, one bit for
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 843ab866e0f4..03c0196a6f24 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -157,16 +157,40 @@ static inline int is_device_dma_capable(struct device *dev)
157 * These three functions are only for dma allocator. 157 * These three functions are only for dma allocator.
158 * Don't use them in device drivers. 158 * Don't use them in device drivers.
159 */ 159 */
160int dma_alloc_from_coherent(struct device *dev, ssize_t size, 160int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
161 dma_addr_t *dma_handle, void **ret); 161 dma_addr_t *dma_handle, void **ret);
162int dma_release_from_coherent(struct device *dev, int order, void *vaddr); 162int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
163 163
164int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, 164int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
165 void *cpu_addr, size_t size, int *ret); 165 void *cpu_addr, size_t size, int *ret);
166
167void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
168int dma_release_from_global_coherent(int order, void *vaddr);
169int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
170 size_t size, int *ret);
171
166#else 172#else
167#define dma_alloc_from_coherent(dev, size, handle, ret) (0) 173#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
168#define dma_release_from_coherent(dev, order, vaddr) (0) 174#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
169#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0) 175#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
176
177static inline void *dma_alloc_from_global_coherent(ssize_t size,
178 dma_addr_t *dma_handle)
179{
180 return NULL;
181}
182
183static inline int dma_release_from_global_coherent(int order, void *vaddr)
184{
185 return 0;
186}
187
188static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
189 void *cpu_addr, size_t size,
190 int *ret)
191{
192 return 0;
193}
170#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ 194#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
171 195
172#ifdef CONFIG_HAS_DMA 196#ifdef CONFIG_HAS_DMA
@@ -481,7 +505,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
481 505
482 BUG_ON(!ops); 506 BUG_ON(!ops);
483 507
484 if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr)) 508 if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
485 return cpu_addr; 509 return cpu_addr;
486 510
487 if (!arch_dma_alloc_attrs(&dev, &flag)) 511 if (!arch_dma_alloc_attrs(&dev, &flag))
@@ -503,7 +527,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
503 BUG_ON(!ops); 527 BUG_ON(!ops);
504 WARN_ON(irqs_disabled()); 528 WARN_ON(irqs_disabled());
505 529
506 if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) 530 if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
507 return; 531 return;
508 532
509 if (!ops->free || !cpu_addr) 533 if (!ops->free || !cpu_addr)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 00db35b61e9e..d2d543794093 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -388,7 +388,12 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
388 * @irq_mask_ack: ack and mask an interrupt source 388 * @irq_mask_ack: ack and mask an interrupt source
389 * @irq_unmask: unmask an interrupt source 389 * @irq_unmask: unmask an interrupt source
390 * @irq_eoi: end of interrupt 390 * @irq_eoi: end of interrupt
391 * @irq_set_affinity: set the CPU affinity on SMP machines 391 * @irq_set_affinity: Set the CPU affinity on SMP machines. If the force
392 * argument is true, it tells the driver to
393 * unconditionally apply the affinity setting. Sanity
394 * checks against the supplied affinity mask are not
395 * required. This is used for CPU hotplug where the
396 * target CPU is not yet set in the cpu_online_mask.
392 * @irq_retrigger: resend an IRQ to the CPU 397 * @irq_retrigger: resend an IRQ to the CPU
393 * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ 398 * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
394 * @irq_set_wake: enable/disable power-management wake-on of an IRQ 399 * @irq_set_wake: enable/disable power-management wake-on of an IRQ
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 648b34cabb38..890b706d1943 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -445,6 +445,7 @@ struct kvm {
445 struct kvm_stat_data **debugfs_stat_data; 445 struct kvm_stat_data **debugfs_stat_data;
446 struct srcu_struct srcu; 446 struct srcu_struct srcu;
447 struct srcu_struct irq_srcu; 447 struct srcu_struct irq_srcu;
448 pid_t userspace_pid;
448}; 449};
449 450
450#define kvm_err(fmt, ...) \ 451#define kvm_err(fmt, ...) \
diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h
index 21c37e39e41a..36cca93a5ff2 100644
--- a/include/linux/nvme-fc.h
+++ b/include/linux/nvme-fc.h
@@ -334,5 +334,24 @@ struct fcnvme_ls_disconnect_acc {
334#define NVME_FC_LS_TIMEOUT_SEC 2 /* 2 seconds */ 334#define NVME_FC_LS_TIMEOUT_SEC 2 /* 2 seconds */
335#define NVME_FC_TGTOP_TIMEOUT_SEC 2 /* 2 seconds */ 335#define NVME_FC_TGTOP_TIMEOUT_SEC 2 /* 2 seconds */
336 336
337/*
338 * TRADDR string must be of form "nn-<16hexdigits>:pn-<16hexdigits>"
339 * the string is allowed to be specified with or without a "0x" prefix
340 * infront of the <16hexdigits>. Without is considered the "min" string
341 * and with is considered the "max" string. The hexdigits may be upper
342 * or lower case.
343 */
344#define NVME_FC_TRADDR_NNLEN 3 /* "?n-" */
345#define NVME_FC_TRADDR_OXNNLEN 5 /* "?n-0x" */
346#define NVME_FC_TRADDR_HEXNAMELEN 16
347#define NVME_FC_TRADDR_MINLENGTH \
348 (2 * (NVME_FC_TRADDR_NNLEN + NVME_FC_TRADDR_HEXNAMELEN) + 1)
349#define NVME_FC_TRADDR_MAXLENGTH \
350 (2 * (NVME_FC_TRADDR_OXNNLEN + NVME_FC_TRADDR_HEXNAMELEN) + 1)
351#define NVME_FC_TRADDR_MIN_PN_OFFSET \
352 (NVME_FC_TRADDR_NNLEN + NVME_FC_TRADDR_HEXNAMELEN + 1)
353#define NVME_FC_TRADDR_MAX_PN_OFFSET \
354 (NVME_FC_TRADDR_OXNNLEN + NVME_FC_TRADDR_HEXNAMELEN + 1)
355
337 356
338#endif /* _NVME_FC_H */ 357#endif /* _NVME_FC_H */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index bc74da018bdc..25d8225dbd04 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -1006,7 +1006,7 @@ static inline bool nvme_is_write(struct nvme_command *cmd)
1006 * Why can't we simply have a Fabrics In and Fabrics out command? 1006 * Why can't we simply have a Fabrics In and Fabrics out command?
1007 */ 1007 */
1008 if (unlikely(cmd->common.opcode == nvme_fabrics_command)) 1008 if (unlikely(cmd->common.opcode == nvme_fabrics_command))
1009 return cmd->fabrics.opcode & 1; 1009 return cmd->fabrics.fctype & 1;
1010 return cmd->common.opcode & 1; 1010 return cmd->common.opcode & 1;
1011} 1011}
1012 1012
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 1360dd6d5e61..af0f44effd44 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -24,10 +24,14 @@
24 * interrupt and passed the address of the low level handler, 24 * interrupt and passed the address of the low level handler,
25 * and can be used to implement any platform specific handling 25 * and can be used to implement any platform specific handling
26 * before or after calling it. 26 * before or after calling it.
27 *
28 * @irq_flags: if non-zero, these flags will be passed to request_irq
29 * when requesting interrupts for this PMU device.
27 */ 30 */
28struct arm_pmu_platdata { 31struct arm_pmu_platdata {
29 irqreturn_t (*handle_irq)(int irq, void *dev, 32 irqreturn_t (*handle_irq)(int irq, void *dev,
30 irq_handler_t pmu_handler); 33 irq_handler_t pmu_handler);
34 unsigned long irq_flags;
31}; 35};
32 36
33#ifdef CONFIG_ARM_PMU 37#ifdef CONFIG_ARM_PMU
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h
index 8e981be2e2c2..0ff1e0dba720 100644
--- a/include/linux/platform_data/hsmmc-omap.h
+++ b/include/linux/platform_data/hsmmc-omap.h
@@ -55,9 +55,6 @@ struct omap_hsmmc_platform_data {
55 u32 caps; /* Used for the MMC driver on 2430 and later */ 55 u32 caps; /* Used for the MMC driver on 2430 and later */
56 u32 pm_caps; /* PM capabilities of the mmc */ 56 u32 pm_caps; /* PM capabilities of the mmc */
57 57
58 /* use the internal clock */
59 unsigned internal_clock:1;
60
61 /* nonremovable e.g. eMMC */ 58 /* nonremovable e.g. eMMC */
62 unsigned nonremovable:1; 59 unsigned nonremovable:1;
63 60
@@ -73,13 +70,6 @@ struct omap_hsmmc_platform_data {
73 int gpio_cd; /* gpio (card detect) */ 70 int gpio_cd; /* gpio (card detect) */
74 int gpio_cod; /* gpio (cover detect) */ 71 int gpio_cod; /* gpio (cover detect) */
75 int gpio_wp; /* gpio (write protect) */ 72 int gpio_wp; /* gpio (write protect) */
76
77 int (*set_power)(struct device *dev, int power_on, int vdd);
78 void (*remux)(struct device *dev, int power_on);
79 /* Call back before enabling / disabling regulators */
80 void (*before_set_reg)(struct device *dev, int power_on, int vdd);
81 /* Call back after enabling / disabling regulators */
82 void (*after_set_reg)(struct device *dev, int power_on, int vdd);
83 /* if we have special card, init it using this callback */ 73 /* if we have special card, init it using this callback */
84 void (*init_card)(struct mmc_card *card); 74 void (*init_card)(struct mmc_card *card);
85 75
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
index 2251e1925ea4..33b0bdbb613c 100644
--- a/include/linux/uuid.h
+++ b/include/linux/uuid.h
@@ -84,26 +84,12 @@ int guid_parse(const char *uuid, guid_t *u);
84int uuid_parse(const char *uuid, uuid_t *u); 84int uuid_parse(const char *uuid, uuid_t *u);
85 85
86/* backwards compatibility, don't use in new code */ 86/* backwards compatibility, don't use in new code */
87typedef uuid_t uuid_be;
88#define UUID_BE(a, _b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
89 UUID_INIT(a, _b, c, d0, d1, d2, d3, d4, d5, d6, d7)
90#define NULL_UUID_BE \
91 UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \
92 0x00, 0x00, 0x00, 0x00)
93
94#define uuid_le_gen(u) guid_gen(u) 87#define uuid_le_gen(u) guid_gen(u)
95#define uuid_be_gen(u) uuid_gen(u)
96#define uuid_le_to_bin(guid, u) guid_parse(guid, u) 88#define uuid_le_to_bin(guid, u) guid_parse(guid, u)
97#define uuid_be_to_bin(uuid, u) uuid_parse(uuid, u)
98 89
99static inline int uuid_le_cmp(const guid_t u1, const guid_t u2) 90static inline int uuid_le_cmp(const guid_t u1, const guid_t u2)
100{ 91{
101 return memcmp(&u1, &u2, sizeof(guid_t)); 92 return memcmp(&u1, &u2, sizeof(guid_t));
102} 93}
103 94
104static inline int uuid_be_cmp(const uuid_t u1, const uuid_t u2)
105{
106 return memcmp(&u1, &u2, sizeof(uuid_t));
107}
108
109#endif 95#endif
diff --git a/include/linux/wait.h b/include/linux/wait.h
index b289c96151ee..5b74e36c0ca8 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -529,13 +529,13 @@ do { \
529 529
530/** 530/**
531 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses 531 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
532 * @wq_head: the waitqueue to wait on 532 * @wq: the waitqueue to wait on
533 * @condition: a C expression for the event to wait for 533 * @condition: a C expression for the event to wait for
534 * @timeout: timeout, as a ktime_t 534 * @timeout: timeout, as a ktime_t
535 * 535 *
536 * The process is put to sleep (TASK_INTERRUPTIBLE) until the 536 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
537 * @condition evaluates to true or a signal is received. 537 * @condition evaluates to true or a signal is received.
538 * The @condition is checked each time the waitqueue @wq_head is woken up. 538 * The @condition is checked each time the waitqueue @wq is woken up.
539 * 539 *
540 * wake_up() has to be called after changing any variable that could 540 * wake_up() has to be called after changing any variable that could
541 * change the result of the wait condition. 541 * change the result of the wait condition.
@@ -735,12 +735,12 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
735 735
736/** 736/**
737 * wait_event_killable - sleep until a condition gets true 737 * wait_event_killable - sleep until a condition gets true
738 * @wq: the waitqueue to wait on 738 * @wq_head: the waitqueue to wait on
739 * @condition: a C expression for the event to wait for 739 * @condition: a C expression for the event to wait for
740 * 740 *
741 * The process is put to sleep (TASK_KILLABLE) until the 741 * The process is put to sleep (TASK_KILLABLE) until the
742 * @condition evaluates to true or a signal is received. 742 * @condition evaluates to true or a signal is received.
743 * The @condition is checked each time the waitqueue @wq is woken up. 743 * The @condition is checked each time the waitqueue @wq_head is woken up.
744 * 744 *
745 * wake_up() has to be called after changing any variable that could 745 * wake_up() has to be called after changing any variable that could
746 * change the result of the wait condition. 746 * change the result of the wait condition.
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index aee8f7ec40af..638eb9c83d9f 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -95,8 +95,13 @@ static bool migrate_one_irq(struct irq_desc *desc)
95 affinity = cpu_online_mask; 95 affinity = cpu_online_mask;
96 brokeaff = true; 96 brokeaff = true;
97 } 97 }
98 98 /*
99 err = irq_do_set_affinity(d, affinity, true); 99 * Do not set the force argument of irq_do_set_affinity() as this
100 * disables the masking of offline CPUs from the supplied affinity
101 * mask and therefore might keep/reassign the irq to the outgoing
102 * CPU.
103 */
104 err = irq_do_set_affinity(d, affinity, false);
100 if (err) { 105 if (err) {
101 pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n", 106 pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
102 d->irq, err); 107 d->irq, err);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 17c667b427b4..0869b20fba81 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2069,7 +2069,7 @@ out:
2069/** 2069/**
2070 * try_to_wake_up_local - try to wake up a local task with rq lock held 2070 * try_to_wake_up_local - try to wake up a local task with rq lock held
2071 * @p: the thread to be awakened 2071 * @p: the thread to be awakened
2072 * @cookie: context's cookie for pinning 2072 * @rf: request-queue flags for pinning
2073 * 2073 *
2074 * Put @p on the run-queue if it's not already there. The caller must 2074 * Put @p on the run-queue if it's not already there. The caller must
2075 * ensure that this_rq() is locked, @p is bound to this_rq() and not 2075 * ensure that this_rq() is locked, @p is bound to this_rq() and not
diff --git a/lib/test_uuid.c b/lib/test_uuid.c
index 478c049630b5..cd819c397dc7 100644
--- a/lib/test_uuid.c
+++ b/lib/test_uuid.c
@@ -82,7 +82,7 @@ static void __init test_uuid_test(const struct test_uuid_data *data)
82 test_uuid_failed("conversion", false, true, data->uuid, NULL); 82 test_uuid_failed("conversion", false, true, data->uuid, NULL);
83 83
84 total_tests++; 84 total_tests++;
85 if (uuid_equal(&data->be, &be)) { 85 if (!uuid_equal(&data->be, &be)) {
86 sprintf(buf, "%pUb", &be); 86 sprintf(buf, "%pUb", &be);
87 test_uuid_failed("cmp", false, true, data->uuid, buf); 87 test_uuid_failed("cmp", false, true, data->uuid, buf);
88 } 88 }
diff --git a/scripts/dtc/dtx_diff b/scripts/dtc/dtx_diff
index fb86f3899e16..f9a3d8d23c64 100755
--- a/scripts/dtc/dtx_diff
+++ b/scripts/dtc/dtx_diff
@@ -321,7 +321,7 @@ fi
321cpp_flags="\ 321cpp_flags="\
322 -nostdinc \ 322 -nostdinc \
323 -I${srctree}/arch/${ARCH}/boot/dts \ 323 -I${srctree}/arch/${ARCH}/boot/dts \
324 -I${srctree}/arch/${ARCH}/boot/dts/include \ 324 -I${srctree}/scripts/dtc/include-prefixes \
325 -I${srctree}/drivers/of/testcase-data \ 325 -I${srctree}/drivers/of/testcase-data \
326 -undef -D__DTS__" 326 -undef -D__DTS__"
327 327
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index 2e402ece4c86..8e6b04b39dcc 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -1235,8 +1235,6 @@ static int snd_fm801_create(struct snd_card *card,
1235 } 1235 }
1236 } 1236 }
1237 1237
1238 snd_fm801_chip_init(chip);
1239
1240 if ((chip->tea575x_tuner & TUNER_ONLY) == 0) { 1238 if ((chip->tea575x_tuner & TUNER_ONLY) == 0) {
1241 if (devm_request_irq(&pci->dev, pci->irq, snd_fm801_interrupt, 1239 if (devm_request_irq(&pci->dev, pci->irq, snd_fm801_interrupt,
1242 IRQF_SHARED, KBUILD_MODNAME, chip)) { 1240 IRQF_SHARED, KBUILD_MODNAME, chip)) {
@@ -1248,6 +1246,8 @@ static int snd_fm801_create(struct snd_card *card,
1248 pci_set_master(pci); 1246 pci_set_master(pci);
1249 } 1247 }
1250 1248
1249 snd_fm801_chip_init(chip);
1250
1251 if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { 1251 if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
1252 snd_fm801_free(chip); 1252 snd_fm801_free(chip);
1253 return err; 1253 return err;
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 63bc894ddf5e..8c1289963c80 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -933,6 +933,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
933 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), 933 SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
934 SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), 934 SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
935 SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), 935 SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
936 SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
936 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), 937 SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
937 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO), 938 SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
938 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), 939 SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index d549f35f39d3..53f9311370de 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3733,11 +3733,15 @@ HDA_CODEC_ENTRY(0x1002aa01, "R6xx HDMI", patch_atihdmi),
3733HDA_CODEC_ENTRY(0x10951390, "SiI1390 HDMI", patch_generic_hdmi), 3733HDA_CODEC_ENTRY(0x10951390, "SiI1390 HDMI", patch_generic_hdmi),
3734HDA_CODEC_ENTRY(0x10951392, "SiI1392 HDMI", patch_generic_hdmi), 3734HDA_CODEC_ENTRY(0x10951392, "SiI1392 HDMI", patch_generic_hdmi),
3735HDA_CODEC_ENTRY(0x17e80047, "Chrontel HDMI", patch_generic_hdmi), 3735HDA_CODEC_ENTRY(0x17e80047, "Chrontel HDMI", patch_generic_hdmi),
3736HDA_CODEC_ENTRY(0x10de0001, "MCP73 HDMI", patch_nvhdmi_2ch),
3736HDA_CODEC_ENTRY(0x10de0002, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x), 3737HDA_CODEC_ENTRY(0x10de0002, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
3737HDA_CODEC_ENTRY(0x10de0003, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x), 3738HDA_CODEC_ENTRY(0x10de0003, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
3739HDA_CODEC_ENTRY(0x10de0004, "GPU 04 HDMI", patch_nvhdmi_8ch_7x),
3738HDA_CODEC_ENTRY(0x10de0005, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x), 3740HDA_CODEC_ENTRY(0x10de0005, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
3739HDA_CODEC_ENTRY(0x10de0006, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x), 3741HDA_CODEC_ENTRY(0x10de0006, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
3740HDA_CODEC_ENTRY(0x10de0007, "MCP79/7A HDMI", patch_nvhdmi_8ch_7x), 3742HDA_CODEC_ENTRY(0x10de0007, "MCP79/7A HDMI", patch_nvhdmi_8ch_7x),
3743HDA_CODEC_ENTRY(0x10de0008, "GPU 08 HDMI/DP", patch_nvhdmi),
3744HDA_CODEC_ENTRY(0x10de0009, "GPU 09 HDMI/DP", patch_nvhdmi),
3741HDA_CODEC_ENTRY(0x10de000a, "GPU 0a HDMI/DP", patch_nvhdmi), 3745HDA_CODEC_ENTRY(0x10de000a, "GPU 0a HDMI/DP", patch_nvhdmi),
3742HDA_CODEC_ENTRY(0x10de000b, "GPU 0b HDMI/DP", patch_nvhdmi), 3746HDA_CODEC_ENTRY(0x10de000b, "GPU 0b HDMI/DP", patch_nvhdmi),
3743HDA_CODEC_ENTRY(0x10de000c, "MCP89 HDMI", patch_nvhdmi), 3747HDA_CODEC_ENTRY(0x10de000c, "MCP89 HDMI", patch_nvhdmi),
@@ -3764,17 +3768,40 @@ HDA_CODEC_ENTRY(0x10de0041, "GPU 41 HDMI/DP", patch_nvhdmi),
3764HDA_CODEC_ENTRY(0x10de0042, "GPU 42 HDMI/DP", patch_nvhdmi), 3768HDA_CODEC_ENTRY(0x10de0042, "GPU 42 HDMI/DP", patch_nvhdmi),
3765HDA_CODEC_ENTRY(0x10de0043, "GPU 43 HDMI/DP", patch_nvhdmi), 3769HDA_CODEC_ENTRY(0x10de0043, "GPU 43 HDMI/DP", patch_nvhdmi),
3766HDA_CODEC_ENTRY(0x10de0044, "GPU 44 HDMI/DP", patch_nvhdmi), 3770HDA_CODEC_ENTRY(0x10de0044, "GPU 44 HDMI/DP", patch_nvhdmi),
3771HDA_CODEC_ENTRY(0x10de0045, "GPU 45 HDMI/DP", patch_nvhdmi),
3772HDA_CODEC_ENTRY(0x10de0050, "GPU 50 HDMI/DP", patch_nvhdmi),
3767HDA_CODEC_ENTRY(0x10de0051, "GPU 51 HDMI/DP", patch_nvhdmi), 3773HDA_CODEC_ENTRY(0x10de0051, "GPU 51 HDMI/DP", patch_nvhdmi),
3774HDA_CODEC_ENTRY(0x10de0052, "GPU 52 HDMI/DP", patch_nvhdmi),
3768HDA_CODEC_ENTRY(0x10de0060, "GPU 60 HDMI/DP", patch_nvhdmi), 3775HDA_CODEC_ENTRY(0x10de0060, "GPU 60 HDMI/DP", patch_nvhdmi),
3776HDA_CODEC_ENTRY(0x10de0061, "GPU 61 HDMI/DP", patch_nvhdmi),
3777HDA_CODEC_ENTRY(0x10de0062, "GPU 62 HDMI/DP", patch_nvhdmi),
3769HDA_CODEC_ENTRY(0x10de0067, "MCP67 HDMI", patch_nvhdmi_2ch), 3778HDA_CODEC_ENTRY(0x10de0067, "MCP67 HDMI", patch_nvhdmi_2ch),
3770HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi), 3779HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi),
3771HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi), 3780HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi),
3772HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi), 3781HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi),
3782HDA_CODEC_ENTRY(0x10de0073, "GPU 73 HDMI/DP", patch_nvhdmi),
3783HDA_CODEC_ENTRY(0x10de0074, "GPU 74 HDMI/DP", patch_nvhdmi),
3784HDA_CODEC_ENTRY(0x10de0076, "GPU 76 HDMI/DP", patch_nvhdmi),
3785HDA_CODEC_ENTRY(0x10de007b, "GPU 7b HDMI/DP", patch_nvhdmi),
3786HDA_CODEC_ENTRY(0x10de007c, "GPU 7c HDMI/DP", patch_nvhdmi),
3773HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi), 3787HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi),
3788HDA_CODEC_ENTRY(0x10de007e, "GPU 7e HDMI/DP", patch_nvhdmi),
3774HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP", patch_nvhdmi), 3789HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP", patch_nvhdmi),
3790HDA_CODEC_ENTRY(0x10de0081, "GPU 81 HDMI/DP", patch_nvhdmi),
3775HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi), 3791HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi),
3776HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi), 3792HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi),
3793HDA_CODEC_ENTRY(0x10de0084, "GPU 84 HDMI/DP", patch_nvhdmi),
3794HDA_CODEC_ENTRY(0x10de0090, "GPU 90 HDMI/DP", patch_nvhdmi),
3795HDA_CODEC_ENTRY(0x10de0091, "GPU 91 HDMI/DP", patch_nvhdmi),
3796HDA_CODEC_ENTRY(0x10de0092, "GPU 92 HDMI/DP", patch_nvhdmi),
3797HDA_CODEC_ENTRY(0x10de0093, "GPU 93 HDMI/DP", patch_nvhdmi),
3798HDA_CODEC_ENTRY(0x10de0094, "GPU 94 HDMI/DP", patch_nvhdmi),
3799HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP", patch_nvhdmi),
3800HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi),
3801HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi),
3802HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi),
3777HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch), 3803HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
3804HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch),
3778HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), 3805HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi),
3779HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP", patch_via_hdmi), 3806HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP", patch_via_hdmi),
3780HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP", patch_generic_hdmi), 3807HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP", patch_generic_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 45d58fc1df39..443a45eaec32 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3838,6 +3838,17 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
3838 } 3838 }
3839} 3839}
3840 3840
3841static struct coef_fw alc225_pre_hsmode[] = {
3842 UPDATE_COEF(0x4a, 1<<8, 0),
3843 UPDATE_COEFEX(0x57, 0x05, 1<<14, 0),
3844 UPDATE_COEF(0x63, 3<<14, 3<<14),
3845 UPDATE_COEF(0x4a, 3<<4, 2<<4),
3846 UPDATE_COEF(0x4a, 3<<10, 3<<10),
3847 UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
3848 UPDATE_COEF(0x4a, 3<<10, 0),
3849 {}
3850};
3851
3841static void alc_headset_mode_unplugged(struct hda_codec *codec) 3852static void alc_headset_mode_unplugged(struct hda_codec *codec)
3842{ 3853{
3843 static struct coef_fw coef0255[] = { 3854 static struct coef_fw coef0255[] = {
@@ -3873,6 +3884,10 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
3873 UPDATE_COEF(0x67, 0x2000, 0), 3884 UPDATE_COEF(0x67, 0x2000, 0),
3874 {} 3885 {}
3875 }; 3886 };
3887 static struct coef_fw coef0298[] = {
3888 UPDATE_COEF(0x19, 0x1300, 0x0300),
3889 {}
3890 };
3876 static struct coef_fw coef0292[] = { 3891 static struct coef_fw coef0292[] = {
3877 WRITE_COEF(0x76, 0x000e), 3892 WRITE_COEF(0x76, 0x000e),
3878 WRITE_COEF(0x6c, 0x2400), 3893 WRITE_COEF(0x6c, 0x2400),
@@ -3895,13 +3910,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
3895 {} 3910 {}
3896 }; 3911 };
3897 static struct coef_fw coef0225[] = { 3912 static struct coef_fw coef0225[] = {
3898 UPDATE_COEF(0x4a, 1<<8, 0), 3913 UPDATE_COEF(0x63, 3<<14, 0),
3899 UPDATE_COEFEX(0x57, 0x05, 1<<14, 0),
3900 UPDATE_COEF(0x63, 3<<14, 3<<14),
3901 UPDATE_COEF(0x4a, 3<<4, 2<<4),
3902 UPDATE_COEF(0x4a, 3<<10, 3<<10),
3903 UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
3904 UPDATE_COEF(0x4a, 3<<10, 0),
3905 {} 3914 {}
3906 }; 3915 };
3907 static struct coef_fw coef0274[] = { 3916 static struct coef_fw coef0274[] = {
@@ -3935,7 +3944,10 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
3935 break; 3944 break;
3936 case 0x10ec0286: 3945 case 0x10ec0286:
3937 case 0x10ec0288: 3946 case 0x10ec0288:
3947 alc_process_coef_fw(codec, coef0288);
3948 break;
3938 case 0x10ec0298: 3949 case 0x10ec0298:
3950 alc_process_coef_fw(codec, coef0298);
3939 alc_process_coef_fw(codec, coef0288); 3951 alc_process_coef_fw(codec, coef0288);
3940 break; 3952 break;
3941 case 0x10ec0292: 3953 case 0x10ec0292:
@@ -3976,6 +3988,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
3976 {} 3988 {}
3977 }; 3989 };
3978 static struct coef_fw coef0288[] = { 3990 static struct coef_fw coef0288[] = {
3991 UPDATE_COEF(0x4f, 0x00c0, 0),
3979 UPDATE_COEF(0x50, 0x2000, 0), 3992 UPDATE_COEF(0x50, 0x2000, 0),
3980 UPDATE_COEF(0x56, 0x0006, 0), 3993 UPDATE_COEF(0x56, 0x0006, 0),
3981 UPDATE_COEF(0x4f, 0xfcc0, 0xc400), 3994 UPDATE_COEF(0x4f, 0xfcc0, 0xc400),
@@ -4039,7 +4052,6 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
4039 case 0x10ec0286: 4052 case 0x10ec0286:
4040 case 0x10ec0288: 4053 case 0x10ec0288:
4041 case 0x10ec0298: 4054 case 0x10ec0298:
4042 alc_update_coef_idx(codec, 0x4f, 0x000c, 0);
4043 snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); 4055 snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
4044 alc_process_coef_fw(codec, coef0288); 4056 alc_process_coef_fw(codec, coef0288);
4045 snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); 4057 snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
@@ -4072,6 +4084,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
4072 case 0x10ec0225: 4084 case 0x10ec0225:
4073 case 0x10ec0295: 4085 case 0x10ec0295:
4074 case 0x10ec0299: 4086 case 0x10ec0299:
4087 alc_process_coef_fw(codec, alc225_pre_hsmode);
4075 alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10); 4088 alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
4076 snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); 4089 snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
4077 alc_process_coef_fw(codec, coef0225); 4090 alc_process_coef_fw(codec, coef0225);
@@ -4084,7 +4097,12 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
4084static void alc_headset_mode_default(struct hda_codec *codec) 4097static void alc_headset_mode_default(struct hda_codec *codec)
4085{ 4098{
4086 static struct coef_fw coef0225[] = { 4099 static struct coef_fw coef0225[] = {
4087 UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10), 4100 UPDATE_COEF(0x45, 0x3f<<10, 0x30<<10),
4101 UPDATE_COEF(0x45, 0x3f<<10, 0x31<<10),
4102 UPDATE_COEF(0x49, 3<<8, 0<<8),
4103 UPDATE_COEF(0x4a, 3<<4, 3<<4),
4104 UPDATE_COEF(0x63, 3<<14, 0),
4105 UPDATE_COEF(0x67, 0xf000, 0x3000),
4088 {} 4106 {}
4089 }; 4107 };
4090 static struct coef_fw coef0255[] = { 4108 static struct coef_fw coef0255[] = {
@@ -4138,6 +4156,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
4138 case 0x10ec0225: 4156 case 0x10ec0225:
4139 case 0x10ec0295: 4157 case 0x10ec0295:
4140 case 0x10ec0299: 4158 case 0x10ec0299:
4159 alc_process_coef_fw(codec, alc225_pre_hsmode);
4141 alc_process_coef_fw(codec, coef0225); 4160 alc_process_coef_fw(codec, coef0225);
4142 break; 4161 break;
4143 case 0x10ec0255: 4162 case 0x10ec0255:
@@ -4177,6 +4196,8 @@ static void alc_headset_mode_default(struct hda_codec *codec)
4177/* Iphone type */ 4196/* Iphone type */
4178static void alc_headset_mode_ctia(struct hda_codec *codec) 4197static void alc_headset_mode_ctia(struct hda_codec *codec)
4179{ 4198{
4199 int val;
4200
4180 static struct coef_fw coef0255[] = { 4201 static struct coef_fw coef0255[] = {
4181 WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */ 4202 WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
4182 WRITE_COEF(0x1b, 0x0c2b), 4203 WRITE_COEF(0x1b, 0x0c2b),
@@ -4219,11 +4240,14 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
4219 WRITE_COEF(0xc3, 0x0000), 4240 WRITE_COEF(0xc3, 0x0000),
4220 {} 4241 {}
4221 }; 4242 };
4222 static struct coef_fw coef0225[] = { 4243 static struct coef_fw coef0225_1[] = {
4223 UPDATE_COEF(0x45, 0x3f<<10, 0x35<<10), 4244 UPDATE_COEF(0x45, 0x3f<<10, 0x35<<10),
4224 UPDATE_COEF(0x49, 1<<8, 1<<8), 4245 UPDATE_COEF(0x63, 3<<14, 2<<14),
4225 UPDATE_COEF(0x4a, 7<<6, 7<<6), 4246 {}
4226 UPDATE_COEF(0x4a, 3<<4, 3<<4), 4247 };
4248 static struct coef_fw coef0225_2[] = {
4249 UPDATE_COEF(0x45, 0x3f<<10, 0x35<<10),
4250 UPDATE_COEF(0x63, 3<<14, 1<<14),
4227 {} 4251 {}
4228 }; 4252 };
4229 4253
@@ -4244,8 +4268,17 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
4244 alc_process_coef_fw(codec, coef0233); 4268 alc_process_coef_fw(codec, coef0233);
4245 break; 4269 break;
4246 case 0x10ec0298: 4270 case 0x10ec0298:
4247 alc_update_coef_idx(codec, 0x8e, 0x0070, 0x0020);/* Headset output enable */ 4271 val = alc_read_coef_idx(codec, 0x50);
4248 /* ALC298 jack type setting is the same with ALC286/ALC288 */ 4272 if (val & (1 << 12)) {
4273 alc_update_coef_idx(codec, 0x8e, 0x0070, 0x0020);
4274 alc_update_coef_idx(codec, 0x4f, 0xfcc0, 0xd400);
4275 msleep(300);
4276 } else {
4277 alc_update_coef_idx(codec, 0x8e, 0x0070, 0x0010);
4278 alc_update_coef_idx(codec, 0x4f, 0xfcc0, 0xd400);
4279 msleep(300);
4280 }
4281 break;
4249 case 0x10ec0286: 4282 case 0x10ec0286:
4250 case 0x10ec0288: 4283 case 0x10ec0288:
4251 alc_update_coef_idx(codec, 0x4f, 0xfcc0, 0xd400); 4284 alc_update_coef_idx(codec, 0x4f, 0xfcc0, 0xd400);
@@ -4264,7 +4297,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
4264 case 0x10ec0225: 4297 case 0x10ec0225:
4265 case 0x10ec0295: 4298 case 0x10ec0295:
4266 case 0x10ec0299: 4299 case 0x10ec0299:
4267 alc_process_coef_fw(codec, coef0225); 4300 val = alc_read_coef_idx(codec, 0x45);
4301 if (val & (1 << 9))
4302 alc_process_coef_fw(codec, coef0225_2);
4303 else
4304 alc_process_coef_fw(codec, coef0225_1);
4268 break; 4305 break;
4269 case 0x10ec0867: 4306 case 0x10ec0867:
4270 alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0); 4307 alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
@@ -4320,9 +4357,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
4320 }; 4357 };
4321 static struct coef_fw coef0225[] = { 4358 static struct coef_fw coef0225[] = {
4322 UPDATE_COEF(0x45, 0x3f<<10, 0x39<<10), 4359 UPDATE_COEF(0x45, 0x3f<<10, 0x39<<10),
4323 UPDATE_COEF(0x49, 1<<8, 1<<8), 4360 UPDATE_COEF(0x63, 3<<14, 2<<14),
4324 UPDATE_COEF(0x4a, 7<<6, 7<<6),
4325 UPDATE_COEF(0x4a, 3<<4, 3<<4),
4326 {} 4361 {}
4327 }; 4362 };
4328 4363
@@ -4344,7 +4379,9 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
4344 break; 4379 break;
4345 case 0x10ec0298: 4380 case 0x10ec0298:
4346 alc_update_coef_idx(codec, 0x8e, 0x0070, 0x0010);/* Headset output enable */ 4381 alc_update_coef_idx(codec, 0x8e, 0x0070, 0x0010);/* Headset output enable */
4347 /* ALC298 jack type setting is the same with ALC286/ALC288 */ 4382 alc_update_coef_idx(codec, 0x4f, 0xfcc0, 0xe400);
4383 msleep(300);
4384 break;
4348 case 0x10ec0286: 4385 case 0x10ec0286:
4349 case 0x10ec0288: 4386 case 0x10ec0288:
4350 alc_update_coef_idx(codec, 0x4f, 0xfcc0, 0xe400); 4387 alc_update_coef_idx(codec, 0x4f, 0xfcc0, 0xe400);
@@ -4384,6 +4421,14 @@ static void alc_determine_headset_type(struct hda_codec *codec)
4384 UPDATE_COEF(0x4f, 0xfcc0, 0xd400), /* Check Type */ 4421 UPDATE_COEF(0x4f, 0xfcc0, 0xd400), /* Check Type */
4385 {} 4422 {}
4386 }; 4423 };
4424 static struct coef_fw coef0298[] = {
4425 UPDATE_COEF(0x50, 0x2000, 0x2000),
4426 UPDATE_COEF(0x56, 0x0006, 0x0006),
4427 UPDATE_COEF(0x66, 0x0008, 0),
4428 UPDATE_COEF(0x67, 0x2000, 0),
4429 UPDATE_COEF(0x19, 0x1300, 0x1300),
4430 {}
4431 };
4387 static struct coef_fw coef0293[] = { 4432 static struct coef_fw coef0293[] = {
4388 UPDATE_COEF(0x4a, 0x000f, 0x0008), /* Combo Jack auto detect */ 4433 UPDATE_COEF(0x4a, 0x000f, 0x0008), /* Combo Jack auto detect */
4389 WRITE_COEF(0x45, 0xD429), /* Set to ctia type */ 4434 WRITE_COEF(0x45, 0xD429), /* Set to ctia type */
@@ -4396,11 +4441,6 @@ static void alc_determine_headset_type(struct hda_codec *codec)
4396 WRITE_COEF(0xc3, 0x0c00), 4441 WRITE_COEF(0xc3, 0x0c00),
4397 {} 4442 {}
4398 }; 4443 };
4399 static struct coef_fw coef0225[] = {
4400 UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
4401 UPDATE_COEF(0x49, 1<<8, 1<<8),
4402 {}
4403 };
4404 static struct coef_fw coef0274[] = { 4444 static struct coef_fw coef0274[] = {
4405 UPDATE_COEF(0x4a, 0x0010, 0), 4445 UPDATE_COEF(0x4a, 0x0010, 0),
4406 UPDATE_COEF(0x4a, 0x8000, 0), 4446 UPDATE_COEF(0x4a, 0x8000, 0),
@@ -4433,8 +4473,34 @@ static void alc_determine_headset_type(struct hda_codec *codec)
4433 is_ctia = (val & 0x0070) == 0x0070; 4473 is_ctia = (val & 0x0070) == 0x0070;
4434 break; 4474 break;
4435 case 0x10ec0298: 4475 case 0x10ec0298:
4436 alc_update_coef_idx(codec, 0x8e, 0x0070, 0x0020); /* Headset output enable */ 4476 snd_hda_codec_write(codec, 0x21, 0,
4437 /* ALC298 check jack type is the same with ALC286/ALC288 */ 4477 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
4478 msleep(100);
4479 snd_hda_codec_write(codec, 0x21, 0,
4480 AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
4481 msleep(200);
4482
4483 val = alc_read_coef_idx(codec, 0x50);
4484 if (val & (1 << 12)) {
4485 alc_update_coef_idx(codec, 0x8e, 0x0070, 0x0020);
4486 alc_process_coef_fw(codec, coef0288);
4487 msleep(350);
4488 val = alc_read_coef_idx(codec, 0x50);
4489 is_ctia = (val & 0x0070) == 0x0070;
4490 } else {
4491 alc_update_coef_idx(codec, 0x8e, 0x0070, 0x0010);
4492 alc_process_coef_fw(codec, coef0288);
4493 msleep(350);
4494 val = alc_read_coef_idx(codec, 0x50);
4495 is_ctia = (val & 0x0070) == 0x0070;
4496 }
4497 alc_process_coef_fw(codec, coef0298);
4498 snd_hda_codec_write(codec, 0x21, 0,
4499 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP);
4500 msleep(75);
4501 snd_hda_codec_write(codec, 0x21, 0,
4502 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
4503 break;
4438 case 0x10ec0286: 4504 case 0x10ec0286:
4439 case 0x10ec0288: 4505 case 0x10ec0288:
4440 alc_process_coef_fw(codec, coef0288); 4506 alc_process_coef_fw(codec, coef0288);
@@ -4463,10 +4529,25 @@ static void alc_determine_headset_type(struct hda_codec *codec)
4463 case 0x10ec0225: 4529 case 0x10ec0225:
4464 case 0x10ec0295: 4530 case 0x10ec0295:
4465 case 0x10ec0299: 4531 case 0x10ec0299:
4466 alc_process_coef_fw(codec, coef0225); 4532 alc_process_coef_fw(codec, alc225_pre_hsmode);
4467 msleep(800); 4533 alc_update_coef_idx(codec, 0x67, 0xf000, 0x1000);
4468 val = alc_read_coef_idx(codec, 0x46); 4534 val = alc_read_coef_idx(codec, 0x45);
4469 is_ctia = (val & 0x00f0) == 0x00f0; 4535 if (val & (1 << 9)) {
4536 alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x34<<10);
4537 alc_update_coef_idx(codec, 0x49, 3<<8, 2<<8);
4538 msleep(800);
4539 val = alc_read_coef_idx(codec, 0x46);
4540 is_ctia = (val & 0x00f0) == 0x00f0;
4541 } else {
4542 alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x34<<10);
4543 alc_update_coef_idx(codec, 0x49, 3<<8, 1<<8);
4544 msleep(800);
4545 val = alc_read_coef_idx(codec, 0x46);
4546 is_ctia = (val & 0x00f0) == 0x00f0;
4547 }
4548 alc_update_coef_idx(codec, 0x4a, 7<<6, 7<<6);
4549 alc_update_coef_idx(codec, 0x4a, 3<<4, 3<<4);
4550 alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
4470 break; 4551 break;
4471 case 0x10ec0867: 4552 case 0x10ec0867:
4472 is_ctia = true; 4553 is_ctia = true;
@@ -6724,6 +6805,7 @@ static int patch_alc269(struct hda_codec *codec)
6724 case 0x10ec0225: 6805 case 0x10ec0225:
6725 case 0x10ec0295: 6806 case 0x10ec0295:
6726 spec->codec_variant = ALC269_TYPE_ALC225; 6807 spec->codec_variant = ALC269_TYPE_ALC225;
6808 spec->gen.mixer_nid = 0; /* no loopback on ALC225 ALC295 */
6727 break; 6809 break;
6728 case 0x10ec0299: 6810 case 0x10ec0299:
6729 spec->codec_variant = ALC269_TYPE_ALC225; 6811 spec->codec_variant = ALC269_TYPE_ALC225;
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index dd8f00cfb8b4..32283d88701a 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -474,7 +474,7 @@ class Provider(object):
474 @staticmethod 474 @staticmethod
475 def is_field_wanted(fields_filter, field): 475 def is_field_wanted(fields_filter, field):
476 """Indicate whether field is valid according to fields_filter.""" 476 """Indicate whether field is valid according to fields_filter."""
477 if not fields_filter: 477 if not fields_filter or fields_filter == "help":
478 return True 478 return True
479 return re.match(fields_filter, field) is not None 479 return re.match(fields_filter, field) is not None
480 480
@@ -1413,8 +1413,8 @@ performance.
1413 1413
1414Requirements: 1414Requirements:
1415- Access to: 1415- Access to:
1416 /sys/kernel/debug/kvm 1416 %s
1417 /sys/kernel/debug/trace/events/* 1417 %s/events/*
1418 /proc/pid/task 1418 /proc/pid/task
1419- /proc/sys/kernel/perf_event_paranoid < 1 if user has no 1419- /proc/sys/kernel/perf_event_paranoid < 1 if user has no
1420 CAP_SYS_ADMIN and perf events are used. 1420 CAP_SYS_ADMIN and perf events are used.
@@ -1434,7 +1434,7 @@ Interactive Commands:
1434 s set update interval 1434 s set update interval
1435 x toggle reporting of stats for individual child trace events 1435 x toggle reporting of stats for individual child trace events
1436Press any other key to refresh statistics immediately. 1436Press any other key to refresh statistics immediately.
1437""" 1437""" % (PATH_DEBUGFS_KVM, PATH_DEBUGFS_TRACING)
1438 1438
1439 class PlainHelpFormatter(optparse.IndentedHelpFormatter): 1439 class PlainHelpFormatter(optparse.IndentedHelpFormatter):
1440 def format_description(self, description): 1440 def format_description(self, description):
@@ -1496,7 +1496,8 @@ Press any other key to refresh statistics immediately.
1496 action='store', 1496 action='store',
1497 default=DEFAULT_REGEX, 1497 default=DEFAULT_REGEX,
1498 dest='fields', 1498 dest='fields',
1499 help='fields to display (regex)', 1499 help='''fields to display (regex)
1500 "-f help" for a list of available events''',
1500 ) 1501 )
1501 optparser.add_option('-p', '--pid', 1502 optparser.add_option('-p', '--pid',
1502 action='store', 1503 action='store',
@@ -1559,6 +1560,17 @@ def main():
1559 1560
1560 stats = Stats(options) 1561 stats = Stats(options)
1561 1562
1563 if options.fields == "help":
1564 event_list = "\n"
1565 s = stats.get()
1566 for key in s.keys():
1567 if key.find('(') != -1:
1568 key = key[0:key.find('(')]
1569 if event_list.find('\n' + key + '\n') == -1:
1570 event_list += key + '\n'
1571 sys.stdout.write(event_list)
1572 return ""
1573
1562 if options.log: 1574 if options.log:
1563 log(stats) 1575 log(stats)
1564 elif not options.once: 1576 elif not options.once:
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 82987d457b8b..f3f74271f1a9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3883,7 +3883,6 @@ static const struct file_operations *stat_fops[] = {
3883static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) 3883static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
3884{ 3884{
3885 struct kobj_uevent_env *env; 3885 struct kobj_uevent_env *env;
3886 char *tmp, *pathbuf = NULL;
3887 unsigned long long created, active; 3886 unsigned long long created, active;
3888 3887
3889 if (!kvm_dev.this_device || !kvm) 3888 if (!kvm_dev.this_device || !kvm)
@@ -3907,38 +3906,28 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
3907 add_uevent_var(env, "CREATED=%llu", created); 3906 add_uevent_var(env, "CREATED=%llu", created);
3908 add_uevent_var(env, "COUNT=%llu", active); 3907 add_uevent_var(env, "COUNT=%llu", active);
3909 3908
3910 if (type == KVM_EVENT_CREATE_VM) 3909 if (type == KVM_EVENT_CREATE_VM) {
3911 add_uevent_var(env, "EVENT=create"); 3910 add_uevent_var(env, "EVENT=create");
3912 else if (type == KVM_EVENT_DESTROY_VM) 3911 kvm->userspace_pid = task_pid_nr(current);
3912 } else if (type == KVM_EVENT_DESTROY_VM) {
3913 add_uevent_var(env, "EVENT=destroy"); 3913 add_uevent_var(env, "EVENT=destroy");
3914 }
3915 add_uevent_var(env, "PID=%d", kvm->userspace_pid);
3914 3916
3915 if (kvm->debugfs_dentry) { 3917 if (kvm->debugfs_dentry) {
3916 char p[ITOA_MAX_LEN]; 3918 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
3917 3919
3918 snprintf(p, sizeof(p), "%s", kvm->debugfs_dentry->d_name.name); 3920 if (p) {
3919 tmp = strchrnul(p + 1, '-'); 3921 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
3920 *tmp = '\0'; 3922 if (!IS_ERR(tmp))
3921 add_uevent_var(env, "PID=%s", p); 3923 add_uevent_var(env, "STATS_PATH=%s", tmp);
3922 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); 3924 kfree(p);
3923 if (pathbuf) {
3924 /* sizeof counts the final '\0' */
3925 int len = sizeof("STATS_PATH=") - 1;
3926 const char *pvar = "STATS_PATH=";
3927
3928 tmp = dentry_path_raw(kvm->debugfs_dentry,
3929 pathbuf + len,
3930 PATH_MAX - len);
3931 if (!IS_ERR(tmp)) {
3932 memcpy(tmp - len, pvar, len);
3933 env->envp[env->envp_idx++] = tmp - len;
3934 }
3935 } 3925 }
3936 } 3926 }
3937 /* no need for checks, since we are adding at most only 5 keys */ 3927 /* no need for checks, since we are adding at most only 5 keys */
3938 env->envp[env->envp_idx++] = NULL; 3928 env->envp[env->envp_idx++] = NULL;
3939 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp); 3929 kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
3940 kfree(env); 3930 kfree(env);
3941 kfree(pathbuf);
3942} 3931}
3943 3932
3944static int kvm_init_debug(void) 3933static int kvm_init_debug(void)