author	Mark Brown <broonie@kernel.org>	2015-07-24 13:29:31 -0400
committer	Mark Brown <broonie@kernel.org>	2015-07-24 13:29:31 -0400
commit	41dae91a7206d9a09047f3d376282bba4d9545b5 (patch)
tree	fe12449256757eea6c14d7d041cc494b712e83ba
parent	5d506a5ad4155e813d254d2f02ce17b58045423c (diff)
parent	3a003baeec246f604ed1d2e0087560d7f15edcc6 (diff)
Merge branch 'topic/ocp' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator into regulator-qcom-spmi
-rw-r--r--  Documentation/arm/sunxi/README | 18
-rw-r--r--  Documentation/devicetree/bindings/arm/sunxi.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/ti/emif.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/regulator/regulator.txt | 1
-rw-r--r--  Documentation/power/swsusp.txt | 13
-rw-r--r--  MAINTAINERS | 26
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/Kconfig | 6
-rw-r--r--  arch/arm/Kconfig.debug | 2
-rw-r--r--  arch/arm/boot/dts/am335x-boneblack.dts | 4
-rw-r--r--  arch/arm/boot/dts/am4372.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/am57xx-beagle-x15.dts | 4
-rw-r--r--  arch/arm/boot/dts/atlas7.dtsi | 1042
-rw-r--r--  arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts | 25
-rw-r--r--  arch/arm/configs/multi_v7_defconfig | 1
-rw-r--r--  arch/arm/configs/sunxi_defconfig | 6
-rw-r--r--  arch/arm/include/asm/io.h | 75
-rw-r--r--  arch/arm/include/asm/memory.h | 2
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h | 31
-rw-r--r--  arch/arm/kernel/armksyms.c | 6
-rw-r--r--  arch/arm/kernel/entry-armv.S | 2
-rw-r--r--  arch/arm/kernel/smp.c | 4
-rw-r--r--  arch/arm/lib/memcpy.S | 2
-rw-r--r--  arch/arm/lib/memset.S | 2
-rw-r--r--  arch/arm/mach-omap2/dma.c | 1
-rw-r--r--  arch/arm/mach-prima2/Kconfig | 1
-rw-r--r--  arch/arm/mach-prima2/rtciobrg.c | 48
-rw-r--r--  arch/arm/mach-sunxi/Kconfig | 2
-rw-r--r--  arch/arm/mach-sunxi/sunxi.c | 5
-rw-r--r--  arch/arm/mm/ioremap.c | 33
-rw-r--r--  arch/arm/mm/mmu.c | 7
-rw-r--r--  arch/arm/mm/nommu.c | 39
-rw-r--r--  arch/arm/vdso/vdsomunge.c | 56
-rw-r--r--  arch/arm64/Kconfig | 2
-rw-r--r--  arch/arm64/boot/dts/apm/apm-mustang.dts | 10
-rw-r--r--  arch/arm64/boot/dts/arm/Makefile | 1
-rw-r--r--  arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts | 191
-rw-r--r--  arch/arm64/boot/dts/cavium/thunder-88xx.dtsi | 9
-rw-r--r--  arch/arm64/configs/defconfig | 1
-rw-r--r--  arch/arm64/include/asm/acpi.h | 8
-rw-r--r--  arch/arm64/kernel/entry.S | 4
-rw-r--r--  arch/arm64/kernel/entry32.S | 2
-rw-r--r--  arch/arm64/kernel/smp.c | 2
-rw-r--r--  arch/arm64/mm/Makefile | 2
-rw-r--r--  arch/cris/arch-v32/drivers/sync_serial.c | 2
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/mips/include/asm/mach-loongson64/mmzone.h | 2
-rw-r--r--  arch/mips/include/asm/smp.h | 1
-rw-r--r--  arch/mips/kernel/branch.c | 4
-rw-r--r--  arch/mips/kernel/cps-vec.S | 96
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 37
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 35
-rw-r--r--  arch/mips/kernel/setup.c | 13
-rw-r--r--  arch/mips/kernel/smp-cps.c | 6
-rw-r--r--  arch/mips/kernel/smp.c | 44
-rw-r--r--  arch/mips/kernel/traps.c | 8
-rw-r--r--  arch/mips/loongson64/common/bonito-irq.c | 2
-rw-r--r--  arch/mips/loongson64/common/cmdline.c | 2
-rw-r--r--  arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c | 2
-rw-r--r--  arch/mips/loongson64/common/env.c | 2
-rw-r--r--  arch/mips/loongson64/common/irq.c | 2
-rw-r--r--  arch/mips/loongson64/common/setup.c | 2
-rw-r--r--  arch/mips/loongson64/fuloong-2e/irq.c | 2
-rw-r--r--  arch/mips/loongson64/lemote-2f/clock.c | 4
-rw-r--r--  arch/mips/loongson64/loongson-3/numa.c | 2
-rw-r--r--  arch/mips/math-emu/cp1emu.c | 6
-rw-r--r--  arch/mips/mm/c-r4k.c | 18
-rw-r--r--  arch/mips/mti-malta/malta-time.c | 20
-rw-r--r--  arch/mips/pistachio/init.c | 8
-rw-r--r--  arch/mips/pistachio/time.c | 5
-rw-r--r--  arch/parisc/include/asm/pgtable.h | 55
-rw-r--r--  arch/parisc/include/asm/tlbflush.h | 53
-rw-r--r--  arch/parisc/kernel/cache.c | 105
-rw-r--r--  arch/parisc/kernel/entry.S | 163
-rw-r--r--  arch/parisc/kernel/traps.c | 4
-rw-r--r--  arch/powerpc/kernel/idle_power7.S | 31
-rw-r--r--  arch/powerpc/kernel/traps.c | 2
-rw-r--r--  arch/powerpc/mm/fault.c | 4
-rw-r--r--  arch/powerpc/perf/hv-24x7.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/opal-elog.c | 16
-rw-r--r--  arch/powerpc/platforms/powernv/opal-prd.c | 9
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_hsta_msi.c | 1
-rw-r--r--  arch/tile/lib/memcpy_user_64.c | 4
-rw-r--r--  arch/x86/Kconfig | 7
-rw-r--r--  arch/x86/include/asm/espfix.h | 2
-rw-r--r--  arch/x86/include/asm/kasan.h | 8
-rw-r--r--  arch/x86/kernel/apic/vector.c | 10
-rw-r--r--  arch/x86/kernel/early_printk.c | 4
-rw-r--r--  arch/x86/kernel/espfix_64.c | 28
-rw-r--r--  arch/x86/kernel/head64.c | 10
-rw-r--r--  arch/x86/kernel/head_64.S | 29
-rw-r--r--  arch/x86/kernel/irq.c | 20
-rw-r--r--  arch/x86/kernel/smpboot.c | 27
-rw-r--r--  arch/x86/kernel/tsc.c | 11
-rw-r--r--  arch/x86/lib/usercopy.c | 2
-rw-r--r--  arch/x86/mm/kasan_init_64.c | 47
-rw-r--r--  drivers/acpi/acpi_lpss.c | 7
-rw-r--r--  drivers/acpi/nfit.c | 134
-rw-r--r--  drivers/acpi/nfit.h | 20
-rw-r--r--  drivers/acpi/osl.c | 12
-rw-r--r--  drivers/acpi/resource.c | 162
-rw-r--r--  drivers/acpi/scan.c | 32
-rw-r--r--  drivers/ata/Kconfig | 2
-rw-r--r--  drivers/ata/ahci_platform.c | 9
-rw-r--r--  drivers/base/firmware_class.c | 16
-rw-r--r--  drivers/base/power/domain.c | 13
-rw-r--r--  drivers/base/power/wakeirq.c | 12
-rw-r--r--  drivers/base/power/wakeup.c | 31
-rw-r--r--  drivers/clk/at91/clk-h32mx.c | 4
-rw-r--r--  drivers/clk/at91/clk-main.c | 4
-rw-r--r--  drivers/clk/at91/clk-master.c | 8
-rw-r--r--  drivers/clk/at91/clk-pll.c | 8
-rw-r--r--  drivers/clk/at91/clk-system.c | 8
-rw-r--r--  drivers/clk/at91/clk-utmi.c | 8
-rw-r--r--  drivers/clk/bcm/clk-iproc-asiu.c | 6
-rw-r--r--  drivers/clk/bcm/clk-iproc-pll.c | 13
-rw-r--r--  drivers/clk/clk-stm32f4.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8173.c | 26
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c | 9
-rw-r--r--  drivers/clk/st/clk-flexgen.c | 4
-rw-r--r--  drivers/clk/st/clkgen-fsyn.c | 12
-rw-r--r--  drivers/clk/st/clkgen-mux.c | 10
-rw-r--r--  drivers/clk/st/clkgen-pll.c | 2
-rw-r--r--  drivers/clk/sunxi/clk-sunxi.c | 1
-rw-r--r--  drivers/clocksource/timer-imx-gpt.c | 1
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 10
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c | 16
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 26
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 336
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 392
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 155
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 109
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 66
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 336
-rw-r--r--  drivers/i2c/busses/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-jz4780.c | 15
-rw-r--r--  drivers/i2c/busses/i2c-xgene-slimpro.c | 1
-rw-r--r--  drivers/i2c/i2c-core.c | 16
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c | 12
-rw-r--r--  drivers/input/mouse/synaptics.c | 2
-rw-r--r--  drivers/irqchip/irq-gic.c | 2
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 10
-rw-r--r--  drivers/memory/omap-gpmc.c | 8
-rw-r--r--  drivers/misc/cxl/api.c | 12
-rw-r--r--  drivers/misc/cxl/context.c | 14
-rw-r--r--  drivers/misc/cxl/main.c | 2
-rw-r--r--  drivers/misc/cxl/pci.c | 2
-rw-r--r--  drivers/misc/cxl/vphb.c | 3
-rw-r--r--  drivers/misc/mei/bus.c | 16
-rw-r--r--  drivers/misc/mei/init.c | 2
-rw-r--r--  drivers/misc/mei/nfc.c | 3
-rw-r--r--  drivers/nvdimm/bus.c | 11
-rw-r--r--  drivers/pnp/system.c | 35
-rw-r--r--  drivers/regulator/core.c | 9
-rw-r--r--  drivers/regulator/of_regulator.c | 3
-rw-r--r--  drivers/video/fbdev/stifb.c | 40
-rw-r--r--  fs/9p/vfs_inode.c | 3
-rw-r--r--  fs/9p/vfs_inode_dotl.c | 3
-rw-r--r--  fs/btrfs/btrfs_inode.h | 2
-rw-r--r--  fs/btrfs/ctree.h | 1
-rw-r--r--  fs/btrfs/disk-io.c | 41
-rw-r--r--  fs/btrfs/extent-tree.c | 3
-rw-r--r--  fs/btrfs/inode-map.c | 17
-rw-r--r--  fs/btrfs/inode.c | 89
-rw-r--r--  fs/btrfs/ioctl.c | 241
-rw-r--r--  fs/btrfs/ordered-data.c | 5
-rw-r--r--  fs/btrfs/qgroup.c | 49
-rw-r--r--  fs/btrfs/relocation.c | 2
-rw-r--r--  fs/btrfs/scrub.c | 39
-rw-r--r--  fs/btrfs/tree-log.c | 226
-rw-r--r--  fs/btrfs/volumes.c | 50
-rw-r--r--  fs/compat_ioctl.c | 1
-rw-r--r--  fs/dcache.c | 7
-rw-r--r--  fs/ecryptfs/file.c | 1
-rw-r--r--  fs/ext4/extents.c | 6
-rw-r--r--  fs/ext4/inode.c | 22
-rw-r--r--  fs/ext4/ioctl.c | 1
-rw-r--r--  fs/ext4/mballoc.c | 16
-rw-r--r--  fs/ext4/migrate.c | 17
-rw-r--r--  fs/hpfs/alloc.c | 95
-rw-r--r--  fs/hpfs/dir.c | 1
-rw-r--r--  fs/hpfs/file.c | 1
-rw-r--r--  fs/hpfs/hpfs_fn.h | 4
-rw-r--r--  fs/hpfs/super.c | 47
-rw-r--r--  fs/jfs/ioctl.c | 3
-rw-r--r--  fs/nilfs2/ioctl.c | 1
-rw-r--r--  fs/ocfs2/ioctl.c | 1
-rw-r--r--  fs/overlayfs/inode.c | 3
-rw-r--r--  include/linux/acpi.h | 24
-rw-r--r--  include/linux/buffer_head.h | 7
-rw-r--r--  include/linux/ceph/messenger.h | 3
-rw-r--r--  include/linux/compiler.h | 2
-rw-r--r--  include/linux/irqdesc.h | 7
-rw-r--r--  include/linux/mod_devicetable.h | 2
-rw-r--r--  include/linux/regulator/driver.h | 1
-rw-r--r--  include/linux/regulator/machine.h | 1
-rw-r--r--  include/linux/rtc/sirfsoc_rtciobrg.h | 4
-rw-r--r--  include/linux/tick.h | 7
-rw-r--r--  include/linux/timekeeping.h | 1
-rw-r--r--  kernel/auditsc.c | 3
-rw-r--r--  kernel/cpu.c | 22
-rw-r--r--  kernel/events/core.c | 8
-rw-r--r--  kernel/events/internal.h | 10
-rw-r--r--  kernel/events/ring_buffer.c | 27
-rw-r--r--  kernel/irq/internals.h | 4
-rw-r--r--  kernel/module.c | 1
-rw-r--r--  kernel/time/clockevents.c | 24
-rw-r--r--  kernel/time/tick-broadcast.c | 163
-rw-r--r--  kernel/time/tick-common.c | 21
-rw-r--r--  kernel/time/tick-sched.h | 10
-rw-r--r--  lib/Kconfig.kasan | 4
-rw-r--r--  mm/memory.c | 20
-rw-r--r--  net/ceph/ceph_common.c | 16
-rw-r--r--  net/ceph/messenger.c | 24
-rw-r--r--  scripts/mod/devicetable-offsets.c | 2
-rw-r--r--  scripts/mod/file2alias.c | 32
-rw-r--r--  scripts/mod/modpost.c | 3
-rw-r--r--  security/selinux/hooks.c | 3
-rw-r--r--  security/selinux/ss/ebitmap.c | 6
-rw-r--r--  tools/include/linux/compiler.h | 58
-rw-r--r--  tools/include/linux/export.h | 10
-rw-r--r--  tools/include/linux/rbtree.h | 104
-rw-r--r--  tools/include/linux/rbtree_augmented.h | 245
-rw-r--r--  tools/lib/rbtree.c | 548
-rw-r--r--  tools/perf/MANIFEST | 6
-rw-r--r--  tools/perf/util/Build | 2
-rw-r--r--  tools/perf/util/include/linux/rbtree.h | 16
-rw-r--r--  tools/perf/util/include/linux/rbtree_augmented.h | 2
-rw-r--r--  tools/testing/nvdimm/Kbuild | 3
-rw-r--r--  tools/testing/nvdimm/test/iomap.c | 27
-rw-r--r--  tools/testing/nvdimm/test/nfit.c | 52
250 files changed, 5786 insertions, 1912 deletions
diff --git a/Documentation/arm/sunxi/README b/Documentation/arm/sunxi/README
index 1fe2d7fd4108..5e38e1582f95 100644
--- a/Documentation/arm/sunxi/README
+++ b/Documentation/arm/sunxi/README
@@ -36,7 +36,7 @@ SunXi family
     + User Manual
       http://dl.linux-sunxi.org/A20/A20%20User%20Manual%202013-03-22.pdf
 
-  - Allwinner A23
+  - Allwinner A23 (sun8i)
     + Datasheet
       http://dl.linux-sunxi.org/A23/A23%20Datasheet%20V1.0%2020130830.pdf
     + User Manual
@@ -55,7 +55,23 @@ SunXi family
     + User Manual
       http://dl.linux-sunxi.org/A31/A3x_release_document/A31s/IC/A31s%20User%20Manual%20%20V1.0%2020130322.pdf
 
+  - Allwinner A33 (sun8i)
+    + Datasheet
+      http://dl.linux-sunxi.org/A33/A33%20Datasheet%20release%201.1.pdf
+    + User Manual
+      http://dl.linux-sunxi.org/A33/A33%20user%20manual%20release%201.1.pdf
+
+  - Allwinner H3 (sun8i)
+    + Datasheet
+      http://dl.linux-sunxi.org/H3/Allwinner_H3_Datasheet_V1.0.pdf
+
 * Quad ARM Cortex-A15, Quad ARM Cortex-A7 based SoCs
   - Allwinner A80
     + Datasheet
       http://dl.linux-sunxi.org/A80/A80_Datasheet_Revision_1.0_0404.pdf
+
+* Octa ARM Cortex-A7 based SoCs
+  - Allwinner A83T
+    + Not Supported
+    + Datasheet
+      http://dl.linux-sunxi.org/A83T/A83T_datasheet_Revision_1.1.pdf
diff --git a/Documentation/devicetree/bindings/arm/sunxi.txt b/Documentation/devicetree/bindings/arm/sunxi.txt
index 42941fdefb11..67da20539540 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.txt
+++ b/Documentation/devicetree/bindings/arm/sunxi.txt
@@ -9,4 +9,6 @@ using one of the following compatible strings:
   allwinner,sun6i-a31
   allwinner,sun7i-a20
   allwinner,sun8i-a23
+  allwinner,sun8i-a33
+  allwinner,sun8i-h3
   allwinner,sun9i-a80
diff --git a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
index 938f8e1ba205..0db60470ebb6 100644
--- a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
@@ -8,6 +8,7 @@ of the EMIF IP and memory parts attached to it.
 Required properties:
 - compatible	: Should be of the form "ti,emif-<ip-rev>" where <ip-rev>
   is the IP revision of the specific EMIF instance.
+		  For am437x should be ti,emif-am4372.
 
 - phy-type	: <u32> indicating the DDR phy type. Following are the
   allowed values
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
index db88feb28c03..24bd422cecd5 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/regulator.txt
@@ -42,6 +42,7 @@ Optional properties:
 - regulator-system-load: Load in uA present on regulator that is not captured by
   any consumer request.
 - regulator-pull-down: Enable pull down resistor when the regulator is disabled.
+- regulator-over-current-protection: Enable over current protection.
 
 Deprecated properties:
 - regulator-compatible: If a regulator chip contains multiple
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index f732a8321e8a..8cc17ca71813 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -410,8 +410,17 @@ Documentation/usb/persist.txt.
 
 Q: Can I suspend-to-disk using a swap partition under LVM?
 
-A: No. You can suspend successfully, but you'll not be able to
-resume. uswsusp should be able to work with LVM. See suspend.sf.net.
+A: Yes and No. You can suspend successfully, but the kernel will not be able
+to resume on its own. You need an initramfs that can recognize the resume
+situation, activate the logical volume containing the swap volume (but not
+touch any filesystems!), and eventually call
+
+echo -n "$major:$minor" > /sys/power/resume
+
+where $major and $minor are the respective major and minor device numbers of
+the swap volume.
+
+uswsusp works with LVM, too. See http://suspend.sourceforge.net/
 
 Q: I upgraded the kernel from 2.6.15 to 2.6.16. Both kernels were
 compiled with the similar configuration files. Anyway I found that
diff --git a/MAINTAINERS b/MAINTAINERS
index 8133cefb6b6e..fd6078443083 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1614,6 +1614,7 @@ M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/boot/dts/vexpress*
+F:	arch/arm64/boot/dts/arm/vexpress*
 F:	arch/arm/mach-vexpress/
 F:	*/*/vexpress*
 F:	*/*/*/vexpress*
@@ -2562,19 +2563,31 @@ F: arch/powerpc/include/uapi/asm/spu*.h
 F:	arch/powerpc/oprofile/*cell*
 F:	arch/powerpc/platforms/cell/
 
-CEPH DISTRIBUTED FILE SYSTEM CLIENT
+CEPH COMMON CODE (LIBCEPH)
+M:	Ilya Dryomov <idryomov@gmail.com>
 M:	"Yan, Zheng" <zyan@redhat.com>
 M:	Sage Weil <sage@redhat.com>
 L:	ceph-devel@vger.kernel.org
 W:	http://ceph.com/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
+T:	git git://github.com/ceph/ceph-client.git
 S:	Supported
-F:	Documentation/filesystems/ceph.txt
-F:	fs/ceph/
 F:	net/ceph/
 F:	include/linux/ceph/
 F:	include/linux/crush/
 
+CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)
+M:	"Yan, Zheng" <zyan@redhat.com>
+M:	Sage Weil <sage@redhat.com>
+M:	Ilya Dryomov <idryomov@gmail.com>
+L:	ceph-devel@vger.kernel.org
+W:	http://ceph.com/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
+T:	git git://github.com/ceph/ceph-client.git
+S:	Supported
+F:	Documentation/filesystems/ceph.txt
+F:	fs/ceph/
+
 CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
 L:	linux-usb@vger.kernel.org
 S:	Orphan
@@ -6147,6 +6160,7 @@ L: linux-nvdimm@lists.01.org
 Q:	https://patchwork.kernel.org/project/linux-nvdimm/list/
 S:	Supported
 F:	drivers/nvdimm/pmem.c
+F:	include/linux/pmem.h
 
 LINUX FOR IBM pSERIES (RS/6000)
 M:	Paul Mackerras <paulus@au.ibm.com>
@@ -6161,7 +6175,7 @@ M: Michael Ellerman <mpe@ellerman.id.au>
 W:	http://www.penguinppc.org/
 L:	linuxppc-dev@lists.ozlabs.org
 Q:	http://patchwork.ozlabs.org/project/linuxppc-dev/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
 S:	Supported
 F:	Documentation/powerpc/
 F:	arch/powerpc/
@@ -8366,10 +8380,12 @@ RADOS BLOCK DEVICE (RBD)
 M:	Ilya Dryomov <idryomov@gmail.com>
 M:	Sage Weil <sage@redhat.com>
 M:	Alex Elder <elder@kernel.org>
-M:	ceph-devel@vger.kernel.org
+L:	ceph-devel@vger.kernel.org
 W:	http://ceph.com/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
+T:	git git://github.com/ceph/ceph-client.git
 S:	Supported
+F:	Documentation/ABI/testing/sysfs-bus-rbd
 F:	drivers/block/rbd.c
 F:	drivers/block/rbd_types.h
 
diff --git a/Makefile b/Makefile
index 13270c0a9336..257ef5892ab7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a750c1425c3a..1c5021002fe4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1693,6 +1693,12 @@ config HIGHMEM
 config HIGHPTE
 	bool "Allocate 2nd-level pagetables from highmem"
 	depends on HIGHMEM
+	help
+	  The VM uses one page of physical memory for each page table.
+	  For systems with a lot of processes, this can use a lot of
+	  precious low memory, eventually leading to low memory being
+	  consumed by page tables.  Setting this option will allow
+	  user-space 2nd level page tables to reside in high memory.
 
 config HW_PERF_EVENTS
 	bool "Enable hardware performance counter support for perf events"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f1b157971366..a2e16f940394 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1635,7 +1635,7 @@ config PID_IN_CONTEXTIDR
 
 config DEBUG_SET_MODULE_RONX
 	bool "Set loadable kernel module data as NX and text as RO"
-	depends on MODULES
+	depends on MODULES && MMU
 	---help---
 	  This option helps catch unintended modifications to loadable
 	  kernel module's text and read-only data.  It also prevents execution
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index 901739fcb85a..5c42d259fa68 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -80,3 +80,7 @@
 	status = "okay";
 	};
 };
+
+&rtc {
+	system-power-controller;
+};
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index c80a3e233792..ade28c790f4b 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -132,6 +132,12 @@
 		};
 	};
 
+	emif: emif@4c000000 {
+		compatible = "ti,emif-am4372";
+		reg = <0x4c000000 0x1000000>;
+		ti,hwmods = "emif";
+	};
+
 	edma: edma@49000000 {
 		compatible = "ti,edma3";
 		ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
@@ -941,6 +947,7 @@
 			ti,hwmods = "dss_rfbi";
 			clocks = <&disp_clk>;
 			clock-names = "fck";
+			status = "disabled";
 		};
 	};
 
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index a42cc377a862..a63bf78191ea 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -605,6 +605,10 @@
 	phy-supply = <&ldousb_reg>;
 };
 
+&usb2_phy2 {
+	phy-supply = <&ldousb_reg>;
+};
+
 &usb1 {
 	dr_mode = "host";
 	pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/atlas7.dtsi b/arch/arm/boot/dts/atlas7.dtsi
index 5dfd3a44bf82..3e21311f9514 100644
--- a/arch/arm/boot/dts/atlas7.dtsi
+++ b/arch/arm/boot/dts/atlas7.dtsi
@@ -135,6 +135,1025 @@
 			compatible = "sirf,atlas7-ioc";
 			reg = <0x18880000 0x1000>,
 				<0x10E40000 0x1000>;
+
+			audio_ac97_pmx: audio_ac97@0 {
+				audio_ac97 {
+					groups = "audio_ac97_grp";
+					function = "audio_ac97";
+				};
+			};
+
+			audio_func_dbg_pmx: audio_func_dbg@0 {
+				audio_func_dbg {
+					groups = "audio_func_dbg_grp";
+					function = "audio_func_dbg";
+				};
+			};
+
+			audio_i2s_pmx: audio_i2s@0 {
+				audio_i2s {
+					groups = "audio_i2s_grp";
+					function = "audio_i2s";
+				};
+			};
+
+			audio_i2s_2ch_pmx: audio_i2s_2ch@0 {
+				audio_i2s_2ch {
+					groups = "audio_i2s_2ch_grp";
+					function = "audio_i2s_2ch";
+				};
+			};
+
+			audio_i2s_extclk_pmx: audio_i2s_extclk@0 {
+				audio_i2s_extclk {
+					groups = "audio_i2s_extclk_grp";
+					function = "audio_i2s_extclk";
+				};
+			};
+
+			audio_uart0_pmx: audio_uart0@0 {
+				audio_uart0 {
+					groups = "audio_uart0_grp";
+					function = "audio_uart0";
+				};
+			};
+
+			audio_uart1_pmx: audio_uart1@0 {
+				audio_uart1 {
+					groups = "audio_uart1_grp";
+					function = "audio_uart1";
+				};
+			};
+
+			audio_uart2_pmx0: audio_uart2@0 {
+				audio_uart2_0 {
+					groups = "audio_uart2_grp0";
+					function = "audio_uart2_m0";
+				};
+			};
+
+			audio_uart2_pmx1: audio_uart2@1 {
+				audio_uart2_1 {
+					groups = "audio_uart2_grp1";
+					function = "audio_uart2_m1";
+				};
+			};
+
+			c_can_trnsvr_pmx: c_can_trnsvr@0 {
+				c_can_trnsvr {
+					groups = "c_can_trnsvr_grp";
+					function = "c_can_trnsvr";
+				};
+			};
+
+			c0_can_pmx0: c0_can@0 {
+				c0_can_0 {
+					groups = "c0_can_grp0";
+					function = "c0_can_m0";
+				};
+			};
+
+			c0_can_pmx1: c0_can@1 {
+				c0_can_1 {
+					groups = "c0_can_grp1";
+					function = "c0_can_m1";
+				};
+			};
+
+			c1_can_pmx0: c1_can@0 {
+				c1_can_0 {
+					groups = "c1_can_grp0";
+					function = "c1_can_m0";
+				};
+			};
+
+			c1_can_pmx1: c1_can@1 {
+				c1_can_1 {
+					groups = "c1_can_grp1";
+					function = "c1_can_m1";
+				};
+			};
+
+			c1_can_pmx2: c1_can@2 {
+				c1_can_2 {
+					groups = "c1_can_grp2";
+					function = "c1_can_m2";
+				};
+			};
+
+			ca_audio_lpc_pmx: ca_audio_lpc@0 {
+				ca_audio_lpc {
+					groups = "ca_audio_lpc_grp";
+					function = "ca_audio_lpc";
+				};
+			};
+
+			ca_bt_lpc_pmx: ca_bt_lpc@0 {
+				ca_bt_lpc {
+					groups = "ca_bt_lpc_grp";
+					function = "ca_bt_lpc";
+				};
+			};
+
+			ca_coex_pmx: ca_coex@0 {
+				ca_coex {
+					groups = "ca_coex_grp";
+					function = "ca_coex";
+				};
+			};
+
+			ca_curator_lpc_pmx: ca_curator_lpc@0 {
+				ca_curator_lpc {
+					groups = "ca_curator_lpc_grp";
+					function = "ca_curator_lpc";
+				};
+			};
+
+			ca_pcm_debug_pmx: ca_pcm_debug@0 {
+				ca_pcm_debug {
+					groups = "ca_pcm_debug_grp";
+					function = "ca_pcm_debug";
+				};
+			};
+
+			ca_pio_pmx: ca_pio@0 {
+				ca_pio {
+					groups = "ca_pio_grp";
+					function = "ca_pio";
+				};
+			};
+
+			ca_sdio_debug_pmx: ca_sdio_debug@0 {
+				ca_sdio_debug {
+					groups = "ca_sdio_debug_grp";
+					function = "ca_sdio_debug";
+				};
+			};
+
+			ca_spi_pmx: ca_spi@0 {
+				ca_spi {
+					groups = "ca_spi_grp";
+					function = "ca_spi";
+				};
+			};
+
+			ca_trb_pmx: ca_trb@0 {
+				ca_trb {
+					groups = "ca_trb_grp";
+					function = "ca_trb";
+				};
+			};
+
+			ca_uart_debug_pmx: ca_uart_debug@0 {
+				ca_uart_debug {
+					groups = "ca_uart_debug_grp";
+					function = "ca_uart_debug";
+				};
+			};
+
+			clkc_pmx0: clkc@0 {
+				clkc_0 {
+					groups = "clkc_grp0";
+					function = "clkc_m0";
+				};
+			};
+
+			clkc_pmx1: clkc@1 {
+				clkc_1 {
+					groups = "clkc_grp1";
+					function = "clkc_m1";
+				};
+			};
+
+			gn_gnss_i2c_pmx: gn_gnss_i2c@0 {
+				gn_gnss_i2c {
+					groups = "gn_gnss_i2c_grp";
+					function = "gn_gnss_i2c";
+				};
+			};
+
+			gn_gnss_uart_nopause_pmx: gn_gnss_uart_nopause@0 {
+				gn_gnss_uart_nopause {
+					groups = "gn_gnss_uart_nopause_grp";
+					function = "gn_gnss_uart_nopause";
+				};
+			};
+
+			gn_gnss_uart_pmx: gn_gnss_uart@0 {
+				gn_gnss_uart {
+					groups = "gn_gnss_uart_grp";
+					function = "gn_gnss_uart";
+				};
+			};
+
+			gn_trg_spi_pmx0: gn_trg_spi@0 {
+				gn_trg_spi_0 {
+					groups = "gn_trg_spi_grp0";
+					function = "gn_trg_spi_m0";
+				};
+			};
+
+			gn_trg_spi_pmx1: gn_trg_spi@1 {
+				gn_trg_spi_1 {
+					groups = "gn_trg_spi_grp1";
+					function = "gn_trg_spi_m1";
+				};
+			};
+
+			cvbs_dbg_pmx: cvbs_dbg@0 {
+				cvbs_dbg {
+					groups = "cvbs_dbg_grp";
+					function = "cvbs_dbg";
+				};
+			};
+
+			cvbs_dbg_test_pmx0: cvbs_dbg_test@0 {
+				cvbs_dbg_test_0 {
+					groups = "cvbs_dbg_test_grp0";
+					function = "cvbs_dbg_test_m0";
+				};
+			};
+
+			cvbs_dbg_test_pmx1: cvbs_dbg_test@1 {
+				cvbs_dbg_test_1 {
+					groups = "cvbs_dbg_test_grp1";
+					function = "cvbs_dbg_test_m1";
+				};
+			};
+
+			cvbs_dbg_test_pmx2: cvbs_dbg_test@2 {
+				cvbs_dbg_test_2 {
+					groups = "cvbs_dbg_test_grp2";
+					function = "cvbs_dbg_test_m2";
+				};
+			};
+
+			cvbs_dbg_test_pmx3: cvbs_dbg_test@3 {
+				cvbs_dbg_test_3 {
+					groups = "cvbs_dbg_test_grp3";
+					function = "cvbs_dbg_test_m3";
+				};
+			};
+
+			cvbs_dbg_test_pmx4: cvbs_dbg_test@4 {
+				cvbs_dbg_test_4 {
+					groups = "cvbs_dbg_test_grp4";
+					function = "cvbs_dbg_test_m4";
+				};
+			};
+
+			cvbs_dbg_test_pmx5: cvbs_dbg_test@5 {
+				cvbs_dbg_test_5 {
+					groups = "cvbs_dbg_test_grp5";
+					function = "cvbs_dbg_test_m5";
+				};
+			};
+
+			cvbs_dbg_test_pmx6: cvbs_dbg_test@6 {
+				cvbs_dbg_test_6 {
+					groups = "cvbs_dbg_test_grp6";
+					function = "cvbs_dbg_test_m6";
+				};
+			};
+
+			cvbs_dbg_test_pmx7: cvbs_dbg_test@7 {
+				cvbs_dbg_test_7 {
+					groups = "cvbs_dbg_test_grp7";
+					function = "cvbs_dbg_test_m7";
+				};
+			};
+
+			cvbs_dbg_test_pmx8: cvbs_dbg_test@8 {
+				cvbs_dbg_test_8 {
+					groups = "cvbs_dbg_test_grp8";
+					function = "cvbs_dbg_test_m8";
+				};
+			};
+
+			cvbs_dbg_test_pmx9: cvbs_dbg_test@9 {
+				cvbs_dbg_test_9 {
+					groups = "cvbs_dbg_test_grp9";
+					function = "cvbs_dbg_test_m9";
+				};
+			};
+
+			cvbs_dbg_test_pmx10: cvbs_dbg_test@10 {
+				cvbs_dbg_test_10 {
+					groups = "cvbs_dbg_test_grp10";
+					function = "cvbs_dbg_test_m10";
+				};
+			};
+
+			cvbs_dbg_test_pmx11: cvbs_dbg_test@11 {
+				cvbs_dbg_test_11 {
+					groups = "cvbs_dbg_test_grp11";
+					function = "cvbs_dbg_test_m11";
+				};
+			};
+
+			cvbs_dbg_test_pmx12: cvbs_dbg_test@12 {
+				cvbs_dbg_test_12 {
+					groups = "cvbs_dbg_test_grp12";
+					function = "cvbs_dbg_test_m12";
+				};
+			};
+
+			cvbs_dbg_test_pmx13: cvbs_dbg_test@13 {
+				cvbs_dbg_test_13 {
+					groups = "cvbs_dbg_test_grp13";
+					function = "cvbs_dbg_test_m13";
+				};
+			};
+
+			cvbs_dbg_test_pmx14: cvbs_dbg_test@14 {
+				cvbs_dbg_test_14 {
+					groups = "cvbs_dbg_test_grp14";
+					function = "cvbs_dbg_test_m14";
+				};
+			};
+
+			cvbs_dbg_test_pmx15: cvbs_dbg_test@15 {
+				cvbs_dbg_test_15 {
+					groups = "cvbs_dbg_test_grp15";
+					function = "cvbs_dbg_test_m15";
+				};
+			};
+
+			gn_gnss_power_pmx: gn_gnss_power@0 {
+				gn_gnss_power {
+					groups = "gn_gnss_power_grp";
+					function = "gn_gnss_power";
+				};
+			};
+
+			gn_gnss_sw_status_pmx: gn_gnss_sw_status@0 {
+				gn_gnss_sw_status {
+					groups = "gn_gnss_sw_status_grp";
+					function = "gn_gnss_sw_status";
+				};
+			};
+
+			gn_gnss_eclk_pmx: gn_gnss_eclk@0 {
+				gn_gnss_eclk {
+					groups = "gn_gnss_eclk_grp";
+					function = "gn_gnss_eclk";
+				};
+			};
+
+			gn_gnss_irq1_pmx0: gn_gnss_irq1@0 {
+				gn_gnss_irq1_0 {
+					groups = "gn_gnss_irq1_grp0";
+					function = "gn_gnss_irq1_m0";
+				};
+			};
+
+			gn_gnss_irq2_pmx0: gn_gnss_irq2@0 {
+				gn_gnss_irq2_0 {
+					groups = "gn_gnss_irq2_grp0";
+					function = "gn_gnss_irq2_m0";
+				};
+			};
+
+			gn_gnss_tm_pmx: gn_gnss_tm@0 {
+				gn_gnss_tm {
+					groups = "gn_gnss_tm_grp";
+					function = "gn_gnss_tm";
+				};
+			};
+
+			gn_gnss_tsync_pmx: gn_gnss_tsync@0 {
+				gn_gnss_tsync {
+					groups = "gn_gnss_tsync_grp";
+					function = "gn_gnss_tsync";
+				};
+			};
+
+			gn_io_gnsssys_sw_cfg_pmx: gn_io_gnsssys_sw_cfg@0 {
+				gn_io_gnsssys_sw_cfg {
+					groups = "gn_io_gnsssys_sw_cfg_grp";
+					function = "gn_io_gnsssys_sw_cfg";
+				};
+			};
+
+			gn_trg_pmx0: gn_trg@0 {
+				gn_trg_0 {
+					groups = "gn_trg_grp0";
+					function = "gn_trg_m0";
+				};
+			};
+
+			gn_trg_pmx1: gn_trg@1 {
+				gn_trg_1 {
+					groups = "gn_trg_grp1";
+					function = "gn_trg_m1";
+				};
+			};
+
+			gn_trg_shutdown_pmx0: gn_trg_shutdown@0 {
+				gn_trg_shutdown_0 {
+					groups = "gn_trg_shutdown_grp0";
+					function = "gn_trg_shutdown_m0";
+				};
+			};
+
+			gn_trg_shutdown_pmx1: gn_trg_shutdown@1 {
+				gn_trg_shutdown_1 {
+					groups = "gn_trg_shutdown_grp1";
+					function = "gn_trg_shutdown_m1";
+				};
+			};
+
+			gn_trg_shutdown_pmx2: gn_trg_shutdown@2 {
+				gn_trg_shutdown_2 {
+					groups = "gn_trg_shutdown_grp2";
+					function = "gn_trg_shutdown_m2";
+				};
+			};
+
+			gn_trg_shutdown_pmx3: gn_trg_shutdown@3 {
+				gn_trg_shutdown_3 {
+					groups = "gn_trg_shutdown_grp3";
+					function = "gn_trg_shutdown_m3";
+				};
+			};
+
+			i2c0_pmx: i2c0@0 {
+				i2c0 {
+					groups = "i2c0_grp";
+					function = "i2c0";
+				};
+			};
+
+			i2c1_pmx: i2c1@0 {
+				i2c1 {
+					groups = "i2c1_grp";
+					function = "i2c1";
+				};
+			};
+
+			jtag_pmx0: jtag@0 {
+				jtag_0 {
+					groups = "jtag_grp0";
+					function = "jtag_m0";
+				};
+			};
+
+			ks_kas_spi_pmx0: ks_kas_spi@0 {
+				ks_kas_spi_0 {
+					groups = "ks_kas_spi_grp0";
+					function = "ks_kas_spi_m0";
+				};
+			};
+
+			ld_ldd_pmx: ld_ldd@0 {
+				ld_ldd {
+					groups = "ld_ldd_grp";
+					function = "ld_ldd";
+				};
+			};
+
+			ld_ldd_16bit_pmx: ld_ldd_16bit@0 {
+				ld_ldd_16bit {
+					groups = "ld_ldd_16bit_grp";
+					function = "ld_ldd_16bit";
+				};
+			};
+
+			ld_ldd_fck_pmx: ld_ldd_fck@0 {
+				ld_ldd_fck {
+					groups = "ld_ldd_fck_grp";
+					function = "ld_ldd_fck";
+				};
+			};
+
+			ld_ldd_lck_pmx: ld_ldd_lck@0 {
+				ld_ldd_lck {
+					groups = "ld_ldd_lck_grp";
+					function = "ld_ldd_lck";
+				};
+			};
+
+			lr_lcdrom_pmx: lr_lcdrom@0 {
+				lr_lcdrom {
+					groups = "lr_lcdrom_grp";
+					function = "lr_lcdrom";
+				};
+			};
+
+			lvds_analog_pmx: lvds_analog@0 {
+				lvds_analog {
+					groups = "lvds_analog_grp";
+					function = "lvds_analog";
+				};
+			};
+
+			nd_df_pmx: nd_df@0 {
+				nd_df {
+					groups = "nd_df_grp";
+					function = "nd_df";
+				};
+			};
+
+			nd_df_nowp_pmx: nd_df_nowp@0 {
+				nd_df_nowp {
+					groups = "nd_df_nowp_grp";
+					function = "nd_df_nowp";
+				};
+			};
+
+			ps_pmx: ps@0 {
+				ps {
+					groups = "ps_grp";
+					function = "ps";
+				};
+			};
+
+			pwc_core_on_pmx: pwc_core_on@0 {
+				pwc_core_on {
+					groups = "pwc_core_on_grp";
+					function = "pwc_core_on";
+				};
+			};
+
+			pwc_ext_on_pmx: pwc_ext_on@0 {
+				pwc_ext_on {
+					groups = "pwc_ext_on_grp";
+					function = "pwc_ext_on";
+				};
+			};
+
+			pwc_gpio3_clk_pmx: pwc_gpio3_clk@0 {
+				pwc_gpio3_clk {
+					groups = "pwc_gpio3_clk_grp";
+					function = "pwc_gpio3_clk";
+				};
+			};
+
+			pwc_io_on_pmx: pwc_io_on@0 {
+				pwc_io_on {
+					groups = "pwc_io_on_grp";
+					function = "pwc_io_on";
+				};
+			};
+
+			pwc_lowbatt_b_pmx0: pwc_lowbatt_b@0 {
+				pwc_lowbatt_b_0 {
+					groups = "pwc_lowbatt_b_grp0";
+					function = "pwc_lowbatt_b_m0";
+				};
+			};
+
+			pwc_mem_on_pmx: pwc_mem_on@0 {
+				pwc_mem_on {
+					groups = "pwc_mem_on_grp";
+					function = "pwc_mem_on";
+				};
+			};
+
+			pwc_on_key_b_pmx0: pwc_on_key_b@0 {
+				pwc_on_key_b_0 {
+					groups = "pwc_on_key_b_grp0";
+					function = "pwc_on_key_b_m0";
+				};
+			};
+
+			pwc_wakeup_src0_pmx: pwc_wakeup_src0@0 {
+				pwc_wakeup_src0 {
+					groups = "pwc_wakeup_src0_grp";
+					function = "pwc_wakeup_src0";
+				};
+			};
+
+			pwc_wakeup_src1_pmx: pwc_wakeup_src1@0 {
+				pwc_wakeup_src1 {
+					groups = "pwc_wakeup_src1_grp";
+					function = "pwc_wakeup_src1";
+				};
+			};
+
+			pwc_wakeup_src2_pmx: pwc_wakeup_src2@0 {
+				pwc_wakeup_src2 {
+					groups = "pwc_wakeup_src2_grp";
+					function = "pwc_wakeup_src2";
+				};
+			};
+
+			pwc_wakeup_src3_pmx: pwc_wakeup_src3@0 {
+				pwc_wakeup_src3 {
+					groups = "pwc_wakeup_src3_grp";
+					function = "pwc_wakeup_src3";
+				};
+			};
+
+			pw_cko0_pmx0: pw_cko0@0 {
+				pw_cko0_0 {
+					groups = "pw_cko0_grp0";
+					function = "pw_cko0_m0";
+				};
+			};
+
+			pw_cko0_pmx1: pw_cko0@1 {
+				pw_cko0_1 {
+					groups = "pw_cko0_grp1";
+					function = "pw_cko0_m1";
+				};
+			};
+
+			pw_cko0_pmx2: pw_cko0@2 {
+				pw_cko0_2 {
+					groups = "pw_cko0_grp2";
+					function = "pw_cko0_m2";
+				};
+			};
+
+			pw_cko1_pmx0: pw_cko1@0 {
+				pw_cko1_0 {
+					groups = "pw_cko1_grp0";
+					function = "pw_cko1_m0";
+				};
+			};
+
+			pw_cko1_pmx1: pw_cko1@1 {
+				pw_cko1_1 {
+					groups = "pw_cko1_grp1";
+					function = "pw_cko1_m1";
+				};
+			};
+
+			pw_i2s01_clk_pmx0: pw_i2s01_clk@0 {
+				pw_i2s01_clk_0 {
+					groups = "pw_i2s01_clk_grp0";
+					function = "pw_i2s01_clk_m0";
+				};
+			};
+
+			pw_i2s01_clk_pmx1: pw_i2s01_clk@1 {
+				pw_i2s01_clk_1 {
+					groups = "pw_i2s01_clk_grp1";
+					function = "pw_i2s01_clk_m1";
+				};
+			};
+
+			pw_pwm0_pmx: pw_pwm0@0 {
+				pw_pwm0 {
+					groups = "pw_pwm0_grp";
+					function = "pw_pwm0";
+				};
+			};
+
+			pw_pwm1_pmx: pw_pwm1@0 {
+				pw_pwm1 {
+					groups = "pw_pwm1_grp";
+					function = "pw_pwm1";
+				};
+			};
+
+			pw_pwm2_pmx0: pw_pwm2@0 {
+				pw_pwm2_0 {
+					groups = "pw_pwm2_grp0";
+					function = "pw_pwm2_m0";
+				};
+			};
+
+			pw_pwm2_pmx1: pw_pwm2@1 {
+				pw_pwm2_1 {
+					groups = "pw_pwm2_grp1";
+					function = "pw_pwm2_m1";
+				};
+			};
+
+			pw_pwm3_pmx0: pw_pwm3@0 {
+				pw_pwm3_0 {
+					groups = "pw_pwm3_grp0";
+					function = "pw_pwm3_m0";
+				};
+			};
+
+			pw_pwm3_pmx1: pw_pwm3@1 {
+				pw_pwm3_1 {
+					groups = "pw_pwm3_grp1";
+					function = "pw_pwm3_m1";
+				};
+			};
+
+			pw_pwm_cpu_vol_pmx0: pw_pwm_cpu_vol@0 {
+				pw_pwm_cpu_vol_0 {
+					groups = "pw_pwm_cpu_vol_grp0";
+					function = "pw_pwm_cpu_vol_m0";
+				};
+			};
+
+			pw_pwm_cpu_vol_pmx1: pw_pwm_cpu_vol@1 {
+				pw_pwm_cpu_vol_1 {
+					groups = "pw_pwm_cpu_vol_grp1";
+					function = "pw_pwm_cpu_vol_m1";
+				};
+			};
+
+			pw_backlight_pmx0: pw_backlight@0 {
+				pw_backlight_0 {
+					groups = "pw_backlight_grp0";
+					function = "pw_backlight_m0";
+				};
+			};
+
+			pw_backlight_pmx1: pw_backlight@1 {
+				pw_backlight_1 {
+					groups = "pw_backlight_grp1";
+					function = "pw_backlight_m1";
+				};
+			};
+
+			rg_eth_mac_pmx: rg_eth_mac@0 {
+				rg_eth_mac {
+					groups = "rg_eth_mac_grp";
+					function = "rg_eth_mac";
+				};
+			};
+
+			rg_gmac_phy_intr_n_pmx: rg_gmac_phy_intr_n@0 {
+				rg_gmac_phy_intr_n {
+					groups = "rg_gmac_phy_intr_n_grp";
+					function = "rg_gmac_phy_intr_n";
+				};
+			};
+
+			rg_rgmii_mac_pmx: rg_rgmii_mac@0 {
+				rg_rgmii_mac {
+					groups = "rg_rgmii_mac_grp";
+					function = "rg_rgmii_mac";
+				};
+			};
+
+			rg_rgmii_phy_ref_clk_pmx0: rg_rgmii_phy_ref_clk@0 {
+				rg_rgmii_phy_ref_clk_0 {
+					groups =
+						"rg_rgmii_phy_ref_clk_grp0";
+					function =
+						"rg_rgmii_phy_ref_clk_m0";
+				};
+			};
+
+			rg_rgmii_phy_ref_clk_pmx1: rg_rgmii_phy_ref_clk@1 {
+				rg_rgmii_phy_ref_clk_1 {
+					groups =
+						"rg_rgmii_phy_ref_clk_grp1";
+					function =
+						"rg_rgmii_phy_ref_clk_m1";
+				};
+			};
+
+			sd0_pmx: sd0@0 {
+				sd0 {
+					groups = "sd0_grp";
+					function = "sd0";
+				};
+			};
+
+			sd0_4bit_pmx: sd0_4bit@0 {
+				sd0_4bit {
+					groups = "sd0_4bit_grp";
+					function = "sd0_4bit";
+				};
+			};
+
+			sd1_pmx: sd1@0 {
+				sd1 {
+					groups = "sd1_grp";
+					function = "sd1";
+				};
+			};
+
+			sd1_4bit_pmx0: sd1_4bit@0 {
+				sd1_4bit_0 {
+					groups = "sd1_4bit_grp0";
+					function = "sd1_4bit_m0";
+				};
+			};
+
+			sd1_4bit_pmx1: sd1_4bit@1 {
+				sd1_4bit_1 {
+					groups = "sd1_4bit_grp1";
+					function = "sd1_4bit_m1";
+				};
+			};
+
+			sd2_pmx0: sd2@0 {
+				sd2_0 {
+					groups = "sd2_grp0";
+					function = "sd2_m0";
+				};
+			};
+
+			sd2_no_cdb_pmx0: sd2_no_cdb@0 {
+				sd2_no_cdb_0 {
+					groups = "sd2_no_cdb_grp0";
+					function = "sd2_no_cdb_m0";
+				};
+			};
+
+			sd3_pmx: sd3@0 {
+				sd3 {
+					groups = "sd3_grp";
+					function = "sd3";
+				};
+			};
+
+			sd5_pmx: sd5@0 {
+				sd5 {
+					groups = "sd5_grp";
+					function = "sd5";
+				};
+			};
+
+			sd6_pmx0: sd6@0 {
+				sd6_0 {
+					groups = "sd6_grp0";
+					function = "sd6_m0";
+				};
+			};
+
+			sd6_pmx1: sd6@1 {
+				sd6_1 {
+					groups = "sd6_grp1";
+					function = "sd6_m1";
+				};
+			};
+
+			sp0_ext_ldo_on_pmx: sp0_ext_ldo_on@0 {
+				sp0_ext_ldo_on {
+					groups = "sp0_ext_ldo_on_grp";
+					function = "sp0_ext_ldo_on";
+				};
+			};
+
+			sp0_qspi_pmx: sp0_qspi@0 {
+				sp0_qspi {
+					groups = "sp0_qspi_grp";
+					function = "sp0_qspi";
+				};
+			};
+
+			sp1_spi_pmx: sp1_spi@0 {
+				sp1_spi {
+					groups = "sp1_spi_grp";
+					function = "sp1_spi";
+				};
+			};
+
+			tpiu_trace_pmx: tpiu_trace@0 {
+				tpiu_trace {
+					groups = "tpiu_trace_grp";
+					function = "tpiu_trace";
+				};
+			};
+
+			uart0_pmx: uart0@0 {
+				uart0 {
+					groups = "uart0_grp";
+					function = "uart0";
+				};
+			};
+
+			uart0_nopause_pmx: uart0_nopause@0 {
+				uart0_nopause {
+					groups = "uart0_nopause_grp";
+					function = "uart0_nopause";
+				};
+			};
+
+			uart1_pmx: uart1@0 {
+				uart1 {
+					groups = "uart1_grp";
+					function = "uart1";
+				};
+			};
+
+			uart2_pmx: uart2@0 {
+				uart2 {
+					groups = "uart2_grp";
+					function = "uart2";
+				};
+			};
+
+			uart3_pmx0: uart3@0 {
+				uart3_0 {
+					groups = "uart3_grp0";
+					function = "uart3_m0";
+				};
+			};
+
+			uart3_pmx1: uart3@1 {
+				uart3_1 {
+					groups = "uart3_grp1";
+					function = "uart3_m1";
+				};
+			};
+
+			uart3_pmx2: uart3@2 {
+				uart3_2 {
+					groups = "uart3_grp2";
+					function = "uart3_m2";
+				};
+			};
+
+			uart3_pmx3: uart3@3 {
+				uart3_3 {
+					groups = "uart3_grp3";
+					function = "uart3_m3";
+				};
+			};
+
+			uart3_nopause_pmx0: uart3_nopause@0 {
+				uart3_nopause_0 {
+					groups = "uart3_nopause_grp0";
+					function = "uart3_nopause_m0";
+				};
+			};
+
+			uart3_nopause_pmx1: uart3_nopause@1 {
+				uart3_nopause_1 {
+					groups = "uart3_nopause_grp1";
+					function = "uart3_nopause_m1";
+				};
+			};
+
+			uart4_pmx0: uart4@0 {
+				uart4_0 {
+					groups = "uart4_grp0";
+					function = "uart4_m0";
+				};
+			};
+
+			uart4_pmx1: uart4@1 {
+				uart4_1 {
+					groups = "uart4_grp1";
+					function = "uart4_m1";
+				};
+			};
+
+			uart4_pmx2: uart4@2 {
+				uart4_2 {
+					groups = "uart4_grp2";
+					function = "uart4_m2";
+				};
+			};
+
+			uart4_nopause_pmx: uart4_nopause@0 {
+				uart4_nopause {
+					groups = "uart4_nopause_grp";
+					function = "uart4_nopause";
+				};
+			};
+
+			usb0_drvvbus_pmx: usb0_drvvbus@0 {
+				usb0_drvvbus {
+					groups = "usb0_drvvbus_grp";
+					function = "usb0_drvvbus";
+				};
+			};
+
+			usb1_drvvbus_pmx: usb1_drvvbus@0 {
+				usb1_drvvbus {
+					groups = "usb1_drvvbus_grp";
+					function = "usb1_drvvbus";
+				};
+			};
+
+			visbus_dout_pmx: visbus_dout@0 {
+				visbus_dout {
+					groups = "visbus_dout_grp";
+					function = "visbus_dout";
+				};
+			};
+
+			vi_vip1_pmx: vi_vip1@0 {
+				vi_vip1 {
+					groups = "vi_vip1_grp";
+					function = "vi_vip1";
+				};
+			};
+
+			vi_vip1_ext_pmx: vi_vip1_ext@0 {
+				vi_vip1_ext {
+					groups = "vi_vip1_ext_grp";
+					function = "vi_vip1_ext";
+				};
+			};
+
+			vi_vip1_low8bit_pmx: vi_vip1_low8bit@0 {
+				vi_vip1_low8bit {
+					groups = "vi_vip1_low8bit_grp";
+					function = "vi_vip1_low8bit";
+				};
+			};
+
+			vi_vip1_high8bit_pmx: vi_vip1_high8bit@0 {
+				vi_vip1_high8bit {
+					groups = "vi_vip1_high8bit_grp";
+					function = "vi_vip1_high8bit";
+				};
+			};
 		};
 
 		pmipc {
@@ -356,6 +1375,12 @@
 			clock-names = "gpio0_io";
 			gpio-controller;
 			interrupt-controller;
+
+			gpio-banks = <2>;
+			gpio-ranges = <&pinctrl 0 0 0>,
+					<&pinctrl 32 0 0>;
+			gpio-ranges-group-names = "lvds_gpio_grp",
+						"uart_nand_gpio_grp";
 		};
 
 		nand@17050000 {
@@ -461,11 +1486,22 @@
 			#interrupt-cells = <2>;
 			compatible = "sirf,atlas7-gpio";
 			reg = <0x13300000 0x1000>;
-			interrupts = <0 43 0>, <0 44 0>, <0 45 0>;
+			interrupts = <0 43 0>, <0 44 0>,
+					<0 45 0>, <0 46 0>;
 			clocks = <&car 84>;
 			clock-names = "gpio1_io";
 			gpio-controller;
 			interrupt-controller;
+
+			gpio-banks = <4>;
+			gpio-ranges = <&pinctrl 0 0 0>,
+					<&pinctrl 32 0 0>,
+					<&pinctrl 64 0 0>,
+					<&pinctrl 96 0 0>;
+			gpio-ranges-group-names = "gnss_gpio_grp",
+						"lcd_vip_gpio_grp",
+						"sdio_i2s_gpio_grp",
+						"sp_rgmii_gpio_grp";
 		};
 
 		sd2: sdhci@14200000 {
@@ -744,6 +1780,10 @@
 			interrupts = <0 47 0>;
 			gpio-controller;
 			interrupt-controller;
+
+			gpio-banks = <1>;
+			gpio-ranges = <&pinctrl 0 0 0>;
+			gpio-ranges-group-names = "rtc_gpio_grp";
 		};
 
 		rtc-iobg@18840000 {
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index 107395c32d82..17f63f7dfd9e 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -150,6 +150,16 @@
 			interface-type = "ace";
 			reg = <0x5000 0x1000>;
 		};
+
+		pmu@9000 {
+			compatible = "arm,cci-400-pmu,r0";
+			reg = <0x9000 0x5000>;
+			interrupts = <0 105 4>,
+				     <0 101 4>,
+				     <0 102 4>,
+				     <0 103 4>,
+				     <0 104 4>;
+		};
 	};
 
 	memory-controller@7ffd0000 {
@@ -187,11 +197,22 @@
 			     <1 10 0xf08>;
 	};
 
-	pmu {
+	pmu_a15 {
 		compatible = "arm,cortex-a15-pmu";
 		interrupts = <0 68 4>,
 			     <0 69 4>;
-		interrupt-affinity = <&cpu0>, <&cpu1>;
+		interrupt-affinity = <&cpu0>,
+				     <&cpu1>;
+	};
+
+	pmu_a7 {
+		compatible = "arm,cortex-a7-pmu";
+		interrupts = <0 128 4>,
+			     <0 129 4>,
+			     <0 130 4>;
+		interrupt-affinity = <&cpu2>,
+				     <&cpu3>,
+				     <&cpu4>;
 	};
 
 	oscclk6a: oscclk6a {
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 6d83a1bf0c74..5fd8df6f50ea 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -353,7 +353,6 @@ CONFIG_POWER_RESET_AS3722=y
 CONFIG_POWER_RESET_GPIO=y
 CONFIG_POWER_RESET_GPIO_RESTART=y
 CONFIG_POWER_RESET_KEYSTONE=y
-CONFIG_POWER_RESET_SUN6I=y
 CONFIG_POWER_RESET_RMOBILE=y
 CONFIG_SENSORS_LM90=y
 CONFIG_SENSORS_LM95245=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 8ecba00dcd83..7ebc346bf9fa 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -2,6 +2,7 @@ CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_PERF_EVENTS=y
+CONFIG_MODULES=y
 CONFIG_ARCH_SUNXI=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=8
@@ -77,7 +78,6 @@ CONFIG_SPI_SUN6I=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_SUPPLY=y
 CONFIG_POWER_RESET=y
-CONFIG_POWER_RESET_SUN6I=y
 CONFIG_THERMAL=y
 CONFIG_CPU_THERMAL=y
 CONFIG_WATCHDOG=y
@@ -87,6 +87,10 @@ CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_AXP20X=y
 CONFIG_REGULATOR_GPIO=y
+CONFIG_FB=y
+CONFIG_FB_SIMPLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 CONFIG_USB=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 1c3938f26beb..485982084fe9 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -140,16 +140,11 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
  * The _caller variety takes a __builtin_return_address(0) value for
  * /proc/vmalloc to use - and should only be used in non-inline functions.
  */
-extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
-	size_t, unsigned int, void *);
 extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
 	void *);
-
 extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
 extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
 extern void __iounmap(volatile void __iomem *addr);
-extern void __arm_iounmap(volatile void __iomem *addr);
 
 extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
 	unsigned int, void *);
@@ -321,21 +316,24 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 static inline void memset_io(volatile void __iomem *dst, unsigned c,
 	size_t count)
 {
-	memset((void __force *)dst, c, count);
+	extern void mmioset(void *, unsigned int, size_t);
+	mmioset((void __force *)dst, c, count);
 }
 #define memset_io(dst,c,count) memset_io(dst,c,count)
 
 static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
 	size_t count)
 {
-	memcpy(to, (const void __force *)from, count);
+	extern void mmiocpy(void *, const void *, size_t);
+	mmiocpy(to, (const void __force *)from, count);
 }
 #define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)
 
 static inline void memcpy_toio(volatile void __iomem *to, const void *from,
 	size_t count)
 {
-	memcpy((void __force *)to, from, count);
+	extern void mmiocpy(void *, const void *, size_t);
+	mmiocpy((void __force *)to, from, count);
 }
 #define memcpy_toio(to,from,count) memcpy_toio(to,from,count)
 
@@ -348,18 +346,61 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
 #endif	/* readl */
 
 /*
- * ioremap and friends.
+ * ioremap() and friends.
+ *
+ * ioremap() takes a resource address, and size.  Due to the ARM memory
+ * types, it is important to use the correct ioremap() function as each
+ * mapping has specific properties.
+ *
+ * Function		Memory type	Cacheability	Cache hint
+ * ioremap()		Device		n/a		n/a
+ * ioremap_nocache()	Device		n/a		n/a
+ * ioremap_cache()	Normal		Writeback	Read allocate
+ * ioremap_wc()		Normal		Non-cacheable	n/a
+ * ioremap_wt()		Normal		Non-cacheable	n/a
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
+ * - writes may be delayed before they hit the endpoint device
  *
- * ioremap takes a PCI memory address, as specified in
- * Documentation/io-mapping.txt.
+ * ioremap_nocache() is the same as ioremap() as there are too many device
+ * drivers using this for device registers, and documentation which tells
+ * people to use it for such for this to be any different.  This is not a
+ * safe fallback for memory-like mappings, or memory regions where the
+ * compiler may generate unaligned accesses - eg, via inlining its own
+ * memcpy.
  *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ * - ordering is not guaranteed without explicit dependencies or barrier
+ *   instructions
+ * - writes may be delayed before they hit the endpoint memory
+ *
+ * The cache hint is only a performance hint: CPUs may alias these hints.
+ * Eg, a CPU not implementing read allocate but implementing write allocate
+ * will provide a write allocate mapping instead.
  */
-#define ioremap(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_nocache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
-#define ioremap_wc(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE_WC)
-#define ioremap_wt(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE)
-#define iounmap				__arm_iounmap
+void __iomem *ioremap(resource_size_t res_cookie, size_t size);
+#define ioremap ioremap
+#define ioremap_nocache ioremap
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
+#define ioremap_cache ioremap_cache
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
+#define ioremap_wc ioremap_wc
+#define ioremap_wt ioremap_wc
+
+void iounmap(volatile void __iomem *iomem_cookie);
+#define iounmap iounmap
 
 /*
  * io{read,write}{16,32}be() macros
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 3a72d69b3255..6f225acc07c5 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -275,7 +275,7 @@ static inline void *phys_to_virt(phys_addr_t x)
275 */ 275 */
276#define __pa(x) __virt_to_phys((unsigned long)(x)) 276#define __pa(x) __virt_to_phys((unsigned long)(x))
277#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) 277#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
278#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 278#define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT)
279 279
280extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x); 280extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
281 281
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index bfd662e49a25..aeddd28b3595 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -129,7 +129,36 @@
 
 /*
  * These are the memory types, defined to be compatible with
- * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
+ * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
+ * ARMv6+ without TEX remapping, they are a table index.
+ * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B
+ *
+ * MT type		Pre-ARMv6	ARMv6+ type / cacheable status
+ * UNCACHED		Uncached	Strongly ordered
+ * BUFFERABLE		Bufferable	Normal memory / non-cacheable
+ * WRITETHROUGH		Writethrough	Normal memory / write through
+ * WRITEBACK		Writeback	Normal memory / write back, read alloc
+ * MINICACHE		Minicache	N/A
+ * WRITEALLOC		Writeback	Normal memory / write back, write alloc
+ * DEV_SHARED		Uncached	Device memory (shared)
+ * DEV_NONSHARED	Uncached	Device memory (non-shared)
+ * DEV_WC		Bufferable	Normal memory / non-cacheable
+ * DEV_CACHED		Writeback	Normal memory / write back, read alloc
+ * VECTORS		Variable	Normal memory / variable
+ *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
  */
 #define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
 #define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index a88671cfe1ff..5e5a51a99e68 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -50,6 +50,9 @@ extern void __aeabi_ulcmp(void);
50 50
51extern void fpundefinstr(void); 51extern void fpundefinstr(void);
52 52
53void mmioset(void *, unsigned int, size_t);
54void mmiocpy(void *, const void *, size_t);
55
53 /* platform dependent support */ 56 /* platform dependent support */
54EXPORT_SYMBOL(arm_delay_ops); 57EXPORT_SYMBOL(arm_delay_ops);
55 58
@@ -88,6 +91,9 @@ EXPORT_SYMBOL(memmove);
88EXPORT_SYMBOL(memchr); 91EXPORT_SYMBOL(memchr);
89EXPORT_SYMBOL(__memzero); 92EXPORT_SYMBOL(__memzero);
90 93
94EXPORT_SYMBOL(mmioset);
95EXPORT_SYMBOL(mmiocpy);
96
91#ifdef CONFIG_MMU 97#ifdef CONFIG_MMU
92EXPORT_SYMBOL(copy_page); 98EXPORT_SYMBOL(copy_page);
93 99
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7dac3086e361..cb4fb1e69778 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -410,7 +410,7 @@ ENDPROC(__fiq_abt)
410 zero_fp 410 zero_fp
411 411
412 .if \trace 412 .if \trace
413#ifdef CONFIG_IRQSOFF_TRACER 413#ifdef CONFIG_TRACE_IRQFLAGS
414 bl trace_hardirqs_off 414 bl trace_hardirqs_off
415#endif 415#endif
416 ct_user_exit save = 0 416 ct_user_exit save = 0
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 90dfbedfbfb8..3d6b7821cff8 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -578,7 +578,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
578 struct pt_regs *old_regs = set_irq_regs(regs); 578 struct pt_regs *old_regs = set_irq_regs(regs);
579 579
580 if ((unsigned)ipinr < NR_IPI) { 580 if ((unsigned)ipinr < NR_IPI) {
581 trace_ipi_entry(ipi_types[ipinr]); 581 trace_ipi_entry_rcuidle(ipi_types[ipinr]);
582 __inc_irq_stat(cpu, ipi_irqs[ipinr]); 582 __inc_irq_stat(cpu, ipi_irqs[ipinr]);
583 } 583 }
584 584
@@ -637,7 +637,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
637 } 637 }
638 638
639 if ((unsigned)ipinr < NR_IPI) 639 if ((unsigned)ipinr < NR_IPI)
640 trace_ipi_exit(ipi_types[ipinr]); 640 trace_ipi_exit_rcuidle(ipi_types[ipinr]);
641 set_irq_regs(old_regs); 641 set_irq_regs(old_regs);
642} 642}
643 643
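The _rcuidle suffix is needed because tracepoint probes are protected by RCU, and an IPI can be taken while the CPU is in an RCU-idle period (eg, deep in cpuidle). At the time of this patch every tracepoint also generated a _rcuidle variant; a rough, illustrative equivalent is:

    /* Illustrative only: make RCU watch this CPU around the probe call,
     * so the tracepoint's RCU read-side section is legal from idle.
     */
    RCU_NONIDLE(trace_ipi_entry(ipi_types[ipinr]));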
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 7797e81e40e0..64111bd4440b 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -61,8 +61,10 @@
61 61
62/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */ 62/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
63 63
64ENTRY(mmiocpy)
64ENTRY(memcpy) 65ENTRY(memcpy)
65 66
66#include "copy_template.S" 67#include "copy_template.S"
67 68
68ENDPROC(memcpy) 69ENDPROC(memcpy)
70ENDPROC(mmiocpy)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index a4ee97b5a2bf..3c65e3bd790f 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -16,6 +16,7 @@
16 .text 16 .text
17 .align 5 17 .align 5
18 18
19ENTRY(mmioset)
19ENTRY(memset) 20ENTRY(memset)
20UNWIND( .fnstart ) 21UNWIND( .fnstart )
21 ands r3, r0, #3 @ 1 unaligned? 22 ands r3, r0, #3 @ 1 unaligned?
@@ -133,3 +134,4 @@ UNWIND( .fnstart )
133 b 1b 134 b 1b
134UNWIND( .fnend ) 135UNWIND( .fnend )
135ENDPROC(memset) 136ENDPROC(memset)
137ENDPROC(mmioset)
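mmiocpy/mmioset are plain aliases for the optimised memcpy/memset entry points; exporting them under MMIO-specific names makes MMIO-safety an explicit part of these routines' contract (the same series points ARM's memcpy_toio()/memset_io() wrappers at them), so the implementations could diverge later without touching callers. A hypothetical direct caller (names invented):

    /* Sketch: filling then clearing a mapped region via the new aliases. */
    static void demo_fill(void __iomem *regs, const void *src, size_t len)
    {
    	mmiocpy((void __force *)regs, src, len);	/* cf. memcpy_toio() */
    	mmioset((void __force *)regs, 0, len);		/* cf. memset_io() */
    }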
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c
index e1a56d87599e..1ed4be184a29 100644
--- a/arch/arm/mach-omap2/dma.c
+++ b/arch/arm/mach-omap2/dma.c
@@ -117,7 +117,6 @@ static void omap2_show_dma_caps(void)
117 u8 revision = dma_read(REVISION, 0) & 0xff; 117 u8 revision = dma_read(REVISION, 0) & 0xff;
118 printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n", 118 printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
119 revision >> 4, revision & 0xf); 119 revision >> 4, revision & 0xf);
120 return;
121} 120}
122 121
123static unsigned configure_dma_errata(void) 122static unsigned configure_dma_errata(void)
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index e03d8b5c9ad0..9ab8932403e5 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -4,6 +4,7 @@ menuconfig ARCH_SIRF
4 select ARCH_REQUIRE_GPIOLIB 4 select ARCH_REQUIRE_GPIOLIB
5 select GENERIC_IRQ_CHIP 5 select GENERIC_IRQ_CHIP
6 select NO_IOPORT_MAP 6 select NO_IOPORT_MAP
7 select REGMAP
7 select PINCTRL 8 select PINCTRL
8 select PINCTRL_SIRF 9 select PINCTRL_SIRF
9 help 10 help
diff --git a/arch/arm/mach-prima2/rtciobrg.c b/arch/arm/mach-prima2/rtciobrg.c
index 8f66d8f7ca75..d4852d24dc7d 100644
--- a/arch/arm/mach-prima2/rtciobrg.c
+++ b/arch/arm/mach-prima2/rtciobrg.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * RTC I/O Bridge interfaces for CSR SiRFprimaII 2 * RTC I/O Bridge interfaces for CSR SiRFprimaII/atlas7
3 * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module 3 * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module
4 * 4 *
5 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. 5 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
@@ -10,6 +10,7 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/io.h> 12#include <linux/io.h>
13#include <linux/regmap.h>
13#include <linux/of.h> 14#include <linux/of.h>
14#include <linux/of_address.h> 15#include <linux/of_address.h>
15#include <linux/of_device.h> 16#include <linux/of_device.h>
@@ -66,6 +67,7 @@ u32 sirfsoc_rtc_iobrg_readl(u32 addr)
66{ 67{
67 unsigned long flags, val; 68 unsigned long flags, val;
68 69
70 /* TODO: add hwspinlock to sync with M3 */
69 spin_lock_irqsave(&rtciobrg_lock, flags); 71 spin_lock_irqsave(&rtciobrg_lock, flags);
70 72
71 val = __sirfsoc_rtc_iobrg_readl(addr); 73 val = __sirfsoc_rtc_iobrg_readl(addr);
@@ -90,6 +92,7 @@ void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr)
90{ 92{
91 unsigned long flags; 93 unsigned long flags;
92 94
95 /* TODO: add hwspinlock to sync with M3 */
93 spin_lock_irqsave(&rtciobrg_lock, flags); 96 spin_lock_irqsave(&rtciobrg_lock, flags);
94 97
95 sirfsoc_rtc_iobrg_pre_writel(val, addr); 98 sirfsoc_rtc_iobrg_pre_writel(val, addr);
@@ -102,6 +105,45 @@ void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr)
102} 105}
103EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_writel); 106EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_writel);
104 107
108
109static int regmap_iobg_regwrite(void *context, unsigned int reg,
110 unsigned int val)
111{
112 sirfsoc_rtc_iobrg_writel(val, reg);
113 return 0;
114}
115
116static int regmap_iobg_regread(void *context, unsigned int reg,
117 unsigned int *val)
118{
119 *val = (u32)sirfsoc_rtc_iobrg_readl(reg);
120 return 0;
121}
122
123static struct regmap_bus regmap_iobg = {
124 .reg_write = regmap_iobg_regwrite,
125 .reg_read = regmap_iobg_regread,
126};
127
128/**
129 * devm_regmap_init_iobg(): Initialise managed register map
130 *
 131 * @dev: Device that will be interacted with
132 * @config: Configuration for register map
133 *
134 * The return value will be an ERR_PTR() on error or a valid pointer
135 * to a struct regmap. The regmap will be automatically freed by the
136 * device management code.
137 */
138struct regmap *devm_regmap_init_iobg(struct device *dev,
139 const struct regmap_config *config)
140{
141 const struct regmap_bus *bus = &regmap_iobg;
142
143 return devm_regmap_init(dev, bus, dev, config);
144}
145EXPORT_SYMBOL_GPL(devm_regmap_init_iobg);
146
105static const struct of_device_id rtciobrg_ids[] = { 147static const struct of_device_id rtciobrg_ids[] = {
106 { .compatible = "sirf,prima2-rtciobg" }, 148 { .compatible = "sirf,prima2-rtciobg" },
107 {} 149 {}
@@ -132,7 +174,7 @@ static int __init sirfsoc_rtciobrg_init(void)
132} 174}
133postcore_initcall(sirfsoc_rtciobrg_init); 175postcore_initcall(sirfsoc_rtciobrg_init);
134 176
135MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>, " 177MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>");
136 "Barry Song <baohua.song@csr.com>"); 178MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
137MODULE_DESCRIPTION("CSR SiRFprimaII rtc io bridge"); 179MODULE_DESCRIPTION("CSR SiRFprimaII rtc io bridge");
138MODULE_LICENSE("GPL v2"); 180MODULE_LICENSE("GPL v2");
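A sketch of how a client driver might sit on top of the new bridge-backed regmap; the register layout and driver names are assumptions, not part of this patch:

    #include <linux/platform_device.h>
    #include <linux/regmap.h>

    static const struct regmap_config demo_iobg_config = {
    	.reg_bits = 32,
    	.val_bits = 32,
    	.reg_stride = 4,
    };

    static int demo_probe(struct platform_device *pdev)
    {
    	struct regmap *map;
    	unsigned int val;

    	map = devm_regmap_init_iobg(&pdev->dev, &demo_iobg_config);
    	if (IS_ERR(map))
    		return PTR_ERR(map);

    	/* routed through sirfsoc_rtc_iobrg_readl()/_writel() above */
    	regmap_read(map, 0x10, &val);
    	return regmap_write(map, 0x10, val | BIT(0));
    }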
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index 81502b90dd91..4efe2d43a126 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -35,7 +35,7 @@ config MACH_SUN7I
35 select SUN5I_HSTIMER 35 select SUN5I_HSTIMER
36 36
37config MACH_SUN8I 37config MACH_SUN8I
38 bool "Allwinner A23 (sun8i) SoCs support" 38 bool "Allwinner sun8i Family SoCs support"
39 default ARCH_SUNXI 39 default ARCH_SUNXI
40 select ARM_GIC 40 select ARM_GIC
41 select MFD_SUN6I_PRCM 41 select MFD_SUN6I_PRCM
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
index 1bc811a74a9f..65bab2876343 100644
--- a/arch/arm/mach-sunxi/sunxi.c
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -67,10 +67,13 @@ MACHINE_END
67 67
68static const char * const sun8i_board_dt_compat[] = { 68static const char * const sun8i_board_dt_compat[] = {
69 "allwinner,sun8i-a23", 69 "allwinner,sun8i-a23",
70 "allwinner,sun8i-a33",
71 "allwinner,sun8i-h3",
70 NULL, 72 NULL,
71}; 73};
72 74
73DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i (A23) Family") 75DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i Family")
76 .init_time = sun6i_timer_init,
74 .dt_compat = sun8i_board_dt_compat, 77 .dt_compat = sun8i_board_dt_compat,
75 .init_late = sunxi_dt_cpufreq_init, 78 .init_late = sunxi_dt_cpufreq_init,
76MACHINE_END 79MACHINE_END
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index d1e5ad7ab3bc..0c81056c1dd7 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -255,7 +255,7 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
255} 255}
256#endif 256#endif
257 257
258void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, 258static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
259 unsigned long offset, size_t size, unsigned int mtype, void *caller) 259 unsigned long offset, size_t size, unsigned int mtype, void *caller)
260{ 260{
261 const struct mem_type *type; 261 const struct mem_type *type;
@@ -363,7 +363,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
363 unsigned int mtype) 363 unsigned int mtype)
364{ 364{
365 return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, 365 return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
366 __builtin_return_address(0)); 366 __builtin_return_address(0));
367} 367}
368EXPORT_SYMBOL(__arm_ioremap_pfn); 368EXPORT_SYMBOL(__arm_ioremap_pfn);
369 369
@@ -371,13 +371,26 @@ void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
371 unsigned int, void *) = 371 unsigned int, void *) =
372 __arm_ioremap_caller; 372 __arm_ioremap_caller;
373 373
374void __iomem * 374void __iomem *ioremap(resource_size_t res_cookie, size_t size)
375__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype) 375{
376 return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
377 __builtin_return_address(0));
378}
379EXPORT_SYMBOL(ioremap);
380
381void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
382{
383 return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
384 __builtin_return_address(0));
385}
386EXPORT_SYMBOL(ioremap_cache);
387
388void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
376{ 389{
377 return arch_ioremap_caller(phys_addr, size, mtype, 390 return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
378 __builtin_return_address(0)); 391 __builtin_return_address(0));
379} 392}
380EXPORT_SYMBOL(__arm_ioremap); 393EXPORT_SYMBOL(ioremap_wc);
381 394
382/* 395/*
383 * Remap an arbitrary physical address space into the kernel virtual 396 * Remap an arbitrary physical address space into the kernel virtual
@@ -431,11 +444,11 @@ void __iounmap(volatile void __iomem *io_addr)
431 444
432void (*arch_iounmap)(volatile void __iomem *) = __iounmap; 445void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
433 446
434void __arm_iounmap(volatile void __iomem *io_addr) 447void iounmap(volatile void __iomem *cookie)
435{ 448{
436 arch_iounmap(io_addr); 449 arch_iounmap(cookie);
437} 450}
438EXPORT_SYMBOL(__arm_iounmap); 451EXPORT_SYMBOL(iounmap);
439 452
440#ifdef CONFIG_PCI 453#ifdef CONFIG_PCI
441static int pci_ioremap_mem_type = MT_DEVICE; 454static int pci_ioremap_mem_type = MT_DEVICE;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6ca7d9aa896f..870838a46d52 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1072,6 +1072,7 @@ void __init sanity_check_meminfo(void)
1072 int highmem = 0; 1072 int highmem = 0;
1073 phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; 1073 phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
1074 struct memblock_region *reg; 1074 struct memblock_region *reg;
1075 bool should_use_highmem = false;
1075 1076
1076 for_each_memblock(memory, reg) { 1077 for_each_memblock(memory, reg) {
1077 phys_addr_t block_start = reg->base; 1078 phys_addr_t block_start = reg->base;
@@ -1090,6 +1091,7 @@ void __init sanity_check_meminfo(void)
1090 pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", 1091 pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
1091 &block_start, &block_end); 1092 &block_start, &block_end);
1092 memblock_remove(reg->base, reg->size); 1093 memblock_remove(reg->base, reg->size);
1094 should_use_highmem = true;
1093 continue; 1095 continue;
1094 } 1096 }
1095 1097
@@ -1100,6 +1102,7 @@ void __init sanity_check_meminfo(void)
1100 &block_start, &block_end, &vmalloc_limit); 1102 &block_start, &block_end, &vmalloc_limit);
1101 memblock_remove(vmalloc_limit, overlap_size); 1103 memblock_remove(vmalloc_limit, overlap_size);
1102 block_end = vmalloc_limit; 1104 block_end = vmalloc_limit;
1105 should_use_highmem = true;
1103 } 1106 }
1104 } 1107 }
1105 1108
@@ -1134,6 +1137,9 @@ void __init sanity_check_meminfo(void)
1134 } 1137 }
1135 } 1138 }
1136 1139
1140 if (should_use_highmem)
1141 pr_notice("Consider using a HIGHMEM enabled kernel.\n");
1142
1137 high_memory = __va(arm_lowmem_limit - 1) + 1; 1143 high_memory = __va(arm_lowmem_limit - 1) + 1;
1138 1144
1139 /* 1145 /*
@@ -1494,6 +1500,7 @@ void __init paging_init(const struct machine_desc *mdesc)
1494 build_mem_type_table(); 1500 build_mem_type_table();
1495 prepare_page_table(); 1501 prepare_page_table();
1496 map_lowmem(); 1502 map_lowmem();
1503 memblock_set_current_limit(arm_lowmem_limit);
1497 dma_contiguous_remap(); 1504 dma_contiguous_remap();
1498 devicemaps_init(mdesc); 1505 devicemaps_init(mdesc);
1499 kmap_init(); 1506 kmap_init();
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index afd7e05d95f1..1dd10936d68d 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -351,30 +351,43 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
351} 351}
352EXPORT_SYMBOL(__arm_ioremap_pfn); 352EXPORT_SYMBOL(__arm_ioremap_pfn);
353 353
354void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, 354void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
355 size_t size, unsigned int mtype, void *caller) 355 unsigned int mtype, void *caller)
356{ 356{
357 return __arm_ioremap_pfn(pfn, offset, size, mtype); 357 return (void __iomem *)phys_addr;
358} 358}
359 359
360void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size, 360void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
361 unsigned int mtype) 361
362void __iomem *ioremap(resource_size_t res_cookie, size_t size)
362{ 363{
363 return (void __iomem *)phys_addr; 364 return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
365 __builtin_return_address(0));
364} 366}
365EXPORT_SYMBOL(__arm_ioremap); 367EXPORT_SYMBOL(ioremap);
366 368
367void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *); 369void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
370{
371 return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
372 __builtin_return_address(0));
373}
374EXPORT_SYMBOL(ioremap_cache);
368 375
369void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size, 376void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
370 unsigned int mtype, void *caller) 377{
378 return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
379 __builtin_return_address(0));
380}
381EXPORT_SYMBOL(ioremap_wc);
382
383void __iounmap(volatile void __iomem *addr)
371{ 384{
372 return __arm_ioremap(phys_addr, size, mtype);
373} 385}
386EXPORT_SYMBOL(__iounmap);
374 387
375void (*arch_iounmap)(volatile void __iomem *); 388void (*arch_iounmap)(volatile void __iomem *);
376 389
377void __arm_iounmap(volatile void __iomem *addr) 390void iounmap(volatile void __iomem *addr)
378{ 391{
379} 392}
380EXPORT_SYMBOL(__arm_iounmap); 393EXPORT_SYMBOL(iounmap);
diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c
index 9005b07296c8..aedec81d1198 100644
--- a/arch/arm/vdso/vdsomunge.c
+++ b/arch/arm/vdso/vdsomunge.c
@@ -45,13 +45,11 @@
45 * it does. 45 * it does.
46 */ 46 */
47 47
48#define _GNU_SOURCE
49
50#include <byteswap.h> 48#include <byteswap.h>
51#include <elf.h> 49#include <elf.h>
52#include <errno.h> 50#include <errno.h>
53#include <error.h>
54#include <fcntl.h> 51#include <fcntl.h>
52#include <stdarg.h>
55#include <stdbool.h> 53#include <stdbool.h>
56#include <stdio.h> 54#include <stdio.h>
57#include <stdlib.h> 55#include <stdlib.h>
@@ -82,11 +80,25 @@
82#define EF_ARM_ABI_FLOAT_HARD 0x400 80#define EF_ARM_ABI_FLOAT_HARD 0x400
83#endif 81#endif
84 82
83static int failed;
84static const char *argv0;
85static const char *outfile; 85static const char *outfile;
86 86
87static void fail(const char *fmt, ...)
88{
89 va_list ap;
90
91 failed = 1;
92 fprintf(stderr, "%s: ", argv0);
93 va_start(ap, fmt);
94 vfprintf(stderr, fmt, ap);
95 va_end(ap);
96 exit(EXIT_FAILURE);
97}
98
87static void cleanup(void) 99static void cleanup(void)
88{ 100{
89 if (error_message_count > 0 && outfile != NULL) 101 if (failed && outfile != NULL)
90 unlink(outfile); 102 unlink(outfile);
91} 103}
92 104
@@ -119,68 +131,66 @@ int main(int argc, char **argv)
119 int infd; 131 int infd;
120 132
121 atexit(cleanup); 133 atexit(cleanup);
134 argv0 = argv[0];
122 135
123 if (argc != 3) 136 if (argc != 3)
124 error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]); 137 fail("Usage: %s [infile] [outfile]\n", argv[0]);
125 138
126 infile = argv[1]; 139 infile = argv[1];
127 outfile = argv[2]; 140 outfile = argv[2];
128 141
129 infd = open(infile, O_RDONLY); 142 infd = open(infile, O_RDONLY);
130 if (infd < 0) 143 if (infd < 0)
131 error(EXIT_FAILURE, errno, "Cannot open %s", infile); 144 fail("Cannot open %s: %s\n", infile, strerror(errno));
132 145
133 if (fstat(infd, &stat) != 0) 146 if (fstat(infd, &stat) != 0)
134 error(EXIT_FAILURE, errno, "Failed stat for %s", infile); 147 fail("Failed stat for %s: %s\n", infile, strerror(errno));
135 148
136 inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0); 149 inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
137 if (inbuf == MAP_FAILED) 150 if (inbuf == MAP_FAILED)
138 error(EXIT_FAILURE, errno, "Failed to map %s", infile); 151 fail("Failed to map %s: %s\n", infile, strerror(errno));
139 152
140 close(infd); 153 close(infd);
141 154
142 inhdr = inbuf; 155 inhdr = inbuf;
143 156
144 if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0) 157 if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
145 error(EXIT_FAILURE, 0, "Not an ELF file"); 158 fail("Not an ELF file\n");
146 159
147 if (inhdr->e_ident[EI_CLASS] != ELFCLASS32) 160 if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
148 error(EXIT_FAILURE, 0, "Unsupported ELF class"); 161 fail("Unsupported ELF class\n");
149 162
150 swap = inhdr->e_ident[EI_DATA] != HOST_ORDER; 163 swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;
151 164
152 if (read_elf_half(inhdr->e_type, swap) != ET_DYN) 165 if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
153 error(EXIT_FAILURE, 0, "Not a shared object"); 166 fail("Not a shared object\n");
154 167
155 if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) { 168 if (read_elf_half(inhdr->e_machine, swap) != EM_ARM)
156 error(EXIT_FAILURE, 0, "Unsupported architecture %#x", 169 fail("Unsupported architecture %#x\n", inhdr->e_machine);
157 inhdr->e_machine);
158 }
159 170
160 e_flags = read_elf_word(inhdr->e_flags, swap); 171 e_flags = read_elf_word(inhdr->e_flags, swap);
161 172
162 if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) { 173 if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
163 error(EXIT_FAILURE, 0, "Unsupported EABI version %#x", 174 fail("Unsupported EABI version %#x\n",
164 EF_ARM_EABI_VERSION(e_flags)); 175 EF_ARM_EABI_VERSION(e_flags));
165 } 176 }
166 177
167 if (e_flags & EF_ARM_ABI_FLOAT_HARD) 178 if (e_flags & EF_ARM_ABI_FLOAT_HARD)
168 error(EXIT_FAILURE, 0, 179 fail("Unexpected hard-float flag set in e_flags\n");
169 "Unexpected hard-float flag set in e_flags");
170 180
171 clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT); 181 clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);
172 182
173 outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR); 183 outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
174 if (outfd < 0) 184 if (outfd < 0)
175 error(EXIT_FAILURE, errno, "Cannot open %s", outfile); 185 fail("Cannot open %s: %s\n", outfile, strerror(errno));
176 186
177 if (ftruncate(outfd, stat.st_size) != 0) 187 if (ftruncate(outfd, stat.st_size) != 0)
178 error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile); 188 fail("Cannot truncate %s: %s\n", outfile, strerror(errno));
179 189
180 outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED, 190 outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
181 outfd, 0); 191 outfd, 0);
182 if (outbuf == MAP_FAILED) 192 if (outbuf == MAP_FAILED)
183 error(EXIT_FAILURE, errno, "Failed to map %s", outfile); 193 fail("Failed to map %s: %s\n", outfile, strerror(errno));
184 194
185 close(outfd); 195 close(outfd);
186 196
@@ -195,7 +205,7 @@ int main(int argc, char **argv)
195 } 205 }
196 206
197 if (msync(outbuf, stat.st_size, MS_SYNC) != 0) 207 if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
198 error(EXIT_FAILURE, errno, "Failed to sync %s", outfile); 208 fail("Failed to sync %s: %s\n", outfile, strerror(errno));
199 209
200 return EXIT_SUCCESS; 210 return EXIT_SUCCESS;
201} 211}
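This rewrite exists because error() and error_message_count come from glibc's <error.h>, which is missing on musl and other non-glibc build hosts; fail() reimplements the needed behaviour with only standard stdio/stdarg, and the failed flag preserves the delete-partial-output semantics via atexit(). A standalone demo of that pattern (not kernel code):

    #include <stdio.h>
    #include <stdlib.h>

    static int failed;

    static void cleanup(void)
    {
    	if (failed)
    		fprintf(stderr, "removing partial output\n"); /* cf. unlink(outfile) */
    }

    int main(void)
    {
    	atexit(cleanup);
    	failed = 1;		/* as fail() does before exiting */
    	exit(EXIT_FAILURE);	/* atexit handlers still run */
    }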
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0f6edb14b7e4..318175f62c24 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -23,9 +23,9 @@ config ARM64
23 select BUILDTIME_EXTABLE_SORT 23 select BUILDTIME_EXTABLE_SORT
24 select CLONE_BACKWARDS 24 select CLONE_BACKWARDS
25 select COMMON_CLK 25 select COMMON_CLK
26 select EDAC_SUPPORT
27 select CPU_PM if (SUSPEND || CPU_IDLE) 26 select CPU_PM if (SUSPEND || CPU_IDLE)
28 select DCACHE_WORD_ACCESS 27 select DCACHE_WORD_ACCESS
28 select EDAC_SUPPORT
29 select GENERIC_ALLOCATOR 29 select GENERIC_ALLOCATOR
30 select GENERIC_CLOCKEVENTS 30 select GENERIC_CLOCKEVENTS
31 select GENERIC_CLOCKEVENTS_BROADCAST if SMP 31 select GENERIC_CLOCKEVENTS_BROADCAST if SMP
diff --git a/arch/arm64/boot/dts/apm/apm-mustang.dts b/arch/arm64/boot/dts/apm/apm-mustang.dts
index 83578e766b94..4c55833d8a41 100644
--- a/arch/arm64/boot/dts/apm/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm/apm-mustang.dts
@@ -23,6 +23,16 @@
23 device_type = "memory"; 23 device_type = "memory";
24 reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */ 24 reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */
25 }; 25 };
26
27 gpio-keys {
28 compatible = "gpio-keys";
29 button@1 {
30 label = "POWER";
31 linux,code = <116>;
32 linux,input-type = <0x1>;
33 interrupts = <0x0 0x2d 0x1>;
34 };
35 };
26}; 36};
27 37
28&pcie0clk { 38&pcie0clk {
diff --git a/arch/arm64/boot/dts/arm/Makefile b/arch/arm64/boot/dts/arm/Makefile
index c5c98b91514e..bb3c07209676 100644
--- a/arch/arm64/boot/dts/arm/Makefile
+++ b/arch/arm64/boot/dts/arm/Makefile
@@ -1,6 +1,7 @@
1dtb-$(CONFIG_ARCH_VEXPRESS) += foundation-v8.dtb 1dtb-$(CONFIG_ARCH_VEXPRESS) += foundation-v8.dtb
2dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb juno-r1.dtb 2dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb juno-r1.dtb
3dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb 3dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb
4dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2f-1xv7-ca53x2.dtb
4 5
5always := $(dtb-y) 6always := $(dtb-y)
6subdir-y := $(dts-dirs) 7subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
new file mode 100644
index 000000000000..5b1d0181023b
--- /dev/null
+++ b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
@@ -0,0 +1,191 @@
1/*
2 * ARM Ltd. Versatile Express
3 *
4 * LogicTile Express 20MG
5 * V2F-1XV7
6 *
7 * Cortex-A53 (2 cores) Soft Macrocell Model
8 *
9 * HBI-0247C
10 */
11
12/dts-v1/;
13
14#include <dt-bindings/interrupt-controller/arm-gic.h>
15
16/ {
17 model = "V2F-1XV7 Cortex-A53x2 SMM";
18 arm,hbi = <0x247>;
19 arm,vexpress,site = <0xf>;
20 compatible = "arm,vexpress,v2f-1xv7,ca53x2", "arm,vexpress,v2f-1xv7", "arm,vexpress";
21 interrupt-parent = <&gic>;
22 #address-cells = <2>;
23 #size-cells = <2>;
24
25 chosen {
26 stdout-path = "serial0:38400n8";
27 };
28
29 aliases {
30 serial0 = &v2m_serial0;
31 serial1 = &v2m_serial1;
32 serial2 = &v2m_serial2;
33 serial3 = &v2m_serial3;
34 i2c0 = &v2m_i2c_dvi;
35 i2c1 = &v2m_i2c_pcie;
36 };
37
38 cpus {
39 #address-cells = <2>;
40 #size-cells = <0>;
41
42 cpu@0 {
43 device_type = "cpu";
44 compatible = "arm,cortex-a53", "arm,armv8";
45 reg = <0 0>;
46 next-level-cache = <&L2_0>;
47 };
48
49 cpu@1 {
50 device_type = "cpu";
51 compatible = "arm,cortex-a53", "arm,armv8";
52 reg = <0 1>;
53 next-level-cache = <&L2_0>;
54 };
55
56 L2_0: l2-cache0 {
57 compatible = "cache";
58 };
59 };
60
61 memory@80000000 {
62 device_type = "memory";
63 reg = <0 0x80000000 0 0x80000000>; /* 2GB @ 2GB */
64 };
65
66 gic: interrupt-controller@2c001000 {
67 compatible = "arm,gic-400";
68 #interrupt-cells = <3>;
69 #address-cells = <0>;
70 interrupt-controller;
71 reg = <0 0x2c001000 0 0x1000>,
72 <0 0x2c002000 0 0x2000>,
73 <0 0x2c004000 0 0x2000>,
74 <0 0x2c006000 0 0x2000>;
75 interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
76 };
77
78 timer {
79 compatible = "arm,armv8-timer";
80 interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
81 <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
82 <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
83 <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
84 };
85
86 pmu {
87 compatible = "arm,armv8-pmuv3";
88 interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
89 <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
90 };
91
92 dcc {
93 compatible = "arm,vexpress,config-bus";
94 arm,vexpress,config-bridge = <&v2m_sysreg>;
95
96 smbclk: osc@4 {
97 /* SMC clock */
98 compatible = "arm,vexpress-osc";
99 arm,vexpress-sysreg,func = <1 4>;
100 freq-range = <40000000 40000000>;
101 #clock-cells = <0>;
102 clock-output-names = "smclk";
103 };
104
105 volt@0 {
106 /* VIO to expansion board above */
107 compatible = "arm,vexpress-volt";
108 arm,vexpress-sysreg,func = <2 0>;
109 regulator-name = "VIO_UP";
110 regulator-min-microvolt = <800000>;
111 regulator-max-microvolt = <1800000>;
112 regulator-always-on;
113 };
114
115 volt@1 {
116 /* 12V from power connector J6 */
117 compatible = "arm,vexpress-volt";
118 arm,vexpress-sysreg,func = <2 1>;
119 regulator-name = "12";
120 regulator-always-on;
121 };
122
123 temp@0 {
124 /* FPGA temperature */
125 compatible = "arm,vexpress-temp";
126 arm,vexpress-sysreg,func = <4 0>;
127 label = "FPGA";
128 };
129 };
130
131 smb {
132 compatible = "simple-bus";
133
134 #address-cells = <2>;
135 #size-cells = <1>;
136 ranges = <0 0 0 0x08000000 0x04000000>,
137 <1 0 0 0x14000000 0x04000000>,
138 <2 0 0 0x18000000 0x04000000>,
139 <3 0 0 0x1c000000 0x04000000>,
140 <4 0 0 0x0c000000 0x04000000>,
141 <5 0 0 0x10000000 0x04000000>;
142
143 #interrupt-cells = <1>;
144 interrupt-map-mask = <0 0 63>;
145 interrupt-map = <0 0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
146 <0 0 1 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
147 <0 0 2 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
148 <0 0 3 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
149 <0 0 4 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
150 <0 0 5 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
151 <0 0 6 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
152 <0 0 7 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
153 <0 0 8 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
154 <0 0 9 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
155 <0 0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
156 <0 0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
157 <0 0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
158 <0 0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
159 <0 0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
160 <0 0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
161 <0 0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
162 <0 0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
163 <0 0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
164 <0 0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
165 <0 0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
166 <0 0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
167 <0 0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
168 <0 0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
169 <0 0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
170 <0 0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
171 <0 0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
172 <0 0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
173 <0 0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
174 <0 0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
175 <0 0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
176 <0 0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
177 <0 0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
178 <0 0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
179 <0 0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
180 <0 0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
181 <0 0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
182 <0 0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
183 <0 0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
184 <0 0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
185 <0 0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
186 <0 0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
187 <0 0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
188
189 /include/ "../../../../arm/boot/dts/vexpress-v2m-rs1.dtsi"
190 };
191};
diff --git a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
index d8c0bdc51882..9cb7cf94284a 100644
--- a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
+++ b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
@@ -376,10 +376,19 @@
376 gic0: interrupt-controller@8010,00000000 { 376 gic0: interrupt-controller@8010,00000000 {
377 compatible = "arm,gic-v3"; 377 compatible = "arm,gic-v3";
378 #interrupt-cells = <3>; 378 #interrupt-cells = <3>;
379 #address-cells = <2>;
380 #size-cells = <2>;
381 ranges;
379 interrupt-controller; 382 interrupt-controller;
380 reg = <0x8010 0x00000000 0x0 0x010000>, /* GICD */ 383 reg = <0x8010 0x00000000 0x0 0x010000>, /* GICD */
381 <0x8010 0x80000000 0x0 0x600000>; /* GICR */ 384 <0x8010 0x80000000 0x0 0x600000>; /* GICR */
382 interrupts = <1 9 0xf04>; 385 interrupts = <1 9 0xf04>;
386
387 its: gic-its@8010,00020000 {
388 compatible = "arm,gic-v3-its";
389 msi-controller;
390 reg = <0x8010 0x20000 0x0 0x200000>;
391 };
383 }; 392 };
384 393
385 uaa0: serial@87e0,24000000 { 394 uaa0: serial@87e0,24000000 {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index f38c94f1d898..4e17e7ede33d 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -83,6 +83,7 @@ CONFIG_BLK_DEV_SD=y
83CONFIG_ATA=y 83CONFIG_ATA=y
84CONFIG_SATA_AHCI=y 84CONFIG_SATA_AHCI=y
85CONFIG_SATA_AHCI_PLATFORM=y 85CONFIG_SATA_AHCI_PLATFORM=y
86CONFIG_AHCI_CEVA=y
86CONFIG_AHCI_XGENE=y 87CONFIG_AHCI_XGENE=y
87CONFIG_PATA_PLATFORM=y 88CONFIG_PATA_PLATFORM=y
88CONFIG_PATA_OF_PLATFORM=y 89CONFIG_PATA_OF_PLATFORM=y
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 39248d3adf5d..406485ed110a 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -19,6 +19,14 @@
19#include <asm/psci.h> 19#include <asm/psci.h>
20#include <asm/smp_plat.h> 20#include <asm/smp_plat.h>
21 21
22/* Macros for consistency checks of the GICC subtable of MADT */
23#define ACPI_MADT_GICC_LENGTH \
24 (acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
25
26#define BAD_MADT_GICC_ENTRY(entry, end) \
27 (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
28 (entry)->header.length != ACPI_MADT_GICC_LENGTH)
29
22/* Basic configuration for ACPI */ 30/* Basic configuration for ACPI */
23#ifdef CONFIG_ACPI 31#ifdef CONFIG_ACPI
24/* ACPI table mapping after acpi_gbl_permanent_mmap is set */ 32/* ACPI table mapping after acpi_gbl_permanent_mmap is set */
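These macros exist because ACPI 5.1 defines the GICC subtable as 76 bytes while ACPI 6.0 grew it to 80: the generic BAD_MADT_ENTRY() compares the subtable's length field against sizeof() of the in-kernel struct, which is built from 6.0 headers, so it would reject every CPU entry in a perfectly valid 5.1 MADT. Usage mirrors the arch/arm64/kernel/smp.c hunk further down:

    struct acpi_madt_generic_interrupt *gicc =
    		(struct acpi_madt_generic_interrupt *)header;

    if (BAD_MADT_GICC_ENTRY(gicc, end))
    	return -EINVAL;	/* NULL, truncated, or wrong-length subtable */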
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a7691a378668..f860bfda454a 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -352,8 +352,8 @@ el1_inv:
352 // TODO: add support for undefined instructions in kernel mode 352 // TODO: add support for undefined instructions in kernel mode
353 enable_dbg 353 enable_dbg
354 mov x0, sp 354 mov x0, sp
355 mov x2, x1
355 mov x1, #BAD_SYNC 356 mov x1, #BAD_SYNC
356 mrs x2, esr_el1
357 b bad_mode 357 b bad_mode
358ENDPROC(el1_sync) 358ENDPROC(el1_sync)
359 359
@@ -553,7 +553,7 @@ el0_inv:
553 ct_user_exit 553 ct_user_exit
554 mov x0, sp 554 mov x0, sp
555 mov x1, #BAD_SYNC 555 mov x1, #BAD_SYNC
556 mrs x2, esr_el1 556 mov x2, x25
557 bl bad_mode 557 bl bad_mode
558 b ret_to_user 558 b ret_to_user
559ENDPROC(el0_sync) 559ENDPROC(el0_sync)
diff --git a/arch/arm64/kernel/entry32.S b/arch/arm64/kernel/entry32.S
index bd9bfaa9269b..f332d5d1f6b4 100644
--- a/arch/arm64/kernel/entry32.S
+++ b/arch/arm64/kernel/entry32.S
@@ -32,13 +32,11 @@
32 32
33ENTRY(compat_sys_sigreturn_wrapper) 33ENTRY(compat_sys_sigreturn_wrapper)
34 mov x0, sp 34 mov x0, sp
35 mov x27, #0 // prevent syscall restart handling (why)
36 b compat_sys_sigreturn 35 b compat_sys_sigreturn
37ENDPROC(compat_sys_sigreturn_wrapper) 36ENDPROC(compat_sys_sigreturn_wrapper)
38 37
39ENTRY(compat_sys_rt_sigreturn_wrapper) 38ENTRY(compat_sys_rt_sigreturn_wrapper)
40 mov x0, sp 39 mov x0, sp
41 mov x27, #0 // prevent syscall restart handling (why)
42 b compat_sys_rt_sigreturn 40 b compat_sys_rt_sigreturn
43ENDPROC(compat_sys_rt_sigreturn_wrapper) 41ENDPROC(compat_sys_rt_sigreturn_wrapper)
44 42
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 695801a54ca5..50fb4696654e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -438,7 +438,7 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
438 struct acpi_madt_generic_interrupt *processor; 438 struct acpi_madt_generic_interrupt *processor;
439 439
440 processor = (struct acpi_madt_generic_interrupt *)header; 440 processor = (struct acpi_madt_generic_interrupt *)header;
441 if (BAD_MADT_ENTRY(processor, end)) 441 if (BAD_MADT_GICC_ENTRY(processor, end))
442 return -EINVAL; 442 return -EINVAL;
443 443
444 acpi_table_print_madt_entry(header); 444 acpi_table_print_madt_entry(header);
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 9d84feb41a16..773d37a14039 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -4,5 +4,3 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
4 context.o proc.o pageattr.o 4 context.o proc.o pageattr.o
5obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 5obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
6obj-$(CONFIG_ARM64_PTDUMP) += dump.o 6obj-$(CONFIG_ARM64_PTDUMP) += dump.o
7
8CFLAGS_mmu.o := -I$(srctree)/scripts/dtc/libfdt/
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index 4dda9bd6b8fb..e989cee77414 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -1464,7 +1464,7 @@ static inline void handle_rx_packet(struct sync_port *port)
1464 if (port->write_ts_idx == NBR_IN_DESCR) 1464 if (port->write_ts_idx == NBR_IN_DESCR)
1465 port->write_ts_idx = 0; 1465 port->write_ts_idx = 0;
1466 idx = port->write_ts_idx++; 1466 idx = port->write_ts_idx++;
1467 do_posix_clock_monotonic_gettime(&port->timestamp[idx]); 1467 ktime_get_ts(&port->timestamp[idx]);
1468 port->in_buffer_len += port->inbufchunk; 1468 port->in_buffer_len += port->inbufchunk;
1469 } 1469 }
1470 spin_unlock_irqrestore(&port->lock, flags); 1470 spin_unlock_irqrestore(&port->lock, flags);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2a14585c90d2..aab7e46cadd5 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2231,7 +2231,7 @@ config MIPS_CMP
2231 2231
2232config MIPS_CPS 2232config MIPS_CPS
2233 bool "MIPS Coherent Processing System support" 2233 bool "MIPS Coherent Processing System support"
2234 depends on SYS_SUPPORTS_MIPS_CPS && !64BIT 2234 depends on SYS_SUPPORTS_MIPS_CPS
2235 select MIPS_CM 2235 select MIPS_CM
2236 select MIPS_CPC 2236 select MIPS_CPC
2237 select MIPS_CPS_PM if HOTPLUG_CPU 2237 select MIPS_CPS_PM if HOTPLUG_CPU
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index 37c08a27b4f0..c9f7e231e66b 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2010 Loongson Inc. & Lemote Inc. & 2 * Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
3 * Insititute of Computing Technology 3 * Institute of Computing Technology
4 * Author: Xiang Gao, gaoxiang@ict.ac.cn 4 * Author: Xiang Gao, gaoxiang@ict.ac.cn
5 * Huacai Chen, chenhc@lemote.com 5 * Huacai Chen, chenhc@lemote.com
6 * Xiaofu Meng, Shuangshuang Zhang 6 * Xiaofu Meng, Shuangshuang Zhang
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 2b25d1ba1ea0..16f1ea9ab191 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -23,6 +23,7 @@
23extern int smp_num_siblings; 23extern int smp_num_siblings;
24extern cpumask_t cpu_sibling_map[]; 24extern cpumask_t cpu_sibling_map[];
25extern cpumask_t cpu_core_map[]; 25extern cpumask_t cpu_core_map[];
26extern cpumask_t cpu_foreign_map;
26 27
27#define raw_smp_processor_id() (current_thread_info()->cpu) 28#define raw_smp_processor_id() (current_thread_info()->cpu)
28 29
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index c0c5e5972256..d8f9b357b222 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -600,7 +600,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
600 break; 600 break;
601 601
602 case blezl_op: /* not really i_format */ 602 case blezl_op: /* not really i_format */
603 if (NO_R6EMU) 603 if (!insn.i_format.rt && NO_R6EMU)
604 goto sigill_r6; 604 goto sigill_r6;
605 case blez_op: 605 case blez_op:
606 /* 606 /*
@@ -635,7 +635,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
635 break; 635 break;
636 636
637 case bgtzl_op: 637 case bgtzl_op:
638 if (NO_R6EMU) 638 if (!insn.i_format.rt && NO_R6EMU)
639 goto sigill_r6; 639 goto sigill_r6;
640 case bgtz_op: 640 case bgtz_op:
641 /* 641 /*
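The added rt tests are needed because MIPS R6 removed the branch-likely instructions but reuses their opcodes with a non-zero rt field for the new compact branches, which must not be trapped or emulated. Per the R6 encoding tables (summarised from memory, worth cross-checking against the manual):

    /* Opcode 0x16 (formerly BLEZL) on R6:
     *   rt == 0                BLEZL - removed, emulate/SIGILL path
     *   rs == 0, rt != 0       BLEZC - compact branch, leave alone
     *   rs == rt, rt != 0      BGEZC
     *   rs != rt, both != 0    BGEC
     * Opcode 0x17 (formerly BGTZL) splits the same way into
     * BGTZC/BLTZC/BLTC, hence the identical guard on bgtzl_op.
     */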
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 55b759a0019e..1b6ca634e646 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -60,7 +60,7 @@ LEAF(mips_cps_core_entry)
60 nop 60 nop
61 61
62 /* This is an NMI */ 62 /* This is an NMI */
63 la k0, nmi_handler 63 PTR_LA k0, nmi_handler
64 jr k0 64 jr k0
65 nop 65 nop
66 66
@@ -107,10 +107,10 @@ not_nmi:
107 mul t1, t1, t0 107 mul t1, t1, t0
108 mul t1, t1, t2 108 mul t1, t1, t2
109 109
110 li a0, KSEG0 110 li a0, CKSEG0
111 add a1, a0, t1 111 PTR_ADD a1, a0, t1
1121: cache Index_Store_Tag_I, 0(a0) 1121: cache Index_Store_Tag_I, 0(a0)
113 add a0, a0, t0 113 PTR_ADD a0, a0, t0
114 bne a0, a1, 1b 114 bne a0, a1, 1b
115 nop 115 nop
116icache_done: 116icache_done:
@@ -134,12 +134,12 @@ icache_done:
134 mul t1, t1, t0 134 mul t1, t1, t0
135 mul t1, t1, t2 135 mul t1, t1, t2
136 136
137 li a0, KSEG0 137 li a0, CKSEG0
138 addu a1, a0, t1 138 PTR_ADDU a1, a0, t1
139 subu a1, a1, t0 139 PTR_SUBU a1, a1, t0
1401: cache Index_Store_Tag_D, 0(a0) 1401: cache Index_Store_Tag_D, 0(a0)
141 bne a0, a1, 1b 141 bne a0, a1, 1b
142 add a0, a0, t0 142 PTR_ADD a0, a0, t0
143dcache_done: 143dcache_done:
144 144
145 /* Set Kseg0 CCA to that in s0 */ 145 /* Set Kseg0 CCA to that in s0 */
@@ -152,11 +152,11 @@ dcache_done:
152 152
153 /* Enter the coherent domain */ 153 /* Enter the coherent domain */
154 li t0, 0xff 154 li t0, 0xff
155 sw t0, GCR_CL_COHERENCE_OFS(v1) 155 PTR_S t0, GCR_CL_COHERENCE_OFS(v1)
156 ehb 156 ehb
157 157
158 /* Jump to kseg0 */ 158 /* Jump to kseg0 */
159 la t0, 1f 159 PTR_LA t0, 1f
160 jr t0 160 jr t0
161 nop 161 nop
162 162
@@ -178,9 +178,9 @@ dcache_done:
178 nop 178 nop
179 179
180 /* Off we go! */ 180 /* Off we go! */
181 lw t1, VPEBOOTCFG_PC(v0) 181 PTR_L t1, VPEBOOTCFG_PC(v0)
182 lw gp, VPEBOOTCFG_GP(v0) 182 PTR_L gp, VPEBOOTCFG_GP(v0)
183 lw sp, VPEBOOTCFG_SP(v0) 183 PTR_L sp, VPEBOOTCFG_SP(v0)
184 jr t1 184 jr t1
185 nop 185 nop
186 END(mips_cps_core_entry) 186 END(mips_cps_core_entry)
@@ -217,7 +217,7 @@ LEAF(excep_intex)
217 217
218.org 0x480 218.org 0x480
219LEAF(excep_ejtag) 219LEAF(excep_ejtag)
220 la k0, ejtag_debug_handler 220 PTR_LA k0, ejtag_debug_handler
221 jr k0 221 jr k0
222 nop 222 nop
223 END(excep_ejtag) 223 END(excep_ejtag)
@@ -229,7 +229,7 @@ LEAF(mips_cps_core_init)
229 nop 229 nop
230 230
231 .set push 231 .set push
232 .set mips32r2 232 .set mips64r2
233 .set mt 233 .set mt
234 234
235 /* Only allow 1 TC per VPE to execute... */ 235 /* Only allow 1 TC per VPE to execute... */
@@ -237,7 +237,7 @@ LEAF(mips_cps_core_init)
237 237
238 /* ...and for the moment only 1 VPE */ 238 /* ...and for the moment only 1 VPE */
239 dvpe 239 dvpe
240 la t1, 1f 240 PTR_LA t1, 1f
241 jr.hb t1 241 jr.hb t1
242 nop 242 nop
243 243
@@ -250,25 +250,25 @@ LEAF(mips_cps_core_init)
250 mfc0 t0, CP0_MVPCONF0 250 mfc0 t0, CP0_MVPCONF0
251 srl t0, t0, MVPCONF0_PVPE_SHIFT 251 srl t0, t0, MVPCONF0_PVPE_SHIFT
252 andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) 252 andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
253 addiu t7, t0, 1 253 addiu ta3, t0, 1
254 254
255 /* If there's only 1, we're done */ 255 /* If there's only 1, we're done */
256 beqz t0, 2f 256 beqz t0, 2f
257 nop 257 nop
258 258
259 /* Loop through each VPE within this core */ 259 /* Loop through each VPE within this core */
260 li t5, 1 260 li ta1, 1
261 261
2621: /* Operate on the appropriate TC */ 2621: /* Operate on the appropriate TC */
263 mtc0 t5, CP0_VPECONTROL 263 mtc0 ta1, CP0_VPECONTROL
264 ehb 264 ehb
265 265
266 /* Bind TC to VPE (1:1 TC:VPE mapping) */ 266 /* Bind TC to VPE (1:1 TC:VPE mapping) */
267 mttc0 t5, CP0_TCBIND 267 mttc0 ta1, CP0_TCBIND
268 268
269 /* Set exclusive TC, non-active, master */ 269 /* Set exclusive TC, non-active, master */
270 li t0, VPECONF0_MVP 270 li t0, VPECONF0_MVP
271 sll t1, t5, VPECONF0_XTC_SHIFT 271 sll t1, ta1, VPECONF0_XTC_SHIFT
272 or t0, t0, t1 272 or t0, t0, t1
273 mttc0 t0, CP0_VPECONF0 273 mttc0 t0, CP0_VPECONF0
274 274
@@ -280,8 +280,8 @@ LEAF(mips_cps_core_init)
280 mttc0 t0, CP0_TCHALT 280 mttc0 t0, CP0_TCHALT
281 281
282 /* Next VPE */ 282 /* Next VPE */
283 addiu t5, t5, 1 283 addiu ta1, ta1, 1
284 slt t0, t5, t7 284 slt t0, ta1, ta3
285 bnez t0, 1b 285 bnez t0, 1b
286 nop 286 nop
287 287
@@ -298,19 +298,19 @@ LEAF(mips_cps_core_init)
298 298
299LEAF(mips_cps_boot_vpes) 299LEAF(mips_cps_boot_vpes)
300 /* Retrieve CM base address */ 300 /* Retrieve CM base address */
301 la t0, mips_cm_base 301 PTR_LA t0, mips_cm_base
302 lw t0, 0(t0) 302 PTR_L t0, 0(t0)
303 303
 304 /* Calculate a pointer to this core's struct core_boot_config */ 304 /* Calculate a pointer to this core's struct core_boot_config */
305 lw t0, GCR_CL_ID_OFS(t0) 305 PTR_L t0, GCR_CL_ID_OFS(t0)
306 li t1, COREBOOTCFG_SIZE 306 li t1, COREBOOTCFG_SIZE
307 mul t0, t0, t1 307 mul t0, t0, t1
308 la t1, mips_cps_core_bootcfg 308 PTR_LA t1, mips_cps_core_bootcfg
309 lw t1, 0(t1) 309 PTR_L t1, 0(t1)
310 addu t0, t0, t1 310 PTR_ADDU t0, t0, t1
311 311
 312 /* Calculate this VPE's ID. If the core doesn't support MT use 0 */ 312 /* Calculate this VPE's ID. If the core doesn't support MT use 0 */
313 has_mt t6, 1f 313 has_mt ta2, 1f
314 li t9, 0 314 li t9, 0
315 315
316 /* Find the number of VPEs present in the core */ 316 /* Find the number of VPEs present in the core */
@@ -334,24 +334,24 @@ LEAF(mips_cps_boot_vpes)
 3341: /* Calculate a pointer to this VPE's struct vpe_boot_config */ 3341: /* Calculate a pointer to this VPE's struct vpe_boot_config */
335 li t1, VPEBOOTCFG_SIZE 335 li t1, VPEBOOTCFG_SIZE
336 mul v0, t9, t1 336 mul v0, t9, t1
337 lw t7, COREBOOTCFG_VPECONFIG(t0) 337 PTR_L ta3, COREBOOTCFG_VPECONFIG(t0)
338 addu v0, v0, t7 338 PTR_ADDU v0, v0, ta3
339 339
340#ifdef CONFIG_MIPS_MT 340#ifdef CONFIG_MIPS_MT
341 341
342 /* If the core doesn't support MT then return */ 342 /* If the core doesn't support MT then return */
343 bnez t6, 1f 343 bnez ta2, 1f
344 nop 344 nop
345 jr ra 345 jr ra
346 nop 346 nop
347 347
348 .set push 348 .set push
349 .set mips32r2 349 .set mips64r2
350 .set mt 350 .set mt
351 351
3521: /* Enter VPE configuration state */ 3521: /* Enter VPE configuration state */
353 dvpe 353 dvpe
354 la t1, 1f 354 PTR_LA t1, 1f
355 jr.hb t1 355 jr.hb t1
356 nop 356 nop
3571: mfc0 t1, CP0_MVPCONTROL 3571: mfc0 t1, CP0_MVPCONTROL
@@ -360,12 +360,12 @@ LEAF(mips_cps_boot_vpes)
360 ehb 360 ehb
361 361
362 /* Loop through each VPE */ 362 /* Loop through each VPE */
363 lw t6, COREBOOTCFG_VPEMASK(t0) 363 PTR_L ta2, COREBOOTCFG_VPEMASK(t0)
364 move t8, t6 364 move t8, ta2
365 li t5, 0 365 li ta1, 0
366 366
367 /* Check whether the VPE should be running. If not, skip it */ 367 /* Check whether the VPE should be running. If not, skip it */
3681: andi t0, t6, 1 3681: andi t0, ta2, 1
369 beqz t0, 2f 369 beqz t0, 2f
370 nop 370 nop
371 371
@@ -373,7 +373,7 @@ LEAF(mips_cps_boot_vpes)
373 mfc0 t0, CP0_VPECONTROL 373 mfc0 t0, CP0_VPECONTROL
374 ori t0, t0, VPECONTROL_TARGTC 374 ori t0, t0, VPECONTROL_TARGTC
375 xori t0, t0, VPECONTROL_TARGTC 375 xori t0, t0, VPECONTROL_TARGTC
376 or t0, t0, t5 376 or t0, t0, ta1
377 mtc0 t0, CP0_VPECONTROL 377 mtc0 t0, CP0_VPECONTROL
378 ehb 378 ehb
379 379
@@ -384,8 +384,8 @@ LEAF(mips_cps_boot_vpes)
384 384
 385 /* Calculate a pointer to the VPE's struct vpe_boot_config */ 385 /* Calculate a pointer to the VPE's struct vpe_boot_config */
386 li t0, VPEBOOTCFG_SIZE 386 li t0, VPEBOOTCFG_SIZE
387 mul t0, t0, t5 387 mul t0, t0, ta1
388 addu t0, t0, t7 388 addu t0, t0, ta3
389 389
390 /* Set the TC restart PC */ 390 /* Set the TC restart PC */
391 lw t1, VPEBOOTCFG_PC(t0) 391 lw t1, VPEBOOTCFG_PC(t0)
@@ -423,9 +423,9 @@ LEAF(mips_cps_boot_vpes)
423 mttc0 t0, CP0_VPECONF0 423 mttc0 t0, CP0_VPECONF0
424 424
425 /* Next VPE */ 425 /* Next VPE */
4262: srl t6, t6, 1 4262: srl ta2, ta2, 1
427 addiu t5, t5, 1 427 addiu ta1, ta1, 1
428 bnez t6, 1b 428 bnez ta2, 1b
429 nop 429 nop
430 430
431 /* Leave VPE configuration state */ 431 /* Leave VPE configuration state */
@@ -445,7 +445,7 @@ LEAF(mips_cps_boot_vpes)
445 /* This VPE should be offline, halt the TC */ 445 /* This VPE should be offline, halt the TC */
446 li t0, TCHALT_H 446 li t0, TCHALT_H
447 mtc0 t0, CP0_TCHALT 447 mtc0 t0, CP0_TCHALT
448 la t0, 1f 448 PTR_LA t0, 1f
4491: jr.hb t0 4491: jr.hb t0
450 nop 450 nop
451 451
@@ -466,10 +466,10 @@ LEAF(mips_cps_boot_vpes)
466 .set noat 466 .set noat
467 lw $1, TI_CPU(gp) 467 lw $1, TI_CPU(gp)
468 sll $1, $1, LONGLOG 468 sll $1, $1, LONGLOG
469 la \dest, __per_cpu_offset 469 PTR_LA \dest, __per_cpu_offset
470 addu $1, $1, \dest 470 addu $1, $1, \dest
471 lw $1, 0($1) 471 lw $1, 0($1)
472 la \dest, cps_cpu_state 472 PTR_LA \dest, cps_cpu_state
473 addu \dest, \dest, $1 473 addu \dest, \dest, $1
474 .set pop 474 .set pop
475 .endm 475 .endm
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 6e8de80bb446..4cc13508d967 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -73,10 +73,11 @@ NESTED(handle_sys, PT_SIZE, sp)
73 .set noreorder 73 .set noreorder
74 .set nomacro 74 .set nomacro
75 75
761: user_lw(t5, 16(t0)) # argument #5 from usp 76load_a4: user_lw(t5, 16(t0)) # argument #5 from usp
774: user_lw(t6, 20(t0)) # argument #6 from usp 77load_a5: user_lw(t6, 20(t0)) # argument #6 from usp
783: user_lw(t7, 24(t0)) # argument #7 from usp 78load_a6: user_lw(t7, 24(t0)) # argument #7 from usp
792: user_lw(t8, 28(t0)) # argument #8 from usp 79load_a7: user_lw(t8, 28(t0)) # argument #8 from usp
80loads_done:
80 81
81 sw t5, 16(sp) # argument #5 to ksp 82 sw t5, 16(sp) # argument #5 to ksp
82 sw t6, 20(sp) # argument #6 to ksp 83 sw t6, 20(sp) # argument #6 to ksp
@@ -85,10 +86,10 @@ NESTED(handle_sys, PT_SIZE, sp)
85 .set pop 86 .set pop
86 87
87 .section __ex_table,"a" 88 .section __ex_table,"a"
88 PTR 1b,bad_stack 89 PTR load_a4, bad_stack_a4
89 PTR 2b,bad_stack 90 PTR load_a5, bad_stack_a5
90 PTR 3b,bad_stack 91 PTR load_a6, bad_stack_a6
91 PTR 4b,bad_stack 92 PTR load_a7, bad_stack_a7
92 .previous 93 .previous
93 94
94 lw t0, TI_FLAGS($28) # syscall tracing enabled? 95 lw t0, TI_FLAGS($28) # syscall tracing enabled?
@@ -153,8 +154,8 @@ syscall_trace_entry:
153/* ------------------------------------------------------------------------ */ 154/* ------------------------------------------------------------------------ */
154 155
155 /* 156 /*
156 * The stackpointer for a call with more than 4 arguments is bad. 157 * Our open-coded access area sanity test for the stack pointer
 157 * We probably should handle this case a bit more drastic. 158 * failed. We probably should handle this case a bit more drastically.
158 */ 159 */
159bad_stack: 160bad_stack:
160 li v0, EFAULT 161 li v0, EFAULT
@@ -163,6 +164,22 @@ bad_stack:
163 sw t0, PT_R7(sp) 164 sw t0, PT_R7(sp)
164 j o32_syscall_exit 165 j o32_syscall_exit
165 166
167bad_stack_a4:
168 li t5, 0
169 b load_a5
170
171bad_stack_a5:
172 li t6, 0
173 b load_a6
174
175bad_stack_a6:
176 li t7, 0
177 b load_a7
178
179bad_stack_a7:
180 li t8, 0
181 b loads_done
182
166 /* 183 /*
167 * The system call does not exist in this kernel 184 * The system call does not exist in this kernel
168 */ 185 */
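In C terms, the reworked fixups behave like the sketch below: a fault on any single user-stack load now zeroes just that argument and resumes at the next load, rather than failing the whole syscall with EFAULT - which previously broke syscalls taking four or fewer arguments whose user stack pointer happened to sit within 32 bytes of an unmapped page. (usp and the flow are illustrative; the real code is the assembly above.)

    unsigned long a4 = 0, a5 = 0, a6 = 0, a7 = 0;

    if (get_user(a4, &usp[4]))	/* load_a4 faulting -> bad_stack_a4 */
    	a4 = 0;
    if (get_user(a5, &usp[5]))	/* load_a5 faulting -> bad_stack_a5 */
    	a5 = 0;
    if (get_user(a6, &usp[6]))	/* load_a6 faulting -> bad_stack_a6 */
    	a6 = 0;
    if (get_user(a7, &usp[7]))	/* load_a7 faulting -> bad_stack_a7 */
    	a7 = 0;
    /* loads_done: every argument is populated or zeroed */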
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index d07b210fbeff..f543ff4feef9 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -69,16 +69,17 @@ NESTED(handle_sys, PT_SIZE, sp)
69 daddu t1, t0, 32 69 daddu t1, t0, 32
70 bltz t1, bad_stack 70 bltz t1, bad_stack
71 71
721: lw a4, 16(t0) # argument #5 from usp 72load_a4: lw a4, 16(t0) # argument #5 from usp
732: lw a5, 20(t0) # argument #6 from usp 73load_a5: lw a5, 20(t0) # argument #6 from usp
743: lw a6, 24(t0) # argument #7 from usp 74load_a6: lw a6, 24(t0) # argument #7 from usp
754: lw a7, 28(t0) # argument #8 from usp (for indirect syscalls) 75load_a7: lw a7, 28(t0) # argument #8 from usp
76loads_done:
76 77
77 .section __ex_table,"a" 78 .section __ex_table,"a"
78 PTR 1b, bad_stack 79 PTR load_a4, bad_stack_a4
79 PTR 2b, bad_stack 80 PTR load_a5, bad_stack_a5
80 PTR 3b, bad_stack 81 PTR load_a6, bad_stack_a6
81 PTR 4b, bad_stack 82 PTR load_a7, bad_stack_a7
82 .previous 83 .previous
83 84
84 li t1, _TIF_WORK_SYSCALL_ENTRY 85 li t1, _TIF_WORK_SYSCALL_ENTRY
@@ -167,6 +168,22 @@ bad_stack:
167 sd t0, PT_R7(sp) 168 sd t0, PT_R7(sp)
168 j o32_syscall_exit 169 j o32_syscall_exit
169 170
171bad_stack_a4:
172 li a4, 0
173 b load_a5
174
175bad_stack_a5:
176 li a5, 0
177 b load_a6
178
179bad_stack_a6:
180 li a6, 0
181 b load_a7
182
183bad_stack_a7:
184 li a7, 0
185 b loads_done
186
170not_o32_scall: 187not_o32_scall:
171 /* 188 /*
172 * This is not an o32 compatibility syscall, pass it on 189 * This is not an o32 compatibility syscall, pass it on
@@ -383,7 +400,7 @@ EXPORT(sys32_call_table)
383 PTR sys_connect /* 4170 */ 400 PTR sys_connect /* 4170 */
384 PTR sys_getpeername 401 PTR sys_getpeername
385 PTR sys_getsockname 402 PTR sys_getsockname
386 PTR sys_getsockopt 403 PTR compat_sys_getsockopt
387 PTR sys_listen 404 PTR sys_listen
388 PTR compat_sys_recv /* 4175 */ 405 PTR compat_sys_recv /* 4175 */
389 PTR compat_sys_recvfrom 406 PTR compat_sys_recvfrom
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index be73c491182b..008b3378653a 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -337,6 +337,11 @@ static void __init bootmem_init(void)
337 min_low_pfn = start; 337 min_low_pfn = start;
338 if (end <= reserved_end) 338 if (end <= reserved_end)
339 continue; 339 continue;
340#ifdef CONFIG_BLK_DEV_INITRD
341 /* mapstart should be after initrd_end */
342 if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
343 continue;
344#endif
340 if (start >= mapstart) 345 if (start >= mapstart)
341 continue; 346 continue;
342 mapstart = max(reserved_end, start); 347 mapstart = max(reserved_end, start);
@@ -366,14 +371,6 @@ static void __init bootmem_init(void)
366 max_low_pfn = PFN_DOWN(HIGHMEM_START); 371 max_low_pfn = PFN_DOWN(HIGHMEM_START);
367 } 372 }
368 373
369#ifdef CONFIG_BLK_DEV_INITRD
370 /*
371 * mapstart should be after initrd_end
372 */
373 if (initrd_end)
374 mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
375#endif
376
377 /* 374 /*
378 * Initialize the boot-time allocator with low memory only. 375 * Initialize the boot-time allocator with low memory only.
379 */ 376 */
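Moving the initrd check into the region loop keeps the bootmem bitmap (placed at mapstart) clear of the initrd by skipping any candidate region that ends at or below it, instead of bumping mapstart after the fact. A worked example of the rounding, assuming 4KiB pages:

    /* Say initrd_end maps to physical 0x02000800 (address assumed):
     *   PFN_UP(0x02000800) == (0x02000800 + 0xfff) >> 12 == 0x2001
     * so any region whose end PFN is <= 0x2001 is now skipped when
     * choosing mapstart.
     */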
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 4251d390b5b6..c88937745b4e 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -133,7 +133,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
133 /* 133 /*
134 * Patch the start of mips_cps_core_entry to provide: 134 * Patch the start of mips_cps_core_entry to provide:
135 * 135 *
136 * v0 = CM base address 136 * v1 = CM base address
137 * s0 = kseg0 CCA 137 * s0 = kseg0 CCA
138 */ 138 */
139 entry_code = (u32 *)&mips_cps_core_entry; 139 entry_code = (u32 *)&mips_cps_core_entry;
@@ -369,7 +369,7 @@ void play_dead(void)
369 369
370static void wait_for_sibling_halt(void *ptr_cpu) 370static void wait_for_sibling_halt(void *ptr_cpu)
371{ 371{
372 unsigned cpu = (unsigned)ptr_cpu; 372 unsigned cpu = (unsigned long)ptr_cpu;
373 unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); 373 unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
374 unsigned halted; 374 unsigned halted;
375 unsigned long flags; 375 unsigned long flags;
@@ -430,7 +430,7 @@ static void cps_cpu_die(unsigned int cpu)
430 */ 430 */
431 err = smp_call_function_single(cpu_death_sibling, 431 err = smp_call_function_single(cpu_death_sibling,
432 wait_for_sibling_halt, 432 wait_for_sibling_halt,
433 (void *)cpu, 1); 433 (void *)(unsigned long)cpu, 1);
434 if (err) 434 if (err)
435 panic("Failed to call remote sibling CPU\n"); 435 panic("Failed to call remote sibling CPU\n");
436 } 436 }
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index faa46ebd9dda..d0744cc77ea7 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -63,6 +63,13 @@ EXPORT_SYMBOL(cpu_sibling_map);
63cpumask_t cpu_core_map[NR_CPUS] __read_mostly; 63cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
64EXPORT_SYMBOL(cpu_core_map); 64EXPORT_SYMBOL(cpu_core_map);
65 65
66/*
67 * A logcal cpu mask containing only one VPE per core to
68 * reduce the number of IPIs on large MT systems.
69 */
70cpumask_t cpu_foreign_map __read_mostly;
71EXPORT_SYMBOL(cpu_foreign_map);
72
66/* representing cpus for which sibling maps can be computed */ 73/* representing cpus for which sibling maps can be computed */
67static cpumask_t cpu_sibling_setup_map; 74static cpumask_t cpu_sibling_setup_map;
68 75
@@ -103,6 +110,29 @@ static inline void set_cpu_core_map(int cpu)
103 } 110 }
104} 111}
105 112
113/*
114 * Calculate a new cpu_foreign_map mask whenever a
115 * new cpu appears or disappears.
116 */
117static inline void calculate_cpu_foreign_map(void)
118{
119 int i, k, core_present;
120 cpumask_t temp_foreign_map;
121
122 /* Re-calculate the mask */
123 for_each_online_cpu(i) {
124 core_present = 0;
125 for_each_cpu(k, &temp_foreign_map)
126 if (cpu_data[i].package == cpu_data[k].package &&
127 cpu_data[i].core == cpu_data[k].core)
128 core_present = 1;
129 if (!core_present)
130 cpumask_set_cpu(i, &temp_foreign_map);
131 }
132
133 cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
134}
135
106struct plat_smp_ops *mp_ops; 136struct plat_smp_ops *mp_ops;
107EXPORT_SYMBOL(mp_ops); 137EXPORT_SYMBOL(mp_ops);
108 138
@@ -146,6 +176,8 @@ asmlinkage void start_secondary(void)
146 set_cpu_sibling_map(cpu); 176 set_cpu_sibling_map(cpu);
147 set_cpu_core_map(cpu); 177 set_cpu_core_map(cpu);
148 178
179 calculate_cpu_foreign_map();
180
149 cpumask_set_cpu(cpu, &cpu_callin_map); 181 cpumask_set_cpu(cpu, &cpu_callin_map);
150 182
151 synchronise_count_slave(cpu); 183 synchronise_count_slave(cpu);
@@ -173,9 +205,18 @@ void __irq_entry smp_call_function_interrupt(void)
173static void stop_this_cpu(void *dummy) 205static void stop_this_cpu(void *dummy)
174{ 206{
175 /* 207 /*
176 * Remove this CPU: 208 * Remove this CPU. Be a bit slow here and
209 * set the bits for every online CPU so we don't miss
210 * any IPI whilst taking this VPE down.
177 */ 211 */
212
213 cpumask_copy(&cpu_foreign_map, cpu_online_mask);
214
215 /* Make it visible to every other CPU */
216 smp_mb();
217
178 set_cpu_online(smp_processor_id(), false); 218 set_cpu_online(smp_processor_id(), false);
219 calculate_cpu_foreign_map();
179 local_irq_disable(); 220 local_irq_disable();
180 while (1); 221 while (1);
181} 222}
@@ -197,6 +238,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
197 mp_ops->prepare_cpus(max_cpus); 238 mp_ops->prepare_cpus(max_cpus);
198 set_cpu_sibling_map(0); 239 set_cpu_sibling_map(0);
199 set_cpu_core_map(0); 240 set_cpu_core_map(0);
241 calculate_cpu_foreign_map();
200#ifndef CONFIG_HOTPLUG_CPU 242#ifndef CONFIG_HOTPLUG_CPU
201 init_cpu_present(cpu_possible_mask); 243 init_cpu_present(cpu_possible_mask);
202#endif 244#endif
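cpu_foreign_map keeps exactly one VPE per physical core, so an IPI-based cache operation reaches each core once instead of once per hardware thread. A standalone sketch of the recalculation (the topology table is invented; note the sketch accumulates into an explicitly zeroed mask, which an on-stack cpumask_t also needs before use):

#include <stdio.h>

struct cpu_info { int package; int core; };

int main(void)
{
    struct cpu_info cpu[] = {
        {0, 0}, {0, 0},    /* two VPEs on core 0 */
        {0, 1}, {0, 1},    /* two VPEs on core 1 */
    };
    int n = sizeof(cpu) / sizeof(cpu[0]);
    unsigned long mask = 0;    /* stands in for temp_foreign_map */
    int i, k, core_present;

    for (i = 0; i < n; i++) {
        core_present = 0;
        for (k = 0; k < n; k++)
            if ((mask & (1UL << k)) &&
                cpu[i].package == cpu[k].package &&
                cpu[i].core == cpu[k].core)
                core_present = 1;
        if (!core_present)
            mask |= 1UL << i;
    }
    printf("cpu_foreign_map: 0x%lx\n", mask);    /* 0x5: CPUs 0 and 2 */
    return 0;
}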
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 2a7b38ed23f0..e207a43b5f8f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2130,10 +2130,10 @@ void per_cpu_trap_init(bool is_boot_cpu)
2130 BUG_ON(current->mm); 2130 BUG_ON(current->mm);
2131 enter_lazy_tlb(&init_mm, current); 2131 enter_lazy_tlb(&init_mm, current);
2132 2132
2133 /* Boot CPU's cache setup in setup_arch(). */ 2133 /* Boot CPU's cache setup in setup_arch(). */
2134 if (!is_boot_cpu) 2134 if (!is_boot_cpu)
2135 cpu_cache_init(); 2135 cpu_cache_init();
2136 tlb_init(); 2136 tlb_init();
2137 TLBMISS_HANDLER_SETUP(); 2137 TLBMISS_HANDLER_SETUP();
2138} 2138}
2139 2139
diff --git a/arch/mips/loongson64/common/bonito-irq.c b/arch/mips/loongson64/common/bonito-irq.c
index cc0e4fd548e6..4e116d23bab3 100644
--- a/arch/mips/loongson64/common/bonito-irq.c
+++ b/arch/mips/loongson64/common/bonito-irq.c
@@ -3,7 +3,7 @@
3 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net 3 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
4 * Copyright (C) 2000, 2001 Ralf Baechle (ralf@gnu.org) 4 * Copyright (C) 2000, 2001 Ralf Baechle (ralf@gnu.org)
5 * 5 *
6 * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology 6 * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
7 * Author: Fuxin Zhang, zhangfx@lemote.com 7 * Author: Fuxin Zhang, zhangfx@lemote.com
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
diff --git a/arch/mips/loongson64/common/cmdline.c b/arch/mips/loongson64/common/cmdline.c
index 72fed003a536..01fbed137028 100644
--- a/arch/mips/loongson64/common/cmdline.c
+++ b/arch/mips/loongson64/common/cmdline.c
@@ -6,7 +6,7 @@
6 * Copyright 2003 ICT CAS 6 * Copyright 2003 ICT CAS
7 * Author: Michael Guo <guoyi@ict.ac.cn> 7 * Author: Michael Guo <guoyi@ict.ac.cn>
8 * 8 *
9 * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology 9 * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
10 * Author: Fuxin Zhang, zhangfx@lemote.com 10 * Author: Fuxin Zhang, zhangfx@lemote.com
11 * 11 *
12 * Copyright (C) 2009 Lemote Inc. 12 * Copyright (C) 2009 Lemote Inc.
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
index 12c75db23420..875037063a80 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
+++ b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * CS5536 General timer functions 2 * CS5536 General timer functions
3 * 3 *
4 * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology 4 * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
5 * Author: Yanhua, yanh@lemote.com 5 * Author: Yanhua, yanh@lemote.com
6 * 6 *
7 * Copyright (C) 2009 Lemote Inc. 7 * Copyright (C) 2009 Lemote Inc.
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
index 22f04ca2ff3e..f6c44dd332e2 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/common/env.c
@@ -6,7 +6,7 @@
6 * Copyright 2003 ICT CAS 6 * Copyright 2003 ICT CAS
7 * Author: Michael Guo <guoyi@ict.ac.cn> 7 * Author: Michael Guo <guoyi@ict.ac.cn>
8 * 8 *
9 * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology 9 * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
10 * Author: Fuxin Zhang, zhangfx@lemote.com 10 * Author: Fuxin Zhang, zhangfx@lemote.com
11 * 11 *
12 * Copyright (C) 2009 Lemote Inc. 12 * Copyright (C) 2009 Lemote Inc.
diff --git a/arch/mips/loongson64/common/irq.c b/arch/mips/loongson64/common/irq.c
index 687003b19b45..d36d969a4a87 100644
--- a/arch/mips/loongson64/common/irq.c
+++ b/arch/mips/loongson64/common/irq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology 2 * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
3 * Author: Fuxin Zhang, zhangfx@lemote.com 3 * Author: Fuxin Zhang, zhangfx@lemote.com
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/arch/mips/loongson64/common/setup.c b/arch/mips/loongson64/common/setup.c
index d477dd6bb326..2dc5122f0e09 100644
--- a/arch/mips/loongson64/common/setup.c
+++ b/arch/mips/loongson64/common/setup.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology 2 * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
3 * Author: Fuxin Zhang, zhangfx@lemote.com 3 * Author: Fuxin Zhang, zhangfx@lemote.com
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/arch/mips/loongson64/fuloong-2e/irq.c b/arch/mips/loongson64/fuloong-2e/irq.c
index ef5ec8f3de5f..892963f860b7 100644
--- a/arch/mips/loongson64/fuloong-2e/irq.c
+++ b/arch/mips/loongson64/fuloong-2e/irq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology 2 * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
3 * Author: Fuxin Zhang, zhangfx@lemote.com 3 * Author: Fuxin Zhang, zhangfx@lemote.com
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/arch/mips/loongson64/lemote-2f/clock.c b/arch/mips/loongson64/lemote-2f/clock.c
index 462e34d46b4a..a78fb657068c 100644
--- a/arch/mips/loongson64/lemote-2f/clock.c
+++ b/arch/mips/loongson64/lemote-2f/clock.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology 2 * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
3 * Author: Yanhua, yanh@lemote.com 3 * Author: Yanhua, yanh@lemote.com
4 * 4 *
5 * This file is subject to the terms and conditions of the GNU General Public 5 * This file is subject to the terms and conditions of the GNU General Public
@@ -15,7 +15,7 @@
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16 16
17#include <asm/clock.h> 17#include <asm/clock.h>
18#include <asm/mach-loongson/loongson.h> 18#include <asm/mach-loongson64/loongson.h>
19 19
20static LIST_HEAD(clock_list); 20static LIST_HEAD(clock_list);
21static DEFINE_SPINLOCK(clock_lock); 21static DEFINE_SPINLOCK(clock_lock);
diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c
index 12d14ed48778..6f9e010cec4d 100644
--- a/arch/mips/loongson64/loongson-3/numa.c
+++ b/arch/mips/loongson64/loongson-3/numa.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2010 Loongson Inc. & Lemote Inc. & 2 * Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
3 * Insititute of Computing Technology 3 * Institute of Computing Technology
4 * Author: Xiang Gao, gaoxiang@ict.ac.cn 4 * Author: Xiang Gao, gaoxiang@ict.ac.cn
5 * Huacai Chen, chenhc@lemote.com 5 * Huacai Chen, chenhc@lemote.com
6 * Xiaofu Meng, Shuangshuang Zhang 6 * Xiaofu Meng, Shuangshuang Zhang
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 22b9b2cb9219..712f17a2ecf2 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -451,7 +451,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
451 /* Fall through */ 451 /* Fall through */
452 case jr_op: 452 case jr_op:
453 /* For R6, JR already emulated in jalr_op */ 453 /* For R6, JR already emulated in jalr_op */
454 if (NO_R6EMU && insn.r_format.opcode == jr_op) 454 if (NO_R6EMU && insn.r_format.func == jr_op)
455 break; 455 break;
456 *contpc = regs->regs[insn.r_format.rs]; 456 *contpc = regs->regs[insn.r_format.rs];
457 return 1; 457 return 1;
@@ -551,7 +551,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
551 dec_insn.next_pc_inc; 551 dec_insn.next_pc_inc;
552 return 1; 552 return 1;
553 case blezl_op: 553 case blezl_op:
554 if (NO_R6EMU) 554 if (!insn.i_format.rt && NO_R6EMU)
555 break; 555 break;
556 case blez_op: 556 case blez_op:
557 557
@@ -588,7 +588,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
588 dec_insn.next_pc_inc; 588 dec_insn.next_pc_inc;
589 return 1; 589 return 1;
590 case bgtzl_op: 590 case bgtzl_op:
591 if (NO_R6EMU) 591 if (!insn.i_format.rt && NO_R6EMU)
592 break; 592 break;
593 case bgtz_op: 593 case bgtz_op:
594 /* 594 /*
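Both cp1emu.c changes tighten instruction identification. jr is a SPECIAL-encoded instruction, so it must be matched on the function field (its opcode field is 0); and blezl/bgtzl are only the deprecated branch-likely instructions when rt is zero, since R6 reuses those opcodes with a non-zero rt for compact branches (BLEZC and friends), which the NO_R6EMU rejection must not catch. A small decode sketch (field macros and opcode values follow the classic MIPS encodings and are shown here for illustration):

#include <stdio.h>
#include <stdint.h>

#define OP(insn)    (((insn) >> 26) & 0x3f)
#define RT(insn)    (((insn) >> 16) & 0x1f)
#define FUNC(insn)  ((insn) & 0x3f)

static int is_true_blezl(uint32_t insn)
{
    /* rt must be 0: R6 compact branches reuse this opcode with rt != 0 */
    return OP(insn) == 0x16 && RT(insn) == 0;
}

static int is_jr(uint32_t insn)
{
    /* jr is SPECIAL-encoded: match the function field, not the opcode */
    return OP(insn) == 0x00 && FUNC(insn) == 0x08;
}

int main(void)
{
    uint32_t jr_ra = 0x03e00008;    /* jr ra */
    printf("is_jr=%d is_true_blezl=%d\n", is_jr(jr_ra), is_true_blezl(jr_ra));
    return 0;
}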
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 7f660dc67596..fbea4432f3f2 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -37,6 +37,7 @@
37#include <asm/cacheflush.h> /* for run_uncached() */ 37#include <asm/cacheflush.h> /* for run_uncached() */
38#include <asm/traps.h> 38#include <asm/traps.h>
39#include <asm/dma-coherence.h> 39#include <asm/dma-coherence.h>
40#include <asm/mips-cm.h>
40 41
41/* 42/*
42 * Special Variant of smp_call_function for use by cache functions: 43 * Special Variant of smp_call_function for use by cache functions:
@@ -51,9 +52,16 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
51{ 52{
52 preempt_disable(); 53 preempt_disable();
53 54
54#ifndef CONFIG_MIPS_MT_SMP 55 /*
55 smp_call_function(func, info, 1); 56 * The Coherent Manager propagates address-based cache ops to other
56#endif 57 * cores but not index-based ops. However, r4k_on_each_cpu is used
58 * in both cases so there is no easy way to tell what kind of op is
 59 * being executed on the other cores. The best we can probably do is
60 * to restrict that call when a CM is not present because both
61 * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
62 */
63 if (!mips_cm_present())
64 smp_call_function_many(&cpu_foreign_map, func, info, 1);
57 func(info); 65 func(info);
58 preempt_enable(); 66 preempt_enable();
59} 67}
@@ -937,7 +945,9 @@ static void b5k_instruction_hazard(void)
937} 945}
938 946
939static char *way_string[] = { NULL, "direct mapped", "2-way", 947static char *way_string[] = { NULL, "direct mapped", "2-way",
940 "3-way", "4-way", "5-way", "6-way", "7-way", "8-way" 948 "3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
949 "9-way", "10-way", "11-way", "12-way",
950 "13-way", "14-way", "15-way", "16-way",
941}; 951};
942 952
943static void probe_pcache(void) 953static void probe_pcache(void)
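The r4k_on_each_cpu() change above replaces the compile-time CONFIG_MIPS_MT_SMP guard with a runtime test: when a Coherence Manager is present, address-based ops propagate in hardware and the CM-based SMP setups restrict index-based ops, so no cross-call is needed; otherwise the op is sent to one VPE per remote core via cpu_foreign_map rather than to every CPU. A kernel-style sketch of the resulting shape (helper names are the kernel's, bodies elided):

static inline void r4k_on_each_cpu_sketch(void (*func)(void *), void *info)
{
    preempt_disable();
    /* No CM to propagate the op in hardware: IPI one VPE per core. */
    if (!mips_cm_present())
        smp_call_function_many(&cpu_foreign_map, func, info, 1);
    func(info);            /* always run the op locally too */
    preempt_enable();
}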
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 185e68261f45..5625b190edc0 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -119,18 +119,24 @@ void read_persistent_clock(struct timespec *ts)
119 119
120int get_c0_fdc_int(void) 120int get_c0_fdc_int(void)
121{ 121{
122 int mips_cpu_fdc_irq; 122 /*
123 * Some cores claim the FDC is routable through the GIC, but it doesn't
124 * actually seem to be connected for those Malta bitstreams.
125 */
126 switch (current_cpu_type()) {
127 case CPU_INTERAPTIV:
128 case CPU_PROAPTIV:
129 return -1;
130 };
123 131
124 if (cpu_has_veic) 132 if (cpu_has_veic)
125 mips_cpu_fdc_irq = -1; 133 return -1;
126 else if (gic_present) 134 else if (gic_present)
127 mips_cpu_fdc_irq = gic_get_c0_fdc_int(); 135 return gic_get_c0_fdc_int();
128 else if (cp0_fdc_irq >= 0) 136 else if (cp0_fdc_irq >= 0)
129 mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq; 137 return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
130 else 138 else
131 mips_cpu_fdc_irq = -1; 139 return -1;
132
133 return mips_cpu_fdc_irq;
134} 140}
135 141
136int get_c0_perfcount_int(void) 142int get_c0_perfcount_int(void)
diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c
index d2dc836523a3..8bd8ebb20a72 100644
--- a/arch/mips/pistachio/init.c
+++ b/arch/mips/pistachio/init.c
@@ -63,13 +63,19 @@ void __init plat_mem_setup(void)
63 plat_setup_iocoherency(); 63 plat_setup_iocoherency();
64} 64}
65 65
66#define DEFAULT_CPC_BASE_ADDR 0x1bde0000 66#define DEFAULT_CPC_BASE_ADDR 0x1bde0000
67#define DEFAULT_CDMM_BASE_ADDR 0x1bdd0000
67 68
68phys_addr_t mips_cpc_default_phys_base(void) 69phys_addr_t mips_cpc_default_phys_base(void)
69{ 70{
70 return DEFAULT_CPC_BASE_ADDR; 71 return DEFAULT_CPC_BASE_ADDR;
71} 72}
72 73
74phys_addr_t mips_cdmm_phys_base(void)
75{
76 return DEFAULT_CDMM_BASE_ADDR;
77}
78
73static void __init mips_nmi_setup(void) 79static void __init mips_nmi_setup(void)
74{ 80{
75 void *base; 81 void *base;
diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
index 67889fcea8aa..7c73fcb92a10 100644
--- a/arch/mips/pistachio/time.c
+++ b/arch/mips/pistachio/time.c
@@ -27,6 +27,11 @@ int get_c0_perfcount_int(void)
27 return gic_get_c0_perfcount_int(); 27 return gic_get_c0_perfcount_int();
28} 28}
29 29
30int get_c0_fdc_int(void)
31{
32 return gic_get_c0_fdc_int();
33}
34
30void __init plat_time_init(void) 35void __init plat_time_init(void)
31{ 36{
32 struct device_node *np; 37 struct device_node *np;
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 0a183756d6ec..f93c4a4e6580 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -16,7 +16,7 @@
16#include <asm/processor.h> 16#include <asm/processor.h>
17#include <asm/cache.h> 17#include <asm/cache.h>
18 18
19extern spinlock_t pa_dbit_lock; 19extern spinlock_t pa_tlb_lock;
20 20
21/* 21/*
22 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel 22 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
@@ -33,6 +33,19 @@ extern spinlock_t pa_dbit_lock;
33 */ 33 */
34#define kern_addr_valid(addr) (1) 34#define kern_addr_valid(addr) (1)
35 35
36/* Purge data and instruction TLB entries. Must be called holding
37 * the pa_tlb_lock. The TLB purge instructions are slow on SMP
38 * machines since the purge must be broadcast to all CPUs.
39 */
40
41static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
42{
43 mtsp(mm->context, 1);
44 pdtlb(addr);
45 if (unlikely(split_tlb))
46 pitlb(addr);
47}
48
36/* Certain architectures need to do special things when PTEs 49/* Certain architectures need to do special things when PTEs
37 * within a page table are directly modified. Thus, the following 50 * within a page table are directly modified. Thus, the following
38 * hook is made available. 51 * hook is made available.
@@ -42,15 +55,20 @@ extern spinlock_t pa_dbit_lock;
42 *(pteptr) = (pteval); \ 55 *(pteptr) = (pteval); \
43 } while(0) 56 } while(0)
44 57
45extern void purge_tlb_entries(struct mm_struct *, unsigned long); 58#define pte_inserted(x) \
59 ((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED)) \
60 == (_PAGE_PRESENT|_PAGE_ACCESSED))
46 61
47#define set_pte_at(mm, addr, ptep, pteval) \ 62#define set_pte_at(mm, addr, ptep, pteval) \
48 do { \ 63 do { \
64 pte_t old_pte; \
49 unsigned long flags; \ 65 unsigned long flags; \
50 spin_lock_irqsave(&pa_dbit_lock, flags); \ 66 spin_lock_irqsave(&pa_tlb_lock, flags); \
51 set_pte(ptep, pteval); \ 67 old_pte = *ptep; \
52 purge_tlb_entries(mm, addr); \ 68 set_pte(ptep, pteval); \
53 spin_unlock_irqrestore(&pa_dbit_lock, flags); \ 69 if (pte_inserted(old_pte)) \
70 purge_tlb_entries(mm, addr); \
71 spin_unlock_irqrestore(&pa_tlb_lock, flags); \
54 } while (0) 72 } while (0)
55 73
56#endif /* !__ASSEMBLY__ */ 74#endif /* !__ASSEMBLY__ */
@@ -268,7 +286,7 @@ extern unsigned long *empty_zero_page;
268 286
269#define pte_none(x) (pte_val(x) == 0) 287#define pte_none(x) (pte_val(x) == 0)
270#define pte_present(x) (pte_val(x) & _PAGE_PRESENT) 288#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
271#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0) 289#define pte_clear(mm, addr, xp) set_pte_at(mm, addr, xp, __pte(0))
272 290
273#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK) 291#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
274#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) 292#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
@@ -435,15 +453,15 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
435 if (!pte_young(*ptep)) 453 if (!pte_young(*ptep))
436 return 0; 454 return 0;
437 455
438 spin_lock_irqsave(&pa_dbit_lock, flags); 456 spin_lock_irqsave(&pa_tlb_lock, flags);
439 pte = *ptep; 457 pte = *ptep;
440 if (!pte_young(pte)) { 458 if (!pte_young(pte)) {
441 spin_unlock_irqrestore(&pa_dbit_lock, flags); 459 spin_unlock_irqrestore(&pa_tlb_lock, flags);
442 return 0; 460 return 0;
443 } 461 }
444 set_pte(ptep, pte_mkold(pte)); 462 set_pte(ptep, pte_mkold(pte));
445 purge_tlb_entries(vma->vm_mm, addr); 463 purge_tlb_entries(vma->vm_mm, addr);
446 spin_unlock_irqrestore(&pa_dbit_lock, flags); 464 spin_unlock_irqrestore(&pa_tlb_lock, flags);
447 return 1; 465 return 1;
448} 466}
449 467
@@ -453,11 +471,12 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
453 pte_t old_pte; 471 pte_t old_pte;
454 unsigned long flags; 472 unsigned long flags;
455 473
456 spin_lock_irqsave(&pa_dbit_lock, flags); 474 spin_lock_irqsave(&pa_tlb_lock, flags);
457 old_pte = *ptep; 475 old_pte = *ptep;
458 pte_clear(mm,addr,ptep); 476 set_pte(ptep, __pte(0));
459 purge_tlb_entries(mm, addr); 477 if (pte_inserted(old_pte))
460 spin_unlock_irqrestore(&pa_dbit_lock, flags); 478 purge_tlb_entries(mm, addr);
479 spin_unlock_irqrestore(&pa_tlb_lock, flags);
461 480
462 return old_pte; 481 return old_pte;
463} 482}
@@ -465,10 +484,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
465static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 484static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
466{ 485{
467 unsigned long flags; 486 unsigned long flags;
468 spin_lock_irqsave(&pa_dbit_lock, flags); 487 spin_lock_irqsave(&pa_tlb_lock, flags);
469 set_pte(ptep, pte_wrprotect(*ptep)); 488 set_pte(ptep, pte_wrprotect(*ptep));
470 purge_tlb_entries(mm, addr); 489 purge_tlb_entries(mm, addr);
471 spin_unlock_irqrestore(&pa_dbit_lock, flags); 490 spin_unlock_irqrestore(&pa_tlb_lock, flags);
472} 491}
473 492
474#define pte_same(A,B) (pte_val(A) == pte_val(B)) 493#define pte_same(A,B) (pte_val(A) == pte_val(B))
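The new pte_inserted() test encodes the observation that the parisc software TLB handlers only install PTEs that are both present and accessed, so a PTE lacking either bit can never be in any TLB and needs no expensive broadcast purge when it is overwritten or cleared. A standalone sketch of the guarded purge (flag values and the purge itself are stand-ins):

#include <stdio.h>

#define _PAGE_PRESENT  0x1UL
#define _PAGE_ACCESSED 0x2UL

static int pte_inserted(unsigned long pte)
{
    return (pte & (_PAGE_PRESENT | _PAGE_ACCESSED))
        == (_PAGE_PRESENT | _PAGE_ACCESSED);
}

static void set_pte_sketch(unsigned long *ptep, unsigned long val)
{
    unsigned long old = *ptep;    /* pa_tlb_lock would be held here */

    *ptep = val;
    if (pte_inserted(old))
        printf("purge TLB entry (old pte %#lx)\n", old);
    else
        printf("skip purge (old pte %#lx was never inserted)\n", old);
}

int main(void)
{
    unsigned long pte = _PAGE_PRESENT | _PAGE_ACCESSED;
    set_pte_sketch(&pte, 0);              /* purges */
    set_pte_sketch(&pte, _PAGE_PRESENT);  /* old was 0: no purge */
    return 0;
}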
diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
index 9d086a599fa0..e84b96478193 100644
--- a/arch/parisc/include/asm/tlbflush.h
+++ b/arch/parisc/include/asm/tlbflush.h
@@ -13,6 +13,9 @@
13 * active at any one time on the Merced bus. This tlb purge 13 * active at any one time on the Merced bus. This tlb purge
14 * synchronisation is fairly lightweight and harmless so we activate 14 * synchronisation is fairly lightweight and harmless so we activate
15 * it on all systems not just the N class. 15 * it on all systems not just the N class.
16
17 * It is also used to ensure PTE updates are atomic and consistent
18 * with the TLB.
16 */ 19 */
17extern spinlock_t pa_tlb_lock; 20extern spinlock_t pa_tlb_lock;
18 21
@@ -24,20 +27,24 @@ extern void flush_tlb_all_local(void *);
24 27
25#define smp_flush_tlb_all() flush_tlb_all() 28#define smp_flush_tlb_all() flush_tlb_all()
26 29
30int __flush_tlb_range(unsigned long sid,
31 unsigned long start, unsigned long end);
32
33#define flush_tlb_range(vma, start, end) \
34 __flush_tlb_range((vma)->vm_mm->context, start, end)
35
36#define flush_tlb_kernel_range(start, end) \
37 __flush_tlb_range(0, start, end)
38
27/* 39/*
28 * flush_tlb_mm() 40 * flush_tlb_mm()
29 * 41 *
30 * XXX This code is NOT valid for HP-UX compatibility processes, 42 * The code to switch to a new context is NOT valid for processes
31 * (although it will probably work 99% of the time). HP-UX 43 * which play with the space id's. Thus, we have to preserve the
32 * processes are free to play with the space id's and save them 44 * space and just flush the entire tlb. However, the compilers,
33 * over long periods of time, etc. so we have to preserve the 45 * dynamic linker, etc, do not manipulate space id's, so there
34 * space and just flush the entire tlb. We need to check the 46 * could be a significant performance benefit in switching contexts
35 * personality in order to do that, but the personality is not 47 * and not flushing the whole tlb.
36 * currently being set correctly.
37 *
38 * Of course, Linux processes could do the same thing, but
39 * we don't support that (and the compilers, dynamic linker,
40 * etc. do not do that).
41 */ 48 */
42 49
43static inline void flush_tlb_mm(struct mm_struct *mm) 50static inline void flush_tlb_mm(struct mm_struct *mm)
@@ -45,10 +52,18 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
45 BUG_ON(mm == &init_mm); /* Should never happen */ 52 BUG_ON(mm == &init_mm); /* Should never happen */
46 53
47#if 1 || defined(CONFIG_SMP) 54#if 1 || defined(CONFIG_SMP)
55 /* Except for very small threads, flushing the whole TLB is
56 * faster than using __flush_tlb_range. The pdtlb and pitlb
57 * instructions are very slow because of the TLB broadcast.
58 * It might be faster to do local range flushes on all CPUs
59 * on PA 2.0 systems.
60 */
48 flush_tlb_all(); 61 flush_tlb_all();
49#else 62#else
50 /* FIXME: currently broken, causing space id and protection ids 63 /* FIXME: currently broken, causing space id and protection ids
51 * to go out of sync, resulting in faults on userspace accesses. 64 * to go out of sync, resulting in faults on userspace accesses.
65 * This approach needs further investigation since running many
66 * small applications (e.g., GCC testsuite) is faster on HP-UX.
52 */ 67 */
53 if (mm) { 68 if (mm) {
54 if (mm->context != 0) 69 if (mm->context != 0)
@@ -65,22 +80,12 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
65{ 80{
66 unsigned long flags, sid; 81 unsigned long flags, sid;
67 82
68 /* For one page, it's not worth testing the split_tlb variable */
69
70 mb();
71 sid = vma->vm_mm->context; 83 sid = vma->vm_mm->context;
72 purge_tlb_start(flags); 84 purge_tlb_start(flags);
73 mtsp(sid, 1); 85 mtsp(sid, 1);
74 pdtlb(addr); 86 pdtlb(addr);
75 pitlb(addr); 87 if (unlikely(split_tlb))
88 pitlb(addr);
76 purge_tlb_end(flags); 89 purge_tlb_end(flags);
77} 90}
78
79void __flush_tlb_range(unsigned long sid,
80 unsigned long start, unsigned long end);
81
82#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)
83
84#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)
85
86#endif 91#endif
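flush_tlb_range() and flush_tlb_kernel_range() are now thin wrappers over __flush_tlb_range(), which compares the range size against the boot-time calibrated threshold and returns 1 when it fell back to a whole-TLB flush. A sketch of that policy, with the purge instructions stubbed out in comments:

#define PAGE_SIZE_SK 4096UL

static int flush_range_sketch(unsigned long start, unsigned long end,
                              unsigned long threshold)
{
    if (end - start >= threshold) {
        /* flush_tlb_all(): cheaper than a long run of broadcast purges */
        return 1;
    }
    while (start < end) {
        /* mtsp(sid, 1); pdtlb(start); plus pitlb(start) on split TLBs */
        start += PAGE_SIZE_SK;
    }
    return 0;
}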
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index f6448c7c62b5..cda6dbbe9842 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -342,12 +342,15 @@ EXPORT_SYMBOL(flush_data_cache_local);
342EXPORT_SYMBOL(flush_kernel_icache_range_asm); 342EXPORT_SYMBOL(flush_kernel_icache_range_asm);
343 343
344#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ 344#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
345int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD; 345static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
346
347#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
348static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
346 349
347void __init parisc_setup_cache_timing(void) 350void __init parisc_setup_cache_timing(void)
348{ 351{
349 unsigned long rangetime, alltime; 352 unsigned long rangetime, alltime;
350 unsigned long size; 353 unsigned long size, start;
351 354
352 alltime = mfctl(16); 355 alltime = mfctl(16);
353 flush_data_cache(); 356 flush_data_cache();
@@ -364,14 +367,43 @@ void __init parisc_setup_cache_timing(void)
364 /* Racy, but if we see an intermediate value, it's ok too... */ 367 /* Racy, but if we see an intermediate value, it's ok too... */
365 parisc_cache_flush_threshold = size * alltime / rangetime; 368 parisc_cache_flush_threshold = size * alltime / rangetime;
366 369
367 parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1); 370 parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
368 if (!parisc_cache_flush_threshold) 371 if (!parisc_cache_flush_threshold)
369 parisc_cache_flush_threshold = FLUSH_THRESHOLD; 372 parisc_cache_flush_threshold = FLUSH_THRESHOLD;
370 373
371 if (parisc_cache_flush_threshold > cache_info.dc_size) 374 if (parisc_cache_flush_threshold > cache_info.dc_size)
372 parisc_cache_flush_threshold = cache_info.dc_size; 375 parisc_cache_flush_threshold = cache_info.dc_size;
373 376
374 printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus()); 377 printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
378 parisc_cache_flush_threshold/1024);
379
380 /* calculate TLB flush threshold */
381
382 alltime = mfctl(16);
383 flush_tlb_all();
384 alltime = mfctl(16) - alltime;
385
386 size = PAGE_SIZE;
387 start = (unsigned long) _text;
388 rangetime = mfctl(16);
389 while (start < (unsigned long) _end) {
390 flush_tlb_kernel_range(start, start + PAGE_SIZE);
391 start += PAGE_SIZE;
392 size += PAGE_SIZE;
393 }
394 rangetime = mfctl(16) - rangetime;
395
396 printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
397 alltime, size, rangetime);
398
399 parisc_tlb_flush_threshold = size * alltime / rangetime;
400 parisc_tlb_flush_threshold *= num_online_cpus();
401 parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
402 if (!parisc_tlb_flush_threshold)
403 parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
404
405 printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
406 parisc_tlb_flush_threshold/1024);
375} 407}
376 408
377extern void purge_kernel_dcache_page_asm(unsigned long); 409extern void purge_kernel_dcache_page_asm(unsigned long);
@@ -403,48 +435,45 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
403} 435}
404EXPORT_SYMBOL(copy_user_page); 436EXPORT_SYMBOL(copy_user_page);
405 437
406void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) 438/* __flush_tlb_range()
407{ 439 *
408 unsigned long flags; 440 * returns 1 if all TLBs were flushed.
409 441 */
410 /* Note: purge_tlb_entries can be called at startup with 442int __flush_tlb_range(unsigned long sid, unsigned long start,
411 no context. */ 443 unsigned long end)
412
413 purge_tlb_start(flags);
414 mtsp(mm->context, 1);
415 pdtlb(addr);
416 pitlb(addr);
417 purge_tlb_end(flags);
418}
419EXPORT_SYMBOL(purge_tlb_entries);
420
421void __flush_tlb_range(unsigned long sid, unsigned long start,
422 unsigned long end)
423{ 444{
424 unsigned long npages; 445 unsigned long flags, size;
425 446
426 npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 447 size = (end - start);
427 if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */ 448 if (size >= parisc_tlb_flush_threshold) {
428 flush_tlb_all(); 449 flush_tlb_all();
429 else { 450 return 1;
430 unsigned long flags; 451 }
431 452
453 /* Purge TLB entries for small ranges using the pdtlb and
454 pitlb instructions. These instructions execute locally
455 but cause a purge request to be broadcast to other TLBs. */
456 if (likely(!split_tlb)) {
457 while (start < end) {
458 purge_tlb_start(flags);
459 mtsp(sid, 1);
460 pdtlb(start);
461 purge_tlb_end(flags);
462 start += PAGE_SIZE;
463 }
464 return 0;
465 }
466
467 /* split TLB case */
468 while (start < end) {
432 purge_tlb_start(flags); 469 purge_tlb_start(flags);
433 mtsp(sid, 1); 470 mtsp(sid, 1);
434 if (split_tlb) { 471 pdtlb(start);
435 while (npages--) { 472 pitlb(start);
436 pdtlb(start);
437 pitlb(start);
438 start += PAGE_SIZE;
439 }
440 } else {
441 while (npages--) {
442 pdtlb(start);
443 start += PAGE_SIZE;
444 }
445 }
446 purge_tlb_end(flags); 473 purge_tlb_end(flags);
474 start += PAGE_SIZE;
447 } 475 }
476 return 0;
448} 477}
449 478
450static void cacheflush_h_tmp_function(void *dummy) 479static void cacheflush_h_tmp_function(void *dummy)
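The calibration above times one whole-TLB flush (alltime) against flushing the kernel text page by page (size bytes in rangetime cycles, both read from control register 16), then sets the break-even point at size * alltime / rangetime, scaled by the CPU count because each per-page purge broadcasts to every TLB. A worked example with invented cycle counts:

#include <stdio.h>

int main(void)
{
    unsigned long long alltime = 2000;       /* cycles: one full TLB flush */
    unsigned long long size = 4 << 20;       /* bytes purged page by page */
    unsigned long long rangetime = 80000;    /* cycles for that loop */
    unsigned long long ncpus = 2;

    unsigned long long threshold = size * alltime / rangetime * ncpus;
    printf("TLB flush threshold ~ %llu kB\n", threshold / 1024);  /* ~204 kB */
    return 0;
}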
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 75819617f93b..c5ef4081b01d 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -45,7 +45,7 @@
45 .level 2.0 45 .level 2.0
46#endif 46#endif
47 47
48 .import pa_dbit_lock,data 48 .import pa_tlb_lock,data
49 49
50 /* space_to_prot macro creates a prot id from a space id */ 50 /* space_to_prot macro creates a prot id from a space id */
51 51
@@ -420,8 +420,8 @@
420 SHLREG %r9,PxD_VALUE_SHIFT,\pmd 420 SHLREG %r9,PxD_VALUE_SHIFT,\pmd
421 extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index 421 extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
422 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ 422 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
423 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd 423 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
424 LDREG %r0(\pmd),\pte /* pmd is now pte */ 424 LDREG %r0(\pmd),\pte
425 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault 425 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
426 .endm 426 .endm
427 427
@@ -453,57 +453,53 @@
453 L2_ptep \pgd,\pte,\index,\va,\fault 453 L2_ptep \pgd,\pte,\index,\va,\fault
454 .endm 454 .endm
455 455
456 /* Acquire pa_dbit_lock lock. */ 456 /* Acquire pa_tlb_lock lock and recheck page is still present. */
457 .macro dbit_lock spc,tmp,tmp1 457 .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
458#ifdef CONFIG_SMP 458#ifdef CONFIG_SMP
459 cmpib,COND(=),n 0,\spc,2f 459 cmpib,COND(=),n 0,\spc,2f
460 load32 PA(pa_dbit_lock),\tmp 460 load32 PA(pa_tlb_lock),\tmp
4611: LDCW 0(\tmp),\tmp1 4611: LDCW 0(\tmp),\tmp1
462 cmpib,COND(=) 0,\tmp1,1b 462 cmpib,COND(=) 0,\tmp1,1b
463 nop 463 nop
464 LDREG 0(\ptp),\pte
465 bb,<,n \pte,_PAGE_PRESENT_BIT,2f
466 b \fault
467 stw \spc,0(\tmp)
4642: 4682:
465#endif 469#endif
466 .endm 470 .endm
467 471
468 /* Release pa_dbit_lock lock without reloading lock address. */ 472 /* Release pa_tlb_lock lock without reloading lock address. */
469 .macro dbit_unlock0 spc,tmp 473 .macro tlb_unlock0 spc,tmp
470#ifdef CONFIG_SMP 474#ifdef CONFIG_SMP
471 or,COND(=) %r0,\spc,%r0 475 or,COND(=) %r0,\spc,%r0
472 stw \spc,0(\tmp) 476 stw \spc,0(\tmp)
473#endif 477#endif
474 .endm 478 .endm
475 479
476 /* Release pa_dbit_lock lock. */ 480 /* Release pa_tlb_lock lock. */
477 .macro dbit_unlock1 spc,tmp 481 .macro tlb_unlock1 spc,tmp
478#ifdef CONFIG_SMP 482#ifdef CONFIG_SMP
479 load32 PA(pa_dbit_lock),\tmp 483 load32 PA(pa_tlb_lock),\tmp
480 dbit_unlock0 \spc,\tmp 484 tlb_unlock0 \spc,\tmp
481#endif 485#endif
482 .endm 486 .endm
483 487
484 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and 488 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
485 * don't needlessly dirty the cache line if it was already set */ 489 * don't needlessly dirty the cache line if it was already set */
486 .macro update_ptep spc,ptep,pte,tmp,tmp1 490 .macro update_accessed ptp,pte,tmp,tmp1
487#ifdef CONFIG_SMP
488 or,COND(=) %r0,\spc,%r0
489 LDREG 0(\ptep),\pte
490#endif
491 ldi _PAGE_ACCESSED,\tmp1 491 ldi _PAGE_ACCESSED,\tmp1
492 or \tmp1,\pte,\tmp 492 or \tmp1,\pte,\tmp
493 and,COND(<>) \tmp1,\pte,%r0 493 and,COND(<>) \tmp1,\pte,%r0
494 STREG \tmp,0(\ptep) 494 STREG \tmp,0(\ptp)
495 .endm 495 .endm
496 496
497 /* Set the dirty bit (and accessed bit). No need to be 497 /* Set the dirty bit (and accessed bit). No need to be
498 * clever, this is only used from the dirty fault */ 498 * clever, this is only used from the dirty fault */
499 .macro update_dirty spc,ptep,pte,tmp 499 .macro update_dirty ptp,pte,tmp
500#ifdef CONFIG_SMP
501 or,COND(=) %r0,\spc,%r0
502 LDREG 0(\ptep),\pte
503#endif
504 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp 500 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
505 or \tmp,\pte,\pte 501 or \tmp,\pte,\pte
506 STREG \pte,0(\ptep) 502 STREG \pte,0(\ptp)
507 .endm 503 .endm
508 504
509 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE) 505 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
@@ -1148,14 +1144,14 @@ dtlb_miss_20w:
1148 1144
1149 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w 1145 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1150 1146
1151 dbit_lock spc,t0,t1 1147 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
1152 update_ptep spc,ptp,pte,t0,t1 1148 update_accessed ptp,pte,t0,t1
1153 1149
1154 make_insert_tlb spc,pte,prot 1150 make_insert_tlb spc,pte,prot
1155 1151
1156 idtlbt pte,prot 1152 idtlbt pte,prot
1157 dbit_unlock1 spc,t0
1158 1153
1154 tlb_unlock1 spc,t0
1159 rfir 1155 rfir
1160 nop 1156 nop
1161 1157
@@ -1174,14 +1170,14 @@ nadtlb_miss_20w:
1174 1170
1175 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w 1171 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
1176 1172
1177 dbit_lock spc,t0,t1 1173 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
1178 update_ptep spc,ptp,pte,t0,t1 1174 update_accessed ptp,pte,t0,t1
1179 1175
1180 make_insert_tlb spc,pte,prot 1176 make_insert_tlb spc,pte,prot
1181 1177
1182 idtlbt pte,prot 1178 idtlbt pte,prot
1183 dbit_unlock1 spc,t0
1184 1179
1180 tlb_unlock1 spc,t0
1185 rfir 1181 rfir
1186 nop 1182 nop
1187 1183
@@ -1202,20 +1198,20 @@ dtlb_miss_11:
1202 1198
1203 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 1199 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1204 1200
1205 dbit_lock spc,t0,t1 1201 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11
1206 update_ptep spc,ptp,pte,t0,t1 1202 update_accessed ptp,pte,t0,t1
1207 1203
1208 make_insert_tlb_11 spc,pte,prot 1204 make_insert_tlb_11 spc,pte,prot
1209 1205
1210 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ 1206 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1211 mtsp spc,%sr1 1207 mtsp spc,%sr1
1212 1208
1213 idtlba pte,(%sr1,va) 1209 idtlba pte,(%sr1,va)
1214 idtlbp prot,(%sr1,va) 1210 idtlbp prot,(%sr1,va)
1215 1211
1216 mtsp t0, %sr1 /* Restore sr1 */ 1212 mtsp t1, %sr1 /* Restore sr1 */
1217 dbit_unlock1 spc,t0
1218 1213
1214 tlb_unlock1 spc,t0
1219 rfir 1215 rfir
1220 nop 1216 nop
1221 1217
@@ -1235,21 +1231,20 @@ nadtlb_miss_11:
1235 1231
1236 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 1232 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
1237 1233
1238 dbit_lock spc,t0,t1 1234 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11
1239 update_ptep spc,ptp,pte,t0,t1 1235 update_accessed ptp,pte,t0,t1
1240 1236
1241 make_insert_tlb_11 spc,pte,prot 1237 make_insert_tlb_11 spc,pte,prot
1242 1238
1243 1239 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1244 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1245 mtsp spc,%sr1 1240 mtsp spc,%sr1
1246 1241
1247 idtlba pte,(%sr1,va) 1242 idtlba pte,(%sr1,va)
1248 idtlbp prot,(%sr1,va) 1243 idtlbp prot,(%sr1,va)
1249 1244
1250 mtsp t0, %sr1 /* Restore sr1 */ 1245 mtsp t1, %sr1 /* Restore sr1 */
1251 dbit_unlock1 spc,t0
1252 1246
1247 tlb_unlock1 spc,t0
1253 rfir 1248 rfir
1254 nop 1249 nop
1255 1250
@@ -1269,16 +1264,16 @@ dtlb_miss_20:
1269 1264
1270 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 1265 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1271 1266
1272 dbit_lock spc,t0,t1 1267 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
1273 update_ptep spc,ptp,pte,t0,t1 1268 update_accessed ptp,pte,t0,t1
1274 1269
1275 make_insert_tlb spc,pte,prot 1270 make_insert_tlb spc,pte,prot
1276 1271
1277 f_extend pte,t0 1272 f_extend pte,t1
1278 1273
1279 idtlbt pte,prot 1274 idtlbt pte,prot
1280 dbit_unlock1 spc,t0
1281 1275
1276 tlb_unlock1 spc,t0
1282 rfir 1277 rfir
1283 nop 1278 nop
1284 1279
@@ -1297,16 +1292,16 @@ nadtlb_miss_20:
1297 1292
1298 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 1293 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
1299 1294
1300 dbit_lock spc,t0,t1 1295 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
1301 update_ptep spc,ptp,pte,t0,t1 1296 update_accessed ptp,pte,t0,t1
1302 1297
1303 make_insert_tlb spc,pte,prot 1298 make_insert_tlb spc,pte,prot
1304 1299
1305 f_extend pte,t0 1300 f_extend pte,t1
1306 1301
1307 idtlbt pte,prot 1302 idtlbt pte,prot
1308 dbit_unlock1 spc,t0
1309 1303
1304 tlb_unlock1 spc,t0
1310 rfir 1305 rfir
1311 nop 1306 nop
1312 1307
@@ -1406,14 +1401,14 @@ itlb_miss_20w:
1406 1401
1407 L3_ptep ptp,pte,t0,va,itlb_fault 1402 L3_ptep ptp,pte,t0,va,itlb_fault
1408 1403
1409 dbit_lock spc,t0,t1 1404 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1410 update_ptep spc,ptp,pte,t0,t1 1405 update_accessed ptp,pte,t0,t1
1411 1406
1412 make_insert_tlb spc,pte,prot 1407 make_insert_tlb spc,pte,prot
1413 1408
1414 iitlbt pte,prot 1409 iitlbt pte,prot
1415 dbit_unlock1 spc,t0
1416 1410
1411 tlb_unlock1 spc,t0
1417 rfir 1412 rfir
1418 nop 1413 nop
1419 1414
@@ -1430,14 +1425,14 @@ naitlb_miss_20w:
1430 1425
1431 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w 1426 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
1432 1427
1433 dbit_lock spc,t0,t1 1428 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
1434 update_ptep spc,ptp,pte,t0,t1 1429 update_accessed ptp,pte,t0,t1
1435 1430
1436 make_insert_tlb spc,pte,prot 1431 make_insert_tlb spc,pte,prot
1437 1432
1438 iitlbt pte,prot 1433 iitlbt pte,prot
1439 dbit_unlock1 spc,t0
1440 1434
1435 tlb_unlock1 spc,t0
1441 rfir 1436 rfir
1442 nop 1437 nop
1443 1438
@@ -1458,20 +1453,20 @@ itlb_miss_11:
1458 1453
1459 L2_ptep ptp,pte,t0,va,itlb_fault 1454 L2_ptep ptp,pte,t0,va,itlb_fault
1460 1455
1461 dbit_lock spc,t0,t1 1456 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1462 update_ptep spc,ptp,pte,t0,t1 1457 update_accessed ptp,pte,t0,t1
1463 1458
1464 make_insert_tlb_11 spc,pte,prot 1459 make_insert_tlb_11 spc,pte,prot
1465 1460
1466 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ 1461 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1467 mtsp spc,%sr1 1462 mtsp spc,%sr1
1468 1463
1469 iitlba pte,(%sr1,va) 1464 iitlba pte,(%sr1,va)
1470 iitlbp prot,(%sr1,va) 1465 iitlbp prot,(%sr1,va)
1471 1466
1472 mtsp t0, %sr1 /* Restore sr1 */ 1467 mtsp t1, %sr1 /* Restore sr1 */
1473 dbit_unlock1 spc,t0
1474 1468
1469 tlb_unlock1 spc,t0
1475 rfir 1470 rfir
1476 nop 1471 nop
1477 1472
@@ -1482,20 +1477,20 @@ naitlb_miss_11:
1482 1477
1483 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 1478 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
1484 1479
1485 dbit_lock spc,t0,t1 1480 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
1486 update_ptep spc,ptp,pte,t0,t1 1481 update_accessed ptp,pte,t0,t1
1487 1482
1488 make_insert_tlb_11 spc,pte,prot 1483 make_insert_tlb_11 spc,pte,prot
1489 1484
1490 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ 1485 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1491 mtsp spc,%sr1 1486 mtsp spc,%sr1
1492 1487
1493 iitlba pte,(%sr1,va) 1488 iitlba pte,(%sr1,va)
1494 iitlbp prot,(%sr1,va) 1489 iitlbp prot,(%sr1,va)
1495 1490
1496 mtsp t0, %sr1 /* Restore sr1 */ 1491 mtsp t1, %sr1 /* Restore sr1 */
1497 dbit_unlock1 spc,t0
1498 1492
1493 tlb_unlock1 spc,t0
1499 rfir 1494 rfir
1500 nop 1495 nop
1501 1496
@@ -1516,16 +1511,16 @@ itlb_miss_20:
1516 1511
1517 L2_ptep ptp,pte,t0,va,itlb_fault 1512 L2_ptep ptp,pte,t0,va,itlb_fault
1518 1513
1519 dbit_lock spc,t0,t1 1514 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1520 update_ptep spc,ptp,pte,t0,t1 1515 update_accessed ptp,pte,t0,t1
1521 1516
1522 make_insert_tlb spc,pte,prot 1517 make_insert_tlb spc,pte,prot
1523 1518
1524 f_extend pte,t0 1519 f_extend pte,t1
1525 1520
1526 iitlbt pte,prot 1521 iitlbt pte,prot
1527 dbit_unlock1 spc,t0
1528 1522
1523 tlb_unlock1 spc,t0
1529 rfir 1524 rfir
1530 nop 1525 nop
1531 1526
@@ -1536,16 +1531,16 @@ naitlb_miss_20:
1536 1531
1537 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 1532 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
1538 1533
1539 dbit_lock spc,t0,t1 1534 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
1540 update_ptep spc,ptp,pte,t0,t1 1535 update_accessed ptp,pte,t0,t1
1541 1536
1542 make_insert_tlb spc,pte,prot 1537 make_insert_tlb spc,pte,prot
1543 1538
1544 f_extend pte,t0 1539 f_extend pte,t1
1545 1540
1546 iitlbt pte,prot 1541 iitlbt pte,prot
1547 dbit_unlock1 spc,t0
1548 1542
1543 tlb_unlock1 spc,t0
1549 rfir 1544 rfir
1550 nop 1545 nop
1551 1546
@@ -1568,14 +1563,14 @@ dbit_trap_20w:
1568 1563
1569 L3_ptep ptp,pte,t0,va,dbit_fault 1564 L3_ptep ptp,pte,t0,va,dbit_fault
1570 1565
1571 dbit_lock spc,t0,t1 1566 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1572 update_dirty spc,ptp,pte,t1 1567 update_dirty ptp,pte,t1
1573 1568
1574 make_insert_tlb spc,pte,prot 1569 make_insert_tlb spc,pte,prot
1575 1570
1576 idtlbt pte,prot 1571 idtlbt pte,prot
1577 dbit_unlock0 spc,t0
1578 1572
1573 tlb_unlock0 spc,t0
1579 rfir 1574 rfir
1580 nop 1575 nop
1581#else 1576#else
@@ -1588,8 +1583,8 @@ dbit_trap_11:
1588 1583
1589 L2_ptep ptp,pte,t0,va,dbit_fault 1584 L2_ptep ptp,pte,t0,va,dbit_fault
1590 1585
1591 dbit_lock spc,t0,t1 1586 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1592 update_dirty spc,ptp,pte,t1 1587 update_dirty ptp,pte,t1
1593 1588
1594 make_insert_tlb_11 spc,pte,prot 1589 make_insert_tlb_11 spc,pte,prot
1595 1590
@@ -1600,8 +1595,8 @@ dbit_trap_11:
1600 idtlbp prot,(%sr1,va) 1595 idtlbp prot,(%sr1,va)
1601 1596
1602 mtsp t1, %sr1 /* Restore sr1 */ 1597 mtsp t1, %sr1 /* Restore sr1 */
1603 dbit_unlock0 spc,t0
1604 1598
1599 tlb_unlock0 spc,t0
1605 rfir 1600 rfir
1606 nop 1601 nop
1607 1602
@@ -1612,16 +1607,16 @@ dbit_trap_20:
1612 1607
1613 L2_ptep ptp,pte,t0,va,dbit_fault 1608 L2_ptep ptp,pte,t0,va,dbit_fault
1614 1609
1615 dbit_lock spc,t0,t1 1610 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1616 update_dirty spc,ptp,pte,t1 1611 update_dirty ptp,pte,t1
1617 1612
1618 make_insert_tlb spc,pte,prot 1613 make_insert_tlb spc,pte,prot
1619 1614
1620 f_extend pte,t1 1615 f_extend pte,t1
1621 1616
1622 idtlbt pte,prot 1617 idtlbt pte,prot
1623 dbit_unlock0 spc,t0
1624 1618
1619 tlb_unlock0 spc,t0
1625 rfir 1620 rfir
1626 nop 1621 nop
1627#endif 1622#endif
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 6548fd1d2e62..b99b39f1da02 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -43,10 +43,6 @@
43 43
44#include "../math-emu/math-emu.h" /* for handle_fpe() */ 44#include "../math-emu/math-emu.h" /* for handle_fpe() */
45 45
46#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
47DEFINE_SPINLOCK(pa_dbit_lock);
48#endif
49
50static void parisc_show_stack(struct task_struct *task, unsigned long *sp, 46static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
51 struct pt_regs *regs); 47 struct pt_regs *regs);
52 48
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index ccde8f084ce4..112ccf497562 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -52,6 +52,22 @@
52 .text 52 .text
53 53
54/* 54/*
55 * Used by threads when the lock bit of core_idle_state is set.
56 * Threads will spin in HMT_LOW until the lock bit is cleared.
57 * r14 - pointer to core_idle_state
58 * r15 - used to load contents of core_idle_state
59 */
60
61core_idle_lock_held:
62 HMT_LOW
633: lwz r15,0(r14)
64 andi. r15,r15,PNV_CORE_IDLE_LOCK_BIT
65 bne 3b
66 HMT_MEDIUM
67 lwarx r15,0,r14
68 blr
69
70/*
55 * Pass requested state in r3: 71 * Pass requested state in r3:
56 * r3 - PNV_THREAD_NAP/SLEEP/WINKLE 72 * r3 - PNV_THREAD_NAP/SLEEP/WINKLE
57 * 73 *
@@ -150,6 +166,10 @@ power7_enter_nap_mode:
150 ld r14,PACA_CORE_IDLE_STATE_PTR(r13) 166 ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
151lwarx_loop1: 167lwarx_loop1:
152 lwarx r15,0,r14 168 lwarx r15,0,r14
169
170 andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
171 bnel core_idle_lock_held
172
153 andc r15,r15,r7 /* Clear thread bit */ 173 andc r15,r15,r7 /* Clear thread bit */
154 174
155 andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS 175 andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS
@@ -294,7 +314,7 @@ lwarx_loop2:
294 * workaround undo code or resyncing timebase or restoring context 314 * workaround undo code or resyncing timebase or restoring context
295 * In either case loop until the lock bit is cleared. 315 * In either case loop until the lock bit is cleared.
296 */ 316 */
297 bne core_idle_lock_held 317 bnel core_idle_lock_held
298 318
299 cmpwi cr2,r15,0 319 cmpwi cr2,r15,0
300 lbz r4,PACA_SUBCORE_SIBLING_MASK(r13) 320 lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)
@@ -319,15 +339,6 @@ lwarx_loop2:
319 isync 339 isync
320 b common_exit 340 b common_exit
321 341
322core_idle_lock_held:
323 HMT_LOW
324core_idle_lock_loop:
325 lwz r15,0(14)
326 andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
327 bne core_idle_lock_loop
328 HMT_MEDIUM
329 b lwarx_loop2
330
331first_thread_in_subcore: 342first_thread_in_subcore:
332 /* First thread in subcore to wakeup */ 343 /* First thread in subcore to wakeup */
333 ori r15,r15,PNV_CORE_IDLE_LOCK_BIT 344 ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
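Moving core_idle_lock_held above both users and entering it with bnel (branch-and-link if not equal) turns it into a shared subroutine: it spins at low thread priority until the lock bit clears, restores medium priority, re-loads core_idle_state with lwarx to establish a fresh reservation, and returns so the caller's loop can retry its update. A C-flavoured sketch of the same protocol, with an atomic load standing in for lwarx and an invented lock-bit value:

#include <stdatomic.h>

#define CORE_IDLE_LOCK_BIT 0x100    /* illustrative value */

static unsigned int core_idle_lock_held_sketch(atomic_uint *state)
{
    unsigned int s;

    do {
        /* HMT_LOW: lower this thread's priority while spinning */
        s = atomic_load(state);
    } while (s & CORE_IDLE_LOCK_BIT);
    /* HMT_MEDIUM, then lwarx: reload and return to the retry loop */
    return atomic_load(state);
}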
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 6530f1b8874d..37de90f8a845 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -297,6 +297,8 @@ long machine_check_early(struct pt_regs *regs)
297 297
298 __this_cpu_inc(irq_stat.mce_exceptions); 298 __this_cpu_inc(irq_stat.mce_exceptions);
299 299
300 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
301
300 if (cur_cpu_spec && cur_cpu_spec->machine_check_early) 302 if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
301 handled = cur_cpu_spec->machine_check_early(regs); 303 handled = cur_cpu_spec->machine_check_early(regs);
302 return handled; 304 return handled;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 6d535973b200..a67c6d781c52 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -529,6 +529,10 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
529 printk(KERN_ALERT "Unable to handle kernel paging request for " 529 printk(KERN_ALERT "Unable to handle kernel paging request for "
530 "instruction fetch\n"); 530 "instruction fetch\n");
531 break; 531 break;
532 case 0x600:
533 printk(KERN_ALERT "Unable to handle kernel paging request for "
534 "unaligned access at address 0x%08lx\n", regs->dar);
535 break;
532 default: 536 default:
533 printk(KERN_ALERT "Unable to handle kernel paging request for " 537 printk(KERN_ALERT "Unable to handle kernel paging request for "
534 "unknown fault\n"); 538 "unknown fault\n");
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index ec2eb20631d1..df956295c2a7 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -320,6 +320,8 @@ static struct attribute *device_str_attr_create_(char *name, char *str)
320 if (!attr) 320 if (!attr)
321 return NULL; 321 return NULL;
322 322
323 sysfs_attr_init(&attr->attr.attr);
324
323 attr->var = str; 325 attr->var = str;
324 attr->attr.attr.name = name; 326 attr->attr.attr.name = name;
325 attr->attr.attr.mode = 0444; 327 attr->attr.attr.mode = 0444;
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 4949ef0d9400..37f959bf392e 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -237,7 +237,7 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
237 return elog; 237 return elog;
238} 238}
239 239
240static void elog_work_fn(struct work_struct *work) 240static irqreturn_t elog_event(int irq, void *data)
241{ 241{
242 __be64 size; 242 __be64 size;
243 __be64 id; 243 __be64 id;
@@ -251,7 +251,7 @@ static void elog_work_fn(struct work_struct *work)
251 rc = opal_get_elog_size(&id, &size, &type); 251 rc = opal_get_elog_size(&id, &size, &type);
252 if (rc != OPAL_SUCCESS) { 252 if (rc != OPAL_SUCCESS) {
253 pr_err("ELOG: OPAL log info read failed\n"); 253 pr_err("ELOG: OPAL log info read failed\n");
254 return; 254 return IRQ_HANDLED;
255 } 255 }
256 256
257 elog_size = be64_to_cpu(size); 257 elog_size = be64_to_cpu(size);
@@ -270,16 +270,10 @@ static void elog_work_fn(struct work_struct *work)
270 * entries. 270 * entries.
271 */ 271 */
272 if (kset_find_obj(elog_kset, name)) 272 if (kset_find_obj(elog_kset, name))
273 return; 273 return IRQ_HANDLED;
274 274
275 create_elog_obj(log_id, elog_size, elog_type); 275 create_elog_obj(log_id, elog_size, elog_type);
276}
277
278static DECLARE_WORK(elog_work, elog_work_fn);
279 276
280static irqreturn_t elog_event(int irq, void *data)
281{
282 schedule_work(&elog_work);
283 return IRQ_HANDLED; 277 return IRQ_HANDLED;
284} 278}
285 279
@@ -304,8 +298,8 @@ int __init opal_elog_init(void)
304 return irq; 298 return irq;
305 } 299 }
306 300
307 rc = request_irq(irq, elog_event, 301 rc = request_threaded_irq(irq, NULL, elog_event,
308 IRQ_TYPE_LEVEL_HIGH, "opal-elog", NULL); 302 IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "opal-elog", NULL);
309 if (rc) { 303 if (rc) {
310 pr_err("%s: Can't request OPAL event irq (%d)\n", 304 pr_err("%s: Can't request OPAL event irq (%d)\n",
311 __func__, rc); 305 __func__, rc);
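The elog rework above removes the schedule_work() bounce entirely: with request_threaded_irq(), a NULL hard handler plus IRQF_ONESHOT keeps the interrupt masked while elog_event() runs in its own kernel thread, where sleeping (allocations, OPAL calls, sysfs work) is allowed. A minimal kernel-style sketch of the registration pattern, with placeholder names:

#include <linux/interrupt.h>

static irqreturn_t my_event(int irq, void *data)
{
    /* runs in a kernel thread: sleeping is fine here */
    return IRQ_HANDLED;
}

static int register_event_irq(unsigned int irq)
{
    /* NULL hard handler + IRQF_ONESHOT: the line stays masked until
     * my_event() returns, so the thread cannot be re-entered. */
    return request_threaded_irq(irq, NULL, my_event,
                                IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
                                "my-event", NULL);
}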
diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
index 46cb3feb0a13..4ece8e40dd54 100644
--- a/arch/powerpc/platforms/powernv/opal-prd.c
+++ b/arch/powerpc/platforms/powernv/opal-prd.c
@@ -112,6 +112,7 @@ static int opal_prd_open(struct inode *inode, struct file *file)
112static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma) 112static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
113{ 113{
114 size_t addr, size; 114 size_t addr, size;
115 pgprot_t page_prot;
115 int rc; 116 int rc;
116 117
117 pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n", 118 pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n",
@@ -125,13 +126,11 @@ static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
125 if (!opal_prd_range_is_valid(addr, size)) 126 if (!opal_prd_range_is_valid(addr, size))
126 return -EINVAL; 127 return -EINVAL;
127 128
128 vma->vm_page_prot = __pgprot(pgprot_val(phys_mem_access_prot(file, 129 page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
129 vma->vm_pgoff, 130 size, vma->vm_page_prot);
130 size, vma->vm_page_prot))
131 | _PAGE_SPECIAL);
132 131
133 rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, 132 rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
134 vma->vm_page_prot); 133 page_prot);
135 134
136 return rc; 135 return rc;
137} 136}
diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
index 2bc33674ebfc..87f9623ca805 100644
--- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
@@ -18,6 +18,7 @@
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/semaphore.h> 19#include <linux/semaphore.h>
20#include <asm/msi_bitmap.h> 20#include <asm/msi_bitmap.h>
21#include <asm/ppc-pci.h>
21 22
22struct ppc4xx_hsta_msi { 23struct ppc4xx_hsta_msi {
23 struct device *dev; 24 struct device *dev;
diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c
index 88c7016492c4..97bbb6060b25 100644
--- a/arch/tile/lib/memcpy_user_64.c
+++ b/arch/tile/lib/memcpy_user_64.c
@@ -28,7 +28,7 @@
28#define _ST(p, inst, v) \ 28#define _ST(p, inst, v) \
29 ({ \ 29 ({ \
30 asm("1: " #inst " %0, %1;" \ 30 asm("1: " #inst " %0, %1;" \
31 ".pushsection .coldtext.memcpy,\"ax\";" \ 31 ".pushsection .coldtext,\"ax\";" \
32 "2: { move r0, %2; jrp lr };" \ 32 "2: { move r0, %2; jrp lr };" \
33 ".section __ex_table,\"a\";" \ 33 ".section __ex_table,\"a\";" \
34 ".align 8;" \ 34 ".align 8;" \
@@ -41,7 +41,7 @@
41 ({ \ 41 ({ \
42 unsigned long __v; \ 42 unsigned long __v; \
43 asm("1: " #inst " %0, %1;" \ 43 asm("1: " #inst " %0, %1;" \
44 ".pushsection .coldtext.memcpy,\"ax\";" \ 44 ".pushsection .coldtext,\"ax\";" \
45 "2: { move r0, %2; jrp lr };" \ 45 "2: { move r0, %2; jrp lr };" \
46 ".section __ex_table,\"a\";" \ 46 ".section __ex_table,\"a\";" \
47 ".align 8;" \ 47 ".align 8;" \
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 55bced17dc95..3dbb7e7909ca 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -254,6 +254,11 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING
254config ARCH_SUPPORTS_DEBUG_PAGEALLOC 254config ARCH_SUPPORTS_DEBUG_PAGEALLOC
255 def_bool y 255 def_bool y
256 256
257config KASAN_SHADOW_OFFSET
258 hex
259 depends on KASAN
260 default 0xdffffc0000000000
261
257config HAVE_INTEL_TXT 262config HAVE_INTEL_TXT
258 def_bool y 263 def_bool y
259 depends on INTEL_IOMMU && ACPI 264 depends on INTEL_IOMMU && ACPI
@@ -2015,7 +2020,7 @@ config CMDLINE_BOOL
2015 2020
2016 To compile command line arguments into the kernel, 2021 To compile command line arguments into the kernel,
2017 set this option to 'Y', then fill in the 2022 set this option to 'Y', then fill in the
2018 the boot arguments in CONFIG_CMDLINE. 2023 boot arguments in CONFIG_CMDLINE.
2019 2024
2020 Systems with fully functional boot loaders (i.e. non-embedded) 2025 Systems with fully functional boot loaders (i.e. non-embedded)
2021 should leave this option set to 'N'. 2026 should leave this option set to 'N'.
diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h
index 99efebb2f69d..ca3ce9ab9385 100644
--- a/arch/x86/include/asm/espfix.h
+++ b/arch/x86/include/asm/espfix.h
@@ -9,7 +9,7 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
9DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr); 9DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
10 10
11extern void init_espfix_bsp(void); 11extern void init_espfix_bsp(void);
12extern void init_espfix_ap(void); 12extern void init_espfix_ap(int cpu);
13 13
14#endif /* CONFIG_X86_64 */ 14#endif /* CONFIG_X86_64 */
15 15
diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
index 8b22422fbad8..74a2a8dc9908 100644
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -14,15 +14,11 @@
 
 #ifndef __ASSEMBLY__
 
-extern pte_t kasan_zero_pte[];
-extern pte_t kasan_zero_pmd[];
-extern pte_t kasan_zero_pud[];
-
 #ifdef CONFIG_KASAN
-void __init kasan_map_early_shadow(pgd_t *pgd);
+void __init kasan_early_init(void);
 void __init kasan_init(void);
 #else
-static inline void kasan_map_early_shadow(pgd_t *pgd) { }
+static inline void kasan_early_init(void) { }
 static inline void kasan_init(void) { }
 #endif
 
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 28eba2d38b15..f813261d9740 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -409,12 +409,6 @@ static void __setup_vector_irq(int cpu)
 	int irq, vector;
 	struct apic_chip_data *data;
 
-	/*
-	 * vector_lock will make sure that we don't run into irq vector
-	 * assignments that might be happening on another cpu in parallel,
-	 * while we setup our initial vector to irq mappings.
-	 */
-	raw_spin_lock(&vector_lock);
 	/* Mark the inuse vectors */
 	for_each_active_irq(irq) {
 		data = apic_chip_data(irq_get_irq_data(irq));
@@ -436,16 +430,16 @@ static void __setup_vector_irq(int cpu)
 		if (!cpumask_test_cpu(cpu, data->domain))
 			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
 	}
-	raw_spin_unlock(&vector_lock);
 }
 
 /*
- * Setup the vector to irq mappings.
+ * Setup the vector to irq mappings. Must be called with vector_lock held.
  */
 void setup_vector_irq(int cpu)
 {
 	int irq;
 
+	lockdep_assert_held(&vector_lock);
 	/*
 	 * On most of the platforms, legacy PIC delivers the interrupts on the
 	 * boot cpu. But there are certain platforms where PIC interrupts are
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 89427d8d4fc5..eec40f595ab9 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -175,7 +175,9 @@ static __init void early_serial_init(char *s)
 	}
 
 	if (*s) {
-		if (kstrtoul(s, 0, &baud) < 0 || baud == 0)
+		baud = simple_strtoull(s, &e, 0);
+
+		if (baud == 0 || s == e)
 			baud = DEFAULT_BAUD;
 	}
 
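
[Editor's note, not part of the patch: the move from kstrtoul() back to simple_strtoull() appears motivated by option strings that carry characters after the baud rate (e.g. "115200n8"); a strict parser rejects the whole string, while simple_strtoull() stops at the first non-digit and reports where it stopped. A user-space sketch of the distinction, with invented values; strtoull() stands in for the kernel helper:]

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *s = "115200n8";	/* baud followed by mode chars */
	char *e;
	unsigned long long baud = strtoull(s, &e, 0);

	if (baud == 0 || s == e)	/* mirrors the patched fallback */
		baud = 9600;		/* stand-in for DEFAULT_BAUD */
	printf("baud=%llu trailing=\"%s\"\n", baud, e);
	return 0;
}
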
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index f5d0730e7b08..ce95676abd60 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -131,25 +131,24 @@ void __init init_espfix_bsp(void)
 	init_espfix_random();
 
 	/* The rest is the same as for any other processor */
-	init_espfix_ap();
+	init_espfix_ap(0);
 }
 
-void init_espfix_ap(void)
+void init_espfix_ap(int cpu)
 {
-	unsigned int cpu, page;
+	unsigned int page;
 	unsigned long addr;
 	pud_t pud, *pud_p;
 	pmd_t pmd, *pmd_p;
 	pte_t pte, *pte_p;
-	int n;
+	int n, node;
 	void *stack_page;
 	pteval_t ptemask;
 
 	/* We only have to do this once... */
-	if (likely(this_cpu_read(espfix_stack)))
+	if (likely(per_cpu(espfix_stack, cpu)))
 		return;		/* Already initialized */
 
-	cpu = smp_processor_id();
 	addr = espfix_base_addr(cpu);
 	page = cpu/ESPFIX_STACKS_PER_PAGE;
 
@@ -165,12 +164,15 @@ void init_espfix_ap(void)
 	if (stack_page)
 		goto unlock_done;
 
+	node = cpu_to_node(cpu);
 	ptemask = __supported_pte_mask;
 
 	pud_p = &espfix_pud_page[pud_index(addr)];
 	pud = *pud_p;
 	if (!pud_present(pud)) {
-		pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
+		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
+
+		pmd_p = (pmd_t *)page_address(page);
 		pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
 		paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
 		for (n = 0; n < ESPFIX_PUD_CLONES; n++)
@@ -180,7 +182,9 @@ void init_espfix_ap(void)
 	pmd_p = pmd_offset(&pud, addr);
 	pmd = *pmd_p;
 	if (!pmd_present(pmd)) {
-		pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
+		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
+
+		pte_p = (pte_t *)page_address(page);
 		pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
 		paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
 		for (n = 0; n < ESPFIX_PMD_CLONES; n++)
@@ -188,7 +192,7 @@ void init_espfix_ap(void)
 	}
 
 	pte_p = pte_offset_kernel(&pmd, addr);
-	stack_page = (void *)__get_free_page(GFP_KERNEL);
+	stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
 	pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
 	for (n = 0; n < ESPFIX_PTE_CLONES; n++)
 		set_pte(&pte_p[n*PTE_STRIDE], pte);
@@ -199,7 +203,7 @@ void init_espfix_ap(void)
 unlock_done:
 	mutex_unlock(&espfix_init_mutex);
 done:
-	this_cpu_write(espfix_stack, addr);
-	this_cpu_write(espfix_waddr, (unsigned long)stack_page
-		       + (addr & ~PAGE_MASK));
+	per_cpu(espfix_stack, cpu) = addr;
+	per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page
+				     + (addr & ~PAGE_MASK);
 }
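
[Editor's note, not part of the patch: the hunks above move espfix setup for an AP onto the CPU doing the bring-up, which is why every this_cpu accessor becomes per_cpu(..., cpu) and the page-table pages come from the target CPU's NUMA node. A user-space model of addressing another CPU's slot by explicit index; all names below are invented for the sketch:]

#include <stdio.h>

#define NR_CPUS 4

static unsigned long espfix_stack_of[NR_CPUS];	/* models per-CPU state */

static void init_espfix_for(int cpu, unsigned long addr)
{
	if (espfix_stack_of[cpu])	/* already initialized */
		return;
	espfix_stack_of[cpu] = addr;	/* boot CPU fills in for the AP */
}

int main(void)
{
	init_espfix_for(1, 0xff5fd000UL);	/* before CPU 1 ever runs */
	printf("cpu1 espfix stack = %#lx\n", espfix_stack_of[1]);
	return 0;
}
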
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 5a4668136e98..f129a9af6357 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -161,11 +161,12 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 	/* Kill off the identity-map trampoline */
 	reset_early_page_tables();
 
-	kasan_map_early_shadow(early_level4_pgt);
-
-	/* clear bss before set_intr_gate with early_idt_handler */
 	clear_bss();
 
+	clear_page(init_level4_pgt);
+
+	kasan_early_init();
+
 	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
 		set_intr_gate(i, early_idt_handler_array[i]);
 	load_idt((const struct desc_ptr *)&idt_descr);
@@ -177,12 +178,9 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 	 */
 	load_ucode_bsp();
 
-	clear_page(init_level4_pgt);
 	/* set init_level4_pgt kernel high mapping*/
 	init_level4_pgt[511] = early_level4_pgt[511];
 
-	kasan_map_early_shadow(init_level4_pgt);
-
 	x86_64_start_reservations(real_mode_data);
 }
 
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index e5c27f729a38..1d40ca8a73f2 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -516,38 +516,9 @@ ENTRY(phys_base)
 	/* This must match the first entry in level2_kernel_pgt */
 	.quad   0x0000000000000000
 
-#ifdef CONFIG_KASAN
-#define FILL(VAL, COUNT) \
-	.rept (COUNT) ; \
-	.quad (VAL) ; \
-	.endr
-
-NEXT_PAGE(kasan_zero_pte)
-	FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)
-NEXT_PAGE(kasan_zero_pmd)
-	FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)
-NEXT_PAGE(kasan_zero_pud)
-	FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)
-
-#undef FILL
-#endif
-
-
 #include "../../x86/xen/xen-head.S"
 
 	__PAGE_ALIGNED_BSS
 NEXT_PAGE(empty_zero_page)
 	.skip PAGE_SIZE
 
-#ifdef CONFIG_KASAN
-/*
- * This page used as early shadow. We don't use empty_zero_page
- * at early stages, stack instrumentation could write some garbage
- * to this page.
- * Latter we reuse it as zero shadow for large ranges of memory
- * that allowed to access, but not instrumented by kasan
- * (vmalloc/vmemmap ...).
- */
-NEXT_PAGE(kasan_zero_page)
-	.skip PAGE_SIZE
-#endif
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 88b366487b0e..c7dfe1be784e 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -347,14 +347,22 @@ int check_irq_vectors_for_cpu_disable(void)
 		if (!desc)
 			continue;
 
+		/*
+		 * Protect against concurrent action removal,
+		 * affinity changes etc.
+		 */
+		raw_spin_lock(&desc->lock);
 		data = irq_desc_get_irq_data(desc);
 		cpumask_copy(&affinity_new, data->affinity);
 		cpumask_clear_cpu(this_cpu, &affinity_new);
 
 		/* Do not count inactive or per-cpu irqs. */
-		if (!irq_has_action(irq) || irqd_is_per_cpu(data))
+		if (!irq_has_action(irq) || irqd_is_per_cpu(data)) {
+			raw_spin_unlock(&desc->lock);
 			continue;
+		}
 
+		raw_spin_unlock(&desc->lock);
 		/*
 		 * A single irq may be mapped to multiple
 		 * cpu's vector_irq[] (for example IOAPIC cluster
@@ -385,6 +393,9 @@ int check_irq_vectors_for_cpu_disable(void)
 	 * vector. If the vector is marked in the used vectors
 	 * bitmap or an irq is assigned to it, we don't count
 	 * it as available.
+	 *
+	 * As this is an inaccurate snapshot anyway, we can do
+	 * this w/o holding vector_lock.
 	 */
 	for (vector = FIRST_EXTERNAL_VECTOR;
 	     vector < first_system_vector; vector++) {
@@ -486,6 +497,11 @@ void fixup_irqs(void)
 	 */
 	mdelay(1);
 
+	/*
+	 * We can walk the vector array of this cpu without holding
+	 * vector_lock because the cpu is already marked !online, so
+	 * nothing else will touch it.
+	 */
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
 		unsigned int irr;
 
@@ -497,9 +513,9 @@ void fixup_irqs(void)
 		irq = __this_cpu_read(vector_irq[vector]);
 
 		desc = irq_to_desc(irq);
+		raw_spin_lock(&desc->lock);
 		data = irq_desc_get_irq_data(desc);
 		chip = irq_data_get_irq_chip(data);
-		raw_spin_lock(&desc->lock);
 		if (chip->irq_retrigger) {
 			chip->irq_retrigger(data);
 			__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8add66b22f33..d3010aa79daf 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -171,11 +171,6 @@ static void smp_callin(void)
171 apic_ap_setup(); 171 apic_ap_setup();
172 172
173 /* 173 /*
174 * Need to setup vector mappings before we enable interrupts.
175 */
176 setup_vector_irq(smp_processor_id());
177
178 /*
179 * Save our processor parameters. Note: this information 174 * Save our processor parameters. Note: this information
180 * is needed for clock calibration. 175 * is needed for clock calibration.
181 */ 176 */
@@ -239,18 +234,13 @@ static void notrace start_secondary(void *unused)
239 check_tsc_sync_target(); 234 check_tsc_sync_target();
240 235
241 /* 236 /*
242 * Enable the espfix hack for this CPU 237 * Lock vector_lock and initialize the vectors on this cpu
243 */ 238 * before setting the cpu online. We must set it online with
244#ifdef CONFIG_X86_ESPFIX64 239 * vector_lock held to prevent a concurrent setup/teardown
245 init_espfix_ap(); 240 * from seeing a half valid vector space.
246#endif
247
248 /*
249 * We need to hold vector_lock so there the set of online cpus
250 * does not change while we are assigning vectors to cpus. Holding
251 * this lock ensures we don't half assign or remove an irq from a cpu.
252 */ 241 */
253 lock_vector_lock(); 242 lock_vector_lock();
243 setup_vector_irq(smp_processor_id());
254 set_cpu_online(smp_processor_id(), true); 244 set_cpu_online(smp_processor_id(), true);
255 unlock_vector_lock(); 245 unlock_vector_lock();
256 cpu_set_state_online(smp_processor_id()); 246 cpu_set_state_online(smp_processor_id());
@@ -854,6 +844,13 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
854 initial_code = (unsigned long)start_secondary; 844 initial_code = (unsigned long)start_secondary;
855 stack_start = idle->thread.sp; 845 stack_start = idle->thread.sp;
856 846
847 /*
848 * Enable the espfix hack for this CPU
849 */
850#ifdef CONFIG_X86_ESPFIX64
851 init_espfix_ap(cpu);
852#endif
853
857 /* So we see what's up */ 854 /* So we see what's up */
858 announce_cpu(cpu, apicid); 855 announce_cpu(cpu, apicid);
859 856
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 505449700e0c..7437b41f6a47 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -598,10 +598,19 @@ static unsigned long quick_pit_calibrate(void)
 		if (!pit_expect_msb(0xff-i, &delta, &d2))
 			break;
 
+		delta -= tsc;
+
+		/*
+		 * Extrapolate the error and fail fast if the error will
+		 * never be below 500 ppm.
+		 */
+		if (i == 1 &&
+		    d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
+			return 0;
+
 		/*
 		 * Iterate until the error is less than 500 ppm
 		 */
-		delta -= tsc;
 		if (d1+d2 >= delta >> 11)
 			continue;
 
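
[Editor's note, not part of the patch: the ">> 11" comparison is the 500 ppm criterion, since 1/2048 is roughly 488 ppm. The new early check scales the first pass's delta by the maximum iteration count: delta grows roughly linearly per iteration while the read uncertainty d1+d2 stays flat, so the best achievable final error can be projected after one pass. A standalone sketch with illustrative numbers; the MAX_QUICK_PIT_ITERATIONS value is assumed:]

#include <stdio.h>

#define MAX_QUICK_PIT_ITERATIONS 32	/* assumed for illustration */

int main(void)
{
	unsigned long long delta = 100000;	/* TSC delta after pass 1 */
	unsigned long long d1 = 40, d2 = 40;	/* read uncertainties */

	/* projected final error vs. 500 ppm (1/2048) of projected delta */
	if (d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
		printf("can never reach 500 ppm: fail fast\n");
	else
		printf("worth iterating\n");
	return 0;
}
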
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index ddf9ecb53cc3..e342586db6e4 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -20,7 +20,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 	unsigned long ret;
 
 	if (__range_not_ok(from, n, TASK_SIZE))
-		return 0;
+		return n;
 
 	/*
 	 * Even though this function is typically called from NMI/IRQ context
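
[Editor's note, not part of the patch: this one-liner matters because copy_from_user() and its variants return the number of bytes *not* copied, so a rejected range must report n, not 0 (0 would claim complete success). A user-space model of the convention:]

#include <stdio.h>
#include <string.h>

static unsigned long copy_checked(void *to, const void *from,
				  unsigned long n, int range_ok)
{
	if (!range_ok)
		return n;	/* nothing copied: all n bytes remain */
	memcpy(to, from, n);
	return 0;		/* everything copied */
}

int main(void)
{
	char dst[8], src[8] = "abcdefg";

	printf("bad range:  %lu uncopied\n", copy_checked(dst, src, 8, 0));
	printf("good range: %lu uncopied\n", copy_checked(dst, src, 8, 1));
	return 0;
}
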
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 4860906c6b9f..e1840f3db5b5 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -1,3 +1,4 @@
+#define pr_fmt(fmt) "kasan: " fmt
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
 #include <linux/kdebug.h>
@@ -11,7 +12,19 @@
 extern pgd_t early_level4_pgt[PTRS_PER_PGD];
 extern struct range pfn_mapped[E820_X_MAX];
 
-extern unsigned char kasan_zero_page[PAGE_SIZE];
+static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
+static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
+
+/*
+ * This page used as early shadow. We don't use empty_zero_page
+ * at early stages, stack instrumentation could write some garbage
+ * to this page.
+ * Latter we reuse it as zero shadow for large ranges of memory
+ * that allowed to access, but not instrumented by kasan
+ * (vmalloc/vmemmap ...).
+ */
+static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
 
 static int __init map_range(struct range *range)
 {
@@ -36,7 +49,7 @@ static void __init clear_pgds(unsigned long start,
 	pgd_clear(pgd_offset_k(start));
 }
 
-void __init kasan_map_early_shadow(pgd_t *pgd)
+static void __init kasan_map_early_shadow(pgd_t *pgd)
 {
 	int i;
 	unsigned long start = KASAN_SHADOW_START;
@@ -73,7 +86,7 @@ static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
 	while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
 		WARN_ON(!pmd_none(*pmd));
 		set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
-					| __PAGE_KERNEL_RO));
+					| _KERNPG_TABLE));
 		addr += PMD_SIZE;
 		pmd = pmd_offset(pud, addr);
 	}
@@ -99,7 +112,7 @@ static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
 	while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
 		WARN_ON(!pud_none(*pud));
 		set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
-					| __PAGE_KERNEL_RO));
+					| _KERNPG_TABLE));
 		addr += PUD_SIZE;
 		pud = pud_offset(pgd, addr);
 	}
@@ -124,7 +137,7 @@ static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
 	while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
 		WARN_ON(!pgd_none(*pgd));
 		set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
-					| __PAGE_KERNEL_RO));
+					| _KERNPG_TABLE));
 		addr += PGDIR_SIZE;
 		pgd = pgd_offset_k(addr);
 	}
@@ -166,6 +179,26 @@ static struct notifier_block kasan_die_notifier = {
 };
 #endif
 
+void __init kasan_early_init(void)
+{
+	int i;
+	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
+	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
+	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
+
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		kasan_zero_pte[i] = __pte(pte_val);
+
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		kasan_zero_pmd[i] = __pmd(pmd_val);
+
+	for (i = 0; i < PTRS_PER_PUD; i++)
+		kasan_zero_pud[i] = __pud(pud_val);
+
+	kasan_map_early_shadow(early_level4_pgt);
+	kasan_map_early_shadow(init_level4_pgt);
+}
+
 void __init kasan_init(void)
 {
 	int i;
@@ -176,6 +209,7 @@ void __init kasan_init(void)
 
 	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
 	load_cr3(early_level4_pgt);
+	__flush_tlb_all();
 
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
@@ -202,5 +236,8 @@ void __init kasan_init(void)
 	memset(kasan_zero_page, 0, PAGE_SIZE);
 
 	load_cr3(init_level4_pgt);
+	__flush_tlb_all();
 	init_task.kasan_depth = 0;
+
+	pr_info("Kernel address sanitizer initialized\n");
 }
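
[Editor's note, not part of the patch: the reason a single zeroed page plus page tables full of identical entries can shadow enormous regions is the generic KASAN mapping, one shadow byte per 8 bytes of memory. A standalone sketch using the offset from the Kconfig hunk earlier; assumes a 64-bit unsigned long:]

#include <stdio.h>

#define KASAN_SHADOW_OFFSET 0xdffffc0000000000UL	/* from Kconfig above */

static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> 3) + KASAN_SHADOW_OFFSET;	/* 1 shadow byte : 8 bytes */
}

int main(void)
{
	printf("shadow(0xffff880000000000) = %#lx\n",
	       mem_to_shadow(0xffff880000000000UL));
	return 0;
}
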
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 569ee090343f..46b58abb08c5 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -352,13 +352,16 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
 			pdata->mmio_size = resource_size(rentry->res);
 			pdata->mmio_base = ioremap(rentry->res->start,
 						   pdata->mmio_size);
-			if (!pdata->mmio_base)
-				goto err_out;
 			break;
 		}
 
 	acpi_dev_free_resource_list(&resource_list);
 
+	if (!pdata->mmio_base) {
+		ret = -ENOMEM;
+		goto err_out;
+	}
+
 	pdata->dev_desc = dev_desc;
 
 	if (dev_desc->setup)
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 2161fa178c8d..628a42c41ab1 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -18,6 +18,7 @@
 #include <linux/list.h>
 #include <linux/acpi.h>
 #include <linux/sort.h>
+#include <linux/pmem.h>
 #include <linux/io.h>
 #include "nfit.h"
 
@@ -305,6 +306,23 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc,
 	return true;
 }
 
+static bool add_flush(struct acpi_nfit_desc *acpi_desc,
+		struct acpi_nfit_flush_address *flush)
+{
+	struct device *dev = acpi_desc->dev;
+	struct nfit_flush *nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush),
+			GFP_KERNEL);
+
+	if (!nfit_flush)
+		return false;
+	INIT_LIST_HEAD(&nfit_flush->list);
+	nfit_flush->flush = flush;
+	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
+	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
+			flush->device_handle, flush->hint_count);
+	return true;
+}
+
 static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
 		const void *end)
 {
@@ -338,7 +356,8 @@ static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
 			return err;
 		break;
 	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
-		dev_dbg(dev, "%s: flush\n", __func__);
+		if (!add_flush(acpi_desc, table))
+			return err;
 		break;
 	case ACPI_NFIT_TYPE_SMBIOS:
 		dev_dbg(dev, "%s: smbios\n", __func__);
@@ -389,6 +408,7 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
 {
 	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
 	struct nfit_memdev *nfit_memdev;
+	struct nfit_flush *nfit_flush;
 	struct nfit_dcr *nfit_dcr;
 	struct nfit_bdw *nfit_bdw;
 	struct nfit_idt *nfit_idt;
@@ -442,6 +462,14 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
 			nfit_mem->idt_bdw = nfit_idt->idt;
 			break;
 		}
+
+		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
+			if (nfit_flush->flush->device_handle !=
+					nfit_memdev->memdev->device_handle)
+				continue;
+			nfit_mem->nfit_flush = nfit_flush;
+			break;
+		}
 		break;
 	}
 
@@ -978,6 +1006,24 @@ static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
 	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
 }
 
+static void wmb_blk(struct nfit_blk *nfit_blk)
+{
+
+	if (nfit_blk->nvdimm_flush) {
+		/*
+		 * The first wmb() is needed to 'sfence' all previous writes
+		 * such that they are architecturally visible for the platform
+		 * buffer flush. Note that we've already arranged for pmem
+		 * writes to avoid the cache via arch_memcpy_to_pmem(). The
+		 * final wmb() ensures ordering for the NVDIMM flush write.
+		 */
+		wmb();
+		writeq(1, nfit_blk->nvdimm_flush);
+		wmb();
+	} else
+		wmb_pmem();
+}
+
 static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
 {
 	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
@@ -1012,7 +1058,10 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
 		offset = to_interleave_offset(offset, mmio);
 
 	writeq(cmd, mmio->base + offset);
-	/* FIXME: conditionally perform read-back if mandated by firmware */
+	wmb_blk(nfit_blk);
+
+	if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
+		readq(mmio->base + offset);
 }
 
 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
@@ -1026,7 +1075,6 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
 
 	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
 		+ lane * mmio->size;
-	/* TODO: non-temporal access, flush hints, cache management etc... */
 	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
 	while (len) {
 		unsigned int c;
@@ -1045,13 +1093,19 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
 		}
 
 		if (rw)
-			memcpy(mmio->aperture + offset, iobuf + copied, c);
+			memcpy_to_pmem(mmio->aperture + offset,
+					iobuf + copied, c);
 		else
-			memcpy(iobuf + copied, mmio->aperture + offset, c);
+			memcpy_from_pmem(iobuf + copied,
+					mmio->aperture + offset, c);
 
 		copied += c;
 		len -= c;
 	}
+
+	if (rw)
+		wmb_blk(nfit_blk);
+
 	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
 	return rc;
 }
@@ -1124,7 +1178,7 @@ static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
 }
 
 static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
-		struct acpi_nfit_system_address *spa)
+		struct acpi_nfit_system_address *spa, enum spa_map_type type)
 {
 	resource_size_t start = spa->address;
 	resource_size_t n = spa->length;
@@ -1152,8 +1206,15 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
 	if (!res)
 		goto err_mem;
 
-	/* TODO: cacheability based on the spa type */
-	spa_map->iomem = ioremap_nocache(start, n);
+	if (type == SPA_MAP_APERTURE) {
+		/*
+		 * TODO: memremap_pmem() support, but that requires cache
+		 * flushing when the aperture is moved.
+		 */
+		spa_map->iomem = ioremap_wc(start, n);
+	} else
+		spa_map->iomem = ioremap_nocache(start, n);
+
 	if (!spa_map->iomem)
		goto err_map;
 
@@ -1171,6 +1232,7 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
  * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
  * @nvdimm_bus: NFIT-bus that provided the spa table entry
  * @nfit_spa: spa table to map
+ * @type: aperture or control region
  *
  * In the case where block-data-window apertures and
  * dimm-control-regions are interleaved they will end up sharing a
@@ -1180,12 +1242,12 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
  * unbound.
  */
 static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
-		struct acpi_nfit_system_address *spa)
+		struct acpi_nfit_system_address *spa, enum spa_map_type type)
 {
 	void __iomem *iomem;
 
 	mutex_lock(&acpi_desc->spa_map_mutex);
-	iomem = __nfit_spa_map(acpi_desc, spa);
+	iomem = __nfit_spa_map(acpi_desc, spa, type);
 	mutex_unlock(&acpi_desc->spa_map_mutex);
 
 	return iomem;
@@ -1206,12 +1268,35 @@ static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
 	return 0;
 }
 
+static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
+		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
+{
+	struct nd_cmd_dimm_flags flags;
+	int rc;
+
+	memset(&flags, 0, sizeof(flags));
+	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
+			sizeof(flags));
+
+	if (rc >= 0 && flags.status == 0)
+		nfit_blk->dimm_flags = flags.flags;
+	else if (rc == -ENOTTY) {
+		/* fall back to a conservative default */
+		nfit_blk->dimm_flags = ND_BLK_DCR_LATCH;
+		rc = 0;
+	} else
+		rc = -ENXIO;
+
+	return rc;
+}
+
 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
 		struct device *dev)
 {
 	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
+	struct nfit_flush *nfit_flush;
 	struct nfit_blk_mmio *mmio;
 	struct nfit_blk *nfit_blk;
 	struct nfit_mem *nfit_mem;
@@ -1223,8 +1308,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
 	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
 		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
 				nfit_mem ? "" : " nfit_mem",
-				nfit_mem->dcr ? "" : " dcr",
-				nfit_mem->bdw ? "" : " bdw");
+				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
+				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
 		return -ENXIO;
 	}
 
@@ -1237,7 +1322,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
 	/* map block aperture memory */
 	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
 	mmio = &nfit_blk->mmio[BDW];
-	mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw);
+	mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
+			SPA_MAP_APERTURE);
 	if (!mmio->base) {
 		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
 				nvdimm_name(nvdimm));
@@ -1259,7 +1345,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
 	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
 	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
 	mmio = &nfit_blk->mmio[DCR];
-	mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr);
+	mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
+			SPA_MAP_CONTROL);
 	if (!mmio->base) {
 		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
 				nvdimm_name(nvdimm));
@@ -1277,6 +1364,24 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
 		return rc;
 	}
 
+	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
+	if (rc < 0) {
+		dev_dbg(dev, "%s: %s failed get DIMM flags\n",
+				__func__, nvdimm_name(nvdimm));
+		return rc;
+	}
+
+	nfit_flush = nfit_mem->nfit_flush;
+	if (nfit_flush && nfit_flush->flush->hint_count != 0) {
+		nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
+				nfit_flush->flush->hint_address[0], 8);
+		if (!nfit_blk->nvdimm_flush)
+			return -ENOMEM;
+	}
+
+	if (!arch_has_pmem_api() && !nfit_blk->nvdimm_flush)
+		dev_warn(dev, "unable to guarantee persistence of writes\n");
+
 	if (mmio->line_size == 0)
 		return 0;
 
@@ -1459,6 +1564,7 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
 	INIT_LIST_HEAD(&acpi_desc->dcrs);
 	INIT_LIST_HEAD(&acpi_desc->bdws);
 	INIT_LIST_HEAD(&acpi_desc->idts);
+	INIT_LIST_HEAD(&acpi_desc->flushes);
 	INIT_LIST_HEAD(&acpi_desc->memdevs);
 	INIT_LIST_HEAD(&acpi_desc->dimms);
 	mutex_init(&acpi_desc->spa_map_mutex);
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index 81f2e8c5a79c..79b6d83875c1 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -40,6 +40,10 @@ enum nfit_uuids {
 	NFIT_UUID_MAX,
 };
 
+enum {
+	ND_BLK_DCR_LATCH = 2,
+};
+
 struct nfit_spa {
 	struct acpi_nfit_system_address *spa;
 	struct list_head list;
@@ -60,6 +64,11 @@ struct nfit_idt {
 	struct list_head list;
 };
 
+struct nfit_flush {
+	struct acpi_nfit_flush_address *flush;
+	struct list_head list;
+};
+
 struct nfit_memdev {
 	struct acpi_nfit_memory_map *memdev;
 	struct list_head list;
@@ -77,6 +86,7 @@ struct nfit_mem {
 	struct acpi_nfit_system_address *spa_bdw;
 	struct acpi_nfit_interleave *idt_dcr;
 	struct acpi_nfit_interleave *idt_bdw;
+	struct nfit_flush *nfit_flush;
 	struct list_head list;
 	struct acpi_device *adev;
 	unsigned long dsm_mask;
@@ -88,6 +98,7 @@ struct acpi_nfit_desc {
 	struct mutex spa_map_mutex;
 	struct list_head spa_maps;
 	struct list_head memdevs;
+	struct list_head flushes;
 	struct list_head dimms;
 	struct list_head spas;
 	struct list_head dcrs;
@@ -109,7 +120,7 @@ struct nfit_blk {
 	struct nfit_blk_mmio {
 		union {
 			void __iomem *base;
-			void *aperture;
+			void __pmem *aperture;
 		};
 		u64 size;
 		u64 base_offset;
@@ -123,6 +134,13 @@ struct nfit_blk {
 	u64 bdw_offset; /* post interleave offset */
 	u64 stat_offset;
 	u64 cmd_offset;
+	void __iomem *nvdimm_flush;
+	u32 dimm_flags;
+};
+
+enum spa_map_type {
+	SPA_MAP_CONTROL,
+	SPA_MAP_APERTURE,
 };
 
 struct nfit_spa_mapping {
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c262e4acd68d..3b8963f21b36 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -175,10 +175,14 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
 	if (!addr || !length)
 		return;
 
-	acpi_reserve_region(addr, length, gas->space_id, 0, desc);
+	/* Resources are never freed */
+	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+		request_region(addr, length, desc);
+	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+		request_mem_region(addr, length, desc);
 }
 
-static void __init acpi_reserve_resources(void)
+static int __init acpi_reserve_resources(void)
 {
 	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
 		"ACPI PM1a_EVT_BLK");
@@ -207,7 +211,10 @@ static void __init acpi_reserve_resources(void)
 	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
 		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
 			acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
+
+	return 0;
 }
+fs_initcall_sync(acpi_reserve_resources);
 
 void acpi_os_printf(const char *fmt, ...)
 {
@@ -1862,7 +1869,6 @@ acpi_status __init acpi_os_initialize(void)
 
 acpi_status __init acpi_os_initialize1(void)
 {
-	acpi_reserve_resources();
 	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
 	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
 	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 10561ce16ed1..8244f013f210 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -26,7 +26,6 @@
 #include <linux/device.h>
 #include <linux/export.h>
 #include <linux/ioport.h>
-#include <linux/list.h>
 #include <linux/slab.h>
 
 #ifdef CONFIG_X86
@@ -622,164 +621,3 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares,
 	return (type & types) ? 0 : 1;
 }
 EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
-
-struct reserved_region {
-	struct list_head node;
-	u64 start;
-	u64 end;
-};
-
-static LIST_HEAD(reserved_io_regions);
-static LIST_HEAD(reserved_mem_regions);
-
-static int request_range(u64 start, u64 end, u8 space_id, unsigned long flags,
-			 char *desc)
-{
-	unsigned int length = end - start + 1;
-	struct resource *res;
-
-	res = space_id == ACPI_ADR_SPACE_SYSTEM_IO ?
-		request_region(start, length, desc) :
-		request_mem_region(start, length, desc);
-	if (!res)
-		return -EIO;
-
-	res->flags &= ~flags;
-	return 0;
-}
-
-static int add_region_before(u64 start, u64 end, u8 space_id,
-			     unsigned long flags, char *desc,
-			     struct list_head *head)
-{
-	struct reserved_region *reg;
-	int error;
-
-	reg = kmalloc(sizeof(*reg), GFP_KERNEL);
-	if (!reg)
-		return -ENOMEM;
-
-	error = request_range(start, end, space_id, flags, desc);
-	if (error) {
-		kfree(reg);
-		return error;
-	}
-
-	reg->start = start;
-	reg->end = end;
-	list_add_tail(&reg->node, head);
-	return 0;
-}
-
-/**
- * acpi_reserve_region - Reserve an I/O or memory region as a system resource.
- * @start: Starting address of the region.
- * @length: Length of the region.
- * @space_id: Identifier of address space to reserve the region from.
- * @flags: Resource flags to clear for the region after requesting it.
- * @desc: Region description (for messages).
- *
- * Reserve an I/O or memory region as a system resource to prevent others from
- * using it. If the new region overlaps with one of the regions (in the given
- * address space) already reserved by this routine, only the non-overlapping
- * parts of it will be reserved.
- *
- * Returned is either 0 (success) or a negative error code indicating a resource
- * reservation problem. It is the code of the first encountered error, but the
- * routine doesn't abort until it has attempted to request all of the parts of
- * the new region that don't overlap with other regions reserved previously.
- *
- * The resources requested by this routine are never released.
- */
-int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
-			unsigned long flags, char *desc)
-{
-	struct list_head *regions;
-	struct reserved_region *reg;
-	u64 end = start + length - 1;
-	int ret = 0, error = 0;
-
-	if (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
-		regions = &reserved_io_regions;
-	else if (space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
-		regions = &reserved_mem_regions;
-	else
-		return -EINVAL;
-
-	if (list_empty(regions))
-		return add_region_before(start, end, space_id, flags, desc, regions);
-
-	list_for_each_entry(reg, regions, node)
-		if (reg->start == end + 1) {
-			/* The new region can be prepended to this one. */
-			ret = request_range(start, end, space_id, flags, desc);
-			if (!ret)
-				reg->start = start;
-
-			return ret;
-		} else if (reg->start > end) {
-			/* No overlap. Add the new region here and get out. */
-			return add_region_before(start, end, space_id, flags,
-						 desc, &reg->node);
-		} else if (reg->end == start - 1) {
-			goto combine;
-		} else if (reg->end >= start) {
-			goto overlap;
-		}
-
-	/* The new region goes after the last existing one. */
-	return add_region_before(start, end, space_id, flags, desc, regions);
-
- overlap:
-	/*
-	 * The new region overlaps an existing one.
-	 *
-	 * The head part of the new region immediately preceding the existing
-	 * overlapping one can be combined with it right away.
-	 */
-	if (reg->start > start) {
-		error = request_range(start, reg->start - 1, space_id, flags, desc);
-		if (error)
-			ret = error;
-		else
-			reg->start = start;
-	}
-
- combine:
-	/*
-	 * The new region is adjacent to an existing one. If it extends beyond
-	 * that region all the way to the next one, it is possible to combine
-	 * all three of them.
-	 */
-	while (reg->end < end) {
-		struct reserved_region *next = NULL;
-		u64 a = reg->end + 1, b = end;
-
-		if (!list_is_last(&reg->node, regions)) {
-			next = list_next_entry(reg, node);
-			if (next->start <= end)
-				b = next->start - 1;
-		}
-		error = request_range(a, b, space_id, flags, desc);
-		if (!error) {
-			if (next && next->start == b + 1) {
-				reg->end = next->end;
-				list_del(&next->node);
-				kfree(next);
-			} else {
-				reg->end = end;
-				break;
-			}
-		} else if (next) {
-			if (!ret)
-				ret = error;
-
-			reg = next;
-		} else {
-			break;
-		}
-	}
-
-	return ret ? ret : error;
-}
-EXPORT_SYMBOL_GPL(acpi_reserve_region);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 2649a068671d..ec256352f423 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1019,6 +1019,29 @@ static bool acpi_of_match_device(struct acpi_device *adev,
 	return false;
 }
 
+static bool __acpi_match_device_cls(const struct acpi_device_id *id,
+				    struct acpi_hardware_id *hwid)
+{
+	int i, msk, byte_shift;
+	char buf[3];
+
+	if (!id->cls)
+		return false;
+
+	/* Apply class-code bitmask, before checking each class-code byte */
+	for (i = 1; i <= 3; i++) {
+		byte_shift = 8 * (3 - i);
+		msk = (id->cls_msk >> byte_shift) & 0xFF;
+		if (!msk)
+			continue;
+
+		sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);
+		if (strncmp(buf, &hwid->id[(i - 1) * 2], 2))
+			return false;
+	}
+	return true;
+}
+
 static const struct acpi_device_id *__acpi_match_device(
 	struct acpi_device *device,
 	const struct acpi_device_id *ids,
@@ -1036,9 +1059,12 @@ static const struct acpi_device_id *__acpi_match_device(
 
 	list_for_each_entry(hwid, &device->pnp.ids, list) {
 		/* First, check the ACPI/PNP IDs provided by the caller. */
-		for (id = ids; id->id[0]; id++)
-			if (!strcmp((char *) id->id, hwid->id))
+		for (id = ids; id->id[0] || id->cls; id++) {
+			if (id->id[0] && !strcmp((char *) id->id, hwid->id))
 				return id;
+			else if (id->cls && __acpi_match_device_cls(id, hwid))
+				return id;
+		}
 
 		/*
 		 * Next, check ACPI_DT_NAMESPACE_HID and try to match the
@@ -2101,6 +2127,8 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
 	if (info->valid & ACPI_VALID_UID)
 		pnp->unique_id = kstrdup(info->unique_id.string,
 					GFP_KERNEL);
+	if (info->valid & ACPI_VALID_CLS)
+		acpi_add_id(pnp, info->class_code.string);
 
 	kfree(info);
 
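
[Editor's note, not part of the patch: a user-space rendering of the _CLS match added above. The ACPI class code prints as six hex digits (0x010601 is PCI mass storage / SATA / AHCI 1.0, matching the ahci_platform hunk below), and each byte is compared only where the mask selects it; this mirrors __acpi_match_device_cls() in spirit:]

#include <stdio.h>
#include <string.h>

static int cls_match(unsigned int cls, unsigned int cls_msk, const char *hwid)
{
	char buf[3];
	int i;

	for (i = 1; i <= 3; i++) {
		int shift = 8 * (3 - i);
		unsigned int msk = (cls_msk >> shift) & 0xFF;

		if (!msk)
			continue;
		sprintf(buf, "%02x", (cls >> shift) & msk);
		if (strncmp(buf, &hwid[(i - 1) * 2], 2))
			return 0;
	}
	return 1;
}

int main(void)
{
	printf("%d\n", cls_match(0x010601, 0xffffff, "010601"));	/* 1 */
	printf("%d\n", cls_match(0x010601, 0xffffff, "010185"));	/* 0 */
	return 0;
}
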
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 6d17a3b65ef7..15e40ee62a94 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -48,7 +48,7 @@ config ATA_VERBOSE_ERROR
 
 config ATA_ACPI
 	bool "ATA ACPI Support"
-	depends on ACPI && PCI
+	depends on ACPI
 	default y
 	help
 	  This option adds support for ATA-related ACPI objects.
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 614c78f510f0..1befb114c384 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -20,6 +20,8 @@
 #include <linux/platform_device.h>
 #include <linux/libata.h>
 #include <linux/ahci_platform.h>
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
 #include "ahci.h"
 
 #define DRV_NAME "ahci"
@@ -79,12 +81,19 @@ static const struct of_device_id ahci_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, ahci_of_match);
 
+static const struct acpi_device_id ahci_acpi_match[] = {
+	{ ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
+
 static struct platform_driver ahci_driver = {
 	.probe = ahci_probe,
 	.remove = ata_platform_remove_one,
 	.driver = {
 		.name = DRV_NAME,
 		.of_match_table = ahci_of_match,
+		.acpi_match_table = ahci_acpi_match,
 		.pm = &ahci_pm_ops,
 	},
 };
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 9c4288362a8e..894bda114224 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -563,10 +563,8 @@ static void fw_dev_release(struct device *dev)
563 kfree(fw_priv); 563 kfree(fw_priv);
564} 564}
565 565
566static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) 566static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
567{ 567{
568 struct firmware_priv *fw_priv = to_firmware_priv(dev);
569
570 if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id)) 568 if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
571 return -ENOMEM; 569 return -ENOMEM;
572 if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout)) 570 if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
@@ -577,6 +575,18 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
577 return 0; 575 return 0;
578} 576}
579 577
578static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
579{
580 struct firmware_priv *fw_priv = to_firmware_priv(dev);
581 int err = 0;
582
583 mutex_lock(&fw_lock);
584 if (fw_priv->buf)
585 err = do_firmware_uevent(fw_priv, env);
586 mutex_unlock(&fw_lock);
587 return err;
588}
589
580static struct class firmware_class = { 590static struct class firmware_class = {
581 .name = "firmware", 591 .name = "firmware",
582 .class_attrs = firmware_class_attrs, 592 .class_attrs = firmware_class_attrs,
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index cdd547bd67df..0ee43c1056e0 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -6,6 +6,7 @@
6 * This file is released under the GPLv2. 6 * This file is released under the GPLv2.
7 */ 7 */
8 8
9#include <linux/delay.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10#include <linux/io.h> 11#include <linux/io.h>
11#include <linux/platform_device.h> 12#include <linux/platform_device.h>
@@ -19,6 +20,8 @@
19#include <linux/suspend.h> 20#include <linux/suspend.h>
20#include <linux/export.h> 21#include <linux/export.h>
21 22
23#define GENPD_RETRY_MAX_MS 250 /* Approximate */
24
22#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ 25#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
23({ \ 26({ \
24 type (*__routine)(struct device *__d); \ 27 type (*__routine)(struct device *__d); \
@@ -2131,6 +2134,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
2131static void genpd_dev_pm_detach(struct device *dev, bool power_off) 2134static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2132{ 2135{
2133 struct generic_pm_domain *pd; 2136 struct generic_pm_domain *pd;
2137 unsigned int i;
2134 int ret = 0; 2138 int ret = 0;
2135 2139
2136 pd = pm_genpd_lookup_dev(dev); 2140 pd = pm_genpd_lookup_dev(dev);
@@ -2139,10 +2143,12 @@ static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2139 2143
2140 dev_dbg(dev, "removing from PM domain %s\n", pd->name); 2144 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2141 2145
2142 while (1) { 2146 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2143 ret = pm_genpd_remove_device(pd, dev); 2147 ret = pm_genpd_remove_device(pd, dev);
2144 if (ret != -EAGAIN) 2148 if (ret != -EAGAIN)
2145 break; 2149 break;
2150
2151 mdelay(i);
2146 cond_resched(); 2152 cond_resched();
2147 } 2153 }
2148 2154
@@ -2183,6 +2189,7 @@ int genpd_dev_pm_attach(struct device *dev)
2183{ 2189{
2184 struct of_phandle_args pd_args; 2190 struct of_phandle_args pd_args;
2185 struct generic_pm_domain *pd; 2191 struct generic_pm_domain *pd;
2192 unsigned int i;
2186 int ret; 2193 int ret;
2187 2194
2188 if (!dev->of_node) 2195 if (!dev->of_node)
@@ -2218,10 +2225,12 @@ int genpd_dev_pm_attach(struct device *dev)
2218 2225
2219 dev_dbg(dev, "adding to PM domain %s\n", pd->name); 2226 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2220 2227
2221 while (1) { 2228 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2222 ret = pm_genpd_add_device(pd, dev); 2229 ret = pm_genpd_add_device(pd, dev);
2223 if (ret != -EAGAIN) 2230 if (ret != -EAGAIN)
2224 break; 2231 break;
2232
2233 mdelay(i);
2225 cond_resched(); 2234 cond_resched();
2226 } 2235 }
2227 2236
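
[Editor's note, not part of the patch: the retry loops above replace an unbounded spin on -EAGAIN with a doubling backoff that gives up once the step reaches GENPD_RETRY_MAX_MS, i.e. after 1+2+...+128 = 255 ms of accumulated mdelay() time. Standalone sketch of the schedule:]

#include <stdio.h>

#define GENPD_RETRY_MAX_MS 250	/* approximate cap, as in the patch */

int main(void)
{
	unsigned int i, total = 0;

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		total += i;	/* stands in for mdelay(i) */
		printf("retry, backoff %3u ms (cumulative %u ms)\n", i, total);
	}
	return 0;
}
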
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 7470004ca810..eb6e67451dec 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -45,14 +45,12 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
 		return -EEXIST;
 	}
 
-	dev->power.wakeirq = wirq;
-	spin_unlock_irqrestore(&dev->power.lock, flags);
-
 	err = device_wakeup_attach_irq(dev, wirq);
-	if (err)
-		return err;
+	if (!err)
+		dev->power.wakeirq = wirq;
 
-	return 0;
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+	return err;
 }
 
 /**
@@ -105,10 +103,10 @@ void dev_pm_clear_wake_irq(struct device *dev)
 		return;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
+	device_wakeup_detach_irq(dev);
 	dev->power.wakeirq = NULL;
 	spin_unlock_irqrestore(&dev->power.lock, flags);
 
-	device_wakeup_detach_irq(dev);
 	if (wirq->dedicated_irq)
 		free_irq(wirq->irq, wirq);
 	kfree(wirq);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 40f71603378c..51f15bc15774 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -281,32 +281,25 @@ EXPORT_SYMBOL_GPL(device_wakeup_enable);
  * Attach a device wakeirq to the wakeup source so the device
  * wake IRQ can be configured automatically for suspend and
  * resume.
+ *
+ * Call under the device's power.lock lock.
  */
 int device_wakeup_attach_irq(struct device *dev,
 			     struct wake_irq *wakeirq)
 {
 	struct wakeup_source *ws;
-	int ret = 0;
 
-	spin_lock_irq(&dev->power.lock);
 	ws = dev->power.wakeup;
 	if (!ws) {
 		dev_err(dev, "forgot to call call device_init_wakeup?\n");
-		ret = -EINVAL;
-		goto unlock;
+		return -EINVAL;
 	}
 
-	if (ws->wakeirq) {
-		ret = -EEXIST;
-		goto unlock;
-	}
+	if (ws->wakeirq)
+		return -EEXIST;
 
 	ws->wakeirq = wakeirq;
-
-unlock:
-	spin_unlock_irq(&dev->power.lock);
-
-	return ret;
+	return 0;
 }
 
 /**
@@ -314,20 +307,16 @@ unlock:
  * @dev: Device to handle
  *
  * Removes a device wakeirq from the wakeup source.
+ *
+ * Call under the device's power.lock lock.
  */
 void device_wakeup_detach_irq(struct device *dev)
 {
 	struct wakeup_source *ws;
 
-	spin_lock_irq(&dev->power.lock);
 	ws = dev->power.wakeup;
-	if (!ws)
-		goto unlock;
-
-	ws->wakeirq = NULL;
-
-unlock:
-	spin_unlock_irq(&dev->power.lock);
+	if (ws)
+		ws->wakeirq = NULL;
 }
 
 /**
333/** 322/**
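
Taken together, the wakeirq.c and wakeup.c hunks above move the locking up one level: device_wakeup_attach_irq() and device_wakeup_detach_irq() no longer take power.lock themselves but document that the caller must hold it, and dev->power.wakeirq is published only after the attach succeeds. A sketch of that convention using pthreads; struct dev, dev_attach_irq() and dev_set_wake_irq() are illustrative names, not the kernel's:

#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct dev {
	pthread_mutex_t lock;
	void *wakeirq;			/* read and written only under lock */
};

/* Call under dev->lock; rejects a second attach. */
static int dev_attach_irq(struct dev *d, void *wirq)
{
	(void)wirq;			/* a real helper would register it */
	if (d->wakeirq)
		return -EEXIST;
	return 0;
}

static int dev_set_wake_irq(struct dev *d, void *wirq)
{
	int err;

	pthread_mutex_lock(&d->lock);
	err = dev_attach_irq(d, wirq);
	if (!err)
		d->wakeirq = wirq;	/* publish only on success */
	pthread_mutex_unlock(&d->lock);
	return err;
}

int main(void)
{
	struct dev d = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int dummy;

	printf("first attach:  %d\n", dev_set_wake_irq(&d, &dummy));
	printf("second attach: %d\n", dev_set_wake_irq(&d, &dummy));	/* -EEXIST */
	return 0;
}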
diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c
index 152dcb3f7b5f..61566bcefa53 100644
--- a/drivers/clk/at91/clk-h32mx.c
+++ b/drivers/clk/at91/clk-h32mx.c
@@ -116,8 +116,10 @@ void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
116 h32mxclk->pmc = pmc; 116 h32mxclk->pmc = pmc;
117 117
118 clk = clk_register(NULL, &h32mxclk->hw); 118 clk = clk_register(NULL, &h32mxclk->hw);
119 if (!clk) 119 if (!clk) {
120 kfree(h32mxclk);
120 return; 121 return;
122 }
121 123
122 of_clk_add_provider(np, of_clk_src_simple_get, clk); 124 of_clk_add_provider(np, of_clk_src_simple_get, clk);
123} 125}
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index c2400456a044..27dfa965cfed 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -171,8 +171,10 @@ at91_clk_register_main_osc(struct at91_pmc *pmc,
171 irq_set_status_flags(osc->irq, IRQ_NOAUTOEN); 171 irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
172 ret = request_irq(osc->irq, clk_main_osc_irq_handler, 172 ret = request_irq(osc->irq, clk_main_osc_irq_handler,
173 IRQF_TRIGGER_HIGH, name, osc); 173 IRQF_TRIGGER_HIGH, name, osc);
174 if (ret) 174 if (ret) {
175 kfree(osc);
175 return ERR_PTR(ret); 176 return ERR_PTR(ret);
177 }
176 178
177 if (bypass) 179 if (bypass)
178 pmc_write(pmc, AT91_CKGR_MOR, 180 pmc_write(pmc, AT91_CKGR_MOR,
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index f98eafe9b12d..5b3ded5205a2 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -165,12 +165,16 @@ at91_clk_register_master(struct at91_pmc *pmc, unsigned int irq,
165 irq_set_status_flags(master->irq, IRQ_NOAUTOEN); 165 irq_set_status_flags(master->irq, IRQ_NOAUTOEN);
166 ret = request_irq(master->irq, clk_master_irq_handler, 166 ret = request_irq(master->irq, clk_master_irq_handler,
167 IRQF_TRIGGER_HIGH, "clk-master", master); 167 IRQF_TRIGGER_HIGH, "clk-master", master);
168 if (ret) 168 if (ret) {
169 kfree(master);
169 return ERR_PTR(ret); 170 return ERR_PTR(ret);
171 }
170 172
171 clk = clk_register(NULL, &master->hw); 173 clk = clk_register(NULL, &master->hw);
172 if (IS_ERR(clk)) 174 if (IS_ERR(clk)) {
175 free_irq(master->irq, master);
173 kfree(master); 176 kfree(master);
177 }
174 178
175 return clk; 179 return clk;
176} 180}
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index cbbe40377ad6..18b60f4895a6 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -346,12 +346,16 @@ at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name,
346 irq_set_status_flags(pll->irq, IRQ_NOAUTOEN); 346 irq_set_status_flags(pll->irq, IRQ_NOAUTOEN);
347 ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH, 347 ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH,
348 id ? "clk-pllb" : "clk-plla", pll); 348 id ? "clk-pllb" : "clk-plla", pll);
349 if (ret) 349 if (ret) {
350 kfree(pll);
350 return ERR_PTR(ret); 351 return ERR_PTR(ret);
352 }
351 353
352 clk = clk_register(NULL, &pll->hw); 354 clk = clk_register(NULL, &pll->hw);
353 if (IS_ERR(clk)) 355 if (IS_ERR(clk)) {
356 free_irq(pll->irq, pll);
354 kfree(pll); 357 kfree(pll);
358 }
355 359
356 return clk; 360 return clk;
357} 361}
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index a76d03fd577b..58008b3e8bc1 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -130,13 +130,17 @@ at91_clk_register_system(struct at91_pmc *pmc, const char *name,
130 irq_set_status_flags(sys->irq, IRQ_NOAUTOEN); 130 irq_set_status_flags(sys->irq, IRQ_NOAUTOEN);
131 ret = request_irq(sys->irq, clk_system_irq_handler, 131 ret = request_irq(sys->irq, clk_system_irq_handler,
132 IRQF_TRIGGER_HIGH, name, sys); 132 IRQF_TRIGGER_HIGH, name, sys);
133 if (ret) 133 if (ret) {
134 kfree(sys);
134 return ERR_PTR(ret); 135 return ERR_PTR(ret);
136 }
135 } 137 }
136 138
137 clk = clk_register(NULL, &sys->hw); 139 clk = clk_register(NULL, &sys->hw);
138 if (IS_ERR(clk)) 140 if (IS_ERR(clk)) {
141 free_irq(sys->irq, sys);
139 kfree(sys); 142 kfree(sys);
143 }
140 144
141 return clk; 145 return clk;
142} 146}
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
index ae3263bc1476..30dd697b1668 100644
--- a/drivers/clk/at91/clk-utmi.c
+++ b/drivers/clk/at91/clk-utmi.c
@@ -118,12 +118,16 @@ at91_clk_register_utmi(struct at91_pmc *pmc, unsigned int irq,
118 irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN); 118 irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN);
119 ret = request_irq(utmi->irq, clk_utmi_irq_handler, 119 ret = request_irq(utmi->irq, clk_utmi_irq_handler,
120 IRQF_TRIGGER_HIGH, "clk-utmi", utmi); 120 IRQF_TRIGGER_HIGH, "clk-utmi", utmi);
121 if (ret) 121 if (ret) {
122 kfree(utmi);
122 return ERR_PTR(ret); 123 return ERR_PTR(ret);
124 }
123 125
124 clk = clk_register(NULL, &utmi->hw); 126 clk = clk_register(NULL, &utmi->hw);
125 if (IS_ERR(clk)) 127 if (IS_ERR(clk)) {
128 free_irq(utmi->irq, utmi);
126 kfree(utmi); 129 kfree(utmi);
130 }
127 131
128 return clk; 132 return clk;
129} 133}
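
The five at91 hunks above all add the same unwind: kfree() the object when request_irq() fails, and free_irq() before kfree() when the later clk_register() fails, so every error path releases exactly what was acquired, in reverse order. A compact stand-alone sketch; acquire_irq(), register_clk() and struct osc are invented stand-ins:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct osc {
	int irq;
};

static int acquire_irq(struct osc *o)
{
	o->irq = 42;			/* pretend request_irq() succeeded */
	return 0;
}

static void release_irq(struct osc *o)
{
	o->irq = -1;			/* pretend free_irq() */
}

static int register_clk(struct osc *o)
{
	(void)o;
	return -EINVAL;			/* force the late failure path */
}

static struct osc *osc_create(void)
{
	struct osc *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;

	if (acquire_irq(o)) {
		free(o);		/* nothing else held yet */
		return NULL;
	}

	if (register_clk(o)) {
		release_irq(o);		/* undo in reverse order... */
		free(o);		/* ...then drop the allocation */
		return NULL;
	}

	return o;
}

int main(void)
{
	printf("osc_create() -> %p\n", (void *)osc_create());
	return 0;
}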
diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c
index e19c09cd9645..f630e1bbdcfe 100644
--- a/drivers/clk/bcm/clk-iproc-asiu.c
+++ b/drivers/clk/bcm/clk-iproc-asiu.c
@@ -222,10 +222,6 @@ void __init iproc_asiu_setup(struct device_node *node,
222 struct iproc_asiu_clk *asiu_clk; 222 struct iproc_asiu_clk *asiu_clk;
223 const char *clk_name; 223 const char *clk_name;
224 224
225 clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL);
226 if (WARN_ON(!clk_name))
227 goto err_clk_register;
228
229 ret = of_property_read_string_index(node, "clock-output-names", 225 ret = of_property_read_string_index(node, "clock-output-names",
230 i, &clk_name); 226 i, &clk_name);
231 if (WARN_ON(ret)) 227 if (WARN_ON(ret))
@@ -259,7 +255,7 @@ void __init iproc_asiu_setup(struct device_node *node,
259 255
260err_clk_register: 256err_clk_register:
261 for (i = 0; i < num_clks; i++) 257 for (i = 0; i < num_clks; i++)
262 kfree(asiu->clks[i].name); 258 clk_unregister(asiu->clk_data.clks[i]);
263 iounmap(asiu->gate_base); 259 iounmap(asiu->gate_base);
264 260
265err_iomap_gate: 261err_iomap_gate:
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index 46fb84bc2674..2dda4e8295a9 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -366,7 +366,7 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
366 val = readl(pll->pll_base + ctrl->ndiv_int.offset); 366 val = readl(pll->pll_base + ctrl->ndiv_int.offset);
367 ndiv_int = (val >> ctrl->ndiv_int.shift) & 367 ndiv_int = (val >> ctrl->ndiv_int.shift) &
368 bit_mask(ctrl->ndiv_int.width); 368 bit_mask(ctrl->ndiv_int.width);
369 ndiv = ndiv_int << ctrl->ndiv_int.shift; 369 ndiv = (u64)ndiv_int << ctrl->ndiv_int.shift;
370 370
371 if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) { 371 if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
372 val = readl(pll->pll_base + ctrl->ndiv_frac.offset); 372 val = readl(pll->pll_base + ctrl->ndiv_frac.offset);
@@ -374,7 +374,8 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
374 bit_mask(ctrl->ndiv_frac.width); 374 bit_mask(ctrl->ndiv_frac.width);
375 375
376 if (ndiv_frac != 0) 376 if (ndiv_frac != 0)
377 ndiv = (ndiv_int << ctrl->ndiv_int.shift) | ndiv_frac; 377 ndiv = ((u64)ndiv_int << ctrl->ndiv_int.shift) |
378 ndiv_frac;
378 } 379 }
379 380
380 val = readl(pll->pll_base + ctrl->pdiv.offset); 381 val = readl(pll->pll_base + ctrl->pdiv.offset);
@@ -655,10 +656,6 @@ void __init iproc_pll_clk_setup(struct device_node *node,
655 memset(&init, 0, sizeof(init)); 656 memset(&init, 0, sizeof(init));
656 parent_name = node->name; 657 parent_name = node->name;
657 658
658 clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL);
659 if (WARN_ON(!clk_name))
660 goto err_clk_register;
661
662 ret = of_property_read_string_index(node, "clock-output-names", 659 ret = of_property_read_string_index(node, "clock-output-names",
663 i, &clk_name); 660 i, &clk_name);
664 if (WARN_ON(ret)) 661 if (WARN_ON(ret))
@@ -690,10 +687,8 @@ void __init iproc_pll_clk_setup(struct device_node *node,
690 return; 687 return;
691 688
692err_clk_register: 689err_clk_register:
693 for (i = 0; i < num_clks; i++) { 690 for (i = 0; i < num_clks; i++)
694 kfree(pll->clks[i].name);
695 clk_unregister(pll->clk_data.clks[i]); 691 clk_unregister(pll->clk_data.clks[i]);
696 }
697 692
698err_pll_register: 693err_pll_register:
699 if (pll->asiu_base) 694 if (pll->asiu_base)
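
Besides dropping the needless clk_name allocation (of_property_read_string_index() hands back a pointer into the device tree), the iproc PLL hunks widen ndiv_int to u64 before shifting: a 32-bit left shift would wrap and silently drop the high bits. A short demonstration with illustrative field and shift values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ndiv_int = 0x80;	/* illustrative register field */
	unsigned int shift = 28;	/* illustrative shift amount */

	/* Shift evaluated in 32 bits: the value wraps to 0 before the
	 * widening assignment can preserve it. */
	uint64_t wrong = ndiv_int << shift;

	/* Widen first, then shift: all bits survive. */
	uint64_t right = (uint64_t)ndiv_int << shift;

	printf("wrong=0x%" PRIx64 " right=0x%" PRIx64 "\n", wrong, right);
	return 0;
}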
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index b9b12a742970..3f6f7ad39490 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -268,7 +268,7 @@ static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary)
268 memcpy(table, stm32f42xx_gate_map, sizeof(table)); 268 memcpy(table, stm32f42xx_gate_map, sizeof(table));
269 269
270 /* only bits set in table can be used as indices */ 270 /* only bits set in table can be used as indices */
271 if (WARN_ON(secondary > 8 * sizeof(table) || 271 if (WARN_ON(secondary >= BITS_PER_BYTE * sizeof(table) ||
272 0 == (table[BIT_ULL_WORD(secondary)] & 272 0 == (table[BIT_ULL_WORD(secondary)] &
273 BIT_ULL_MASK(secondary)))) 273 BIT_ULL_MASK(secondary))))
274 return -EINVAL; 274 return -EINVAL;
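
The stm32f4 fix tightens the range check from '>' to '>=': for a 64-bit table the valid bit indices are 0..63, so an index equal to the bit count must be rejected too, otherwise the subsequent BIT_ULL shift is undefined. The same check in isolation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

static bool idx_valid(uint64_t table, unsigned int idx)
{
	/* Valid indices are 0..63, so reject idx == 64 as well: >= not > */
	if (idx >= BITS_PER_BYTE * sizeof(table))
		return false;
	return table & ((uint64_t)1 << idx);
}

int main(void)
{
	uint64_t table = (uint64_t)1 << 63;

	printf("idx 63: %d, idx 64: %d\n",
	       idx_valid(table, 63), idx_valid(table, 64));
	return 0;
}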
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 4b9e04cdf7e8..8b6523d15fb8 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -700,6 +700,22 @@ static const struct mtk_composite peri_clks[] __initconst = {
700 MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1), 700 MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1),
701}; 701};
702 702
703static struct clk_onecell_data *mt8173_top_clk_data __initdata;
704static struct clk_onecell_data *mt8173_pll_clk_data __initdata;
705
706static void __init mtk_clk_enable_critical(void)
707{
708 if (!mt8173_top_clk_data || !mt8173_pll_clk_data)
709 return;
710
711 clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA15PLL]);
712 clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA7PLL]);
713 clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_MEM_SEL]);
714 clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]);
715 clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_CCI400_SEL]);
716 clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_RTC_SEL]);
717}
718
703static void __init mtk_topckgen_init(struct device_node *node) 719static void __init mtk_topckgen_init(struct device_node *node)
704{ 720{
705 struct clk_onecell_data *clk_data; 721 struct clk_onecell_data *clk_data;
@@ -712,19 +728,19 @@ static void __init mtk_topckgen_init(struct device_node *node)
712 return; 728 return;
713 } 729 }
714 730
715 clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); 731 mt8173_top_clk_data = clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
716 732
717 mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data); 733 mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data);
718 mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data); 734 mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
719 mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base, 735 mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
720 &mt8173_clk_lock, clk_data); 736 &mt8173_clk_lock, clk_data);
721 737
722 clk_prepare_enable(clk_data->clks[CLK_TOP_CCI400_SEL]);
723
724 r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); 738 r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
725 if (r) 739 if (r)
726 pr_err("%s(): could not register clock provider: %d\n", 740 pr_err("%s(): could not register clock provider: %d\n",
727 __func__, r); 741 __func__, r);
742
743 mtk_clk_enable_critical();
728} 744}
729CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8173-topckgen", mtk_topckgen_init); 745CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8173-topckgen", mtk_topckgen_init);
730 746
@@ -818,13 +834,13 @@ static void __init mtk_apmixedsys_init(struct device_node *node)
818{ 834{
819 struct clk_onecell_data *clk_data; 835 struct clk_onecell_data *clk_data;
820 836
821 clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); 837 mt8173_pll_clk_data = clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
822 if (!clk_data) 838 if (!clk_data)
823 return; 839 return;
824 840
825 mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); 841 mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
826 842
827 clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMCA15PLL]); 843 mtk_clk_enable_critical();
828} 844}
829CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8173-apmixedsys", 845CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8173-apmixedsys",
830 mtk_apmixedsys_init); 846 mtk_apmixedsys_init);
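
The mt8173 change defers enabling the critical clocks until both providers exist: each init path records its clk_onecell_data and calls mtk_clk_enable_critical(), which bails out while the other half is still NULL, so the enables run exactly once regardless of probe order. The control flow, reduced to a sketch with invented names:

#include <stdio.h>

static int *top_data, *pll_data;	/* stand-ins for the clk_onecell_data pointers */

static void enable_critical(void)
{
	if (!top_data || !pll_data)
		return;			/* other provider not registered yet */
	printf("enabling critical clocks (%d, %d)\n", *top_data, *pll_data);
}

static void top_init(int *d)
{
	top_data = d;
	enable_critical();
}

static void pll_init(int *d)
{
	pll_data = d;
	enable_critical();
}

int main(void)
{
	int top = 1, pll = 2;

	pll_init(&pll);			/* no-op: topckgen half missing */
	top_init(&top);			/* both present: enable runs once */
	return 0;
}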
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index b95d17fbb8d7..92936f0912d2 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -530,19 +530,16 @@ static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
530 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 530 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
531 struct freq_tbl f = *rcg->freq_tbl; 531 struct freq_tbl f = *rcg->freq_tbl;
532 const struct frac_entry *frac = frac_table_pixel; 532 const struct frac_entry *frac = frac_table_pixel;
533 unsigned long request, src_rate; 533 unsigned long request;
534 int delta = 100000; 534 int delta = 100000;
535 u32 mask = BIT(rcg->hid_width) - 1; 535 u32 mask = BIT(rcg->hid_width) - 1;
536 u32 hid_div; 536 u32 hid_div;
537 int index = qcom_find_src_index(hw, rcg->parent_map, f.src);
538 struct clk *parent = clk_get_parent_by_index(hw->clk, index);
539 537
540 for (; frac->num; frac++) { 538 for (; frac->num; frac++) {
541 request = (rate * frac->den) / frac->num; 539 request = (rate * frac->den) / frac->num;
542 540
543 src_rate = __clk_round_rate(parent, request); 541 if ((parent_rate < (request - delta)) ||
544 if ((src_rate < (request - delta)) || 542 (parent_rate > (request + delta)))
545 (src_rate > (request + delta)))
546 continue; 543 continue;
547 544
548 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, 545 regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
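
The rcg2 hunk drops the clk_get_parent_by_index()/__clk_round_rate() lookup and instead compares each fractional candidate against the parent_rate argument the clock framework already passes to .set_rate(). Roughly this selection loop, with an invented frequency table; the +/- 100 kHz tolerance mirrors the driver's delta, and the comparison is rearranged here to avoid unsigned underflow:

#include <stdio.h>

struct frac {
	int num, den;
};

/* Return the index of the first table entry the given parent rate can
 * serve for the requested rate, or -1 if none is within delta. */
static long pick_frac(unsigned long rate, unsigned long parent_rate)
{
	static const struct frac table[] = { { 3, 8 }, { 2, 9 }, { 0, 0 } };
	const unsigned long delta = 100000;	/* +/- 100 kHz */
	const struct frac *f;

	for (f = table; f->num; f++) {
		unsigned long request = rate * f->den / f->num;

		if (parent_rate + delta < request ||
		    parent_rate > request + delta)
			continue;
		return f - table;
	}
	return -1;
}

int main(void)
{
	/* 101.25 MHz against a 270 MHz parent matches the 3/8 entry. */
	printf("match: %ld\n", pick_frac(101250000UL, 270000000UL));
	return 0;
}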
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 657ca14ba709..8dd8cce27361 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -190,7 +190,7 @@ static struct clk *clk_register_flexgen(const char *name,
190 190
191 init.name = name; 191 init.name = name;
192 init.ops = &flexgen_ops; 192 init.ops = &flexgen_ops;
193 init.flags = CLK_IS_BASIC | flexgen_flags; 193 init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE | flexgen_flags;
194 init.parent_names = parent_names; 194 init.parent_names = parent_names;
195 init.num_parents = num_parents; 195 init.num_parents = num_parents;
196 196
@@ -303,6 +303,8 @@ static void __init st_of_flexgen_setup(struct device_node *np)
303 if (!rlock) 303 if (!rlock)
304 goto err; 304 goto err;
305 305
306 spin_lock_init(rlock);
307
306 for (i = 0; i < clk_data->clk_num; i++) { 308 for (i = 0; i < clk_data->clk_num; i++) {
307 struct clk *clk; 309 struct clk *clk;
308 const char *clk_name; 310 const char *clk_name;
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index e94197f04b0b..d9eb2e1d8471 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -340,7 +340,7 @@ static const struct clkgen_quadfs_data st_fs660c32_C_407 = {
340 CLKGEN_FIELD(0x30c, 0xf, 20), 340 CLKGEN_FIELD(0x30c, 0xf, 20),
341 CLKGEN_FIELD(0x310, 0xf, 20) }, 341 CLKGEN_FIELD(0x310, 0xf, 20) },
342 .lockstatus_present = true, 342 .lockstatus_present = true,
343 .lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24), 343 .lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24),
344 .powerup_polarity = 1, 344 .powerup_polarity = 1,
345 .standby_polarity = 1, 345 .standby_polarity = 1,
346 .pll_ops = &st_quadfs_pll_c32_ops, 346 .pll_ops = &st_quadfs_pll_c32_ops,
@@ -489,7 +489,7 @@ static int quadfs_pll_is_enabled(struct clk_hw *hw)
489 struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); 489 struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
490 u32 npda = CLKGEN_READ(pll, npda); 490 u32 npda = CLKGEN_READ(pll, npda);
491 491
492 return !!npda; 492 return pll->data->powerup_polarity ? !npda : !!npda;
493} 493}
494 494
495static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs, 495static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
@@ -635,7 +635,7 @@ static struct clk * __init st_clk_register_quadfs_pll(
635 635
636 init.name = name; 636 init.name = name;
637 init.ops = quadfs->pll_ops; 637 init.ops = quadfs->pll_ops;
638 init.flags = CLK_IS_BASIC; 638 init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
639 init.parent_names = &parent_name; 639 init.parent_names = &parent_name;
640 init.num_parents = 1; 640 init.num_parents = 1;
641 641
@@ -774,7 +774,7 @@ static void quadfs_fsynth_disable(struct clk_hw *hw)
774 if (fs->lock) 774 if (fs->lock)
775 spin_lock_irqsave(fs->lock, flags); 775 spin_lock_irqsave(fs->lock, flags);
776 776
777 CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity); 777 CLKGEN_WRITE(fs, nsb[fs->chan], fs->data->standby_polarity);
778 778
779 if (fs->lock) 779 if (fs->lock)
780 spin_unlock_irqrestore(fs->lock, flags); 780 spin_unlock_irqrestore(fs->lock, flags);
@@ -1082,10 +1082,6 @@ static const struct of_device_id quadfs_of_match[] = {
1082 .compatible = "st,stih407-quadfs660-D", 1082 .compatible = "st,stih407-quadfs660-D",
1083 .data = &st_fs660c32_D_407 1083 .data = &st_fs660c32_D_407
1084 }, 1084 },
1085 {
1086 .compatible = "st,stih407-quadfs660-D",
1087 .data = (void *)&st_fs660c32_D_407
1088 },
1089 {} 1085 {}
1090}; 1086};
1091 1087
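
Among the clkgen-fsyn fixes above (corrected lock-status register offset, duplicate quadfs660-D table entry removed, standby polarity written as-is), quadfs_pll_is_enabled() now honours powerup_polarity: on parts where the power-down control is active-high, a set NPDA bit means the PLL is off. The truth table as a sketch:

#include <stdbool.h>
#include <stdio.h>

/* npda is the raw power-down field; powerup_polarity says whether a
 * set bit means "powered down" (true) or "powered up" (false). */
static bool pll_is_enabled(unsigned int npda, bool powerup_polarity)
{
	return powerup_polarity ? !npda : !!npda;
}

int main(void)
{
	printf("npda=1 polarity=0 -> %d (on)\n",  pll_is_enabled(1, false));
	printf("npda=1 polarity=1 -> %d (off)\n", pll_is_enabled(1, true));
	return 0;
}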
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index 4fbe6e099587..717c4a91a17b 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -237,7 +237,7 @@ static struct clk *clk_register_genamux(const char *name,
237 237
238 init.name = name; 238 init.name = name;
239 init.ops = &clkgena_divmux_ops; 239 init.ops = &clkgena_divmux_ops;
240 init.flags = CLK_IS_BASIC; 240 init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
241 init.parent_names = parent_names; 241 init.parent_names = parent_names;
242 init.num_parents = num_parents; 242 init.num_parents = num_parents;
243 243
@@ -513,7 +513,8 @@ static void __init st_of_clkgena_prediv_setup(struct device_node *np)
513 0, &clk_name)) 513 0, &clk_name))
514 return; 514 return;
515 515
516 clk = clk_register_divider_table(NULL, clk_name, parent_name, 0, 516 clk = clk_register_divider_table(NULL, clk_name, parent_name,
517 CLK_GET_RATE_NOCACHE,
517 reg + data->offset, data->shift, 1, 518 reg + data->offset, data->shift, 1,
518 0, data->table, NULL); 519 0, data->table, NULL);
519 if (IS_ERR(clk)) 520 if (IS_ERR(clk))
@@ -582,7 +583,7 @@ static struct clkgen_mux_data stih416_a9_mux_data = {
582}; 583};
583static struct clkgen_mux_data stih407_a9_mux_data = { 584static struct clkgen_mux_data stih407_a9_mux_data = {
584 .offset = 0x1a4, 585 .offset = 0x1a4,
585 .shift = 1, 586 .shift = 0,
586 .width = 2, 587 .width = 2,
587}; 588};
588 589
@@ -786,7 +787,8 @@ static void __init st_of_clkgen_vcc_setup(struct device_node *np)
786 &mux->hw, &clk_mux_ops, 787 &mux->hw, &clk_mux_ops,
787 &div->hw, &clk_divider_ops, 788 &div->hw, &clk_divider_ops,
788 &gate->hw, &clk_gate_ops, 789 &gate->hw, &clk_gate_ops,
789 data->clk_flags); 790 data->clk_flags |
791 CLK_GET_RATE_NOCACHE);
790 if (IS_ERR(clk)) { 792 if (IS_ERR(clk)) {
791 kfree(gate); 793 kfree(gate);
792 kfree(div); 794 kfree(div);
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index 106532207213..72d1c27eaffa 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -406,7 +406,7 @@ static struct clk * __init clkgen_pll_register(const char *parent_name,
406 init.name = clk_name; 406 init.name = clk_name;
407 init.ops = pll_data->ops; 407 init.ops = pll_data->ops;
408 408
409 init.flags = CLK_IS_BASIC; 409 init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
410 init.parent_names = &parent_name; 410 init.parent_names = &parent_name;
411 init.num_parents = 1; 411 init.num_parents = 1;
412 412
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 9a82f17d2d73..abf7b37faf73 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -1391,6 +1391,7 @@ static void __init sun6i_init_clocks(struct device_node *node)
1391CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks); 1391CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
1392CLK_OF_DECLARE(sun6i_a31s_clk_init, "allwinner,sun6i-a31s", sun6i_init_clocks); 1392CLK_OF_DECLARE(sun6i_a31s_clk_init, "allwinner,sun6i-a31s", sun6i_init_clocks);
1393CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks); 1393CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks);
1394CLK_OF_DECLARE(sun8i_a33_clk_init, "allwinner,sun8i-a33", sun6i_init_clocks);
1394 1395
1395static void __init sun9i_init_clocks(struct device_node *node) 1396static void __init sun9i_init_clocks(struct device_node *node)
1396{ 1397{
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 879c78423546..2d59038dec43 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -529,6 +529,7 @@ static void __init imx6dl_timer_init_dt(struct device_node *np)
529 529
530CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt); 530CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
531CLOCKSOURCE_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt); 531CLOCKSOURCE_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
532CLOCKSOURCE_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
532CLOCKSOURCE_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt); 533CLOCKSOURCE_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
533CLOCKSOURCE_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt); 534CLOCKSOURCE_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
534CLOCKSOURCE_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt); 535CLOCKSOURCE_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index fc897babab55..e362860c2b50 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * The 2E revision of loongson processor not support this feature. 4 * The 2E revision of loongson processor not support this feature.
5 * 5 *
6 * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology 6 * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
7 * Author: Yanhua, yanh@lemote.com 7 * Author: Yanhua, yanh@lemote.com
8 * 8 *
9 * This file is subject to the terms and conditions of the GNU General Public 9 * This file is subject to the terms and conditions of the GNU General Public
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 975edb1000a2..ae43b58c9733 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -352,7 +352,7 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
352 if (((int64_t)timeout_ns) < 0) 352 if (((int64_t)timeout_ns) < 0)
353 return MAX_SCHEDULE_TIMEOUT; 353 return MAX_SCHEDULE_TIMEOUT;
354 354
355 timeout = ktime_sub_ns(ktime_get(), timeout_ns); 355 timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
356 if (ktime_to_ns(timeout) < 0) 356 if (ktime_to_ns(timeout) < 0)
357 return 0; 357 return 0;
358 358
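
The amdgpu_gem_timeout() fix computes the remaining wait as (absolute deadline - now); the old operand order (now - deadline) made every future deadline look already expired. The same arithmetic with CLOCK_MONOTONIC nanoseconds standing in for ktime:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

int main(void)
{
	int64_t deadline = now_ns() + 500000000;	/* 0.5 s in the future */

	int64_t remaining = deadline - now_ns();	/* fixed order: > 0 */
	int64_t swapped = now_ns() - deadline;		/* old order: < 0  */

	printf("remaining=%lld swapped=%lld\n",
	       (long long)remaining, (long long)swapped);
	return 0;
}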
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 5cde635978f9..6e77964f1b64 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3403,19 +3403,25 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
3403 3403
3404 switch (entry->src_data) { 3404 switch (entry->src_data) {
3405 case 0: /* vblank */ 3405 case 0: /* vblank */
3406 if (disp_int & interrupt_status_offsets[crtc].vblank) { 3406 if (disp_int & interrupt_status_offsets[crtc].vblank)
3407 dce_v10_0_crtc_vblank_int_ack(adev, crtc); 3407 dce_v10_0_crtc_vblank_int_ack(adev, crtc);
3408 if (amdgpu_irq_enabled(adev, source, irq_type)) { 3408 else
3409 drm_handle_vblank(adev->ddev, crtc); 3409 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3410 } 3410
3411 DRM_DEBUG("IH: D%d vblank\n", crtc + 1); 3411 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3412 drm_handle_vblank(adev->ddev, crtc);
3412 } 3413 }
3414 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3415
3413 break; 3416 break;
3414 case 1: /* vline */ 3417 case 1: /* vline */
3415 if (disp_int & interrupt_status_offsets[crtc].vline) { 3418 if (disp_int & interrupt_status_offsets[crtc].vline)
3416 dce_v10_0_crtc_vline_int_ack(adev, crtc); 3419 dce_v10_0_crtc_vline_int_ack(adev, crtc);
3417 DRM_DEBUG("IH: D%d vline\n", crtc + 1); 3420 else
3418 } 3421 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3422
3423 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3424
3419 break; 3425 break;
3420 default: 3426 default:
3421 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); 3427 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 95efd98b202d..7f7abb0e0be5 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3402,19 +3402,25 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
3402 3402
3403 switch (entry->src_data) { 3403 switch (entry->src_data) {
3404 case 0: /* vblank */ 3404 case 0: /* vblank */
3405 if (disp_int & interrupt_status_offsets[crtc].vblank) { 3405 if (disp_int & interrupt_status_offsets[crtc].vblank)
3406 dce_v11_0_crtc_vblank_int_ack(adev, crtc); 3406 dce_v11_0_crtc_vblank_int_ack(adev, crtc);
3407 if (amdgpu_irq_enabled(adev, source, irq_type)) { 3407 else
3408 drm_handle_vblank(adev->ddev, crtc); 3408 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3409 } 3409
3410 DRM_DEBUG("IH: D%d vblank\n", crtc + 1); 3410 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3411 drm_handle_vblank(adev->ddev, crtc);
3411 } 3412 }
3413 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3414
3412 break; 3415 break;
3413 case 1: /* vline */ 3416 case 1: /* vline */
3414 if (disp_int & interrupt_status_offsets[crtc].vline) { 3417 if (disp_int & interrupt_status_offsets[crtc].vline)
3415 dce_v11_0_crtc_vline_int_ack(adev, crtc); 3418 dce_v11_0_crtc_vline_int_ack(adev, crtc);
3416 DRM_DEBUG("IH: D%d vline\n", crtc + 1); 3419 else
3417 } 3420 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3421
3422 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3423
3418 break; 3424 break;
3419 default: 3425 default:
3420 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); 3426 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index aaca8d663f2c..08387dfd98a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -3237,19 +3237,25 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3237 3237
3238 switch (entry->src_data) { 3238 switch (entry->src_data) {
3239 case 0: /* vblank */ 3239 case 0: /* vblank */
3240 if (disp_int & interrupt_status_offsets[crtc].vblank) { 3240 if (disp_int & interrupt_status_offsets[crtc].vblank)
3241 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK); 3241 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3242 if (amdgpu_irq_enabled(adev, source, irq_type)) { 3242 else
3243 drm_handle_vblank(adev->ddev, crtc); 3243 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3244 } 3244
3245 DRM_DEBUG("IH: D%d vblank\n", crtc + 1); 3245 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3246 drm_handle_vblank(adev->ddev, crtc);
3246 } 3247 }
3248 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3249
3247 break; 3250 break;
3248 case 1: /* vline */ 3251 case 1: /* vline */
3249 if (disp_int & interrupt_status_offsets[crtc].vline) { 3252 if (disp_int & interrupt_status_offsets[crtc].vline)
3250 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK); 3253 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3251 DRM_DEBUG("IH: D%d vline\n", crtc + 1); 3254 else
3252 } 3255 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3256
3257 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3258
3253 break; 3259 break;
3254 default: 3260 default:
3255 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); 3261 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
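
The three dce_v*_0 IRQ hunks above share one restructure: the vblank/vline status bit is acked when asserted (with a debug note when it is not), and the event is then delivered unconditionally instead of being nested inside the status check. The resulting control flow, reduced to a sketch:

#include <stdio.h>

static void crtc_vblank_irq(int status_bit_set, int crtc)
{
	/* Ack the status bit when asserted; warn (debug) when not. */
	if (status_bit_set)
		printf("D%d: ack vblank status\n", crtc + 1);
	else
		printf("IH: IH event w/o asserted irq bit?\n");

	/* Deliver the event either way: the IH ring reported it. */
	printf("D%d: handle vblank\n", crtc + 1);
}

int main(void)
{
	crtc_vblank_irq(1, 0);
	crtc_vblank_irq(0, 0);
	return 0;
}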
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 8a1f999daa24..9be007081b72 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -420,6 +420,12 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
420 pqm_uninit(&p->pqm); 420 pqm_uninit(&p->pqm);
421 421
422 pdd = kfd_get_process_device_data(dev, p); 422 pdd = kfd_get_process_device_data(dev, p);
423
424 if (!pdd) {
425 mutex_unlock(&p->mutex);
426 return;
427 }
428
423 if (pdd->reset_wavefronts) { 429 if (pdd->reset_wavefronts) {
424 dbgdev_wave_reset_wavefronts(pdd->dev, p); 430 dbgdev_wave_reset_wavefronts(pdd->dev, p);
425 pdd->reset_wavefronts = false; 431 pdd->reset_wavefronts = false;
@@ -431,8 +437,7 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
431 * We don't call amd_iommu_unbind_pasid() here 437 * We don't call amd_iommu_unbind_pasid() here
432 * because the IOMMU called us. 438 * because the IOMMU called us.
433 */ 439 */
434 if (pdd) 440 pdd->bound = false;
435 pdd->bound = false;
436 441
437 mutex_unlock(&p->mutex); 442 mutex_unlock(&p->mutex);
438} 443}
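
The kfd fix moves the NULL check ahead of the first use: the old code dereferenced pdd->reset_wavefronts and only tested 'if (pdd)' afterwards. A minimal shape of check-then-use with the mutex released on the early exit; struct pdd and unbind() are illustrative:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct pdd {
	int reset_wavefronts;
};

static pthread_mutex_t plock = PTHREAD_MUTEX_INITIALIZER;

static void unbind(struct pdd *pdd)
{
	pthread_mutex_lock(&plock);

	if (!pdd) {			/* check before any dereference */
		pthread_mutex_unlock(&plock);
		return;
	}

	if (pdd->reset_wavefronts)
		printf("resetting wavefronts\n");

	pthread_mutex_unlock(&plock);
}

int main(void)
{
	struct pdd p = { .reset_wavefronts = 1 };

	unbind(NULL);			/* safe: early return */
	unbind(&p);
	return 0;
}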
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8867818b1401..d65cbe6afb92 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -157,9 +157,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
157 struct drm_i915_gem_object *obj; 157 struct drm_i915_gem_object *obj;
158 int ret; 158 int ret;
159 159
160 obj = i915_gem_object_create_stolen(dev, size); 160 obj = i915_gem_alloc_object(dev, size);
161 if (obj == NULL)
162 obj = i915_gem_alloc_object(dev, size);
163 if (obj == NULL) 161 if (obj == NULL)
164 return ERR_PTR(-ENOMEM); 162 return ERR_PTR(-ENOMEM);
165 163
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9daa2883ac18..dcc6a88c560e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2546,6 +2546,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
2546 struct drm_i915_private *dev_priv = dev->dev_private; 2546 struct drm_i915_private *dev_priv = dev->dev_private;
2547 struct drm_i915_gem_object *obj; 2547 struct drm_i915_gem_object *obj;
2548 struct i915_address_space *vm; 2548 struct i915_address_space *vm;
2549 struct i915_vma *vma;
2550 bool flush;
2549 2551
2550 i915_check_and_clear_faults(dev); 2552 i915_check_and_clear_faults(dev);
2551 2553
@@ -2555,16 +2557,23 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
2555 dev_priv->gtt.base.total, 2557 dev_priv->gtt.base.total,
2556 true); 2558 true);
2557 2559
2560 /* Cache flush objects bound into GGTT and rebind them. */
2561 vm = &dev_priv->gtt.base;
2558 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 2562 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
2559 struct i915_vma *vma = i915_gem_obj_to_vma(obj, 2563 flush = false;
2560 &dev_priv->gtt.base); 2564 list_for_each_entry(vma, &obj->vma_list, vma_link) {
2561 if (!vma) 2565 if (vma->vm != vm)
2562 continue; 2566 continue;
2563 2567
2564 i915_gem_clflush_object(obj, obj->pin_display); 2568 WARN_ON(i915_vma_bind(vma, obj->cache_level,
2565 WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE)); 2569 PIN_UPDATE));
2566 }
2567 2570
2571 flush = true;
2572 }
2573
2574 if (flush)
2575 i915_gem_clflush_object(obj, obj->pin_display);
2576 }
2568 2577
2569 if (INTEL_INFO(dev)->gen >= 8) { 2578 if (INTEL_INFO(dev)->gen >= 8) {
2570 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) 2579 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
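
The i915 restore path now walks every VMA on the object's vma_list, rebinds those belonging to the GGTT, and clflushes the object once if anything was rebound, rather than rebinding only the first matching VMA. The list walk, reduced to a sketch with an integer vm id:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct vma {
	int vm_id;
	struct vma *next;
};

static void restore_obj(struct vma *vmas, int target_vm)
{
	struct vma *v;
	bool flush = false;

	for (v = vmas; v; v = v->next) {
		if (v->vm_id != target_vm)
			continue;
		printf("rebind vma in vm %d\n", v->vm_id);
		flush = true;
	}

	if (flush)			/* one flush per object, not per vma */
		printf("clflush object\n");
}

int main(void)
{
	struct vma c = { 1, NULL };
	struct vma b = { 0, &c };
	struct vma a = { 0, &b };

	restore_obj(&a, 0);		/* rebinds two vmas, flushes once */
	return 0;
}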
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 633bd1fcab69..d61e74a08f82 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -183,8 +183,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
183 if (IS_GEN4(dev)) { 183 if (IS_GEN4(dev)) {
184 uint32_t ddc2 = I915_READ(DCC2); 184 uint32_t ddc2 = I915_READ(DCC2);
185 185
186 if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) 186 if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) {
187 /* Since the swizzling may vary within an
188 * object, we have no idea what the swizzling
189 * is for any page in particular. Thus we
190 * cannot migrate tiled pages using the GPU,
191 * nor can we tell userspace what the exact
192 * swizzling is for any object.
193 */
187 dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES; 194 dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
195 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
196 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
197 }
188 } 198 }
189 199
190 if (dcc == 0xffffffff) { 200 if (dcc == 0xffffffff) {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1b61f9810387..647b1404c441 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4854,6 +4854,9 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
4854 struct intel_plane *intel_plane; 4854 struct intel_plane *intel_plane;
4855 int pipe = intel_crtc->pipe; 4855 int pipe = intel_crtc->pipe;
4856 4856
4857 if (!intel_crtc->active)
4858 return;
4859
4857 intel_crtc_wait_for_pending_flips(crtc); 4860 intel_crtc_wait_for_pending_flips(crtc);
4858 4861
4859 intel_pre_disable_primary(crtc); 4862 intel_pre_disable_primary(crtc);
@@ -7887,7 +7890,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
7887 int pipe = pipe_config->cpu_transcoder; 7890 int pipe = pipe_config->cpu_transcoder;
7888 enum dpio_channel port = vlv_pipe_to_channel(pipe); 7891 enum dpio_channel port = vlv_pipe_to_channel(pipe);
7889 intel_clock_t clock; 7892 intel_clock_t clock;
7890 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2; 7893 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
7891 int refclk = 100000; 7894 int refclk = 100000;
7892 7895
7893 mutex_lock(&dev_priv->sb_lock); 7896 mutex_lock(&dev_priv->sb_lock);
@@ -7895,10 +7898,13 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
7895 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 7898 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
7896 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 7899 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
7897 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 7900 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
7901 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7898 mutex_unlock(&dev_priv->sb_lock); 7902 mutex_unlock(&dev_priv->sb_lock);
7899 7903
7900 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; 7904 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
7901 clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff); 7905 clock.m2 = (pll_dw0 & 0xff) << 22;
7906 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
7907 clock.m2 |= pll_dw2 & 0x3fffff;
7902 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 7908 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
7903 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 7909 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
7904 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 7910 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index f2daad8c3d96..7841970de48d 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -285,7 +285,7 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
285 285
286 if (wait) { 286 if (wait) {
287 if (!wait_for_completion_timeout(&engine->compl, 287 if (!wait_for_completion_timeout(&engine->compl,
288 msecs_to_jiffies(1))) { 288 msecs_to_jiffies(100))) {
289 dev_err(dmm->dev, "timed out waiting for done\n"); 289 dev_err(dmm->dev, "timed out waiting for done\n");
290 ret = -ETIMEDOUT; 290 ret = -ETIMEDOUT;
291 } 291 }
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index ae2df41f216f..12081e61d45a 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -177,7 +177,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
177 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); 177 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
178struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p); 178struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
179int omap_framebuffer_pin(struct drm_framebuffer *fb); 179int omap_framebuffer_pin(struct drm_framebuffer *fb);
180int omap_framebuffer_unpin(struct drm_framebuffer *fb); 180void omap_framebuffer_unpin(struct drm_framebuffer *fb);
181void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, 181void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
182 struct omap_drm_window *win, struct omap_overlay_info *info); 182 struct omap_drm_window *win, struct omap_overlay_info *info);
183struct drm_connector *omap_framebuffer_get_next_connector( 183struct drm_connector *omap_framebuffer_get_next_connector(
@@ -211,7 +211,7 @@ void omap_gem_dma_sync(struct drm_gem_object *obj,
211 enum dma_data_direction dir); 211 enum dma_data_direction dir);
212int omap_gem_get_paddr(struct drm_gem_object *obj, 212int omap_gem_get_paddr(struct drm_gem_object *obj,
213 dma_addr_t *paddr, bool remap); 213 dma_addr_t *paddr, bool remap);
214int omap_gem_put_paddr(struct drm_gem_object *obj); 214void omap_gem_put_paddr(struct drm_gem_object *obj);
215int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages, 215int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
216 bool remap); 216 bool remap);
217int omap_gem_put_pages(struct drm_gem_object *obj); 217int omap_gem_put_pages(struct drm_gem_object *obj);
@@ -236,7 +236,7 @@ static inline int align_pitch(int pitch, int width, int bpp)
236 /* PVR needs alignment to 8 pixels.. right now that is the most 236 /* PVR needs alignment to 8 pixels.. right now that is the most
237 * restrictive stride requirement.. 237 * restrictive stride requirement..
238 */ 238 */
239 return ALIGN(pitch, 8 * bytespp); 239 return roundup(pitch, 8 * bytespp);
240} 240}
241 241
242/* map crtc to vblank mask */ 242/* map crtc to vblank mask */
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 0b967e76df1a..51b1219af87f 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -287,10 +287,10 @@ fail:
287} 287}
288 288
289/* unpin, no longer being scanned out: */ 289/* unpin, no longer being scanned out: */
290int omap_framebuffer_unpin(struct drm_framebuffer *fb) 290void omap_framebuffer_unpin(struct drm_framebuffer *fb)
291{ 291{
292 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); 292 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
293 int ret, i, n = drm_format_num_planes(fb->pixel_format); 293 int i, n = drm_format_num_planes(fb->pixel_format);
294 294
295 mutex_lock(&omap_fb->lock); 295 mutex_lock(&omap_fb->lock);
296 296
@@ -298,24 +298,16 @@ int omap_framebuffer_unpin(struct drm_framebuffer *fb)
298 298
299 if (omap_fb->pin_count > 0) { 299 if (omap_fb->pin_count > 0) {
300 mutex_unlock(&omap_fb->lock); 300 mutex_unlock(&omap_fb->lock);
301 return 0; 301 return;
302 } 302 }
303 303
304 for (i = 0; i < n; i++) { 304 for (i = 0; i < n; i++) {
305 struct plane *plane = &omap_fb->planes[i]; 305 struct plane *plane = &omap_fb->planes[i];
306 ret = omap_gem_put_paddr(plane->bo); 306 omap_gem_put_paddr(plane->bo);
307 if (ret)
308 goto fail;
309 plane->paddr = 0; 307 plane->paddr = 0;
310 } 308 }
311 309
312 mutex_unlock(&omap_fb->lock); 310 mutex_unlock(&omap_fb->lock);
313
314 return 0;
315
316fail:
317 mutex_unlock(&omap_fb->lock);
318 return ret;
319} 311}
320 312
321struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p) 313struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 23b5a84389e3..720d16bce7e8 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -135,7 +135,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
135 fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled; 135 fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
136 if (fbdev->ywrap_enabled) { 136 if (fbdev->ywrap_enabled) {
137 /* need to align pitch to page size if using DMM scrolling */ 137 /* need to align pitch to page size if using DMM scrolling */
138 mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE); 138 mode_cmd.pitches[0] = PAGE_ALIGN(mode_cmd.pitches[0]);
139 } 139 }
140 140
141 /* allocate backing bo */ 141 /* allocate backing bo */
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 2ab77801cf5f..7ed08fdc4c42 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -808,10 +808,10 @@ fail:
808/* Release physical address, when DMA is no longer being performed.. this 808/* Release physical address, when DMA is no longer being performed.. this
809 * could potentially unpin and unmap buffers from TILER 809 * could potentially unpin and unmap buffers from TILER
810 */ 810 */
811int omap_gem_put_paddr(struct drm_gem_object *obj) 811void omap_gem_put_paddr(struct drm_gem_object *obj)
812{ 812{
813 struct omap_gem_object *omap_obj = to_omap_bo(obj); 813 struct omap_gem_object *omap_obj = to_omap_bo(obj);
814 int ret = 0; 814 int ret;
815 815
816 mutex_lock(&obj->dev->struct_mutex); 816 mutex_lock(&obj->dev->struct_mutex);
817 if (omap_obj->paddr_cnt > 0) { 817 if (omap_obj->paddr_cnt > 0) {
@@ -821,7 +821,6 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
821 if (ret) { 821 if (ret) {
822 dev_err(obj->dev->dev, 822 dev_err(obj->dev->dev,
823 "could not unpin pages: %d\n", ret); 823 "could not unpin pages: %d\n", ret);
824 goto fail;
825 } 824 }
826 ret = tiler_release(omap_obj->block); 825 ret = tiler_release(omap_obj->block);
827 if (ret) { 826 if (ret) {
@@ -832,9 +831,8 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
832 omap_obj->block = NULL; 831 omap_obj->block = NULL;
833 } 832 }
834 } 833 }
835fail: 834
836 mutex_unlock(&obj->dev->struct_mutex); 835 mutex_unlock(&obj->dev->struct_mutex);
837 return ret;
838} 836}
839 837
840/* Get rotated scanout address (only valid if already pinned), at the 838/* Get rotated scanout address (only valid if already pinned), at the
@@ -1378,11 +1376,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1378 1376
1379 omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL); 1377 omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1380 if (!omap_obj) 1378 if (!omap_obj)
1381 goto fail; 1379 return NULL;
1382
1383 spin_lock(&priv->list_lock);
1384 list_add(&omap_obj->mm_list, &priv->obj_list);
1385 spin_unlock(&priv->list_lock);
1386 1380
1387 obj = &omap_obj->base; 1381 obj = &omap_obj->base;
1388 1382
@@ -1392,11 +1386,19 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1392 */ 1386 */
1393 omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size, 1387 omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
1394 &omap_obj->paddr, GFP_KERNEL); 1388 &omap_obj->paddr, GFP_KERNEL);
1395 if (omap_obj->vaddr) 1389 if (!omap_obj->vaddr) {
1396 flags |= OMAP_BO_DMA; 1390 kfree(omap_obj);
1391
1392 return NULL;
1393 }
1397 1394
1395 flags |= OMAP_BO_DMA;
1398 } 1396 }
1399 1397
1398 spin_lock(&priv->list_lock);
1399 list_add(&omap_obj->mm_list, &priv->obj_list);
1400 spin_unlock(&priv->list_lock);
1401
1400 omap_obj->flags = flags; 1402 omap_obj->flags = flags;
1401 1403
1402 if (flags & OMAP_BO_TILED) { 1404 if (flags & OMAP_BO_TILED) {
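
Beyond turning omap_gem_put_paddr() into a void function, omap_gem_new() above now frees the object and returns NULL when dma_alloc_writecombine() fails, and adds the object to priv->obj_list only after every allocation has succeeded, so the error path never has to unlink a half-built object. The ordering as a sketch; obj_new() and its list are invented:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	void *buf;
	struct obj *next;
};

static struct obj *obj_list;		/* stands in for priv->obj_list */

static struct obj *obj_new(size_t size, int force_fail)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;

	o->buf = force_fail ? NULL : malloc(size);	/* simulated dma alloc */
	if (!o->buf) {
		free(o);		/* never listed: a plain free suffices */
		return NULL;
	}

	o->next = obj_list;		/* publish only once fully built */
	obj_list = o;
	return o;
}

int main(void)
{
	printf("failing alloc: %p\n", (void *)obj_new(64, 1));
	printf("working alloc: %p\n", (void *)obj_new(64, 0));
	return 0;
}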
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index cfa8276c4deb..098904696a5c 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -17,6 +17,7 @@
17 * this program. If not, see <http://www.gnu.org/licenses/>. 17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#include <drm/drm_atomic.h>
20#include <drm/drm_atomic_helper.h> 21#include <drm/drm_atomic_helper.h>
21#include <drm/drm_plane_helper.h> 22#include <drm/drm_plane_helper.h>
22 23
@@ -153,9 +154,34 @@ static void omap_plane_atomic_disable(struct drm_plane *plane,
153 dispc_ovl_enable(omap_plane->id, false); 154 dispc_ovl_enable(omap_plane->id, false);
154} 155}
155 156
157static int omap_plane_atomic_check(struct drm_plane *plane,
158 struct drm_plane_state *state)
159{
160 struct drm_crtc_state *crtc_state;
161
162 if (!state->crtc)
163 return 0;
164
165 crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
166 if (IS_ERR(crtc_state))
167 return PTR_ERR(crtc_state);
168
169 if (state->crtc_x < 0 || state->crtc_y < 0)
170 return -EINVAL;
171
172 if (state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay)
173 return -EINVAL;
174
175 if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
176 return -EINVAL;
177
178 return 0;
179}
180
156static const struct drm_plane_helper_funcs omap_plane_helper_funcs = { 181static const struct drm_plane_helper_funcs omap_plane_helper_funcs = {
157 .prepare_fb = omap_plane_prepare_fb, 182 .prepare_fb = omap_plane_prepare_fb,
158 .cleanup_fb = omap_plane_cleanup_fb, 183 .cleanup_fb = omap_plane_cleanup_fb,
184 .atomic_check = omap_plane_atomic_check,
159 .atomic_update = omap_plane_atomic_update, 185 .atomic_update = omap_plane_atomic_update,
160 .atomic_disable = omap_plane_atomic_disable, 186 .atomic_disable = omap_plane_atomic_disable,
161}; 187};
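
The new omap_plane_atomic_check() rejects geometry that falls outside the CRTC's adjusted mode before the commit phase. The same bounds test in isolation; struct rect and plane_check() are illustrative:

#include <errno.h>
#include <stdio.h>

struct rect {
	int x, y, w, h;
};

static int plane_check(struct rect p, int hdisplay, int vdisplay)
{
	if (p.x < 0 || p.y < 0)
		return -EINVAL;
	if (p.x + p.w > hdisplay)
		return -EINVAL;
	if (p.y + p.h > vdisplay)
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct rect fits = { 0, 0, 1920, 1080 };
	struct rect spills = { 100, 0, 1920, 1080 };

	printf("fits:   %d\n", plane_check(fits, 1920, 1080));
	printf("spills: %d\n", plane_check(spills, 1920, 1080));
	return 0;
}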
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 4ecf5caa8c6d..248953d2fdb7 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7964,23 +7964,27 @@ restart_ih:
7964 case 1: /* D1 vblank/vline */ 7964 case 1: /* D1 vblank/vline */
7965 switch (src_data) { 7965 switch (src_data) {
7966 case 0: /* D1 vblank */ 7966 case 0: /* D1 vblank */
7967 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) { 7967 if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))
7968 if (rdev->irq.crtc_vblank_int[0]) { 7968 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
7969 drm_handle_vblank(rdev->ddev, 0); 7969
7970 rdev->pm.vblank_sync = true; 7970 if (rdev->irq.crtc_vblank_int[0]) {
7971 wake_up(&rdev->irq.vblank_queue); 7971 drm_handle_vblank(rdev->ddev, 0);
7972 } 7972 rdev->pm.vblank_sync = true;
7973 if (atomic_read(&rdev->irq.pflip[0])) 7973 wake_up(&rdev->irq.vblank_queue);
7974 radeon_crtc_handle_vblank(rdev, 0);
7975 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
7976 DRM_DEBUG("IH: D1 vblank\n");
7977 } 7974 }
7975 if (atomic_read(&rdev->irq.pflip[0]))
7976 radeon_crtc_handle_vblank(rdev, 0);
7977 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
7978 DRM_DEBUG("IH: D1 vblank\n");
7979
7978 break; 7980 break;
7979 case 1: /* D1 vline */ 7981 case 1: /* D1 vline */
7980 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) { 7982 if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))
7981 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT; 7983 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
7982 DRM_DEBUG("IH: D1 vline\n"); 7984
7983 } 7985 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
7986 DRM_DEBUG("IH: D1 vline\n");
7987
7984 break; 7988 break;
7985 default: 7989 default:
7986 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 7990 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -7990,23 +7994,27 @@ restart_ih:
7990 case 2: /* D2 vblank/vline */ 7994 case 2: /* D2 vblank/vline */
7991 switch (src_data) { 7995 switch (src_data) {
7992 case 0: /* D2 vblank */ 7996 case 0: /* D2 vblank */
7993 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { 7997 if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
7994 if (rdev->irq.crtc_vblank_int[1]) { 7998 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
7995 drm_handle_vblank(rdev->ddev, 1); 7999
7996 rdev->pm.vblank_sync = true; 8000 if (rdev->irq.crtc_vblank_int[1]) {
7997 wake_up(&rdev->irq.vblank_queue); 8001 drm_handle_vblank(rdev->ddev, 1);
7998 } 8002 rdev->pm.vblank_sync = true;
7999 if (atomic_read(&rdev->irq.pflip[1])) 8003 wake_up(&rdev->irq.vblank_queue);
8000 radeon_crtc_handle_vblank(rdev, 1);
8001 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
8002 DRM_DEBUG("IH: D2 vblank\n");
8003 } 8004 }
8005 if (atomic_read(&rdev->irq.pflip[1]))
8006 radeon_crtc_handle_vblank(rdev, 1);
8007 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
8008 DRM_DEBUG("IH: D2 vblank\n");
8009
8004 break; 8010 break;
8005 case 1: /* D2 vline */ 8011 case 1: /* D2 vline */
8006 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) { 8012 if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))
8007 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; 8013 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8008 DRM_DEBUG("IH: D2 vline\n"); 8014
8009 } 8015 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
8016 DRM_DEBUG("IH: D2 vline\n");
8017
8010 break; 8018 break;
8011 default: 8019 default:
8012 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 8020 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8016,23 +8024,27 @@ restart_ih:
8016 case 3: /* D3 vblank/vline */ 8024 case 3: /* D3 vblank/vline */
8017 switch (src_data) { 8025 switch (src_data) {
8018 case 0: /* D3 vblank */ 8026 case 0: /* D3 vblank */
8019 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { 8027 if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
8020 if (rdev->irq.crtc_vblank_int[2]) { 8028 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8021 drm_handle_vblank(rdev->ddev, 2); 8029
8022 rdev->pm.vblank_sync = true; 8030 if (rdev->irq.crtc_vblank_int[2]) {
8023 wake_up(&rdev->irq.vblank_queue); 8031 drm_handle_vblank(rdev->ddev, 2);
8024 } 8032 rdev->pm.vblank_sync = true;
8025 if (atomic_read(&rdev->irq.pflip[2])) 8033 wake_up(&rdev->irq.vblank_queue);
8026 radeon_crtc_handle_vblank(rdev, 2);
8027 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
8028 DRM_DEBUG("IH: D3 vblank\n");
8029 } 8034 }
8035 if (atomic_read(&rdev->irq.pflip[2]))
8036 radeon_crtc_handle_vblank(rdev, 2);
8037 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
8038 DRM_DEBUG("IH: D3 vblank\n");
8039
8030 break; 8040 break;
8031 case 1: /* D3 vline */ 8041 case 1: /* D3 vline */
8032 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { 8042 if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
8033 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; 8043 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8034 DRM_DEBUG("IH: D3 vline\n"); 8044
8035 } 8045 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
8046 DRM_DEBUG("IH: D3 vline\n");
8047
8036 break; 8048 break;
8037 default: 8049 default:
8038 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 8050 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8042,23 +8054,27 @@ restart_ih:
8042 case 4: /* D4 vblank/vline */ 8054 case 4: /* D4 vblank/vline */
8043 switch (src_data) { 8055 switch (src_data) {
8044 case 0: /* D4 vblank */ 8056 case 0: /* D4 vblank */
8045 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { 8057 if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
8046 if (rdev->irq.crtc_vblank_int[3]) { 8058 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8047 drm_handle_vblank(rdev->ddev, 3); 8059
8048 rdev->pm.vblank_sync = true; 8060 if (rdev->irq.crtc_vblank_int[3]) {
8049 wake_up(&rdev->irq.vblank_queue); 8061 drm_handle_vblank(rdev->ddev, 3);
8050 } 8062 rdev->pm.vblank_sync = true;
8051 if (atomic_read(&rdev->irq.pflip[3])) 8063 wake_up(&rdev->irq.vblank_queue);
8052 radeon_crtc_handle_vblank(rdev, 3);
8053 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
8054 DRM_DEBUG("IH: D4 vblank\n");
8055 } 8064 }
8065 if (atomic_read(&rdev->irq.pflip[3]))
8066 radeon_crtc_handle_vblank(rdev, 3);
8067 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
8068 DRM_DEBUG("IH: D4 vblank\n");
8069
8056 break; 8070 break;
8057 case 1: /* D4 vline */ 8071 case 1: /* D4 vline */
8058 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { 8072 if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
8059 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; 8073 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8060 DRM_DEBUG("IH: D4 vline\n"); 8074
8061 } 8075 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
8076 DRM_DEBUG("IH: D4 vline\n");
8077
8062 break; 8078 break;
8063 default: 8079 default:
8064 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 8080 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8068,23 +8084,27 @@ restart_ih:
8068 case 5: /* D5 vblank/vline */ 8084 case 5: /* D5 vblank/vline */
8069 switch (src_data) { 8085 switch (src_data) {
8070 case 0: /* D5 vblank */ 8086 case 0: /* D5 vblank */
8071 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { 8087 if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
8072 if (rdev->irq.crtc_vblank_int[4]) { 8088 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8073 drm_handle_vblank(rdev->ddev, 4); 8089
8074 rdev->pm.vblank_sync = true; 8090 if (rdev->irq.crtc_vblank_int[4]) {
8075 wake_up(&rdev->irq.vblank_queue); 8091 drm_handle_vblank(rdev->ddev, 4);
8076 } 8092 rdev->pm.vblank_sync = true;
8077 if (atomic_read(&rdev->irq.pflip[4])) 8093 wake_up(&rdev->irq.vblank_queue);
8078 radeon_crtc_handle_vblank(rdev, 4);
8079 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
8080 DRM_DEBUG("IH: D5 vblank\n");
8081 } 8094 }
8095 if (atomic_read(&rdev->irq.pflip[4]))
8096 radeon_crtc_handle_vblank(rdev, 4);
8097 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
8098 DRM_DEBUG("IH: D5 vblank\n");
8099
8082 break; 8100 break;
8083 case 1: /* D5 vline */ 8101 case 1: /* D5 vline */
8084 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { 8102 if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
8085 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; 8103 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8086 DRM_DEBUG("IH: D5 vline\n"); 8104
8087 } 8105 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
8106 DRM_DEBUG("IH: D5 vline\n");
8107
8088 break; 8108 break;
8089 default: 8109 default:
8090 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 8110 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8094,23 +8114,27 @@ restart_ih:
 		case 6: /* D6 vblank/vline */
 			switch (src_data) {
 			case 0: /* D6 vblank */
-				if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[5]) {
-						drm_handle_vblank(rdev->ddev, 5);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[5]))
-						radeon_crtc_handle_vblank(rdev, 5);
-					rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D6 vblank\n");
-				}
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[5]) {
+					drm_handle_vblank(rdev->ddev, 5);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[5]))
+					radeon_crtc_handle_vblank(rdev, 5);
+				rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D6 vblank\n");
+
 				break;
 			case 1: /* D6 vline */
-				if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D6 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D6 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8130,88 +8154,112 @@ restart_ih:
 		case 42: /* HPD hotplug */
 			switch (src_data) {
 			case 0:
-				if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
-					rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD1\n");
-				}
+				if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD1\n");
+
 				break;
 			case 1:
-				if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
-					rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD2\n");
-				}
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD2\n");
+
 				break;
 			case 2:
-				if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
-					rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD3\n");
-				}
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD3\n");
+
 				break;
 			case 3:
-				if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
-					rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD4\n");
-				}
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD4\n");
+
 				break;
 			case 4:
-				if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
-					rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD5\n");
-				}
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD5\n");
+
 				break;
 			case 5:
-				if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-					rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD6\n");
-				}
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD6\n");
+
 				break;
 			case 6:
-				if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
-					rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 1\n");
-				}
+				if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 1\n");
+
 				break;
 			case 7:
-				if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
-					rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 2\n");
-				}
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 2\n");
+
 				break;
 			case 8:
-				if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
-					rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 3\n");
-				}
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 3\n");
+
 				break;
 			case 9:
-				if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
-					rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 4\n");
-				}
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 4\n");
+
 				break;
 			case 10:
-				if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
-					rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 5\n");
-				}
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 5\n");
+
 				break;
 			case 11:
-				if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-					rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 6\n");
-				}
+				if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 6\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3a6d483a2c36..0acde1949c18 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4924,7 +4924,7 @@ restart_ih:
 		return IRQ_NONE;

 	rptr = rdev->ih.rptr;
-	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

 	/* Order reading of wptr vs. reading of IH ring data */
 	rmb();
@@ -4942,23 +4942,27 @@ restart_ih:
 		case 1: /* D1 vblank/vline */
 			switch (src_data) {
 			case 0: /* D1 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[0]) {
-						drm_handle_vblank(rdev->ddev, 0);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[0]))
-						radeon_crtc_handle_vblank(rdev, 0);
-					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D1 vblank\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[0]) {
+					drm_handle_vblank(rdev->ddev, 0);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[0]))
+					radeon_crtc_handle_vblank(rdev, 0);
+				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D1 vblank\n");
+
 				break;
 			case 1: /* D1 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D1 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D1 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4968,23 +4972,27 @@ restart_ih:
 		case 2: /* D2 vblank/vline */
 			switch (src_data) {
 			case 0: /* D2 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[1]) {
-						drm_handle_vblank(rdev->ddev, 1);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[1]))
-						radeon_crtc_handle_vblank(rdev, 1);
-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D2 vblank\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[1]) {
+					drm_handle_vblank(rdev->ddev, 1);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[1]))
+					radeon_crtc_handle_vblank(rdev, 1);
+				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D2 vblank\n");
+
 				break;
 			case 1: /* D2 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D2 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D2 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4994,23 +5002,27 @@ restart_ih:
 		case 3: /* D3 vblank/vline */
 			switch (src_data) {
 			case 0: /* D3 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[2]) {
-						drm_handle_vblank(rdev->ddev, 2);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[2]))
-						radeon_crtc_handle_vblank(rdev, 2);
-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D3 vblank\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[2]) {
+					drm_handle_vblank(rdev->ddev, 2);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[2]))
+					radeon_crtc_handle_vblank(rdev, 2);
+				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D3 vblank\n");
+
 				break;
 			case 1: /* D3 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D3 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D3 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5020,23 +5032,27 @@ restart_ih:
 		case 4: /* D4 vblank/vline */
 			switch (src_data) {
 			case 0: /* D4 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[3]) {
-						drm_handle_vblank(rdev->ddev, 3);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[3]))
-						radeon_crtc_handle_vblank(rdev, 3);
-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D4 vblank\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[3]) {
+					drm_handle_vblank(rdev->ddev, 3);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[3]))
+					radeon_crtc_handle_vblank(rdev, 3);
+				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D4 vblank\n");
+
 				break;
 			case 1: /* D4 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D4 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D4 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5046,23 +5062,27 @@ restart_ih:
 		case 5: /* D5 vblank/vline */
 			switch (src_data) {
 			case 0: /* D5 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[4]) {
-						drm_handle_vblank(rdev->ddev, 4);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[4]))
-						radeon_crtc_handle_vblank(rdev, 4);
-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D5 vblank\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[4]) {
+					drm_handle_vblank(rdev->ddev, 4);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[4]))
+					radeon_crtc_handle_vblank(rdev, 4);
+				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D5 vblank\n");
+
 				break;
 			case 1: /* D5 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D5 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D5 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5072,23 +5092,27 @@ restart_ih:
 		case 6: /* D6 vblank/vline */
 			switch (src_data) {
 			case 0: /* D6 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[5]) {
-						drm_handle_vblank(rdev->ddev, 5);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[5]))
-						radeon_crtc_handle_vblank(rdev, 5);
-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D6 vblank\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[5]) {
+					drm_handle_vblank(rdev->ddev, 5);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[5]))
+					radeon_crtc_handle_vblank(rdev, 5);
+				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D6 vblank\n");
+
 				break;
 			case 1: /* D6 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D6 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D6 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5108,88 +5132,100 @@ restart_ih:
 		case 42: /* HPD hotplug */
 			switch (src_data) {
 			case 0:
-				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD1\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD1\n");
 				break;
 			case 1:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD2\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD2\n");
 				break;
 			case 2:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD3\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD3\n");
 				break;
 			case 3:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD4\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD4\n");
 				break;
 			case 4:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD5\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD5\n");
 				break;
 			case 5:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD6\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD6\n");
 				break;
 			case 6:
-				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 1\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 1\n");
 				break;
 			case 7:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 2\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 2\n");
 				break;
 			case 8:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 3\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 3\n");
 				break;
 			case 9:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 4\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 4\n");
 				break;
 			case 10:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 5\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 5\n");
 				break;
 			case 11:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 6\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 6\n");
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5199,46 +5235,52 @@ restart_ih:
 		case 44: /* hdmi */
 			switch (src_data) {
 			case 0:
-				if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI0\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI0\n");
 				break;
 			case 1:
-				if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI1\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI1\n");
 				break;
 			case 2:
-				if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI2\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI2\n");
 				break;
 			case 3:
-				if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI3\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI3\n");
 				break;
 			case 4:
-				if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI4\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI4\n");
 				break;
 			case 5:
-				if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI5\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI5\n");
 				break;
 			default:
 				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
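
Every hunk above applies the same mechanical inversion: the status-register test that used to guard the whole handler becomes an early diagnostic, and the acknowledge-and-handle sequence then runs unconditionally, so an IH ring entry is never silently dropped. A minimal standalone C sketch of the resulting shape (the bit value, the latched disp_int variable and handle_vblank() below are illustrative stand-ins, not the driver's real state):

#include <stdio.h>
#include <stdint.h>

#define LB_D1_VBLANK_INTERRUPT	(1u << 3)	/* placeholder bit, not the real layout */

static uint32_t disp_int;			/* latched copy of the interrupt status */

static void handle_vblank(int crtc)
{
	printf("IH: D%d vblank\n", crtc + 1);
}

/* Old shape: the whole handler was nested inside the status test, so a
 * stray IH ring entry was silently ignored.  New shape: warn about the
 * inconsistency, then always acknowledge and handle the event. */
static void d1_vblank_event(void)
{
	if (!(disp_int & LB_D1_VBLANK_INTERRUPT))
		fprintf(stderr, "IH: IH event w/o asserted irq bit?\n");

	handle_vblank(0);
	disp_int &= ~LB_D1_VBLANK_INTERRUPT;	/* ack the latched status bit */
}

int main(void)
{
	disp_int = LB_D1_VBLANK_INTERRUPT;
	d1_vblank_event();	/* normal case */
	d1_vblank_event();	/* bit already clear: warns, still handles */
	return 0;
}

Compiled on its own, the second call shows the new behaviour: the event is still processed and re-acknowledged; only the bookkeeping inconsistency is logged.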
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8e5aeeb058a5..158872eb78e4 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2162,18 +2162,20 @@ static int cayman_startup(struct radeon_device *rdev)
 		DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
 	}

-	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
-	if (ring->ring_size)
-		r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+	if (rdev->family == CHIP_ARUBA) {
+		ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+		if (ring->ring_size)
+			r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);

-	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
-	if (ring->ring_size)
-		r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+		ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+		if (ring->ring_size)
+			r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);

-	if (!r)
-		r = vce_v1_0_init(rdev);
-	else if (r != -ENOENT)
-		DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+		if (!r)
+			r = vce_v1_0_init(rdev);
+		if (r)
+			DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+	}

 	r = radeon_ib_pool_init(rdev);
 	if (r) {
@@ -2396,7 +2398,8 @@ void cayman_fini(struct radeon_device *rdev)
 	radeon_irq_kms_fini(rdev);
 	uvd_v1_0_fini(rdev);
 	radeon_uvd_fini(rdev);
-	radeon_vce_fini(rdev);
+	if (rdev->family == CHIP_ARUBA)
+		radeon_vce_fini(rdev);
 	cayman_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 35dafd77a639..4ea5b10ff5f4 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -4086,23 +4086,27 @@ restart_ih:
 		case 1: /* D1 vblank/vline */
 			switch (src_data) {
 			case 0: /* D1 vblank */
-				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[0]) {
-						drm_handle_vblank(rdev->ddev, 0);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[0]))
-						radeon_crtc_handle_vblank(rdev, 0);
-					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D1 vblank\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[0]) {
+					drm_handle_vblank(rdev->ddev, 0);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[0]))
+					radeon_crtc_handle_vblank(rdev, 0);
+				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D1 vblank\n");
+
 				break;
 			case 1: /* D1 vline */
-				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D1 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D1 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4112,23 +4116,27 @@ restart_ih:
 		case 5: /* D2 vblank/vline */
 			switch (src_data) {
 			case 0: /* D2 vblank */
-				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[1]) {
-						drm_handle_vblank(rdev->ddev, 1);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[1]))
-						radeon_crtc_handle_vblank(rdev, 1);
-					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D2 vblank\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[1]) {
+					drm_handle_vblank(rdev->ddev, 1);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[1]))
+					radeon_crtc_handle_vblank(rdev, 1);
+				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D2 vblank\n");
+
 				break;
 			case 1: /* D1 vline */
-				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D2 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D2 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4148,46 +4156,53 @@ restart_ih:
 		case 19: /* HPD/DAC hotplug */
 			switch (src_data) {
 			case 0:
-				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD1\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
+					DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD1\n");
 				break;
 			case 1:
-				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD2\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
+					DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD2\n");
 				break;
 			case 4:
-				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD3\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
+					DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD3\n");
 				break;
 			case 5:
-				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD4\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
+					DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD4\n");
 				break;
 			case 10:
-				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD5\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
+					DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD5\n");
 				break;
 			case 12:
-				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD6\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
+					DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD6\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4197,18 +4212,22 @@ restart_ih:
 		case 21: /* hdmi */
 			switch (src_data) {
 			case 4:
-				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI0\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI0\n");
+
 				break;
 			case 5:
-				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI1\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI1\n");
+
 				break;
 			default:
 				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 09e3f39925fa..98f9adaccc3d 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2483,7 +2483,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev,
 	struct drm_buf *buf;
 	u32 *buffer;
 	const u8 __user *data;
-	int size, pass_size;
+	unsigned int size, pass_size;
 	u64 src_offset, dst_offset;

 	if (!radeon_check_offset(dev_priv, tex->offset)) {
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 45e54060ee97..afaf346bd50e 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -91,15 +91,34 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
 	struct radeon_device *rdev = crtc->dev->dev_private;

 	if (ASIC_IS_DCE4(rdev)) {
+		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+		       upper_32_bits(radeon_crtc->cursor_addr));
+		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+		       lower_32_bits(radeon_crtc->cursor_addr));
 		WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
 		WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
 		       EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
 		       EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
 	} else if (ASIC_IS_AVIVO(rdev)) {
+		if (rdev->family >= CHIP_RV770) {
+			if (radeon_crtc->crtc_id)
+				WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(radeon_crtc->cursor_addr));
+			else
+				WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(radeon_crtc->cursor_addr));
+		}
+
+		WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+		       lower_32_bits(radeon_crtc->cursor_addr));
 		WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
 		WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
 		       (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
 	} else {
+		/* offset is from DISP(2)_BASE_ADDRESS */
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+		       radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr);
+
 		switch (radeon_crtc->crtc_id) {
 		case 0:
 			WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
@@ -205,8 +224,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
 		       | (x << 16)
 		       | y));
 		/* offset is from DISP(2)_BASE_ADDRESS */
-		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
-								      (yorigin * 256)));
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+		       radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr +
+		       yorigin * 256);
 	}

 	radeon_crtc->cursor_x = x;
@@ -227,53 +247,6 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
 	return ret;
 }

-static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
-{
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-	struct radeon_device *rdev = crtc->dev->dev_private;
-	struct radeon_bo *robj = gem_to_radeon_bo(obj);
-	uint64_t gpu_addr;
-	int ret;
-
-	ret = radeon_bo_reserve(robj, false);
-	if (unlikely(ret != 0))
-		goto fail;
-	/* Only 27 bit offset for legacy cursor */
-	ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
-				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
-				       &gpu_addr);
-	radeon_bo_unreserve(robj);
-	if (ret)
-		goto fail;
-
-	if (ASIC_IS_DCE4(rdev)) {
-		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
-		       upper_32_bits(gpu_addr));
-		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
-		       gpu_addr & 0xffffffff);
-	} else if (ASIC_IS_AVIVO(rdev)) {
-		if (rdev->family >= CHIP_RV770) {
-			if (radeon_crtc->crtc_id)
-				WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
-			else
-				WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
-		}
-		WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
-		       gpu_addr & 0xffffffff);
-	} else {
-		radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
-		/* offset is from DISP(2)_BASE_ADDRESS */
-		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
-	}
-
-	return 0;
-
-fail:
-	drm_gem_object_unreference_unlocked(obj);
-
-	return ret;
-}
-
 int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
 			    struct drm_file *file_priv,
 			    uint32_t handle,
@@ -283,7 +256,9 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
 			    int32_t hot_y)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
 	struct drm_gem_object *obj;
+	struct radeon_bo *robj;
 	int ret;

 	if (!handle) {
@@ -305,6 +280,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
 		return -ENOENT;
 	}

+	robj = gem_to_radeon_bo(obj);
+	ret = radeon_bo_reserve(robj, false);
+	if (ret != 0) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+	/* Only 27 bit offset for legacy cursor */
+	ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+				       &radeon_crtc->cursor_addr);
+	radeon_bo_unreserve(robj);
+	if (ret) {
+		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+
 	radeon_crtc->cursor_width = width;
 	radeon_crtc->cursor_height = height;

@@ -323,13 +315,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
 		radeon_crtc->cursor_hot_y = hot_y;
 	}

-	ret = radeon_set_cursor(crtc, obj);
-
-	if (ret)
-		DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
-			  ret);
-	else
-		radeon_show_cursor(crtc);
+	radeon_show_cursor(crtc);

 	radeon_lock_cursor(crtc, false);

@@ -341,8 +327,7 @@ unpin:
 			radeon_bo_unpin(robj);
 			radeon_bo_unreserve(robj);
 		}
-		if (radeon_crtc->cursor_bo != obj)
-			drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+		drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
 	}

 	radeon_crtc->cursor_bo = obj;
@@ -360,7 +345,6 @@ unpin:
 void radeon_cursor_reset(struct drm_crtc *crtc)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-	int ret;

 	if (radeon_crtc->cursor_bo) {
 		radeon_lock_cursor(crtc, true);
@@ -368,12 +352,7 @@ void radeon_cursor_reset(struct drm_crtc *crtc)
 		radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
 					  radeon_crtc->cursor_y);

-		ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
-		if (ret)
-			DRM_ERROR("radeon_set_cursor returned %d, not showing "
-				  "cursor\n", ret);
-		else
-			radeon_show_cursor(crtc);
+		radeon_show_cursor(crtc);

 		radeon_lock_cursor(crtc, false);
 	}
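
The net effect of the radeon_cursor.c rework: the BO is pinned once in radeon_crtc_cursor_set2() and the pinned GPU address is cached in radeon_crtc->cursor_addr, so radeon_show_cursor() and radeon_cursor_reset() only reprogram registers from that cache, and the old per-show repin path (radeon_set_cursor) disappears. A toy C sketch of that cache-then-reprogram split (struct and function names here are simplified stand-ins, not the driver's API):

#include <stdint.h>
#include <stdio.h>

struct crtc {
	uint64_t cursor_addr;	/* cached when the BO is pinned */
};

/* Pin once and remember the address (stand-in for radeon_bo_pin_restricted). */
static int cursor_pin(struct crtc *c, uint64_t gpu_addr)
{
	c->cursor_addr = gpu_addr;
	return 0;
}

/* Reprogramming needs no BO reservation any more: it only reads the cache. */
static void cursor_show(const struct crtc *c)
{
	printf("WREG32 CUR_SURFACE_ADDRESS_HIGH = 0x%08x\n",
	       (unsigned)(c->cursor_addr >> 32));
	printf("WREG32 CUR_SURFACE_ADDRESS      = 0x%08x\n",
	       (unsigned)c->cursor_addr);
}

int main(void)
{
	struct crtc c = { 0 };
	cursor_pin(&c, 0x100200000ull);
	cursor_show(&c);	/* set2 path */
	cursor_show(&c);	/* reset/resume path: same cached address */
	return 0;
}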
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 2593b1168bd6..d8319dae8358 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1080,6 +1080,22 @@ static bool radeon_check_pot_argument(int arg)
 }

 /**
+ * Determine a sensible default GART size according to ASIC family.
+ *
+ * @family ASIC family name
+ */
+static int radeon_gart_size_auto(enum radeon_family family)
+{
+	/* default to a larger gart size on newer asics */
+	if (family >= CHIP_TAHITI)
+		return 2048;
+	else if (family >= CHIP_RV770)
+		return 1024;
+	else
+		return 512;
+}
+
+/**
  * radeon_check_arguments - validate module params
  *
  * @rdev: radeon_device pointer
@@ -1097,27 +1113,17 @@ static void radeon_check_arguments(struct radeon_device *rdev)
 	}

 	if (radeon_gart_size == -1) {
-		/* default to a larger gart size on newer asics */
-		if (rdev->family >= CHIP_RV770)
-			radeon_gart_size = 1024;
-		else
-			radeon_gart_size = 512;
+		radeon_gart_size = radeon_gart_size_auto(rdev->family);
 	}
 	/* gtt size must be power of two and greater or equal to 32M */
 	if (radeon_gart_size < 32) {
 		dev_warn(rdev->dev, "gart size (%d) too small\n",
 			 radeon_gart_size);
-		if (rdev->family >= CHIP_RV770)
-			radeon_gart_size = 1024;
-		else
-			radeon_gart_size = 512;
+		radeon_gart_size = radeon_gart_size_auto(rdev->family);
 	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
 		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
 			 radeon_gart_size);
-		if (rdev->family >= CHIP_RV770)
-			radeon_gart_size = 1024;
-		else
-			radeon_gart_size = 512;
+		radeon_gart_size = radeon_gart_size_auto(rdev->family);
 	}
 	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

@@ -1572,11 +1578,21 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 	}

-	/* unpin the front buffers */
+	/* unpin the front buffers and cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
 		struct radeon_bo *robj;

+		if (radeon_crtc->cursor_bo) {
+			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+			r = radeon_bo_reserve(robj, false);
+			if (r == 0) {
+				radeon_bo_unpin(robj);
+				radeon_bo_unreserve(robj);
+			}
+		}
+
 		if (rfb == NULL || rfb->obj == NULL) {
 			continue;
 		}
@@ -1639,6 +1655,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 {
 	struct drm_connector *connector;
 	struct radeon_device *rdev = dev->dev_private;
+	struct drm_crtc *crtc;
 	int r;

 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1678,6 +1695,27 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)

 	radeon_restore_bios_scratch_regs(rdev);

+	/* pin cursors */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+		if (radeon_crtc->cursor_bo) {
+			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+			r = radeon_bo_reserve(robj, false);
+			if (r == 0) {
+				/* Only 27 bit offset for legacy cursor */
+				r = radeon_bo_pin_restricted(robj,
+							     RADEON_GEM_DOMAIN_VRAM,
+							     ASIC_IS_AVIVO(rdev) ?
+							     0 : 1 << 27,
+							     &radeon_crtc->cursor_addr);
+				if (r != 0)
+					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+				radeon_bo_unreserve(robj);
+			}
+		}
+	}
+
 	/* init dig PHYs, disp eng pll */
 	if (rdev->is_atom_bios) {
 		radeon_atom_encoder_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 634793ea8418..aeb676708e60 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -257,6 +257,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
 	}

 	info->par = rfbdev;
+	info->skip_vt_switch = true;

 	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
 	if (ret) {
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ac3c1310b953..013ec7106e55 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -428,7 +428,6 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp)
 {
-	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_busy *args = data;
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
@@ -440,10 +439,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 	robj = gem_to_radeon_bo(gobj);
-	r = radeon_bo_wait(robj, &cur_placement, true);
+
+	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
+	if (r == 0)
+		r = -EBUSY;
+	else
+		r = 0;
+
+	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
 	args->domain = radeon_mem_type_to_domain(cur_placement);
 	drm_gem_object_unreference_unlocked(gobj);
-	r = radeon_gem_handle_lockup(rdev, r);
 	return r;
 }

@@ -471,6 +476,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
471 r = ret; 476 r = ret;
472 477
473 /* Flush HDP cache via MMIO if necessary */ 478 /* Flush HDP cache via MMIO if necessary */
479 cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
474 if (rdev->asic->mmio_hdp_flush && 480 if (rdev->asic->mmio_hdp_flush &&
475 radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) 481 radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
476 robj->rdev->asic->mmio_hdp_flush(rdev); 482 robj->rdev->asic->mmio_hdp_flush(rdev);
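The busy ioctl above becomes a lockless poll: reservation_object_test_signaled_rcu() with test_all set reports whether every fence on the BO has signaled, so a zero result maps to -EBUSY rather than an error code, and the blocking radeon_bo_wait() plus the GPU-lockup translation are gone. Because nothing is locked, the memory type is sampled exactly once with ACCESS_ONCE() and every later use works on that snapshot. A standalone sketch of the snapshot-once idiom, with READ_ONCE_ISH standing in for the kernel macro:

    #include <stdio.h>

    #define READ_ONCE_ISH(x) (*(volatile __typeof__(x) *)&(x))

    static unsigned int mem_type = 2;       /* pretend 2 == VRAM */

    static const char *domain_name(unsigned int t)
    {
        return t == 2 ? "VRAM" : "GTT";
    }

    int main(void)
    {
        /* one load; all derived values agree even if mem_type races */
        unsigned int snap = READ_ONCE_ISH(mem_type);

        printf("placement: %s (%u)\n", domain_name(snap), snap);
        return 0;
    }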
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6de5459316b5..07909d817381 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -343,7 +343,6 @@ struct radeon_crtc {
 	int max_cursor_width;
 	int max_cursor_height;
 	uint32_t legacy_display_base_addr;
-	uint32_t legacy_cursor_offset;
 	enum radeon_rmx_type rmx_type;
 	u8 h_border;
 	u8 v_border;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index ec10533a49b8..48d97c040f49 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -493,38 +493,35 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 	}
 
 	if (bo_va->it.start || bo_va->it.last) {
-		spin_lock(&vm->status_lock);
-		if (list_empty(&bo_va->vm_status)) {
-			/* add a clone of the bo_va to clear the old address */
-			struct radeon_bo_va *tmp;
-			spin_unlock(&vm->status_lock);
-			tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
-			if (!tmp) {
-				mutex_unlock(&vm->mutex);
-				r = -ENOMEM;
-				goto error_unreserve;
-			}
-			tmp->it.start = bo_va->it.start;
-			tmp->it.last = bo_va->it.last;
-			tmp->vm = vm;
-			tmp->bo = radeon_bo_ref(bo_va->bo);
-			spin_lock(&vm->status_lock);
-			list_add(&tmp->vm_status, &vm->freed);
+		/* add a clone of the bo_va to clear the old address */
+		struct radeon_bo_va *tmp;
+		tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+		if (!tmp) {
+			mutex_unlock(&vm->mutex);
+			r = -ENOMEM;
+			goto error_unreserve;
 		}
-		spin_unlock(&vm->status_lock);
+		tmp->it.start = bo_va->it.start;
+		tmp->it.last = bo_va->it.last;
+		tmp->vm = vm;
+		tmp->bo = radeon_bo_ref(bo_va->bo);
 
 		interval_tree_remove(&bo_va->it, &vm->va);
+		spin_lock(&vm->status_lock);
 		bo_va->it.start = 0;
 		bo_va->it.last = 0;
+		list_del_init(&bo_va->vm_status);
+		list_add(&tmp->vm_status, &vm->freed);
+		spin_unlock(&vm->status_lock);
 	}
 
 	if (soffset || eoffset) {
+		spin_lock(&vm->status_lock);
 		bo_va->it.start = soffset;
 		bo_va->it.last = eoffset - 1;
-		interval_tree_insert(&bo_va->it, &vm->va);
-		spin_lock(&vm->status_lock);
 		list_add(&bo_va->vm_status, &vm->cleared);
 		spin_unlock(&vm->status_lock);
+		interval_tree_insert(&bo_va->it, &vm->va);
 	}
 
 	bo_va->flags = flags;
@@ -1158,7 +1155,8 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
 
 	list_for_each_entry(bo_va, &bo->va, bo_list) {
 		spin_lock(&bo_va->vm->status_lock);
-		if (list_empty(&bo_va->vm_status))
+		if (list_empty(&bo_va->vm_status) &&
+		    (bo_va->it.start || bo_va->it.last))
 			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
 		spin_unlock(&bo_va->vm->status_lock);
 	}
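The radeon_vm_bo_set_addr() rework above is driven by one rule: kzalloc(GFP_KERNEL) can sleep, so it must not run under status_lock. The old code dropped and retook the spinlock around the allocation; the new code allocates the clone first and takes the lock only for the list and interval-tree bookkeeping (it also unlinks the stale vm_status entry, which the old path skipped when the entry was already queued). A minimal standalone sketch of the allocate-first, lock-briefly ordering, using pthreads as stand-ins:

    #include <pthread.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
    };

    static struct node *head;
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static int add_node(void)
    {
        struct node *n = malloc(sizeof(*n));    /* may block: done unlocked */

        if (!n)
            return -1;
        pthread_mutex_lock(&list_lock);         /* lock only around the linkage */
        n->next = head;
        head = n;
        pthread_mutex_unlock(&list_lock);
        return 0;
    }

    int main(void)
    {
        return add_node();
    }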
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 26388b5dd6ed..07037e32dea3 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6466,23 +6466,27 @@ restart_ih:
 		case 1: /* D1 vblank/vline */
 			switch (src_data) {
 			case 0: /* D1 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[0]) {
-						drm_handle_vblank(rdev->ddev, 0);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[0]))
-						radeon_crtc_handle_vblank(rdev, 0);
-					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D1 vblank\n");
+				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[0]) {
+					drm_handle_vblank(rdev->ddev, 0);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
 				}
+				if (atomic_read(&rdev->irq.pflip[0]))
+					radeon_crtc_handle_vblank(rdev, 0);
+				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D1 vblank\n");
+
 				break;
 			case 1: /* D1 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D1 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D1 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6492,23 +6496,27 @@ restart_ih:
 		case 2: /* D2 vblank/vline */
 			switch (src_data) {
 			case 0: /* D2 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[1]) {
-						drm_handle_vblank(rdev->ddev, 1);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[1]))
-						radeon_crtc_handle_vblank(rdev, 1);
-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D2 vblank\n");
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[1]) {
+					drm_handle_vblank(rdev->ddev, 1);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
 				}
+				if (atomic_read(&rdev->irq.pflip[1]))
+					radeon_crtc_handle_vblank(rdev, 1);
+				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D2 vblank\n");
+
 				break;
 			case 1: /* D2 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D2 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D2 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6518,23 +6526,27 @@ restart_ih:
 		case 3: /* D3 vblank/vline */
 			switch (src_data) {
 			case 0: /* D3 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[2]) {
-						drm_handle_vblank(rdev->ddev, 2);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[2]))
-						radeon_crtc_handle_vblank(rdev, 2);
-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D3 vblank\n");
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[2]) {
+					drm_handle_vblank(rdev->ddev, 2);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
 				}
+				if (atomic_read(&rdev->irq.pflip[2]))
+					radeon_crtc_handle_vblank(rdev, 2);
+				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D3 vblank\n");
+
 				break;
 			case 1: /* D3 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D3 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D3 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6544,23 +6556,27 @@ restart_ih:
 		case 4: /* D4 vblank/vline */
 			switch (src_data) {
 			case 0: /* D4 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[3]) {
-						drm_handle_vblank(rdev->ddev, 3);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[3]))
-						radeon_crtc_handle_vblank(rdev, 3);
-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D4 vblank\n");
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[3]) {
+					drm_handle_vblank(rdev->ddev, 3);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
 				}
+				if (atomic_read(&rdev->irq.pflip[3]))
+					radeon_crtc_handle_vblank(rdev, 3);
+				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D4 vblank\n");
+
 				break;
 			case 1: /* D4 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D4 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D4 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6570,23 +6586,27 @@ restart_ih:
 		case 5: /* D5 vblank/vline */
 			switch (src_data) {
 			case 0: /* D5 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[4]) {
-						drm_handle_vblank(rdev->ddev, 4);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[4]))
-						radeon_crtc_handle_vblank(rdev, 4);
-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D5 vblank\n");
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[4]) {
+					drm_handle_vblank(rdev->ddev, 4);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
 				}
+				if (atomic_read(&rdev->irq.pflip[4]))
+					radeon_crtc_handle_vblank(rdev, 4);
+				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D5 vblank\n");
+
 				break;
 			case 1: /* D5 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D5 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D5 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6596,23 +6616,27 @@ restart_ih:
 		case 6: /* D6 vblank/vline */
 			switch (src_data) {
 			case 0: /* D6 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[5]) {
-						drm_handle_vblank(rdev->ddev, 5);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[5]))
-						radeon_crtc_handle_vblank(rdev, 5);
-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D6 vblank\n");
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[5]) {
+					drm_handle_vblank(rdev->ddev, 5);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
 				}
+				if (atomic_read(&rdev->irq.pflip[5]))
+					radeon_crtc_handle_vblank(rdev, 5);
+				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D6 vblank\n");
+
 				break;
 			case 1: /* D6 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D6 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D6 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6632,88 +6656,112 @@ restart_ih:
 		case 42: /* HPD hotplug */
 			switch (src_data) {
 			case 0:
-				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD1\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD1\n");
+
 				break;
 			case 1:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD2\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD2\n");
+
 				break;
 			case 2:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD3\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD3\n");
+
 				break;
 			case 3:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD4\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD4\n");
+
 				break;
 			case 4:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD5\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD5\n");
+
 				break;
 			case 5:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD6\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD6\n");
+
 				break;
 			case 6:
-				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 1\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 1\n");
+
 				break;
 			case 7:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 2\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 2\n");
+
 				break;
 			case 8:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 3\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 3\n");
+
 				break;
 			case 9:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 4\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 4\n");
+
 				break;
 			case 10:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 5\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 5\n");
+
 				break;
 			case 11:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 6\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 6\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
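All of the si.c hunks above apply one transformation: instead of handling an IH ring event only when the matching status bit is set (and silently dropping it otherwise), the handler logs a debug message when the bit is missing and then processes the event unconditionally, trusting the ring over the status register and removing one level of nesting. A standalone sketch of the new shape, where status and bit stand in for the disp_int* fields and the LB_*, DC_* masks:

    #include <stdio.h>

    /* Warn when the ring event arrives without its status bit, then
     * handle and acknowledge unconditionally.
     */
    static void handle_event(unsigned int *status, unsigned int bit)
    {
        if (!(*status & bit))
            fprintf(stderr, "IH: IH event w/o asserted irq bit?\n");

        /* event-specific work would run here, unconditionally */
        *status &= ~bit;            /* acknowledge */
    }

    int main(void)
    {
        unsigned int status = 0x1;

        handle_event(&status, 0x1); /* normal: bit set, no warning */
        handle_event(&status, 0x1); /* anomaly: warning, still handled */
        return 0;
    }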
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 35ac23768ce9..577d58d1f1a1 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -633,6 +633,7 @@ config I2C_MPC
633config I2C_MT65XX 633config I2C_MT65XX
634 tristate "MediaTek I2C adapter" 634 tristate "MediaTek I2C adapter"
635 depends on ARCH_MEDIATEK || COMPILE_TEST 635 depends on ARCH_MEDIATEK || COMPILE_TEST
636 depends on HAS_DMA
636 help 637 help
637 This selects the MediaTek(R) Integrated Inter Circuit bus driver 638 This selects the MediaTek(R) Integrated Inter Circuit bus driver
638 for MT65xx and MT81xx. 639 for MT65xx and MT81xx.
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 19b2d689a5ef..f325663c27c5 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -764,12 +764,15 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
764 if (IS_ERR(i2c->clk)) 764 if (IS_ERR(i2c->clk))
765 return PTR_ERR(i2c->clk); 765 return PTR_ERR(i2c->clk);
766 766
767 clk_prepare_enable(i2c->clk); 767 ret = clk_prepare_enable(i2c->clk);
768 if (ret)
769 return ret;
768 770
769 if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", 771 ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
770 &clk_freq)) { 772 &clk_freq);
773 if (ret) {
771 dev_err(&pdev->dev, "clock-frequency not specified in DT"); 774 dev_err(&pdev->dev, "clock-frequency not specified in DT");
772 return clk_freq; 775 goto err;
773 } 776 }
774 777
775 i2c->speed = clk_freq / 1000; 778 i2c->speed = clk_freq / 1000;
@@ -790,10 +793,8 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
790 i2c->irq = platform_get_irq(pdev, 0); 793 i2c->irq = platform_get_irq(pdev, 0);
791 ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0, 794 ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
792 dev_name(&pdev->dev), i2c); 795 dev_name(&pdev->dev), i2c);
793 if (ret) { 796 if (ret)
794 ret = -ENODEV;
795 goto err; 797 goto err;
796 }
797 798
798 ret = i2c_add_adapter(&i2c->adap); 799 ret = i2c_add_adapter(&i2c->adap);
799 if (ret < 0) { 800 if (ret < 0) {
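Two probe-path fixes above: the result of clk_prepare_enable() is now checked, and a failed clock-frequency lookup no longer returns the uninitialized clk_freq value but jumps to the err label so the already-enabled clock is unwound (devm_request_irq() failures likewise propagate the real error instead of a blanket -ENODEV). A standalone sketch of that acquire/check/unwind idiom; enable_clock(), disable_clock() and read_freq() are hypothetical stand-ins:

    #include <stdio.h>

    /* hypothetical stand-ins for clk_prepare_enable() and friends */
    static int enable_clock(void)   { return 0; }
    static void disable_clock(void) { }
    static int read_freq(void)      { return -1; /* simulate missing DT property */ }

    static int probe_sketch(void)
    {
        int ret;

        ret = enable_clock();       /* return value was ignored before the fix */
        if (ret)
            return ret;

        ret = read_freq();          /* old code returned uninitialized data here */
        if (ret)
            goto err;
        return 0;
    err:
        disable_clock();            /* unwind the clock we enabled above */
        return ret;
    }

    int main(void)
    {
        printf("probe_sketch() = %d\n", probe_sketch());
        return 0;
    }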
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index dcca7076231e..1c9cb65ac4cf 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -419,6 +419,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
419 rc = i2c_add_adapter(adapter); 419 rc = i2c_add_adapter(adapter);
420 if (rc) { 420 if (rc) {
421 dev_err(&pdev->dev, "Adapter registeration failed\n"); 421 dev_err(&pdev->dev, "Adapter registeration failed\n");
422 mbox_free_channel(ctx->mbox_chan);
422 return rc; 423 return rc;
423 } 424 }
424 425
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 069a41f116dd..e6d4935161e4 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1012,6 +1012,8 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
1012 */ 1012 */
1013void i2c_unregister_device(struct i2c_client *client) 1013void i2c_unregister_device(struct i2c_client *client)
1014{ 1014{
1015 if (client->dev.of_node)
1016 of_node_clear_flag(client->dev.of_node, OF_POPULATED);
1015 device_unregister(&client->dev); 1017 device_unregister(&client->dev);
1016} 1018}
1017EXPORT_SYMBOL_GPL(i2c_unregister_device); 1019EXPORT_SYMBOL_GPL(i2c_unregister_device);
@@ -1320,8 +1322,11 @@ static void of_i2c_register_devices(struct i2c_adapter *adap)
1320 1322
1321 dev_dbg(&adap->dev, "of_i2c: walking child nodes\n"); 1323 dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
1322 1324
1323 for_each_available_child_of_node(adap->dev.of_node, node) 1325 for_each_available_child_of_node(adap->dev.of_node, node) {
1326 if (of_node_test_and_set_flag(node, OF_POPULATED))
1327 continue;
1324 of_i2c_register_device(adap, node); 1328 of_i2c_register_device(adap, node);
1329 }
1325} 1330}
1326 1331
1327static int of_dev_node_match(struct device *dev, void *data) 1332static int of_dev_node_match(struct device *dev, void *data)
@@ -1853,6 +1858,11 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
1853 if (adap == NULL) 1858 if (adap == NULL)
1854 return NOTIFY_OK; /* not for us */ 1859 return NOTIFY_OK; /* not for us */
1855 1860
1861 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
1862 put_device(&adap->dev);
1863 return NOTIFY_OK;
1864 }
1865
1856 client = of_i2c_register_device(adap, rd->dn); 1866 client = of_i2c_register_device(adap, rd->dn);
1857 put_device(&adap->dev); 1867 put_device(&adap->dev);
1858 1868
@@ -1863,6 +1873,10 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
1863 } 1873 }
1864 break; 1874 break;
1865 case OF_RECONFIG_CHANGE_REMOVE: 1875 case OF_RECONFIG_CHANGE_REMOVE:
1876 /* already depopulated? */
1877 if (!of_node_check_flag(rd->dn, OF_POPULATED))
1878 return NOTIFY_OK;
1879
1866 /* find our device by node */ 1880 /* find our device by node */
1867 client = of_find_i2c_device_by_node(rd->dn); 1881 client = of_find_i2c_device_by_node(rd->dn);
1868 if (client == NULL) 1882 if (client == NULL)
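The OF_POPULATED changes above make I2C child-device creation idempotent: the initial child walk and the dynamic OF-reconfig notifier can both see the same node, so whoever wins the test-and-set creates the device, the loser skips it, and i2c_unregister_device() clears the flag so the node can be populated again later. A standalone sketch of that claim-once protocol using a C11 atomic_flag in place of the OF node flag:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag populated = ATOMIC_FLAG_INIT;

    static void register_from_path(const char *who)
    {
        /* test-and-set returns true if someone already claimed the node */
        if (atomic_flag_test_and_set(&populated)) {
            printf("%s: already populated, skipping\n", who);
            return;
        }
        printf("%s: registering device\n", who);
    }

    int main(void)
    {
        register_from_path("initial walk");       /* wins the claim */
        register_from_path("reconfig notifier");  /* skips the duplicate */
        atomic_flag_clear(&populated);            /* unregister analogue */
        return 0;
    }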
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 62641f2adaf7..5b5f403d8ce6 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -771,7 +771,7 @@ static const struct attribute_group *elan_sysfs_groups[] = {
771 */ 771 */
772static void elan_report_contact(struct elan_tp_data *data, 772static void elan_report_contact(struct elan_tp_data *data,
773 int contact_num, bool contact_valid, 773 int contact_num, bool contact_valid,
774 bool hover_event, u8 *finger_data) 774 u8 *finger_data)
775{ 775{
776 struct input_dev *input = data->input; 776 struct input_dev *input = data->input;
777 unsigned int pos_x, pos_y; 777 unsigned int pos_x, pos_y;
@@ -815,9 +815,7 @@ static void elan_report_contact(struct elan_tp_data *data,
815 input_mt_report_slot_state(input, MT_TOOL_FINGER, true); 815 input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
816 input_report_abs(input, ABS_MT_POSITION_X, pos_x); 816 input_report_abs(input, ABS_MT_POSITION_X, pos_x);
817 input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y); 817 input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y);
818 input_report_abs(input, ABS_MT_DISTANCE, hover_event); 818 input_report_abs(input, ABS_MT_PRESSURE, scaled_pressure);
819 input_report_abs(input, ABS_MT_PRESSURE,
820 hover_event ? 0 : scaled_pressure);
821 input_report_abs(input, ABS_TOOL_WIDTH, mk_x); 819 input_report_abs(input, ABS_TOOL_WIDTH, mk_x);
822 input_report_abs(input, ABS_MT_TOUCH_MAJOR, major); 820 input_report_abs(input, ABS_MT_TOUCH_MAJOR, major);
823 input_report_abs(input, ABS_MT_TOUCH_MINOR, minor); 821 input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
@@ -839,14 +837,14 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
839 hover_event = hover_info & 0x40; 837 hover_event = hover_info & 0x40;
840 for (i = 0; i < ETP_MAX_FINGERS; i++) { 838 for (i = 0; i < ETP_MAX_FINGERS; i++) {
841 contact_valid = tp_info & (1U << (3 + i)); 839 contact_valid = tp_info & (1U << (3 + i));
842 elan_report_contact(data, i, contact_valid, hover_event, 840 elan_report_contact(data, i, contact_valid, finger_data);
843 finger_data);
844 841
845 if (contact_valid) 842 if (contact_valid)
846 finger_data += ETP_FINGER_DATA_LEN; 843 finger_data += ETP_FINGER_DATA_LEN;
847 } 844 }
848 845
849 input_report_key(input, BTN_LEFT, tp_info & 0x01); 846 input_report_key(input, BTN_LEFT, tp_info & 0x01);
847 input_report_abs(input, ABS_DISTANCE, hover_event != 0);
850 input_mt_report_pointer_emulation(input, true); 848 input_mt_report_pointer_emulation(input, true);
851 input_sync(input); 849 input_sync(input);
852} 850}
@@ -922,6 +920,7 @@ static int elan_setup_input_device(struct elan_tp_data *data)
922 input_abs_set_res(input, ABS_Y, data->y_res); 920 input_abs_set_res(input, ABS_Y, data->y_res);
923 input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0); 921 input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0);
924 input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0); 922 input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0);
923 input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0);
925 924
926 /* And MT parameters */ 925 /* And MT parameters */
927 input_set_abs_params(input, ABS_MT_POSITION_X, 0, data->max_x, 0, 0); 926 input_set_abs_params(input, ABS_MT_POSITION_X, 0, data->max_x, 0, 0);
@@ -934,7 +933,6 @@ static int elan_setup_input_device(struct elan_tp_data *data)
934 ETP_FINGER_WIDTH * max_width, 0, 0); 933 ETP_FINGER_WIDTH * max_width, 0, 0);
935 input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 934 input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0,
936 ETP_FINGER_WIDTH * min_width, 0, 0); 935 ETP_FINGER_WIDTH * min_width, 0, 0);
937 input_set_abs_params(input, ABS_MT_DISTANCE, 0, 1, 0, 0);
938 936
939 data->input = input; 937 data->input = input;
940 938
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 35c8d0ceabee..3a32caf06bf1 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1199,7 +1199,7 @@ static void set_input_params(struct psmouse *psmouse,
1199 ABS_MT_POSITION_Y); 1199 ABS_MT_POSITION_Y);
1200 /* Image sensors can report per-contact pressure */ 1200 /* Image sensors can report per-contact pressure */
1201 input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0); 1201 input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
1202 input_mt_init_slots(dev, 3, INPUT_MT_POINTER | INPUT_MT_TRACK); 1202 input_mt_init_slots(dev, 2, INPUT_MT_POINTER | INPUT_MT_TRACK);
1203 1203
1204 /* Image sensors can signal 4 and 5 finger clicks */ 1204 /* Image sensors can signal 4 and 5 finger clicks */
1205 __set_bit(BTN_TOOL_QUADTAP, dev->keybit); 1205 __set_bit(BTN_TOOL_QUADTAP, dev->keybit);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 8d7e1c8b6d56..4dd88264dff5 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1055,7 +1055,7 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
1055 1055
1056 processor = (struct acpi_madt_generic_interrupt *)header; 1056 processor = (struct acpi_madt_generic_interrupt *)header;
1057 1057
1058 if (BAD_MADT_ENTRY(processor, end)) 1058 if (BAD_MADT_GICC_ENTRY(processor, end))
1059 return -EINVAL; 1059 return -EINVAL;
1060 1060
1061 /* 1061 /*
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 4400edd1a6c7..b7d54d428b5e 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -257,16 +257,6 @@ int gic_get_c0_fdc_int(void)
257 return MIPS_CPU_IRQ_BASE + cp0_fdc_irq; 257 return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
258 } 258 }
259 259
260 /*
261 * Some cores claim the FDC is routable but it doesn't actually seem to
262 * be connected.
263 */
264 switch (current_cpu_type()) {
265 case CPU_INTERAPTIV:
266 case CPU_PROAPTIV:
267 return -1;
268 }
269
270 return irq_create_mapping(gic_irq_domain, 260 return irq_create_mapping(gic_irq_domain,
271 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC)); 261 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
272} 262}
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 8911e51d410a..3a27a84ad3ec 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2074,14 +2074,8 @@ static int gpmc_probe_dt(struct platform_device *pdev)
2074 ret = gpmc_probe_nand_child(pdev, child); 2074 ret = gpmc_probe_nand_child(pdev, child);
2075 else if (of_node_cmp(child->name, "onenand") == 0) 2075 else if (of_node_cmp(child->name, "onenand") == 0)
2076 ret = gpmc_probe_onenand_child(pdev, child); 2076 ret = gpmc_probe_onenand_child(pdev, child);
2077 else if (of_node_cmp(child->name, "ethernet") == 0 || 2077 else
2078 of_node_cmp(child->name, "nor") == 0 ||
2079 of_node_cmp(child->name, "uart") == 0)
2080 ret = gpmc_probe_generic_child(pdev, child); 2078 ret = gpmc_probe_generic_child(pdev, child);
2081
2082 if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",
2083 __func__, child->full_name))
2084 of_node_put(child);
2085 } 2079 }
2086 2080
2087 return 0; 2081 return 0;
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 0c77240ae2fc..729e0851167d 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -23,6 +23,7 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
23 23
24 afu = cxl_pci_to_afu(dev); 24 afu = cxl_pci_to_afu(dev);
25 25
26 get_device(&afu->dev);
26 ctx = cxl_context_alloc(); 27 ctx = cxl_context_alloc();
27 if (IS_ERR(ctx)) 28 if (IS_ERR(ctx))
28 return ctx; 29 return ctx;
@@ -31,6 +32,7 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
31 rc = cxl_context_init(ctx, afu, false, NULL); 32 rc = cxl_context_init(ctx, afu, false, NULL);
32 if (rc) { 33 if (rc) {
33 kfree(ctx); 34 kfree(ctx);
35 put_device(&afu->dev);
34 return ERR_PTR(-ENOMEM); 36 return ERR_PTR(-ENOMEM);
35 } 37 }
36 cxl_assign_psn_space(ctx); 38 cxl_assign_psn_space(ctx);
@@ -60,6 +62,8 @@ int cxl_release_context(struct cxl_context *ctx)
60 if (ctx->status != CLOSED) 62 if (ctx->status != CLOSED)
61 return -EBUSY; 63 return -EBUSY;
62 64
65 put_device(&ctx->afu->dev);
66
63 cxl_context_free(ctx); 67 cxl_context_free(ctx);
64 68
65 return 0; 69 return 0;
@@ -159,7 +163,6 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
159 } 163 }
160 164
161 ctx->status = STARTED; 165 ctx->status = STARTED;
162 get_device(&ctx->afu->dev);
163out: 166out:
164 mutex_unlock(&ctx->status_mutex); 167 mutex_unlock(&ctx->status_mutex);
165 return rc; 168 return rc;
@@ -175,12 +178,7 @@ EXPORT_SYMBOL_GPL(cxl_process_element);
175/* Stop a context. Returns 0 on success, otherwise -Errno */ 178/* Stop a context. Returns 0 on success, otherwise -Errno */
176int cxl_stop_context(struct cxl_context *ctx) 179int cxl_stop_context(struct cxl_context *ctx)
177{ 180{
178 int rc; 181 return __detach_context(ctx);
179
180 rc = __detach_context(ctx);
181 if (!rc)
182 put_device(&ctx->afu->dev);
183 return rc;
184} 182}
185EXPORT_SYMBOL_GPL(cxl_stop_context); 183EXPORT_SYMBOL_GPL(cxl_stop_context);
186 184
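The cxl changes above move the AFU device reference so it brackets the context's lifetime: taken in cxl_dev_context_init(), dropped in cxl_release_context(). Tying it to start/stop, as before, could underflow the refcount when a context was released without ever being started. A standalone sketch of the pairing; get_parent() and put_parent() stand in for get_device()/put_device():

    #include <stdlib.h>

    struct parent {
        int refs;
    };

    struct ctx {
        struct parent *p;
    };

    static void get_parent(struct parent *p) { p->refs++; }
    static void put_parent(struct parent *p) { p->refs--; }

    static struct ctx *ctx_init(struct parent *p)
    {
        struct ctx *c = malloc(sizeof(*c));

        if (!c)
            return NULL;
        get_parent(p);              /* taken at creation... */
        c->p = p;
        return c;
    }

    static void ctx_release(struct ctx *c)
    {
        put_parent(c->p);           /* ...dropped exactly once at destruction */
        free(c);
    }

    int main(void)
    {
        struct parent p = { 0 };
        struct ctx *c = ctx_init(&p);

        if (c)
            ctx_release(c);
        return p.refs;              /* 0: balanced whether or not "started" */
    }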
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 2a4c80ac322a..1287148629c0 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -113,11 +113,11 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
113 113
114 if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { 114 if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
115 area = ctx->afu->psn_phys; 115 area = ctx->afu->psn_phys;
116 if (offset > ctx->afu->adapter->ps_size) 116 if (offset >= ctx->afu->adapter->ps_size)
117 return VM_FAULT_SIGBUS; 117 return VM_FAULT_SIGBUS;
118 } else { 118 } else {
119 area = ctx->psn_phys; 119 area = ctx->psn_phys;
120 if (offset > ctx->psn_size) 120 if (offset >= ctx->psn_size)
121 return VM_FAULT_SIGBUS; 121 return VM_FAULT_SIGBUS;
122 } 122 }
123 123
@@ -145,8 +145,16 @@ static const struct vm_operations_struct cxl_mmap_vmops = {
145 */ 145 */
146int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) 146int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
147{ 147{
148 u64 start = vma->vm_pgoff << PAGE_SHIFT;
148 u64 len = vma->vm_end - vma->vm_start; 149 u64 len = vma->vm_end - vma->vm_start;
149 len = min(len, ctx->psn_size); 150
151 if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
152 if (start + len > ctx->afu->adapter->ps_size)
153 return -EINVAL;
154 } else {
155 if (start + len > ctx->psn_size)
156 return -EINVAL;
157 }
150 158
151 if (ctx->afu->current_mode != CXL_MODE_DEDICATED) { 159 if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
152 /* make sure there is a valid per process space for this AFU */ 160 /* make sure there is a valid per process space for this AFU */
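Two bounds fixes above: a fault at offset == size is one byte past the end of the problem-space area, so the comparison becomes >=, and cxl_context_iomap() now rejects any mapping whose window [start, start + len) extends past the area (including via a nonzero vm_pgoff) instead of silently truncating the length. A standalone sketch of both checks (overflow of start + len is ignored here for brevity):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool window_ok(uint64_t start, uint64_t len, uint64_t size)
    {
        return start + len <= size; /* mirrors: start + len > size => -EINVAL */
    }

    static bool fault_ok(uint64_t offset, uint64_t size)
    {
        return offset < size;       /* offset == size is already out of range */
    }

    int main(void)
    {
        printf("%d %d\n", window_ok(0, 4096, 4096), window_ok(4096, 1, 4096));
        printf("%d %d\n", fault_ok(4095, 4096), fault_ok(4096, 4096));
        return 0;
    }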
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 833348e2c9cb..4a164ab8b35a 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -73,7 +73,7 @@ static inline void cxl_slbia_core(struct mm_struct *mm)
73 spin_lock(&adapter->afu_list_lock); 73 spin_lock(&adapter->afu_list_lock);
74 for (slice = 0; slice < adapter->slices; slice++) { 74 for (slice = 0; slice < adapter->slices; slice++) {
75 afu = adapter->afu[slice]; 75 afu = adapter->afu[slice];
76 if (!afu->enabled) 76 if (!afu || !afu->enabled)
77 continue; 77 continue;
78 rcu_read_lock(); 78 rcu_read_lock();
79 idr_for_each_entry(&afu->contexts_idr, ctx, id) 79 idr_for_each_entry(&afu->contexts_idr, ctx, id)
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index c68ef5806dbe..32ad09705949 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -539,7 +539,7 @@ err:
539 539
540static void cxl_unmap_slice_regs(struct cxl_afu *afu) 540static void cxl_unmap_slice_regs(struct cxl_afu *afu)
541{ 541{
542 if (afu->p1n_mmio) 542 if (afu->p2n_mmio)
543 iounmap(afu->p2n_mmio); 543 iounmap(afu->p2n_mmio);
544 if (afu->p1n_mmio) 544 if (afu->p1n_mmio)
545 iounmap(afu->p1n_mmio); 545 iounmap(afu->p1n_mmio);
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index b1d1983a84a5..2eba002b580b 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -112,9 +112,10 @@ static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
112 unsigned long addr; 112 unsigned long addr;
113 113
114 phb = pci_bus_to_host(bus); 114 phb = pci_bus_to_host(bus);
115 afu = (struct cxl_afu *)phb->private_data;
116 if (phb == NULL) 115 if (phb == NULL)
117 return PCIBIOS_DEVICE_NOT_FOUND; 116 return PCIBIOS_DEVICE_NOT_FOUND;
117 afu = (struct cxl_afu *)phb->private_data;
118
118 if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num) 119 if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
119 return PCIBIOS_DEVICE_NOT_FOUND; 120 return PCIBIOS_DEVICE_NOT_FOUND;
120 if (offset >= (unsigned long)phb->cfg_data) 121 if (offset >= (unsigned long)phb->cfg_data)
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 357b6ae4d207..458aa5a09c52 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -552,22 +552,6 @@ void mei_cl_bus_rx_event(struct mei_cl *cl)
552 schedule_work(&device->event_work); 552 schedule_work(&device->event_work);
553} 553}
554 554
555void mei_cl_bus_remove_devices(struct mei_device *dev)
556{
557 struct mei_cl *cl, *next;
558
559 mutex_lock(&dev->device_lock);
560 list_for_each_entry_safe(cl, next, &dev->device_list, device_link) {
561 if (cl->device)
562 mei_cl_remove_device(cl->device);
563
564 list_del(&cl->device_link);
565 mei_cl_unlink(cl);
566 kfree(cl);
567 }
568 mutex_unlock(&dev->device_lock);
569}
570
571int __init mei_cl_bus_init(void) 555int __init mei_cl_bus_init(void)
572{ 556{
573 return bus_register(&mei_cl_bus_type); 557 return bus_register(&mei_cl_bus_type);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 94514b2c7a50..00c3865ca3b1 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -333,8 +333,6 @@ void mei_stop(struct mei_device *dev)
333 333
334 mei_nfc_host_exit(dev); 334 mei_nfc_host_exit(dev);
335 335
336 mei_cl_bus_remove_devices(dev);
337
338 mutex_lock(&dev->device_lock); 336 mutex_lock(&dev->device_lock);
339 337
340 mei_wd_stop(dev); 338 mei_wd_stop(dev);
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index b983c4ecad38..290ef3037437 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -402,11 +402,12 @@ void mei_nfc_host_exit(struct mei_device *dev)
402 402
403 cldev->priv_data = NULL; 403 cldev->priv_data = NULL;
404 404
405 mutex_lock(&dev->device_lock);
406 /* Need to remove the device here 405 /* Need to remove the device here
407 * since mei_nfc_free will unlink the clients 406 * since mei_nfc_free will unlink the clients
408 */ 407 */
409 mei_cl_remove_device(cldev); 408 mei_cl_remove_device(cldev);
409
410 mutex_lock(&dev->device_lock);
410 mei_nfc_free(ndev); 411 mei_nfc_free(ndev);
411 mutex_unlock(&dev->device_lock); 412 mutex_unlock(&dev->device_lock);
412} 413}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 8eb22c0ca7ce..7e2c43f701bc 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -535,8 +535,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
535 __func__, dimm_name, cmd_name, i); 535 __func__, dimm_name, cmd_name, i);
536 return -ENXIO; 536 return -ENXIO;
537 } 537 }
538 if (!access_ok(VERIFY_READ, p + in_len, in_size))
539 return -EFAULT;
540 if (in_len < sizeof(in_env)) 538 if (in_len < sizeof(in_env))
541 copy = min_t(u32, sizeof(in_env) - in_len, in_size); 539 copy = min_t(u32, sizeof(in_env) - in_len, in_size);
542 else 540 else
@@ -557,8 +555,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
557 __func__, dimm_name, cmd_name, i); 555 __func__, dimm_name, cmd_name, i);
558 return -EFAULT; 556 return -EFAULT;
559 } 557 }
560 if (!access_ok(VERIFY_WRITE, p + in_len + out_len, out_size))
561 return -EFAULT;
562 if (out_len < sizeof(out_env)) 558 if (out_len < sizeof(out_env))
563 copy = min_t(u32, sizeof(out_env) - out_len, out_size); 559 copy = min_t(u32, sizeof(out_env) - out_len, out_size);
564 else 560 else
@@ -570,9 +566,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
570 } 566 }
571 567
572 buf_len = out_len + in_len; 568 buf_len = out_len + in_len;
573 if (!access_ok(VERIFY_WRITE, p, sizeof(buf_len)))
574 return -EFAULT;
575
576 if (buf_len > ND_IOCTL_MAX_BUFLEN) { 569 if (buf_len > ND_IOCTL_MAX_BUFLEN) {
577 dev_dbg(dev, "%s:%s cmd: %s buf_len: %zu > %d\n", __func__, 570 dev_dbg(dev, "%s:%s cmd: %s buf_len: %zu > %d\n", __func__,
578 dimm_name, cmd_name, buf_len, 571 dimm_name, cmd_name, buf_len,
@@ -706,8 +699,10 @@ int __init nvdimm_bus_init(void)
706 nvdimm_major = rc; 699 nvdimm_major = rc;
707 700
708 nd_class = class_create(THIS_MODULE, "nd"); 701 nd_class = class_create(THIS_MODULE, "nd");
709 if (IS_ERR(nd_class)) 702 if (IS_ERR(nd_class)) {
703 rc = PTR_ERR(nd_class);
710 goto err_class; 704 goto err_class;
705 }
711 706
712 return 0; 707 return 0;
713 708
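The deleted access_ok() calls above were redundant: copy_from_user() and copy_to_user() validate the user pointer themselves and return the number of bytes left uncopied, so testing that return value is the whole check. A standalone simulation of that contract; copy_from_user_sim() is a userspace stand-in, with a null pointer playing the invalid-address role:

    #include <stddef.h>
    #include <string.h>

    /* Stand-in for copy_from_user(): returns the number of bytes NOT
     * copied, 0 on success.
     */
    static size_t copy_from_user_sim(void *dst, const void *src, size_t n)
    {
        if (!src)
            return n;               /* nothing copied: caller sees a fault */
        memcpy(dst, src, n);
        return 0;
    }

    static int ioctl_copy_sketch(const void *user_ptr, char *buf, size_t n)
    {
        /* the copy itself is the access check; no separate access_ok() */
        if (copy_from_user_sim(buf, user_ptr, n))
            return -14;             /* -EFAULT */
        return 0;
    }

    int main(void)
    {
        char buf[4];

        return ioctl_copy_sketch("abc", buf, 4) ? 1 : 0;
    }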
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index 515f33882ab8..49c1720df59a 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -7,7 +7,6 @@
7 * Bjorn Helgaas <bjorn.helgaas@hp.com> 7 * Bjorn Helgaas <bjorn.helgaas@hp.com>
8 */ 8 */
9 9
10#include <linux/acpi.h>
11#include <linux/pnp.h> 10#include <linux/pnp.h>
12#include <linux/device.h> 11#include <linux/device.h>
13#include <linux/init.h> 12#include <linux/init.h>
@@ -23,41 +22,25 @@ static const struct pnp_device_id pnp_dev_table[] = {
23 {"", 0} 22 {"", 0}
24}; 23};
25 24
26#ifdef CONFIG_ACPI
27static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
28{
29 u8 space_id = io ? ACPI_ADR_SPACE_SYSTEM_IO : ACPI_ADR_SPACE_SYSTEM_MEMORY;
30 return !acpi_reserve_region(start, length, space_id, IORESOURCE_BUSY, desc);
31}
32#else
33static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
34{
35 struct resource *res;
36
37 res = io ? request_region(start, length, desc) :
38 request_mem_region(start, length, desc);
39 if (res) {
40 res->flags &= ~IORESOURCE_BUSY;
41 return true;
42 }
43 return false;
44}
45#endif
46
47static void reserve_range(struct pnp_dev *dev, struct resource *r, int port) 25static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
48{ 26{
49 char *regionid; 27 char *regionid;
50 const char *pnpid = dev_name(&dev->dev); 28 const char *pnpid = dev_name(&dev->dev);
51 resource_size_t start = r->start, end = r->end; 29 resource_size_t start = r->start, end = r->end;
52 bool reserved; 30 struct resource *res;
53 31
54 regionid = kmalloc(16, GFP_KERNEL); 32 regionid = kmalloc(16, GFP_KERNEL);
55 if (!regionid) 33 if (!regionid)
56 return; 34 return;
57 35
58 snprintf(regionid, 16, "pnp %s", pnpid); 36 snprintf(regionid, 16, "pnp %s", pnpid);
59 reserved = __reserve_range(start, end - start + 1, !!port, regionid); 37 if (port)
60 if (!reserved) 38 res = request_region(start, end - start + 1, regionid);
39 else
40 res = request_mem_region(start, end - start + 1, regionid);
41 if (res)
42 res->flags &= ~IORESOURCE_BUSY;
43 else
61 kfree(regionid); 44 kfree(regionid);
62 45
63 /* 46 /*
@@ -66,7 +49,7 @@ static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
66 * have double reservations. 49 * have double reservations.
67 */ 50 */
68 dev_info(&dev->dev, "%pR %s reserved\n", r, 51 dev_info(&dev->dev, "%pR %s reserved\n", r,
69 reserved ? "has been" : "could not be"); 52 res ? "has been" : "could not be");
70} 53}
71 54
72static void reserve_resources_of_dev(struct pnp_dev *dev) 55static void reserve_resources_of_dev(struct pnp_dev *dev)
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index c9f72019bd68..520413e2bca0 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1081,6 +1081,15 @@ static int set_machine_constraints(struct regulator_dev *rdev,
1081 } 1081 }
1082 } 1082 }
1083 1083
1084 if (rdev->constraints->over_current_protection
1085 && ops->set_over_current_protection) {
1086 ret = ops->set_over_current_protection(rdev);
1087 if (ret < 0) {
1088 rdev_err(rdev, "failed to set over current protection\n");
1089 goto out;
1090 }
1091 }
1092
1084 print_constraints(rdev); 1093 print_constraints(rdev);
1085 return 0; 1094 return 0;
1086out: 1095out:
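This is the topic/ocp side of the merge: when the constraints parsed from DT request over-current protection and the driver provides the new set_over_current_protection op, it is enabled while machine constraints are applied. A kernel-style sketch (not standalone) of a driver opting in; MYCHIP_OCP_REG and MYCHIP_OCP_EN are hypothetical, while the ops member and regmap_update_bits() are the real interfaces involved:

    #define MYCHIP_OCP_REG	0x10	/* hypothetical register */
    #define MYCHIP_OCP_EN	BIT(0)	/* hypothetical enable bit */

    static int mychip_set_ocp(struct regulator_dev *rdev)
    {
        return regmap_update_bits(rdev->regmap, MYCHIP_OCP_REG,
                                  MYCHIP_OCP_EN, MYCHIP_OCP_EN);
    }

    static const struct regulator_ops mychip_ops = {
        /* ...enable, disable, voltage ops... */
        .set_over_current_protection = mychip_set_ocp,
    };

On the DT side, the of_regulator.c hunk below turns the boolean property "regulator-over-current-protection;" in the regulator node into the constraint flag this code tests.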
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index b1c485b24ab2..250700c853bf 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -107,6 +107,9 @@ static void of_get_regulation_constraints(struct device_node *np,
107 if (!of_property_read_u32(np, "regulator-system-load", &pval)) 107 if (!of_property_read_u32(np, "regulator-system-load", &pval))
108 constraints->system_load = pval; 108 constraints->system_load = pval;
109 109
110 constraints->over_current_protection = of_property_read_bool(np,
111 "regulator-over-current-protection");
112
110 for (i = 0; i < ARRAY_SIZE(regulator_states); i++) { 113 for (i = 0; i < ARRAY_SIZE(regulator_states); i++) {
111 switch (i) { 114 switch (i) {
112 case PM_SUSPEND_MEM: 115 case PM_SUSPEND_MEM:
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 86621fabbb8b..735355b0e023 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -121,6 +121,7 @@ static int __initdata stifb_bpp_pref[MAX_STI_ROMS];
121#define REG_3 0x0004a0 121#define REG_3 0x0004a0
122#define REG_4 0x000600 122#define REG_4 0x000600
123#define REG_6 0x000800 123#define REG_6 0x000800
124#define REG_7 0x000804
124#define REG_8 0x000820 125#define REG_8 0x000820
125#define REG_9 0x000a04 126#define REG_9 0x000a04
126#define REG_10 0x018000 127#define REG_10 0x018000
@@ -135,6 +136,8 @@ static int __initdata stifb_bpp_pref[MAX_STI_ROMS];
135#define REG_21 0x200218 136#define REG_21 0x200218
136#define REG_22 0x0005a0 137#define REG_22 0x0005a0
137#define REG_23 0x0005c0 138#define REG_23 0x0005c0
139#define REG_24 0x000808
140#define REG_25 0x000b00
138#define REG_26 0x200118 141#define REG_26 0x200118
139#define REG_27 0x200308 142#define REG_27 0x200308
140#define REG_32 0x21003c 143#define REG_32 0x21003c
@@ -429,6 +432,9 @@ ARTIST_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
429#define SET_LENXY_START_RECFILL(fb, lenxy) \ 432#define SET_LENXY_START_RECFILL(fb, lenxy) \
430 WRITE_WORD(lenxy, fb, REG_9) 433 WRITE_WORD(lenxy, fb, REG_9)
431 434
435#define SETUP_COPYAREA(fb) \
436 WRITE_BYTE(0, fb, REG_16b1)
437
432static void 438static void
433HYPER_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable) 439HYPER_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
434{ 440{
@@ -1004,6 +1010,36 @@ stifb_blank(int blank_mode, struct fb_info *info)
1004 return 0; 1010 return 0;
1005} 1011}
1006 1012
1013static void
1014stifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
1015{
1016 struct stifb_info *fb = container_of(info, struct stifb_info, info);
1017
1018 SETUP_COPYAREA(fb);
1019
1020 SETUP_HW(fb);
1021 if (fb->info.var.bits_per_pixel == 32) {
1022 WRITE_WORD(0xBBA0A000, fb, REG_10);
1023
1024 NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffffff);
1025 } else {
1026 WRITE_WORD(fb->id == S9000_ID_HCRX ? 0x13a02000 : 0x13a01000, fb, REG_10);
1027
1028 NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xff);
1029 }
1030
1031 NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb,
1032 IBOvals(RopSrc, MaskAddrOffset(0),
1033 BitmapExtent08, StaticReg(1),
1034 DataDynamic, MaskOtc, BGx(0), FGx(0)));
1035
1036 WRITE_WORD(((area->sx << 16) | area->sy), fb, REG_24);
1037 WRITE_WORD(((area->width << 16) | area->height), fb, REG_7);
1038 WRITE_WORD(((area->dx << 16) | area->dy), fb, REG_25);
1039
1040 SETUP_FB(fb);
1041}
1042
1007static void __init 1043static void __init
1008stifb_init_display(struct stifb_info *fb) 1044stifb_init_display(struct stifb_info *fb)
1009{ 1045{
@@ -1069,7 +1105,7 @@ static struct fb_ops stifb_ops = {
1069 .fb_setcolreg = stifb_setcolreg, 1105 .fb_setcolreg = stifb_setcolreg,
1070 .fb_blank = stifb_blank, 1106 .fb_blank = stifb_blank,
1071 .fb_fillrect = cfb_fillrect, 1107 .fb_fillrect = cfb_fillrect,
1072 .fb_copyarea = cfb_copyarea, 1108 .fb_copyarea = stifb_copyarea,
1073 .fb_imageblit = cfb_imageblit, 1109 .fb_imageblit = cfb_imageblit,
1074}; 1110};
1075 1111
@@ -1258,7 +1294,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
1258 info->fbops = &stifb_ops; 1294 info->fbops = &stifb_ops;
1259 info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len); 1295 info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len);
1260 info->screen_size = fix->smem_len; 1296 info->screen_size = fix->smem_len;
1261 info->flags = FBINFO_DEFAULT; 1297 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA;
1262 info->pseudo_palette = &fb->pseudo_palette; 1298 info->pseudo_palette = &fb->pseudo_palette;
1263 1299
1264 /* This has to be done !!! */ 1300 /* This has to be done !!! */
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 510040b04c96..b1dc51888048 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -540,8 +540,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
540 unlock_new_inode(inode); 540 unlock_new_inode(inode);
541 return inode; 541 return inode;
542error: 542error:
543 unlock_new_inode(inode); 543 iget_failed(inode);
544 iput(inode);
545 return ERR_PTR(retval); 544 return ERR_PTR(retval);
546 545
547} 546}
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 09e4433717b8..e8aa57dc8d6d 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -149,8 +149,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
149 unlock_new_inode(inode); 149 unlock_new_inode(inode);
150 return inode; 150 return inode;
151error: 151error:
152 unlock_new_inode(inode); 152 iget_failed(inode);
153 iput(inode);
154 return ERR_PTR(retval); 153 return ERR_PTR(retval);
155 154
156} 155}
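Both 9p hunks above replace a hand-rolled unlock_new_inode() + iput() error path with iget_failed(), which marks the inode bad, unlocks it and drops the reference as one operation, so concurrent lookups waiting on I_NEW never observe a half-constructed inode. A kernel-style sketch (not standalone) of the contract; the VFS calls are real, v9fs_fill_inode_sketch() and the inode number are hypothetical:

    static struct inode *qid_iget_sketch(struct super_block *sb)
    {
        struct inode *inode = iget_locked(sb, 42 /* hypothetical ino */);

        if (!inode)
            return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
            return inode;               /* cached and fully set up */

        if (v9fs_fill_inode_sketch(inode) < 0) {
            iget_failed(inode);         /* marks bad + unlock + release */
            return ERR_PTR(-EIO);
        }
        unlock_new_inode(inode);
        return inode;
    }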
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 0ef5cc13fae2..81220b2203c6 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -44,6 +44,8 @@
44#define BTRFS_INODE_IN_DELALLOC_LIST 9 44#define BTRFS_INODE_IN_DELALLOC_LIST 9
45#define BTRFS_INODE_READDIO_NEED_LOCK 10 45#define BTRFS_INODE_READDIO_NEED_LOCK 10
46#define BTRFS_INODE_HAS_PROPS 11 46#define BTRFS_INODE_HAS_PROPS 11
47/* DIO is ready to submit */
48#define BTRFS_INODE_DIO_READY 12
47/* 49/*
48 * The following 3 bits are meant only for the btree inode. 50 * The following 3 bits are meant only for the btree inode.
49 * When any of them is set, it means an error happened while writing an 51 * When any of them is set, it means an error happened while writing an
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 80a9aefb0c46..aac314e14188 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1778,6 +1778,7 @@ struct btrfs_fs_info {
1778 spinlock_t unused_bgs_lock; 1778 spinlock_t unused_bgs_lock;
1779 struct list_head unused_bgs; 1779 struct list_head unused_bgs;
1780 struct mutex unused_bg_unpin_mutex; 1780 struct mutex unused_bg_unpin_mutex;
1781 struct mutex delete_unused_bgs_mutex;
1781 1782
1782 /* For btrfs to record security options */ 1783 /* For btrfs to record security options */
1783 struct security_mnt_opts security_opts; 1784 struct security_mnt_opts security_opts;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 3f43bfea3684..a9aadb2ad525 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1751,6 +1751,7 @@ static int cleaner_kthread(void *arg)
1751{ 1751{
1752 struct btrfs_root *root = arg; 1752 struct btrfs_root *root = arg;
1753 int again; 1753 int again;
1754 struct btrfs_trans_handle *trans;
1754 1755
1755 do { 1756 do {
1756 again = 0; 1757 again = 0;
@@ -1772,7 +1773,6 @@ static int cleaner_kthread(void *arg)
1772 } 1773 }
1773 1774
1774 btrfs_run_delayed_iputs(root); 1775 btrfs_run_delayed_iputs(root);
1775 btrfs_delete_unused_bgs(root->fs_info);
1776 again = btrfs_clean_one_deleted_snapshot(root); 1776 again = btrfs_clean_one_deleted_snapshot(root);
1777 mutex_unlock(&root->fs_info->cleaner_mutex); 1777 mutex_unlock(&root->fs_info->cleaner_mutex);
1778 1778
@@ -1781,6 +1781,16 @@ static int cleaner_kthread(void *arg)
1781 * needn't do anything special here. 1781 * needn't do anything special here.
1782 */ 1782 */
1783 btrfs_run_defrag_inodes(root->fs_info); 1783 btrfs_run_defrag_inodes(root->fs_info);
1784
1785 /*
1786 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
1787 * with relocation (btrfs_relocate_chunk) and relocation
1788 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
1789 * after acquiring fs_info->delete_unused_bgs_mutex. So we
1790 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
1791 * unused block groups.
1792 */
1793 btrfs_delete_unused_bgs(root->fs_info);
1784sleep: 1794sleep:
1785 if (!try_to_freeze() && !again) { 1795 if (!try_to_freeze() && !again) {
1786 set_current_state(TASK_INTERRUPTIBLE); 1796 set_current_state(TASK_INTERRUPTIBLE);
@@ -1789,6 +1799,34 @@ sleep:
1789 __set_current_state(TASK_RUNNING); 1799 __set_current_state(TASK_RUNNING);
1790 } 1800 }
1791 } while (!kthread_should_stop()); 1801 } while (!kthread_should_stop());
1802
1803 /*
1804 * Transaction kthread is stopped before us and wakes us up.
1805 * However we might have started a new transaction and COWed some
1806 * tree blocks when deleting unused block groups for example. So
1807 * make sure we commit the transaction we started to have a clean
1808 * shutdown when evicting the btree inode - if it has dirty pages
1809 * when we do the final iput() on it, eviction will trigger a
1810 * writeback for it which will fail with null pointer dereferences
1811 * since work queues and other resources were already released and
1812 * destroyed by the time the iput/eviction/writeback is made.
1813 */
1814 trans = btrfs_attach_transaction(root);
1815 if (IS_ERR(trans)) {
1816 if (PTR_ERR(trans) != -ENOENT)
1817 btrfs_err(root->fs_info,
1818 "cleaner transaction attach returned %ld",
1819 PTR_ERR(trans));
1820 } else {
1821 int ret;
1822
1823 ret = btrfs_commit_transaction(trans, root);
1824 if (ret)
1825 btrfs_err(root->fs_info,
1826 "cleaner open transaction commit returned %d",
1827 ret);
1828 }
1829
1792 return 0; 1830 return 0;
1793} 1831}
1794 1832
@@ -2492,6 +2530,7 @@ int open_ctree(struct super_block *sb,
2492 spin_lock_init(&fs_info->unused_bgs_lock); 2530 spin_lock_init(&fs_info->unused_bgs_lock);
2493 rwlock_init(&fs_info->tree_mod_log_lock); 2531 rwlock_init(&fs_info->tree_mod_log_lock);
2494 mutex_init(&fs_info->unused_bg_unpin_mutex); 2532 mutex_init(&fs_info->unused_bg_unpin_mutex);
2533 mutex_init(&fs_info->delete_unused_bgs_mutex);
2495 mutex_init(&fs_info->reloc_mutex); 2534 mutex_init(&fs_info->reloc_mutex);
2496 mutex_init(&fs_info->delalloc_root_mutex); 2535 mutex_init(&fs_info->delalloc_root_mutex);
2497 seqlock_init(&fs_info->profiles_lock); 2536 seqlock_init(&fs_info->profiles_lock);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 38b76cc02f48..1c2bd1723e40 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -9889,6 +9889,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9889 } 9889 }
9890 spin_unlock(&fs_info->unused_bgs_lock); 9890 spin_unlock(&fs_info->unused_bgs_lock);
9891 9891
9892 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
9893
9892 /* Don't want to race with allocators so take the groups_sem */ 9894 /* Don't want to race with allocators so take the groups_sem */
9893 down_write(&space_info->groups_sem); 9895 down_write(&space_info->groups_sem);
9894 spin_lock(&block_group->lock); 9896 spin_lock(&block_group->lock);
@@ -9983,6 +9985,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9983end_trans: 9985end_trans:
9984 btrfs_end_transaction(trans, root); 9986 btrfs_end_transaction(trans, root);
9985next: 9987next:
9988 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
9986 btrfs_put_block_group(block_group); 9989 btrfs_put_block_group(block_group);
9987 spin_lock(&fs_info->unused_bgs_lock); 9990 spin_lock(&fs_info->unused_bgs_lock);
9988 } 9991 }
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index f6a596d5a637..d4a582ac3f73 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -246,6 +246,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
246{ 246{
247 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; 247 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
248 struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset; 248 struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
249 spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
249 struct btrfs_free_space *info; 250 struct btrfs_free_space *info;
250 struct rb_node *n; 251 struct rb_node *n;
251 u64 count; 252 u64 count;
@@ -254,24 +255,30 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
254 return; 255 return;
255 256
256 while (1) { 257 while (1) {
258 bool add_to_ctl = true;
259
260 spin_lock(rbroot_lock);
257 n = rb_first(rbroot); 261 n = rb_first(rbroot);
258 if (!n) 262 if (!n) {
263 spin_unlock(rbroot_lock);
259 break; 264 break;
265 }
260 266
261 info = rb_entry(n, struct btrfs_free_space, offset_index); 267 info = rb_entry(n, struct btrfs_free_space, offset_index);
262 BUG_ON(info->bitmap); /* Logic error */ 268 BUG_ON(info->bitmap); /* Logic error */
263 269
264 if (info->offset > root->ino_cache_progress) 270 if (info->offset > root->ino_cache_progress)
265 goto free; 271 add_to_ctl = false;
266 else if (info->offset + info->bytes > root->ino_cache_progress) 272 else if (info->offset + info->bytes > root->ino_cache_progress)
267 count = root->ino_cache_progress - info->offset + 1; 273 count = root->ino_cache_progress - info->offset + 1;
268 else 274 else
269 count = info->bytes; 275 count = info->bytes;
270 276
271 __btrfs_add_free_space(ctl, info->offset, count);
272free:
273 rb_erase(&info->offset_index, rbroot); 277 rb_erase(&info->offset_index, rbroot);
274 kfree(info); 278 spin_unlock(rbroot_lock);
279 if (add_to_ctl)
280 __btrfs_add_free_space(ctl, info->offset, count);
281 kmem_cache_free(btrfs_free_space_cachep, info);
275 } 282 }
276} 283}
277 284
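The btrfs_unpin_free_ino() rework above follows a common pattern: detach one node while holding the tree lock, then drop the lock before the heavier work (__btrfs_add_free_space() and freeing the entry). A standalone sketch of the same pattern with a plain linked list and a pthread mutex (names illustrative, not the kernel API):

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static struct node *head;
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

static void drain(void (*process)(struct node *))
{
	while (1) {
		struct node *n;

		pthread_mutex_lock(&tree_lock);
		n = head;
		if (!n) {
			pthread_mutex_unlock(&tree_lock);
			break;
		}
		head = n->next;			/* detach under the lock */
		pthread_mutex_unlock(&tree_lock);

		process(n);			/* heavy work, lock dropped */
		free(n);
	}
}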
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 855935f6671a..b33c0cf02668 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4989,8 +4989,9 @@ static void evict_inode_truncate_pages(struct inode *inode)
4989 /* 4989 /*
4990 * Keep looping until we have no more ranges in the io tree. 4990 * Keep looping until we have no more ranges in the io tree.
4991 * We can have ongoing bios started by readpages (called from readahead) 4991 * We can have ongoing bios started by readpages (called from readahead)
4992 * that didn't get their end io callbacks called yet or they are still 4992 * that have their endio callback (extent_io.c:end_bio_extent_readpage)
4993 * in progress ((extent_io.c:end_bio_extent_readpage()). This means some 4993 * still in progress (unlocked the pages in the bio but did not yet
 4994 * unlock the ranges in the io tree). Therefore some
4994 * ranges can still be locked and eviction started because before 4995 * ranges can still be locked and eviction started because before
4995 * submitting those bios, which are executed by a separate task (work 4996 * submitting those bios, which are executed by a separate task (work
4996 * queue kthread), inode references (inode->i_count) were not taken 4997 * queue kthread), inode references (inode->i_count) were not taken
@@ -7546,6 +7547,7 @@ unlock:
7546 7547
7547 current->journal_info = outstanding_extents; 7548 current->journal_info = outstanding_extents;
7548 btrfs_free_reserved_data_space(inode, len); 7549 btrfs_free_reserved_data_space(inode, len);
7550 set_bit(BTRFS_INODE_DIO_READY, &BTRFS_I(inode)->runtime_flags);
7549 } 7551 }
7550 7552
7551 /* 7553 /*
@@ -7871,8 +7873,6 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
7871 struct bio *dio_bio; 7873 struct bio *dio_bio;
7872 int ret; 7874 int ret;
7873 7875
7874 if (err)
7875 goto out_done;
7876again: 7876again:
7877 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, 7877 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
7878 &ordered_offset, 7878 &ordered_offset,
@@ -7895,7 +7895,6 @@ out_test:
7895 ordered = NULL; 7895 ordered = NULL;
7896 goto again; 7896 goto again;
7897 } 7897 }
7898out_done:
7899 dio_bio = dip->dio_bio; 7898 dio_bio = dip->dio_bio;
7900 7899
7901 kfree(dip); 7900 kfree(dip);
@@ -8163,9 +8162,8 @@ out_err:
8163static void btrfs_submit_direct(int rw, struct bio *dio_bio, 8162static void btrfs_submit_direct(int rw, struct bio *dio_bio,
8164 struct inode *inode, loff_t file_offset) 8163 struct inode *inode, loff_t file_offset)
8165{ 8164{
8166 struct btrfs_root *root = BTRFS_I(inode)->root; 8165 struct btrfs_dio_private *dip = NULL;
8167 struct btrfs_dio_private *dip; 8166 struct bio *io_bio = NULL;
8168 struct bio *io_bio;
8169 struct btrfs_io_bio *btrfs_bio; 8167 struct btrfs_io_bio *btrfs_bio;
8170 int skip_sum; 8168 int skip_sum;
8171 int write = rw & REQ_WRITE; 8169 int write = rw & REQ_WRITE;
@@ -8182,7 +8180,7 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
8182 dip = kzalloc(sizeof(*dip), GFP_NOFS); 8180 dip = kzalloc(sizeof(*dip), GFP_NOFS);
8183 if (!dip) { 8181 if (!dip) {
8184 ret = -ENOMEM; 8182 ret = -ENOMEM;
8185 goto free_io_bio; 8183 goto free_ordered;
8186 } 8184 }
8187 8185
8188 dip->private = dio_bio->bi_private; 8186 dip->private = dio_bio->bi_private;
@@ -8210,25 +8208,55 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
8210 8208
8211 if (btrfs_bio->end_io) 8209 if (btrfs_bio->end_io)
8212 btrfs_bio->end_io(btrfs_bio, ret); 8210 btrfs_bio->end_io(btrfs_bio, ret);
8213free_io_bio:
8214 bio_put(io_bio);
8215 8211
8216free_ordered: 8212free_ordered:
8217 /* 8213 /*
 8218 * If this is a write, we need to clean up the reserved space and kill 8214 * If we arrived here it means we either failed to submit the dip,
 8219 * the ordered extent. 8215 * failed to clone the dio_bio, or failed to allocate the
8216 * dip. If we cloned the dio_bio and allocated the dip, we can just
8217 * call bio_endio against our io_bio so that we get proper resource
8218 * cleanup if we fail to submit the dip, otherwise, we must do the
8219 * same as btrfs_endio_direct_[write|read] because we can't call these
8220 * callbacks - they require an allocated dip and a clone of dio_bio.
8220 */ 8221 */
8221 if (write) { 8222 if (io_bio && dip) {
8222 struct btrfs_ordered_extent *ordered; 8223 bio_endio(io_bio, ret);
8223 ordered = btrfs_lookup_ordered_extent(inode, file_offset); 8224 /*
8224 if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) && 8225 * The end io callbacks free our dip, do the final put on io_bio
8225 !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) 8226 * and all the cleanup and final put for dio_bio (through
8226 btrfs_free_reserved_extent(root, ordered->start, 8227 * dio_end_io()).
8227 ordered->disk_len, 1); 8228 */
8228 btrfs_put_ordered_extent(ordered); 8229 dip = NULL;
8229 btrfs_put_ordered_extent(ordered); 8230 io_bio = NULL;
8231 } else {
8232 if (write) {
8233 struct btrfs_ordered_extent *ordered;
8234
8235 ordered = btrfs_lookup_ordered_extent(inode,
8236 file_offset);
8237 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
8238 /*
8239 * Decrements our ref on the ordered extent and removes
8240 * the ordered extent from the inode's ordered tree,
8241 * doing all the proper resource cleanup such as for the
8242 * reserved space and waking up any waiters for this
8243 * ordered extent (through btrfs_remove_ordered_extent).
8244 */
8245 btrfs_finish_ordered_io(ordered);
8246 } else {
8247 unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
8248 file_offset + dio_bio->bi_iter.bi_size - 1);
8249 }
8250 clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
8251 /*
8252 * Releases and cleans up our dio_bio, no need to bio_put()
8253 * nor bio_endio()/bio_io_error() against dio_bio.
8254 */
8255 dio_end_io(dio_bio, ret);
8230 } 8256 }
8231 bio_endio(dio_bio, ret); 8257 if (io_bio)
8258 bio_put(io_bio);
8259 kfree(dip);
8232} 8260}
8233 8261
8234static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb, 8262static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
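The error cleanup added to btrfs_submit_direct() above splits into three cases. As a standalone sketch with stub functions (illustrative names, not the kernel helpers), the decision tree reduces to:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the kernel helpers; names are illustrative. */
static void endio_path(void)     { puts("endio frees dip, then dio_end_io"); }
static void finish_ordered(void) { puts("ordered-extent cleanup + wakeups"); }
static void unlock_range(void)   { puts("unlock the extent range"); }
static void end_dio_bio(void)    { puts("dio_end_io releases dio_bio"); }

static void cleanup(bool have_io_bio, bool have_dip, bool write)
{
	if (have_io_bio && have_dip) {
		/* full setup done: the endio callbacks do all the cleanup */
		endio_path();
		return;
	}
	if (write)
		finish_ordered();	/* what the write endio would have done */
	else
		unlock_range();		/* what the read endio would have done */
	end_dio_bio();
	/* plus bio_put()/kfree() on whichever of io_bio/dip was allocated */
}

int main(void)
{
	cleanup(false, false, true);	/* e.g. dip allocation failed on a write */
	return 0;
}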
@@ -8330,9 +8358,18 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
8330 btrfs_submit_direct, flags); 8358 btrfs_submit_direct, flags);
8331 if (iov_iter_rw(iter) == WRITE) { 8359 if (iov_iter_rw(iter) == WRITE) {
8332 current->journal_info = NULL; 8360 current->journal_info = NULL;
8333 if (ret < 0 && ret != -EIOCBQUEUED) 8361 if (ret < 0 && ret != -EIOCBQUEUED) {
8334 btrfs_delalloc_release_space(inode, count); 8362 /*
 8335 else if (ret >= 0 && (size_t)ret < count) 8363 * If the error comes from the submit stage,
 8364 * btrfs_get_blocks_direct() has already freed the data space,
 8365 * and the metadata space will be handled by
 8366 * finish_ordered_fn; don't release it again, to make
8367 * sure bytes_may_use is correct.
8368 */
8369 if (!test_and_clear_bit(BTRFS_INODE_DIO_READY,
8370 &BTRFS_I(inode)->runtime_flags))
8371 btrfs_delalloc_release_space(inode, count);
8372 } else if (ret >= 0 && (size_t)ret < count)
8336 btrfs_delalloc_release_space(inode, 8373 btrfs_delalloc_release_space(inode,
8337 count - (size_t)ret); 8374 count - (size_t)ret);
8338 } 8375 }
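The BTRFS_INODE_DIO_READY bit above guards against releasing the reserved space twice: the submit path sets it after freeing the data space, and the error path only releases space when it can clear a bit that was never set. A standalone illustration of that test-and-clear guard (simplified, non-atomic; the kernel uses set_bit()/test_and_clear_bit()):

#include <stdbool.h>
#include <stdio.h>

#define DIO_READY 0x1ul

static bool test_and_clear(unsigned long *flags, unsigned long bit)
{
	bool was_set = *flags & bit;

	*flags &= ~bit;
	return was_set;
}

int main(void)
{
	unsigned long flags = 0;

	flags |= DIO_READY;	/* submit path already freed the data space */

	/* error path: only release again if submit never got that far */
	if (!test_and_clear(&flags, DIO_READY))
		puts("release delalloc space");
	else
		puts("space already released at submit time");
	return 0;
}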
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index c86b835da7a8..5d91776e12a2 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -87,7 +87,8 @@ struct btrfs_ioctl_received_subvol_args_32 {
87 87
88 88
89static int btrfs_clone(struct inode *src, struct inode *inode, 89static int btrfs_clone(struct inode *src, struct inode *inode,
90 u64 off, u64 olen, u64 olen_aligned, u64 destoff); 90 u64 off, u64 olen, u64 olen_aligned, u64 destoff,
91 int no_time_update);
91 92
92/* Mask out flags that are inappropriate for the given type of inode. */ 93/* Mask out flags that are inappropriate for the given type of inode. */
93static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) 94static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -2765,14 +2766,11 @@ out:
2765 return ret; 2766 return ret;
2766} 2767}
2767 2768
2768static struct page *extent_same_get_page(struct inode *inode, u64 off) 2769static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
2769{ 2770{
2770 struct page *page; 2771 struct page *page;
2771 pgoff_t index;
2772 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 2772 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2773 2773
2774 index = off >> PAGE_CACHE_SHIFT;
2775
2776 page = grab_cache_page(inode->i_mapping, index); 2774 page = grab_cache_page(inode->i_mapping, index);
2777 if (!page) 2775 if (!page)
2778 return NULL; 2776 return NULL;
@@ -2793,6 +2791,20 @@ static struct page *extent_same_get_page(struct inode *inode, u64 off)
2793 return page; 2791 return page;
2794} 2792}
2795 2793
2794static int gather_extent_pages(struct inode *inode, struct page **pages,
2795 int num_pages, u64 off)
2796{
2797 int i;
2798 pgoff_t index = off >> PAGE_CACHE_SHIFT;
2799
2800 for (i = 0; i < num_pages; i++) {
2801 pages[i] = extent_same_get_page(inode, index + i);
2802 if (!pages[i])
2803 return -ENOMEM;
2804 }
2805 return 0;
2806}
2807
2796static inline void lock_extent_range(struct inode *inode, u64 off, u64 len) 2808static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
2797{ 2809{
2798 /* do any pending delalloc/csum calc on src, one way or 2810 /* do any pending delalloc/csum calc on src, one way or
@@ -2818,52 +2830,120 @@ static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
2818 } 2830 }
2819} 2831}
2820 2832
2821static void btrfs_double_unlock(struct inode *inode1, u64 loff1, 2833static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
2822 struct inode *inode2, u64 loff2, u64 len)
2823{ 2834{
2824 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
2825 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
2826
2827 mutex_unlock(&inode1->i_mutex); 2835 mutex_unlock(&inode1->i_mutex);
2828 mutex_unlock(&inode2->i_mutex); 2836 mutex_unlock(&inode2->i_mutex);
2829} 2837}
2830 2838
2831static void btrfs_double_lock(struct inode *inode1, u64 loff1, 2839static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
2832 struct inode *inode2, u64 loff2, u64 len) 2840{
2841 if (inode1 < inode2)
2842 swap(inode1, inode2);
2843
2844 mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
2845 if (inode1 != inode2)
2846 mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
2847}
2848
2849static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
2850 struct inode *inode2, u64 loff2, u64 len)
2851{
2852 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
2853 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
2854}
2855
2856static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
2857 struct inode *inode2, u64 loff2, u64 len)
2833{ 2858{
2834 if (inode1 < inode2) { 2859 if (inode1 < inode2) {
2835 swap(inode1, inode2); 2860 swap(inode1, inode2);
2836 swap(loff1, loff2); 2861 swap(loff1, loff2);
2837 } 2862 }
2838
2839 mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
2840 lock_extent_range(inode1, loff1, len); 2863 lock_extent_range(inode1, loff1, len);
2841 if (inode1 != inode2) { 2864 if (inode1 != inode2)
2842 mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
2843 lock_extent_range(inode2, loff2, len); 2865 lock_extent_range(inode2, loff2, len);
2866}
2867
2868struct cmp_pages {
2869 int num_pages;
2870 struct page **src_pages;
2871 struct page **dst_pages;
2872};
2873
2874static void btrfs_cmp_data_free(struct cmp_pages *cmp)
2875{
2876 int i;
2877 struct page *pg;
2878
2879 for (i = 0; i < cmp->num_pages; i++) {
2880 pg = cmp->src_pages[i];
2881 if (pg)
2882 page_cache_release(pg);
2883 pg = cmp->dst_pages[i];
2884 if (pg)
2885 page_cache_release(pg);
2886 }
2887 kfree(cmp->src_pages);
2888 kfree(cmp->dst_pages);
2889}
2890
2891static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
2892 struct inode *dst, u64 dst_loff,
2893 u64 len, struct cmp_pages *cmp)
2894{
2895 int ret;
2896 int num_pages = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
2897 struct page **src_pgarr, **dst_pgarr;
2898
2899 /*
2900 * We must gather up all the pages before we initiate our
2901 * extent locking. We use an array for the page pointers. Size
2902 * of the array is bounded by len, which is in turn bounded by
2903 * BTRFS_MAX_DEDUPE_LEN.
2904 */
2905 src_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS);
2906 dst_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS);
2907 if (!src_pgarr || !dst_pgarr) {
2908 kfree(src_pgarr);
2909 kfree(dst_pgarr);
2910 return -ENOMEM;
2844 } 2911 }
2912 cmp->num_pages = num_pages;
2913 cmp->src_pages = src_pgarr;
2914 cmp->dst_pages = dst_pgarr;
2915
2916 ret = gather_extent_pages(src, cmp->src_pages, cmp->num_pages, loff);
2917 if (ret)
2918 goto out;
2919
2920 ret = gather_extent_pages(dst, cmp->dst_pages, cmp->num_pages, dst_loff);
2921
2922out:
2923 if (ret)
2924 btrfs_cmp_data_free(cmp);
 2925 return ret;
2845} 2926}
2846 2927
2847static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst, 2928static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
2848 u64 dst_loff, u64 len) 2929 u64 dst_loff, u64 len, struct cmp_pages *cmp)
2849{ 2930{
2850 int ret = 0; 2931 int ret = 0;
2932 int i;
2851 struct page *src_page, *dst_page; 2933 struct page *src_page, *dst_page;
2852 unsigned int cmp_len = PAGE_CACHE_SIZE; 2934 unsigned int cmp_len = PAGE_CACHE_SIZE;
2853 void *addr, *dst_addr; 2935 void *addr, *dst_addr;
2854 2936
2937 i = 0;
2855 while (len) { 2938 while (len) {
2856 if (len < PAGE_CACHE_SIZE) 2939 if (len < PAGE_CACHE_SIZE)
2857 cmp_len = len; 2940 cmp_len = len;
2858 2941
2859 src_page = extent_same_get_page(src, loff); 2942 BUG_ON(i >= cmp->num_pages);
2860 if (!src_page) 2943
2861 return -EINVAL; 2944 src_page = cmp->src_pages[i];
2862 dst_page = extent_same_get_page(dst, dst_loff); 2945 dst_page = cmp->dst_pages[i];
2863 if (!dst_page) { 2946
2864 page_cache_release(src_page);
2865 return -EINVAL;
2866 }
2867 addr = kmap_atomic(src_page); 2947 addr = kmap_atomic(src_page);
2868 dst_addr = kmap_atomic(dst_page); 2948 dst_addr = kmap_atomic(dst_page);
2869 2949
@@ -2875,15 +2955,12 @@ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
2875 2955
2876 kunmap_atomic(addr); 2956 kunmap_atomic(addr);
2877 kunmap_atomic(dst_addr); 2957 kunmap_atomic(dst_addr);
2878 page_cache_release(src_page);
2879 page_cache_release(dst_page);
2880 2958
2881 if (ret) 2959 if (ret)
2882 break; 2960 break;
2883 2961
2884 loff += cmp_len;
2885 dst_loff += cmp_len;
2886 len -= cmp_len; 2962 len -= cmp_len;
2963 i++;
2887 } 2964 }
2888 2965
2889 return ret; 2966 return ret;
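The "gather up all the pages before we initiate our extent locking" comment encodes a lock-ordering rule: the read path locks a page first and the extent range second, so extent-same must not hold the extent lock while grab_cache_page() takes page locks. A non-compilable sketch of the resulting order, with lock/unlock_extent_ranges() as hypothetical stand-ins for the two locking cases:

ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
if (ret)			/* pages gathered and locked first ... */
	goto out_unlock;
lock_extent_ranges();		/* ... extent ranges only afterwards */
ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
unlock_extent_ranges();
btrfs_cmp_data_free(&cmp);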
@@ -2914,27 +2991,62 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
2914{ 2991{
2915 int ret; 2992 int ret;
2916 u64 len = olen; 2993 u64 len = olen;
2994 struct cmp_pages cmp;
2995 int same_inode = 0;
2996 u64 same_lock_start = 0;
2997 u64 same_lock_len = 0;
2917 2998
2918 /*
2919 * btrfs_clone() can't handle extents in the same file
2920 * yet. Once that works, we can drop this check and replace it
2921 * with a check for the same inode, but overlapping extents.
2922 */
2923 if (src == dst) 2999 if (src == dst)
2924 return -EINVAL; 3000 same_inode = 1;
2925 3001
2926 if (len == 0) 3002 if (len == 0)
2927 return 0; 3003 return 0;
2928 3004
2929 btrfs_double_lock(src, loff, dst, dst_loff, len); 3005 if (same_inode) {
3006 mutex_lock(&src->i_mutex);
2930 3007
2931 ret = extent_same_check_offsets(src, loff, &len, olen); 3008 ret = extent_same_check_offsets(src, loff, &len, olen);
2932 if (ret) 3009 if (ret)
2933 goto out_unlock; 3010 goto out_unlock;
2934 3011
2935 ret = extent_same_check_offsets(dst, dst_loff, &len, olen); 3012 /*
2936 if (ret) 3013 * Single inode case wants the same checks, except we
2937 goto out_unlock; 3014 * don't want our length pushed out past i_size as
3015 * comparing that data range makes no sense.
3016 *
3017 * extent_same_check_offsets() will do this for an
3018 * unaligned length at i_size, so catch it here and
3019 * reject the request.
3020 *
3021 * This effectively means we require aligned extents
3022 * for the single-inode case, whereas the other cases
3023 * allow an unaligned length so long as it ends at
3024 * i_size.
3025 */
3026 if (len != olen) {
3027 ret = -EINVAL;
3028 goto out_unlock;
3029 }
3030
3031 /* Check for overlapping ranges */
3032 if (dst_loff + len > loff && dst_loff < loff + len) {
3033 ret = -EINVAL;
3034 goto out_unlock;
3035 }
3036
3037 same_lock_start = min_t(u64, loff, dst_loff);
3038 same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
3039 } else {
3040 btrfs_double_inode_lock(src, dst);
3041
3042 ret = extent_same_check_offsets(src, loff, &len, olen);
3043 if (ret)
3044 goto out_unlock;
3045
3046 ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
3047 if (ret)
3048 goto out_unlock;
3049 }
2938 3050
2939 /* don't make the dst file partly checksummed */ 3051 /* don't make the dst file partly checksummed */
2940 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) != 3052 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
@@ -2943,12 +3055,32 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
2943 goto out_unlock; 3055 goto out_unlock;
2944 } 3056 }
2945 3057
2946 ret = btrfs_cmp_data(src, loff, dst, dst_loff, len); 3058 ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
3059 if (ret)
3060 goto out_unlock;
3061
3062 if (same_inode)
3063 lock_extent_range(src, same_lock_start, same_lock_len);
3064 else
3065 btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
3066
3067 /* pass original length for comparison so we stay within i_size */
3068 ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
2947 if (ret == 0) 3069 if (ret == 0)
2948 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff); 3070 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
3071
3072 if (same_inode)
3073 unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
3074 same_lock_start + same_lock_len - 1);
3075 else
3076 btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
2949 3077
3078 btrfs_cmp_data_free(&cmp);
2950out_unlock: 3079out_unlock:
2951 btrfs_double_unlock(src, loff, dst, dst_loff, len); 3080 if (same_inode)
3081 mutex_unlock(&src->i_mutex);
3082 else
3083 btrfs_double_inode_unlock(src, dst);
2952 3084
2953 return ret; 3085 return ret;
2954} 3086}
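A worked example (standalone, illustrative values) of the single-inode locking math in btrfs_extent_same() above: the two ranges must not overlap, and a single extent lock covers both of them.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t loff = 0, dst_loff = 4 * 16384, len = 16384;

	/* reject if [dst_loff, dst_loff+len) intersects [loff, loff+len) */
	if (dst_loff + len > loff && dst_loff < loff + len) {
		puts("-EINVAL: ranges overlap");
		return 1;
	}

	uint64_t lock_start = loff < dst_loff ? loff : dst_loff;
	uint64_t lock_len = (loff > dst_loff ? loff : dst_loff) + len - lock_start;

	/* prints: lock [0, 81920): both 16K ranges fall inside it */
	printf("lock [%llu, %llu)\n",
	       (unsigned long long)lock_start,
	       (unsigned long long)(lock_start + lock_len));
	return 0;
}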
@@ -3100,13 +3232,15 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
3100 struct inode *inode, 3232 struct inode *inode,
3101 u64 endoff, 3233 u64 endoff,
3102 const u64 destoff, 3234 const u64 destoff,
3103 const u64 olen) 3235 const u64 olen,
3236 int no_time_update)
3104{ 3237{
3105 struct btrfs_root *root = BTRFS_I(inode)->root; 3238 struct btrfs_root *root = BTRFS_I(inode)->root;
3106 int ret; 3239 int ret;
3107 3240
3108 inode_inc_iversion(inode); 3241 inode_inc_iversion(inode);
3109 inode->i_mtime = inode->i_ctime = CURRENT_TIME; 3242 if (!no_time_update)
3243 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
3110 /* 3244 /*
3111 * We round up to the block size at eof when determining which 3245 * We round up to the block size at eof when determining which
3112 * extents to clone above, but shouldn't round up the file size. 3246 * extents to clone above, but shouldn't round up the file size.
@@ -3191,13 +3325,13 @@ static void clone_update_extent_map(struct inode *inode,
3191 * @inode: Inode to clone to 3325 * @inode: Inode to clone to
3192 * @off: Offset within source to start clone from 3326 * @off: Offset within source to start clone from
3193 * @olen: Original length, passed by user, of range to clone 3327 * @olen: Original length, passed by user, of range to clone
3194 * @olen_aligned: Block-aligned value of olen, extent_same uses 3328 * @olen_aligned: Block-aligned value of olen
3195 * identical values here
3196 * @destoff: Offset within @inode to start clone 3329 * @destoff: Offset within @inode to start clone
3330 * @no_time_update: Whether to update mtime/ctime on the target inode
3197 */ 3331 */
3198static int btrfs_clone(struct inode *src, struct inode *inode, 3332static int btrfs_clone(struct inode *src, struct inode *inode,
3199 const u64 off, const u64 olen, const u64 olen_aligned, 3333 const u64 off, const u64 olen, const u64 olen_aligned,
3200 const u64 destoff) 3334 const u64 destoff, int no_time_update)
3201{ 3335{
3202 struct btrfs_root *root = BTRFS_I(inode)->root; 3336 struct btrfs_root *root = BTRFS_I(inode)->root;
3203 struct btrfs_path *path = NULL; 3337 struct btrfs_path *path = NULL;
@@ -3521,7 +3655,8 @@ process_slot:
3521 root->sectorsize); 3655 root->sectorsize);
3522 ret = clone_finish_inode_update(trans, inode, 3656 ret = clone_finish_inode_update(trans, inode,
3523 last_dest_end, 3657 last_dest_end,
3524 destoff, olen); 3658 destoff, olen,
3659 no_time_update);
3525 if (ret) 3660 if (ret)
3526 goto out; 3661 goto out;
3527 if (new_key.offset + datal >= destoff + len) 3662 if (new_key.offset + datal >= destoff + len)
@@ -3559,7 +3694,7 @@ process_slot:
3559 clone_update_extent_map(inode, trans, NULL, last_dest_end, 3694 clone_update_extent_map(inode, trans, NULL, last_dest_end,
3560 destoff + len - last_dest_end); 3695 destoff + len - last_dest_end);
3561 ret = clone_finish_inode_update(trans, inode, destoff + len, 3696 ret = clone_finish_inode_update(trans, inode, destoff + len,
3562 destoff, olen); 3697 destoff, olen, no_time_update);
3563 } 3698 }
3564 3699
3565out: 3700out:
@@ -3696,7 +3831,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
3696 lock_extent_range(inode, destoff, len); 3831 lock_extent_range(inode, destoff, len);
3697 } 3832 }
3698 3833
3699 ret = btrfs_clone(src, inode, off, olen, len, destoff); 3834 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
3700 3835
3701 if (same_inode) { 3836 if (same_inode) {
3702 u64 lock_start = min_t(u64, off, destoff); 3837 u64 lock_start = min_t(u64, off, destoff);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 89656d799ff6..52170cf1757e 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -552,6 +552,10 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
552 trace_btrfs_ordered_extent_put(entry->inode, entry); 552 trace_btrfs_ordered_extent_put(entry->inode, entry);
553 553
554 if (atomic_dec_and_test(&entry->refs)) { 554 if (atomic_dec_and_test(&entry->refs)) {
555 ASSERT(list_empty(&entry->log_list));
556 ASSERT(list_empty(&entry->trans_list));
557 ASSERT(list_empty(&entry->root_extent_list));
558 ASSERT(RB_EMPTY_NODE(&entry->rb_node));
555 if (entry->inode) 559 if (entry->inode)
556 btrfs_add_delayed_iput(entry->inode); 560 btrfs_add_delayed_iput(entry->inode);
557 while (!list_empty(&entry->list)) { 561 while (!list_empty(&entry->list)) {
@@ -579,6 +583,7 @@ void btrfs_remove_ordered_extent(struct inode *inode,
579 spin_lock_irq(&tree->lock); 583 spin_lock_irq(&tree->lock);
580 node = &entry->rb_node; 584 node = &entry->rb_node;
581 rb_erase(node, &tree->tree); 585 rb_erase(node, &tree->tree);
586 RB_CLEAR_NODE(node);
582 if (tree->last == node) 587 if (tree->last == node)
583 tree->last = NULL; 588 tree->last = NULL;
584 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); 589 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index d5f1f033b7a0..e9ace099162c 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1349,6 +1349,11 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
1349 struct btrfs_root *quota_root; 1349 struct btrfs_root *quota_root;
1350 struct btrfs_qgroup *qgroup; 1350 struct btrfs_qgroup *qgroup;
1351 int ret = 0; 1351 int ret = 0;
 1352 /* Sometimes we want to clear the limit on this qgroup.
 1353 * To meet this requirement, we treat -1 as a special value
 1354 * which tells the kernel to clear the limit on this qgroup.
1355 */
1356 const u64 CLEAR_VALUE = -1;
1352 1357
1353 mutex_lock(&fs_info->qgroup_ioctl_lock); 1358 mutex_lock(&fs_info->qgroup_ioctl_lock);
1354 quota_root = fs_info->quota_root; 1359 quota_root = fs_info->quota_root;
@@ -1364,14 +1369,42 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
1364 } 1369 }
1365 1370
1366 spin_lock(&fs_info->qgroup_lock); 1371 spin_lock(&fs_info->qgroup_lock);
1367 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) 1372 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1368 qgroup->max_rfer = limit->max_rfer; 1373 if (limit->max_rfer == CLEAR_VALUE) {
1369 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) 1374 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1370 qgroup->max_excl = limit->max_excl; 1375 limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1371 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) 1376 qgroup->max_rfer = 0;
1372 qgroup->rsv_rfer = limit->rsv_rfer; 1377 } else {
1373 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) 1378 qgroup->max_rfer = limit->max_rfer;
1374 qgroup->rsv_excl = limit->rsv_excl; 1379 }
1380 }
1381 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1382 if (limit->max_excl == CLEAR_VALUE) {
1383 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1384 limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1385 qgroup->max_excl = 0;
1386 } else {
1387 qgroup->max_excl = limit->max_excl;
1388 }
1389 }
1390 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1391 if (limit->rsv_rfer == CLEAR_VALUE) {
1392 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1393 limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1394 qgroup->rsv_rfer = 0;
1395 } else {
1396 qgroup->rsv_rfer = limit->rsv_rfer;
1397 }
1398 }
1399 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1400 if (limit->rsv_excl == CLEAR_VALUE) {
1401 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1402 limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1403 qgroup->rsv_excl = 0;
1404 } else {
1405 qgroup->rsv_excl = limit->rsv_excl;
1406 }
1407 }
1375 qgroup->lim_flags |= limit->flags; 1408 qgroup->lim_flags |= limit->flags;
1376 1409
1377 spin_unlock(&fs_info->qgroup_lock); 1410 spin_unlock(&fs_info->qgroup_lock);
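A standalone illustration of the -1 convention above for one field: a limit of (u64)-1 clears both the stored value and its flag instead of being applied literally. Flag value and struct are illustrative, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

#define LIMIT_MAX_RFER 0x1u

struct qgroup { uint32_t lim_flags; uint64_t max_rfer; };

static void apply_max_rfer(struct qgroup *qg, uint64_t max_rfer)
{
	const uint64_t clear_value = (uint64_t)-1;

	if (max_rfer == clear_value) {
		qg->lim_flags &= ~LIMIT_MAX_RFER;	/* drop the limit */
		qg->max_rfer = 0;
	} else {
		qg->lim_flags |= LIMIT_MAX_RFER;	/* set/replace it */
		qg->max_rfer = max_rfer;
	}
}

int main(void)
{
	struct qgroup qg = { 0, 0 };

	apply_max_rfer(&qg, 1UL << 30);		/* limit to 1GiB */
	apply_max_rfer(&qg, (uint64_t)-1);	/* "none": clears it again */
	printf("flags=%u max_rfer=%llu\n", qg.lim_flags,
	       (unsigned long long)qg.max_rfer);
	return 0;
}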
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 827951fbf7fc..88cbb5995667 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -4049,7 +4049,7 @@ restart:
4049 if (trans && progress && err == -ENOSPC) { 4049 if (trans && progress && err == -ENOSPC) {
4050 ret = btrfs_force_chunk_alloc(trans, rc->extent_root, 4050 ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
4051 rc->block_group->flags); 4051 rc->block_group->flags);
4052 if (ret == 0) { 4052 if (ret == 1) {
4053 err = 0; 4053 err = 0;
4054 progress = 0; 4054 progress = 0;
4055 goto restart; 4055 goto restart;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 9f2feabe99f2..94db0fa5225a 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3571,7 +3571,6 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3571static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, 3571static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3572 int is_dev_replace) 3572 int is_dev_replace)
3573{ 3573{
3574 int ret = 0;
3575 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND; 3574 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
3576 int max_active = fs_info->thread_pool_size; 3575 int max_active = fs_info->thread_pool_size;
3577 3576
@@ -3584,34 +3583,36 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3584 fs_info->scrub_workers = 3583 fs_info->scrub_workers =
3585 btrfs_alloc_workqueue("btrfs-scrub", flags, 3584 btrfs_alloc_workqueue("btrfs-scrub", flags,
3586 max_active, 4); 3585 max_active, 4);
3587 if (!fs_info->scrub_workers) { 3586 if (!fs_info->scrub_workers)
3588 ret = -ENOMEM; 3587 goto fail_scrub_workers;
3589 goto out; 3588
3590 }
3591 fs_info->scrub_wr_completion_workers = 3589 fs_info->scrub_wr_completion_workers =
3592 btrfs_alloc_workqueue("btrfs-scrubwrc", flags, 3590 btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
3593 max_active, 2); 3591 max_active, 2);
3594 if (!fs_info->scrub_wr_completion_workers) { 3592 if (!fs_info->scrub_wr_completion_workers)
3595 ret = -ENOMEM; 3593 goto fail_scrub_wr_completion_workers;
3596 goto out; 3594
3597 }
3598 fs_info->scrub_nocow_workers = 3595 fs_info->scrub_nocow_workers =
3599 btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0); 3596 btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
3600 if (!fs_info->scrub_nocow_workers) { 3597 if (!fs_info->scrub_nocow_workers)
3601 ret = -ENOMEM; 3598 goto fail_scrub_nocow_workers;
3602 goto out;
3603 }
3604 fs_info->scrub_parity_workers = 3599 fs_info->scrub_parity_workers =
3605 btrfs_alloc_workqueue("btrfs-scrubparity", flags, 3600 btrfs_alloc_workqueue("btrfs-scrubparity", flags,
3606 max_active, 2); 3601 max_active, 2);
3607 if (!fs_info->scrub_parity_workers) { 3602 if (!fs_info->scrub_parity_workers)
3608 ret = -ENOMEM; 3603 goto fail_scrub_parity_workers;
3609 goto out;
3610 }
3611 } 3604 }
3612 ++fs_info->scrub_workers_refcnt; 3605 ++fs_info->scrub_workers_refcnt;
3613out: 3606 return 0;
3614 return ret; 3607
3608fail_scrub_parity_workers:
3609 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3610fail_scrub_nocow_workers:
3611 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3612fail_scrub_wr_completion_workers:
3613 btrfs_destroy_workqueue(fs_info->scrub_workers);
3614fail_scrub_workers:
3615 return -ENOMEM;
3615} 3616}
3616 3617
3617static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info) 3618static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
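The scrub change above converts repeated "ret = -ENOMEM; goto out" blocks into the usual kernel unwind ladder, where each label undoes exactly the allocations that succeeded before the failure. A minimal standalone sketch with malloc() standing in for btrfs_alloc_workqueue():

#include <stdlib.h>

static int setup(void)
{
	void *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto fail_a;
	b = malloc(16);
	if (!b)
		goto fail_b;
	c = malloc(16);
	if (!c)
		goto fail_c;
	/* success: a, b, c stay allocated and are freed by teardown */
	return 0;

fail_c:
	free(b);	/* unwind in reverse order of allocation */
fail_b:
	free(a);
fail_a:
	return -1;
}

int main(void) { return setup() ? 1 : 0; }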
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 1ce80c1c4eb6..9c45431e69ab 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4117,6 +4117,187 @@ static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
4117 return 0; 4117 return 0;
4118} 4118}
4119 4119
4120/*
4121 * At the moment we always log all xattrs. This is to figure out at log replay
 4122 * time which xattrs must have their deletion replayed. If an xattr is missing
4123 * in the log tree and exists in the fs/subvol tree, we delete it. This is
 4124 * because if an xattr is deleted, the inode is fsynced and a power failure
4125 * happens, causing the log to be replayed the next time the fs is mounted,
4126 * we want the xattr to not exist anymore (same behaviour as other filesystems
4127 * with a journal, ext3/4, xfs, f2fs, etc).
4128 */
4129static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4130 struct btrfs_root *root,
4131 struct inode *inode,
4132 struct btrfs_path *path,
4133 struct btrfs_path *dst_path)
4134{
4135 int ret;
4136 struct btrfs_key key;
4137 const u64 ino = btrfs_ino(inode);
4138 int ins_nr = 0;
4139 int start_slot = 0;
4140
4141 key.objectid = ino;
4142 key.type = BTRFS_XATTR_ITEM_KEY;
4143 key.offset = 0;
4144
4145 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4146 if (ret < 0)
4147 return ret;
4148
4149 while (true) {
4150 int slot = path->slots[0];
4151 struct extent_buffer *leaf = path->nodes[0];
4152 int nritems = btrfs_header_nritems(leaf);
4153
4154 if (slot >= nritems) {
4155 if (ins_nr > 0) {
4156 u64 last_extent = 0;
4157
4158 ret = copy_items(trans, inode, dst_path, path,
4159 &last_extent, start_slot,
4160 ins_nr, 1, 0);
4161 /* can't be 1, extent items aren't processed */
4162 ASSERT(ret <= 0);
4163 if (ret < 0)
4164 return ret;
4165 ins_nr = 0;
4166 }
4167 ret = btrfs_next_leaf(root, path);
4168 if (ret < 0)
4169 return ret;
4170 else if (ret > 0)
4171 break;
4172 continue;
4173 }
4174
4175 btrfs_item_key_to_cpu(leaf, &key, slot);
4176 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4177 break;
4178
4179 if (ins_nr == 0)
4180 start_slot = slot;
4181 ins_nr++;
4182 path->slots[0]++;
4183 cond_resched();
4184 }
4185 if (ins_nr > 0) {
4186 u64 last_extent = 0;
4187
4188 ret = copy_items(trans, inode, dst_path, path,
4189 &last_extent, start_slot,
4190 ins_nr, 1, 0);
4191 /* can't be 1, extent items aren't processed */
4192 ASSERT(ret <= 0);
4193 if (ret < 0)
4194 return ret;
4195 }
4196
4197 return 0;
4198}
4199
4200/*
4201 * If the no holes feature is enabled we need to make sure any hole between the
4202 * last extent and the i_size of our inode is explicitly marked in the log. This
4203 * is to make sure that doing something like:
4204 *
4205 * 1) create file with 128Kb of data
4206 * 2) truncate file to 64Kb
4207 * 3) truncate file to 256Kb
4208 * 4) fsync file
4209 * 5) <crash/power failure>
4210 * 6) mount fs and trigger log replay
4211 *
4212 * Will give us a file with a size of 256Kb, the first 64Kb of data match what
4213 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
4214 * file correspond to a hole. The presence of explicit holes in a log tree is
4215 * what guarantees that log replay will remove/adjust file extent items in the
4216 * fs/subvol tree.
4217 *
4218 * Here we do not need to care about holes between extents, that is already done
4219 * by copy_items(). We also only need to do this in the full sync path, where we
4220 * lookup for extents from the fs/subvol tree only. In the fast path case, we
4221 * lookup the list of modified extent maps and if any represents a hole, we
4222 * insert a corresponding extent representing a hole in the log tree.
4223 */
4224static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4225 struct btrfs_root *root,
4226 struct inode *inode,
4227 struct btrfs_path *path)
4228{
4229 int ret;
4230 struct btrfs_key key;
4231 u64 hole_start;
4232 u64 hole_size;
4233 struct extent_buffer *leaf;
4234 struct btrfs_root *log = root->log_root;
4235 const u64 ino = btrfs_ino(inode);
4236 const u64 i_size = i_size_read(inode);
4237
4238 if (!btrfs_fs_incompat(root->fs_info, NO_HOLES))
4239 return 0;
4240
4241 key.objectid = ino;
4242 key.type = BTRFS_EXTENT_DATA_KEY;
4243 key.offset = (u64)-1;
4244
4245 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4246 ASSERT(ret != 0);
4247 if (ret < 0)
4248 return ret;
4249
4250 ASSERT(path->slots[0] > 0);
4251 path->slots[0]--;
4252 leaf = path->nodes[0];
4253 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4254
4255 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
4256 /* inode does not have any extents */
4257 hole_start = 0;
4258 hole_size = i_size;
4259 } else {
4260 struct btrfs_file_extent_item *extent;
4261 u64 len;
4262
4263 /*
4264 * If there's an extent beyond i_size, an explicit hole was
4265 * already inserted by copy_items().
4266 */
4267 if (key.offset >= i_size)
4268 return 0;
4269
4270 extent = btrfs_item_ptr(leaf, path->slots[0],
4271 struct btrfs_file_extent_item);
4272
4273 if (btrfs_file_extent_type(leaf, extent) ==
4274 BTRFS_FILE_EXTENT_INLINE) {
4275 len = btrfs_file_extent_inline_len(leaf,
4276 path->slots[0],
4277 extent);
4278 ASSERT(len == i_size);
4279 return 0;
4280 }
4281
4282 len = btrfs_file_extent_num_bytes(leaf, extent);
4283 /* Last extent goes beyond i_size, no need to log a hole. */
4284 if (key.offset + len > i_size)
4285 return 0;
4286 hole_start = key.offset + len;
4287 hole_size = i_size - hole_start;
4288 }
4289 btrfs_release_path(path);
4290
4291 /* Last extent ends at i_size. */
4292 if (hole_size == 0)
4293 return 0;
4294
4295 hole_size = ALIGN(hole_size, root->sectorsize);
4296 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
4297 hole_size, 0, hole_size, 0, 0, 0);
4298 return ret;
4299}
4300
4120/* log a single inode in the tree log. 4301/* log a single inode in the tree log.
4121 * At least one parent directory for this inode must exist in the tree 4302 * At least one parent directory for this inode must exist in the tree
4122 * or be logged already. 4303 * or be logged already.
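A worked example (standalone) for btrfs_log_trailing_hole() above, using the 128K/64K/256K scenario from its comment: after the final truncate, i_size is 256K and the last extent covers [0, 64K), so the logged hole must be [64K, 256K).

#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t i_size = 256 * 1024;
	uint64_t extent_offset = 0, extent_len = 64 * 1024;
	uint64_t sectorsize = 4096;

	if (extent_offset + extent_len > i_size)
		return 0;	/* extent beyond i_size: no hole to log */

	uint64_t hole_start = extent_offset + extent_len;
	uint64_t hole_size = ALIGN(i_size - hole_start, sectorsize);

	/* prints: hole [65536, 262144) */
	printf("hole [%llu, %llu)\n", (unsigned long long)hole_start,
	       (unsigned long long)(hole_start + hole_size));
	return 0;
}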
@@ -4155,6 +4336,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
4155 u64 ino = btrfs_ino(inode); 4336 u64 ino = btrfs_ino(inode);
4156 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 4337 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4157 u64 logged_isize = 0; 4338 u64 logged_isize = 0;
4339 bool need_log_inode_item = true;
4158 4340
4159 path = btrfs_alloc_path(); 4341 path = btrfs_alloc_path();
4160 if (!path) 4342 if (!path)
@@ -4263,11 +4445,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
4263 } else { 4445 } else {
4264 if (inode_only == LOG_INODE_ALL) 4446 if (inode_only == LOG_INODE_ALL)
4265 fast_search = true; 4447 fast_search = true;
4266 ret = log_inode_item(trans, log, dst_path, inode);
4267 if (ret) {
4268 err = ret;
4269 goto out_unlock;
4270 }
4271 goto log_extents; 4448 goto log_extents;
4272 } 4449 }
4273 4450
@@ -4290,6 +4467,28 @@ again:
4290 if (min_key.type > max_key.type) 4467 if (min_key.type > max_key.type)
4291 break; 4468 break;
4292 4469
4470 if (min_key.type == BTRFS_INODE_ITEM_KEY)
4471 need_log_inode_item = false;
4472
4473 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
4474 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
4475 if (ins_nr == 0)
4476 goto next_slot;
4477 ret = copy_items(trans, inode, dst_path, path,
4478 &last_extent, ins_start_slot,
4479 ins_nr, inode_only, logged_isize);
4480 if (ret < 0) {
4481 err = ret;
4482 goto out_unlock;
4483 }
4484 ins_nr = 0;
4485 if (ret) {
4486 btrfs_release_path(path);
4487 continue;
4488 }
4489 goto next_slot;
4490 }
4491
4293 src = path->nodes[0]; 4492 src = path->nodes[0];
4294 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { 4493 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
4295 ins_nr++; 4494 ins_nr++;
@@ -4357,9 +4556,26 @@ next_slot:
4357 ins_nr = 0; 4556 ins_nr = 0;
4358 } 4557 }
4359 4558
4559 btrfs_release_path(path);
4560 btrfs_release_path(dst_path);
4561 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
4562 if (err)
4563 goto out_unlock;
4564 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
4565 btrfs_release_path(path);
4566 btrfs_release_path(dst_path);
4567 err = btrfs_log_trailing_hole(trans, root, inode, path);
4568 if (err)
4569 goto out_unlock;
4570 }
4360log_extents: 4571log_extents:
4361 btrfs_release_path(path); 4572 btrfs_release_path(path);
4362 btrfs_release_path(dst_path); 4573 btrfs_release_path(dst_path);
4574 if (need_log_inode_item) {
4575 err = log_inode_item(trans, log, dst_path, inode);
4576 if (err)
4577 goto out_unlock;
4578 }
4363 if (fast_search) { 4579 if (fast_search) {
4364 /* 4580 /*
4365 * Some ordered extents started by fsync might have completed 4581 * Some ordered extents started by fsync might have completed
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 4b438b4c8c91..fbe7c104531c 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2766,6 +2766,20 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
2766 root = root->fs_info->chunk_root; 2766 root = root->fs_info->chunk_root;
2767 extent_root = root->fs_info->extent_root; 2767 extent_root = root->fs_info->extent_root;
2768 2768
2769 /*
2770 * Prevent races with automatic removal of unused block groups.
2771 * After we relocate and before we remove the chunk with offset
2772 * chunk_offset, automatic removal of the block group can kick in,
2773 * resulting in a failure when calling btrfs_remove_chunk() below.
2774 *
2775 * Make sure to acquire this mutex before doing a tree search (dev
2776 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
2777 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
2778 * we release the path used to search the chunk/dev tree and before
2779 * the current task acquires this mutex and calls us.
2780 */
2781 ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));
2782
2769 ret = btrfs_can_relocate(extent_root, chunk_offset); 2783 ret = btrfs_can_relocate(extent_root, chunk_offset);
2770 if (ret) 2784 if (ret)
2771 return -ENOSPC; 2785 return -ENOSPC;
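The assertion above pins down a caller contract that the hunks below implement: the mutex is taken before the tree search that finds a chunk and held across btrfs_relocate_chunk(), so the cleaner cannot delete the block group between the search and the relocation. A condensed, non-compilable sketch of the caller pattern:

mutex_lock(&fs_info->delete_unused_bgs_mutex);
ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
if (ret < 0) {
	mutex_unlock(&fs_info->delete_unused_bgs_mutex);
	goto error;
}
/* ... pick the chunk out of the leaf ... */
ret = btrfs_relocate_chunk(chunk_root, chunk_objectid, chunk_offset);
mutex_unlock(&fs_info->delete_unused_bgs_mutex);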
@@ -2814,13 +2828,18 @@ again:
2814 key.type = BTRFS_CHUNK_ITEM_KEY; 2828 key.type = BTRFS_CHUNK_ITEM_KEY;
2815 2829
2816 while (1) { 2830 while (1) {
2831 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
2817 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 2832 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2818 if (ret < 0) 2833 if (ret < 0) {
2834 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2819 goto error; 2835 goto error;
2836 }
2820 BUG_ON(ret == 0); /* Corruption */ 2837 BUG_ON(ret == 0); /* Corruption */
2821 2838
2822 ret = btrfs_previous_item(chunk_root, path, key.objectid, 2839 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2823 key.type); 2840 key.type);
2841 if (ret)
2842 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2824 if (ret < 0) 2843 if (ret < 0)
2825 goto error; 2844 goto error;
2826 if (ret > 0) 2845 if (ret > 0)
@@ -2843,6 +2862,7 @@ again:
2843 else 2862 else
2844 BUG_ON(ret); 2863 BUG_ON(ret);
2845 } 2864 }
2865 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2846 2866
2847 if (found_key.offset == 0) 2867 if (found_key.offset == 0)
2848 break; 2868 break;
@@ -3299,9 +3319,12 @@ again:
3299 goto error; 3319 goto error;
3300 } 3320 }
3301 3321
3322 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3302 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3323 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3303 if (ret < 0) 3324 if (ret < 0) {
3325 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3304 goto error; 3326 goto error;
3327 }
3305 3328
3306 /* 3329 /*
3307 * this shouldn't happen, it means the last relocate 3330 * this shouldn't happen, it means the last relocate
@@ -3313,6 +3336,7 @@ again:
3313 ret = btrfs_previous_item(chunk_root, path, 0, 3336 ret = btrfs_previous_item(chunk_root, path, 0,
3314 BTRFS_CHUNK_ITEM_KEY); 3337 BTRFS_CHUNK_ITEM_KEY);
3315 if (ret) { 3338 if (ret) {
3339 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3316 ret = 0; 3340 ret = 0;
3317 break; 3341 break;
3318 } 3342 }
@@ -3321,8 +3345,10 @@ again:
3321 slot = path->slots[0]; 3345 slot = path->slots[0];
3322 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3346 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3323 3347
3324 if (found_key.objectid != key.objectid) 3348 if (found_key.objectid != key.objectid) {
3349 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3325 break; 3350 break;
3351 }
3326 3352
3327 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3353 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3328 3354
@@ -3335,10 +3361,13 @@ again:
3335 ret = should_balance_chunk(chunk_root, leaf, chunk, 3361 ret = should_balance_chunk(chunk_root, leaf, chunk,
3336 found_key.offset); 3362 found_key.offset);
3337 btrfs_release_path(path); 3363 btrfs_release_path(path);
3338 if (!ret) 3364 if (!ret) {
3365 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3339 goto loop; 3366 goto loop;
3367 }
3340 3368
3341 if (counting) { 3369 if (counting) {
3370 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3342 spin_lock(&fs_info->balance_lock); 3371 spin_lock(&fs_info->balance_lock);
3343 bctl->stat.expected++; 3372 bctl->stat.expected++;
3344 spin_unlock(&fs_info->balance_lock); 3373 spin_unlock(&fs_info->balance_lock);
@@ -3348,6 +3377,7 @@ again:
3348 ret = btrfs_relocate_chunk(chunk_root, 3377 ret = btrfs_relocate_chunk(chunk_root,
3349 found_key.objectid, 3378 found_key.objectid,
3350 found_key.offset); 3379 found_key.offset);
3380 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3351 if (ret && ret != -ENOSPC) 3381 if (ret && ret != -ENOSPC)
3352 goto error; 3382 goto error;
3353 if (ret == -ENOSPC) { 3383 if (ret == -ENOSPC) {
@@ -4087,11 +4117,16 @@ again:
4087 key.type = BTRFS_DEV_EXTENT_KEY; 4117 key.type = BTRFS_DEV_EXTENT_KEY;
4088 4118
4089 do { 4119 do {
4120 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
4090 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4121 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4091 if (ret < 0) 4122 if (ret < 0) {
4123 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4092 goto done; 4124 goto done;
4125 }
4093 4126
4094 ret = btrfs_previous_item(root, path, 0, key.type); 4127 ret = btrfs_previous_item(root, path, 0, key.type);
4128 if (ret)
4129 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4095 if (ret < 0) 4130 if (ret < 0)
4096 goto done; 4131 goto done;
4097 if (ret) { 4132 if (ret) {
@@ -4105,6 +4140,7 @@ again:
4105 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4140 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4106 4141
4107 if (key.objectid != device->devid) { 4142 if (key.objectid != device->devid) {
4143 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4108 btrfs_release_path(path); 4144 btrfs_release_path(path);
4109 break; 4145 break;
4110 } 4146 }
@@ -4113,6 +4149,7 @@ again:
4113 length = btrfs_dev_extent_length(l, dev_extent); 4149 length = btrfs_dev_extent_length(l, dev_extent);
4114 4150
4115 if (key.offset + length <= new_size) { 4151 if (key.offset + length <= new_size) {
4152 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4116 btrfs_release_path(path); 4153 btrfs_release_path(path);
4117 break; 4154 break;
4118 } 4155 }
@@ -4122,6 +4159,7 @@ again:
4122 btrfs_release_path(path); 4159 btrfs_release_path(path);
4123 4160
4124 ret = btrfs_relocate_chunk(root, chunk_objectid, chunk_offset); 4161 ret = btrfs_relocate_chunk(root, chunk_objectid, chunk_offset);
4162 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4125 if (ret && ret != -ENOSPC) 4163 if (ret && ret != -ENOSPC)
4126 goto done; 4164 goto done;
4127 if (ret == -ENOSPC) 4165 if (ret == -ENOSPC)
@@ -5715,7 +5753,6 @@ static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int e
5715static void btrfs_end_bio(struct bio *bio, int err) 5753static void btrfs_end_bio(struct bio *bio, int err)
5716{ 5754{
5717 struct btrfs_bio *bbio = bio->bi_private; 5755 struct btrfs_bio *bbio = bio->bi_private;
5718 struct btrfs_device *dev = bbio->stripes[0].dev;
5719 int is_orig_bio = 0; 5756 int is_orig_bio = 0;
5720 5757
5721 if (err) { 5758 if (err) {
@@ -5723,6 +5760,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
5723 if (err == -EIO || err == -EREMOTEIO) { 5760 if (err == -EIO || err == -EREMOTEIO) {
5724 unsigned int stripe_index = 5761 unsigned int stripe_index =
5725 btrfs_io_bio(bio)->stripe_index; 5762 btrfs_io_bio(bio)->stripe_index;
5763 struct btrfs_device *dev;
5726 5764
5727 BUG_ON(stripe_index >= bbio->num_stripes); 5765 BUG_ON(stripe_index >= bbio->num_stripes);
5728 dev = bbio->stripes[stripe_index].dev; 5766 dev = bbio->stripes[stripe_index].dev;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 6b8e2f091f5b..48851f6ea6ec 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -896,6 +896,7 @@ COMPATIBLE_IOCTL(FIGETBSZ)
896/* 'X' - originally XFS but some now in the VFS */ 896/* 'X' - originally XFS but some now in the VFS */
897COMPATIBLE_IOCTL(FIFREEZE) 897COMPATIBLE_IOCTL(FIFREEZE)
898COMPATIBLE_IOCTL(FITHAW) 898COMPATIBLE_IOCTL(FITHAW)
899COMPATIBLE_IOCTL(FITRIM)
899COMPATIBLE_IOCTL(KDGETKEYCODE) 900COMPATIBLE_IOCTL(KDGETKEYCODE)
900COMPATIBLE_IOCTL(KDSETKEYCODE) 901COMPATIBLE_IOCTL(KDSETKEYCODE)
901COMPATIBLE_IOCTL(KDGKBTYPE) 902COMPATIBLE_IOCTL(KDGKBTYPE)
diff --git a/fs/dcache.c b/fs/dcache.c
index 7a3f3e5f9cea..5c8ea15e73a5 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -642,7 +642,7 @@ static inline bool fast_dput(struct dentry *dentry)
642 642
643 /* 643 /*
 644 * If we have a d_op->d_delete() operation, we should not 644 * If we have a d_op->d_delete() operation, we should not
645 * let the dentry count go to zero, so use "put__or_lock". 645 * let the dentry count go to zero, so use "put_or_lock".
646 */ 646 */
647 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) 647 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
648 return lockref_put_or_lock(&dentry->d_lockref); 648 return lockref_put_or_lock(&dentry->d_lockref);
@@ -697,7 +697,7 @@ static inline bool fast_dput(struct dentry *dentry)
697 */ 697 */
698 smp_rmb(); 698 smp_rmb();
699 d_flags = ACCESS_ONCE(dentry->d_flags); 699 d_flags = ACCESS_ONCE(dentry->d_flags);
700 d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST; 700 d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
701 701
702 /* Nothing to do? Dropping the reference was all we needed? */ 702 /* Nothing to do? Dropping the reference was all we needed? */
703 if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry)) 703 if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
@@ -776,6 +776,9 @@ repeat:
776 if (unlikely(d_unhashed(dentry))) 776 if (unlikely(d_unhashed(dentry)))
777 goto kill_it; 777 goto kill_it;
778 778
779 if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
780 goto kill_it;
781
779 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) { 782 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
780 if (dentry->d_op->d_delete(dentry)) 783 if (dentry->d_op->d_delete(dentry))
781 goto kill_it; 784 goto kill_it;
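A standalone illustration of the fast_dput() mask change above: with DCACHE_DISCONNECTED added to the mask, a disconnected dentry no longer matches the "nothing to do" pattern and falls through to the locked kill_it path. Flag values here are illustrative, not the real dcache constants.

#include <stdbool.h>
#include <stdio.h>

#define DCACHE_REFERENCED	0x1u
#define DCACHE_LRU_LIST		0x2u
#define DCACHE_DISCONNECTED	0x4u

static bool fast_path_ok(unsigned int d_flags)
{
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
	return d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST);
}

int main(void)
{
	printf("%d\n", fast_path_ok(DCACHE_REFERENCED | DCACHE_LRU_LIST)); /* 1 */
	printf("%d\n", fast_path_ok(DCACHE_REFERENCED | DCACHE_LRU_LIST |
				    DCACHE_DISCONNECTED));		   /* 0 */
	return 0;
}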
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 72afcc629d7b..feef8a9c4de7 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -325,7 +325,6 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
325 return rc; 325 return rc;
326 326
327 switch (cmd) { 327 switch (cmd) {
328 case FITRIM:
329 case FS_IOC32_GETFLAGS: 328 case FS_IOC32_GETFLAGS:
330 case FS_IOC32_SETFLAGS: 329 case FS_IOC32_SETFLAGS:
331 case FS_IOC32_GETVERSION: 330 case FS_IOC32_GETVERSION:
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index aadb72828834..2553aa8b608d 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -504,7 +504,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
504 struct buffer_head *bh; 504 struct buffer_head *bh;
505 int err; 505 int err;
506 506
507 bh = sb_getblk(inode->i_sb, pblk); 507 bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
508 if (unlikely(!bh)) 508 if (unlikely(!bh))
509 return ERR_PTR(-ENOMEM); 509 return ERR_PTR(-ENOMEM);
510 510
@@ -1089,7 +1089,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
1089 err = -EIO; 1089 err = -EIO;
1090 goto cleanup; 1090 goto cleanup;
1091 } 1091 }
1092 bh = sb_getblk(inode->i_sb, newblock); 1092 bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
1093 if (unlikely(!bh)) { 1093 if (unlikely(!bh)) {
1094 err = -ENOMEM; 1094 err = -ENOMEM;
1095 goto cleanup; 1095 goto cleanup;
@@ -1283,7 +1283,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1283 if (newblock == 0) 1283 if (newblock == 0)
1284 return err; 1284 return err;
1285 1285
1286 bh = sb_getblk(inode->i_sb, newblock); 1286 bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
1287 if (unlikely(!bh)) 1287 if (unlikely(!bh))
1288 return -ENOMEM; 1288 return -ENOMEM;
1289 lock_buffer(bh); 1289 lock_buffer(bh);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 41f8e55afcd1..cecf9aa10811 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1323,7 +1323,7 @@ static void ext4_da_page_release_reservation(struct page *page,
 					     unsigned int offset,
 					     unsigned int length)
 {
-	int to_release = 0;
+	int to_release = 0, contiguous_blks = 0;
 	struct buffer_head *head, *bh;
 	unsigned int curr_off = 0;
 	struct inode *inode = page->mapping->host;
@@ -1344,14 +1344,23 @@ static void ext4_da_page_release_reservation(struct page *page,
 
 		if ((offset <= curr_off) && (buffer_delay(bh))) {
 			to_release++;
+			contiguous_blks++;
 			clear_buffer_delay(bh);
+		} else if (contiguous_blks) {
+			lblk = page->index <<
+			       (PAGE_CACHE_SHIFT - inode->i_blkbits);
+			lblk += (curr_off >> inode->i_blkbits) -
+				contiguous_blks;
+			ext4_es_remove_extent(inode, lblk, contiguous_blks);
+			contiguous_blks = 0;
 		}
 		curr_off = next_off;
 	} while ((bh = bh->b_this_page) != head);
 
-	if (to_release) {
+	if (contiguous_blks) {
 		lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-		ext4_es_remove_extent(inode, lblk, to_release);
+		lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
+		ext4_es_remove_extent(inode, lblk, contiguous_blks);
 	}
 
 	/* If we have released all the blocks belonging to a cluster, then we
@@ -4344,7 +4353,12 @@ static void ext4_update_other_inodes_time(struct super_block *sb,
 	int inode_size = EXT4_INODE_SIZE(sb);
 
 	oi.orig_ino = orig_ino;
-	ino = (orig_ino & ~(inodes_per_block - 1)) + 1;
+	/*
+	 * Calculate the first inode in the inode table block.  Inode
+	 * numbers are one-based.  That is, the first inode in a block
+	 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
+	 */
+	ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
 	for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
 		if (ino == orig_ino)
 			continue;
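
The off-by-one being fixed above is easiest to see with concrete numbers. A small userspace sketch, assuming 16 inodes per block (4k blocks, 256-byte inodes); this is illustration only, not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned inodes_per_block = 16;
	unsigned orig_ino;

	/* Inode numbers are one-based, so table blocks start at 1, 17, 33, ... */
	for (orig_ino = 15; orig_ino <= 17; orig_ino++) {
		unsigned old = (orig_ino & ~(inodes_per_block - 1)) + 1;
		unsigned new = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
		printf("ino %2u: old start %2u, new start %2u\n",
		       orig_ino, old, new);
	}
	/* For orig_ino == 16 the old formula yields 17, the start of the
	 * *next* block; the fixed one yields 1, the block that actually
	 * contains inode 16. */
	return 0;
}
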
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index cb8451246b30..1346cfa355d0 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -755,7 +755,6 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			return err;
 		}
 	case EXT4_IOC_MOVE_EXT:
-	case FITRIM:
 	case EXT4_IOC_RESIZE_FS:
 	case EXT4_IOC_PRECACHE_EXTENTS:
 	case EXT4_IOC_SET_ENCRYPTION_POLICY:
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index f6aedf88da43..34b610ea5030 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4816,18 +4816,12 @@ do_more:
 		/*
 		 * blocks being freed are metadata. these blocks shouldn't
 		 * be used until this transaction is committed
+		 *
+		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
+		 * to fail.
 		 */
-	retry:
-		new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
-		if (!new_entry) {
-			/*
-			 * We use a retry loop because
-			 * ext4_free_blocks() is not allowed to fail.
-			 */
-			cond_resched();
-			congestion_wait(BLK_RW_ASYNC, HZ/50);
-			goto retry;
-		}
+		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
+				GFP_NOFS|__GFP_NOFAIL);
 		new_entry->efd_start_cluster = bit;
 		new_entry->efd_group = block_group;
 		new_entry->efd_count = count_clusters;
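
The removed retry loop is what __GFP_NOFAIL already implements inside the allocator, which is why the hunk can rely on a non-NULL return. A minimal sketch of the resulting pattern (the cache and field names are placeholders, not from this patch):

/* With __GFP_NOFAIL the allocator retries internally until it
 * succeeds, so the caller needs no NULL check and no retry loop. */
entry = kmem_cache_alloc(my_cachep, GFP_NOFS | __GFP_NOFAIL);
entry->field = value;	/* safe: the allocation cannot fail */
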
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index b52374e42102..6163ad21cb0e 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -620,6 +620,7 @@ int ext4_ind_migrate(struct inode *inode)
 	struct ext4_inode_info		*ei = EXT4_I(inode);
 	struct ext4_extent		*ex;
 	unsigned int			i, len;
+	ext4_lblk_t			start, end;
 	ext4_fsblk_t			blk;
 	handle_t			*handle;
 	int				ret;
@@ -633,6 +634,14 @@ int ext4_ind_migrate(struct inode *inode)
 				       EXT4_FEATURE_RO_COMPAT_BIGALLOC))
 		return -EOPNOTSUPP;
 
+	/*
+	 * In order to get correct extent info, force all delayed allocation
+	 * blocks to be allocated, otherwise delayed allocation blocks may not
+	 * be reflected and bypass the checks on extent header.
+	 */
+	if (test_opt(inode->i_sb, DELALLOC))
+		ext4_alloc_da_blocks(inode);
+
 	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
@@ -650,11 +659,13 @@ int ext4_ind_migrate(struct inode *inode)
 		goto errout;
 	}
 	if (eh->eh_entries == 0)
-		blk = len = 0;
+		blk = len = start = end = 0;
 	else {
 		len = le16_to_cpu(ex->ee_len);
 		blk = ext4_ext_pblock(ex);
-		if (len > EXT4_NDIR_BLOCKS) {
+		start = le32_to_cpu(ex->ee_block);
+		end = start + len - 1;
+		if (end >= EXT4_NDIR_BLOCKS) {
 			ret = -EOPNOTSUPP;
 			goto errout;
 		}
@@ -662,7 +673,7 @@ int ext4_ind_migrate(struct inode *inode)
 
 	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
 	memset(ei->i_data, 0, sizeof(ei->i_data));
-	for (i=0; i < len; i++)
+	for (i = start; i <= end; i++)
 		ei->i_data[i] = cpu_to_le32(blk++);
 	ext4_mark_inode_dirty(handle, inode);
 errout:
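
The start/end bookkeeping matters because the first extent need not begin at logical block 0. A worked example with a hypothetical extent:

/* Hypothetical extent: ee_block = 5, ee_len = 3, pblock = 1000.
 *
 *   old loop: for (i = 0; i < len; i++)      -> fills i_data[0..2]
 *   new loop: for (i = start; i <= end; i++) -> fills i_data[5..7]
 *
 * Only the new loop places blocks 1000..1002 at the logical offsets
 * (5..7) the extent actually maps, and the end-based check also
 * rejects an extent whose last block falls beyond EXT4_NDIR_BLOCKS
 * even when its length alone would pass the old test.
 */
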
diff --git a/fs/hpfs/alloc.c b/fs/hpfs/alloc.c
index f005046e1591..d6a4b55d2ab0 100644
--- a/fs/hpfs/alloc.c
+++ b/fs/hpfs/alloc.c
@@ -484,3 +484,98 @@ struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *a
 	a->btree.first_free = cpu_to_le16(8);
 	return a;
 }
+
+static unsigned find_run(__le32 *bmp, unsigned *idx)
+{
+	unsigned len;
+	while (tstbits(bmp, *idx, 1)) {
+		(*idx)++;
+		if (unlikely(*idx >= 0x4000))
+			return 0;
+	}
+	len = 1;
+	while (!tstbits(bmp, *idx + len, 1))
+		len++;
+	return len;
+}
+
+static int do_trim(struct super_block *s, secno start, unsigned len, secno limit_start, secno limit_end, unsigned minlen, unsigned *result)
+{
+	int err;
+	secno end;
+	if (fatal_signal_pending(current))
+		return -EINTR;
+	end = start + len;
+	if (start < limit_start)
+		start = limit_start;
+	if (end > limit_end)
+		end = limit_end;
+	if (start >= end)
+		return 0;
+	if (end - start < minlen)
+		return 0;
+	err = sb_issue_discard(s, start, end - start, GFP_NOFS, 0);
+	if (err)
+		return err;
+	*result += end - start;
+	return 0;
+}
+
+int hpfs_trim_fs(struct super_block *s, u64 start, u64 end, u64 minlen, unsigned *result)
+{
+	int err = 0;
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	unsigned idx, len, start_bmp, end_bmp;
+	__le32 *bmp;
+	struct quad_buffer_head qbh;
+
+	*result = 0;
+	if (!end || end > sbi->sb_fs_size)
+		end = sbi->sb_fs_size;
+	if (start >= sbi->sb_fs_size)
+		return 0;
+	if (minlen > 0x4000)
+		return 0;
+	if (start < sbi->sb_dirband_start + sbi->sb_dirband_size && end > sbi->sb_dirband_start) {
+		hpfs_lock(s);
+		if (s->s_flags & MS_RDONLY) {
+			err = -EROFS;
+			goto unlock_1;
+		}
+		if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
+			err = -EIO;
+			goto unlock_1;
+		}
+		idx = 0;
+		while ((len = find_run(bmp, &idx)) && !err) {
+			err = do_trim(s, sbi->sb_dirband_start + idx * 4, len * 4, start, end, minlen, result);
+			idx += len;
+		}
+		hpfs_brelse4(&qbh);
+unlock_1:
+		hpfs_unlock(s);
+	}
+	start_bmp = start >> 14;
+	end_bmp = (end + 0x3fff) >> 14;
+	while (start_bmp < end_bmp && !err) {
+		hpfs_lock(s);
+		if (s->s_flags & MS_RDONLY) {
+			err = -EROFS;
+			goto unlock_2;
+		}
+		if (!(bmp = hpfs_map_bitmap(s, start_bmp, &qbh, "trim"))) {
+			err = -EIO;
+			goto unlock_2;
+		}
+		idx = 0;
+		while ((len = find_run(bmp, &idx)) && !err) {
+			err = do_trim(s, (start_bmp << 14) + idx, len, start, end, minlen, result);
+			idx += len;
+		}
+		hpfs_brelse4(&qbh);
+unlock_2:
+		hpfs_unlock(s);
+		start_bmp++;
+	}
+	return err;
+}
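
With hpfs_ioctl() wired up later in this diff, the new trim path is reached through the standard FITRIM ioctl; note that hpfs_ioctl() converts the byte-based fstrim_range to 512-byte sectors with its >> 9 shifts. A minimal userspace sketch (not part of the patch):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* FITRIM, struct fstrim_range */

int main(int argc, char **argv)
{
	struct fstrim_range range;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);	/* any file/dir on the filesystem */
	if (fd < 0)
		return 1;
	memset(&range, 0, sizeof(range));
	range.len = ~0ULL;		/* trim the whole filesystem */
	if (ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	/* the kernel writes back how many bytes were actually discarded */
	printf("%llu bytes trimmed\n", (unsigned long long)range.len);
	return 0;
}
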
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 2a8e07425de0..dc540bfcee1d 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -327,4 +327,5 @@ const struct file_operations hpfs_dir_ops =
 	.iterate	= hpfs_readdir,
 	.release	= hpfs_dir_release,
 	.fsync		= hpfs_file_fsync,
+	.unlocked_ioctl	= hpfs_ioctl,
 };
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 6d8cfe9b52d6..7ca28d604bf7 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -203,6 +203,7 @@ const struct file_operations hpfs_file_ops =
 	.release	= hpfs_file_release,
 	.fsync		= hpfs_file_fsync,
 	.splice_read	= generic_file_splice_read,
+	.unlocked_ioctl	= hpfs_ioctl,
 };
 
 const struct inode_operations hpfs_file_iops =
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index bb04b58d1d69..c4867b5116dd 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -18,6 +18,8 @@
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/blkdev.h>
 #include <asm/unaligned.h>
 
 #include "hpfs.h"
@@ -200,6 +202,7 @@ void hpfs_free_dnode(struct super_block *, secno);
 struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *);
 struct fnode *hpfs_alloc_fnode(struct super_block *, secno, fnode_secno *, struct buffer_head **);
 struct anode *hpfs_alloc_anode(struct super_block *, secno, anode_secno *, struct buffer_head **);
+int hpfs_trim_fs(struct super_block *, u64, u64, u64, unsigned *);
 
 /* anode.c */
 
@@ -318,6 +321,7 @@ __printf(2, 3)
 void hpfs_error(struct super_block *, const char *, ...);
 int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *);
 unsigned hpfs_get_free_dnodes(struct super_block *);
+long hpfs_ioctl(struct file *file, unsigned cmd, unsigned long arg);
 
 /*
  * local time (HPFS) to GMT (Unix)
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 7cd00d3a7c9b..68a9bed05628 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -52,17 +52,20 @@ static void unmark_dirty(struct super_block *s)
 }
 
 /* Filesystem error... */
-static char err_buf[1024];
-
 void hpfs_error(struct super_block *s, const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	vsnprintf(err_buf, sizeof(err_buf), fmt, args);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	pr_err("filesystem error: %pV", &vaf);
+
 	va_end(args);
 
-	pr_err("filesystem error: %s", err_buf);
 	if (!hpfs_sb(s)->sb_was_error) {
 		if (hpfs_sb(s)->sb_err == 2) {
 			pr_cont("; crashing the system because you wanted it\n");
@@ -196,12 +199,39 @@ static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	return 0;
 }
 
+
+long hpfs_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case FITRIM: {
+		struct fstrim_range range;
+		secno n_trimmed;
+		int r;
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range)))
+			return -EFAULT;
+		r = hpfs_trim_fs(file_inode(file)->i_sb, range.start >> 9, (range.start + range.len) >> 9, (range.minlen + 511) >> 9, &n_trimmed);
+		if (r)
+			return r;
+		range.len = (u64)n_trimmed << 9;
+		if (copy_to_user((struct fstrim_range __user *)arg, &range, sizeof(range)))
+			return -EFAULT;
+		return 0;
+	}
+	default: {
+		return -ENOIOCTLCMD;
+	}
+	}
+}
+
+
 static struct kmem_cache * hpfs_inode_cachep;
 
 static struct inode *hpfs_alloc_inode(struct super_block *sb)
 {
 	struct hpfs_inode_info *ei;
-	ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
+	ei = kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
 	if (!ei)
 		return NULL;
 	ei->vfs_inode.i_version = 1;
@@ -424,11 +454,14 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
 	int o;
 	struct hpfs_sb_info *sbi = hpfs_sb(s);
 	char *new_opts = kstrdup(data, GFP_KERNEL);
 
+	if (!new_opts)
+		return -ENOMEM;
+
 	sync_filesystem(s);
 
 	*flags |= MS_NOATIME;
 
 	hpfs_lock(s);
 	uid = sbi->sb_uid; gid = sbi->sb_gid;
 	umask = 0777 & ~sbi->sb_mode;
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index 93a1232894f6..8db8b7d61e40 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -180,9 +180,6 @@ long jfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	case JFS_IOC_SETFLAGS32:
 		cmd = JFS_IOC_SETFLAGS;
 		break;
-	case FITRIM:
-		cmd = FITRIM;
-		break;
 	}
 	return jfs_ioctl(filp, cmd, arg);
 }
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 9a20e513d7eb..aba43811d6ef 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -1369,7 +1369,6 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	case NILFS_IOCTL_SYNC:
 	case NILFS_IOCTL_RESIZE:
 	case NILFS_IOCTL_SET_ALLOC_RANGE:
-	case FITRIM:
 		break;
 	default:
 		return -ENOIOCTLCMD;
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 53e6c40ed4c6..3cb097ccce60 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -980,7 +980,6 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	case OCFS2_IOC_GROUP_EXTEND:
 	case OCFS2_IOC_GROUP_ADD:
 	case OCFS2_IOC_GROUP_ADD64:
-	case FITRIM:
 		break;
 	case OCFS2_IOC_REFLINK:
 		if (copy_from_user(&args, argp, sizeof(args)))
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index f140e3dbfb7b..d9da5a4e9382 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -343,6 +343,9 @@ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
 	struct path realpath;
 	enum ovl_path_type type;
 
+	if (d_is_dir(dentry))
+		return d_backing_inode(dentry);
+
 	type = ovl_path_real(dentry, &realpath);
 	if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) {
 		err = ovl_want_write(dentry);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c471dfc93b71..d2445fa9999f 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -58,6 +58,19 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
 				acpi_fwnode_handle(adev) : NULL)
 #define ACPI_HANDLE(dev)		acpi_device_handle(ACPI_COMPANION(dev))
 
+/**
+ * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
+ * the PCI-defined class-code information
+ *
+ * @_cls : the class, subclass, prog-if triple for this device
+ * @_msk : the class mask for this device
+ *
+ * This macro is used to create a struct acpi_device_id that matches a
+ * specific PCI class. The .id and .driver_data fields will be left
+ * initialized with the default value.
+ */
+#define ACPI_DEVICE_CLASS(_cls, _msk)	.cls = (_cls), .cls_msk = (_msk),
+
 static inline bool has_acpi_companion(struct device *dev)
 {
 	return is_acpi_node(dev->fwnode);
@@ -309,9 +322,6 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
 
 int acpi_resources_are_enforced(void);
 
-int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
-			unsigned long flags, char *desc);
-
 #ifdef CONFIG_HIBERNATION
 void __init acpi_no_s4_hw_signature(void);
 #endif
@@ -446,6 +456,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *);
 #define ACPI_COMPANION(dev)		(NULL)
 #define ACPI_COMPANION_SET(dev, adev)	do { } while (0)
 #define ACPI_HANDLE(dev)		(NULL)
+#define ACPI_DEVICE_CLASS(_cls, _msk)	.cls = (0), .cls_msk = (0),
 
 struct fwnode_handle;
 
@@ -507,13 +518,6 @@ static inline int acpi_check_region(resource_size_t start, resource_size_t n,
 	return 0;
 }
 
-static inline int acpi_reserve_region(u64 start, unsigned int length,
-				      u8 space_id, unsigned long flags,
-				      char *desc)
-{
-	return -ENXIO;
-}
-
 struct acpi_table_header;
 static inline int acpi_table_parse(char *id,
 				   int (*handler)(struct acpi_table_header *))
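
A hypothetical use of the new macro (not taken from this patch): an id table matching any device that advertises the AHCI PCI class code, 0x010601, with all 24 class bits compared:

static const struct acpi_device_id ahci_acpi_ids[] = {
	/* class/subclass/prog-if = 0x01/0x06/0x01, full 24-bit mask */
	{ ACPI_DEVICE_CLASS(0x010601, 0xffffff) },
	{ }
};
MODULE_DEVICE_TABLE(acpi, ahci_acpi_ids);
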
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 73b45225a7ca..e6797ded700e 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -317,6 +317,13 @@ sb_getblk(struct super_block *sb, sector_t block)
317 return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE); 317 return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
318} 318}
319 319
320
321static inline struct buffer_head *
322sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
323{
324 return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
325}
326
320static inline struct buffer_head * 327static inline struct buffer_head *
321sb_find_get_block(struct super_block *sb, sector_t block) 328sb_find_get_block(struct super_block *sb, sector_t block)
322{ 329{
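
After this change sb_getblk() is simply the movable-allocation default, and callers that run under a journal handle can opt into NOFS semantics explicitly, as the ext4 hunks earlier in this diff do. A short sketch of the two call shapes:

/* equivalent to sb_getblk(sb, block): */
bh = sb_getblk_gfp(sb, block, __GFP_MOVABLE);

/* ext4-style call sites that must not recurse into fs reclaim: */
bh = sb_getblk_gfp(sb, block, __GFP_MOVABLE | GFP_NOFS);
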
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index e15499422fdc..37753278987a 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -8,6 +8,7 @@
8#include <linux/radix-tree.h> 8#include <linux/radix-tree.h>
9#include <linux/uio.h> 9#include <linux/uio.h>
10#include <linux/workqueue.h> 10#include <linux/workqueue.h>
11#include <net/net_namespace.h>
11 12
12#include <linux/ceph/types.h> 13#include <linux/ceph/types.h>
13#include <linux/ceph/buffer.h> 14#include <linux/ceph/buffer.h>
@@ -56,6 +57,7 @@ struct ceph_messenger {
56 struct ceph_entity_addr my_enc_addr; 57 struct ceph_entity_addr my_enc_addr;
57 58
58 atomic_t stopping; 59 atomic_t stopping;
60 possible_net_t net;
59 bool nocrc; 61 bool nocrc;
60 bool tcp_nodelay; 62 bool tcp_nodelay;
61 63
@@ -267,6 +269,7 @@ extern void ceph_messenger_init(struct ceph_messenger *msgr,
267 u64 required_features, 269 u64 required_features,
268 bool nocrc, 270 bool nocrc,
269 bool tcp_nodelay); 271 bool tcp_nodelay);
272extern void ceph_messenger_fini(struct ceph_messenger *msgr);
270 273
271extern void ceph_con_init(struct ceph_connection *con, void *private, 274extern void ceph_con_init(struct ceph_connection *con, void *private,
272 const struct ceph_connection_operations *ops, 275 const struct ceph_connection_operations *ops,
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 7f8ad9593da7..e08a6ae7c0a4 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -17,11 +17,11 @@
 # define __release(x)	__context__(x,-1)
 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
 # define __percpu	__attribute__((noderef, address_space(3)))
+# define __pmem		__attribute__((noderef, address_space(5)))
 #ifdef CONFIG_SPARSE_RCU_POINTER
 # define __rcu		__attribute__((noderef, address_space(4)))
 #else
 # define __rcu
-# define __pmem		__attribute__((noderef, address_space(5)))
 #endif
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 624a668e61f1..fcea4e48e21f 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -87,7 +87,12 @@ struct irq_desc {
87 const char *name; 87 const char *name;
88} ____cacheline_internodealigned_in_smp; 88} ____cacheline_internodealigned_in_smp;
89 89
90#ifndef CONFIG_SPARSE_IRQ 90#ifdef CONFIG_SPARSE_IRQ
91extern void irq_lock_sparse(void);
92extern void irq_unlock_sparse(void);
93#else
94static inline void irq_lock_sparse(void) { }
95static inline void irq_unlock_sparse(void) { }
91extern struct irq_desc irq_desc[NR_IRQS]; 96extern struct irq_desc irq_desc[NR_IRQS];
92#endif 97#endif
93 98
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 8183d6640ca7..34f25b7bf642 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
 struct acpi_device_id {
 	__u8 id[ACPI_ID_LEN];
 	kernel_ulong_t driver_data;
+	__u32 cls;
+	__u32 cls_msk;
 };
 
 #define PNP_ID_LEN	8
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4db9fbe4889d..45932228cbf5 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -148,6 +148,7 @@ struct regulator_ops {
148 int (*get_current_limit) (struct regulator_dev *); 148 int (*get_current_limit) (struct regulator_dev *);
149 149
150 int (*set_input_current_limit) (struct regulator_dev *, int lim_uA); 150 int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
151 int (*set_over_current_protection) (struct regulator_dev *);
151 152
152 /* enable/disable regulator */ 153 /* enable/disable regulator */
153 int (*enable) (struct regulator_dev *); 154 int (*enable) (struct regulator_dev *);
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index b11be1260129..a1067d0b3991 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -147,6 +147,7 @@ struct regulation_constraints {
147 unsigned ramp_disable:1; /* disable ramp delay */ 147 unsigned ramp_disable:1; /* disable ramp delay */
148 unsigned soft_start:1; /* ramp voltage slowly */ 148 unsigned soft_start:1; /* ramp voltage slowly */
149 unsigned pull_down:1; /* pull down resistor when regulator off */ 149 unsigned pull_down:1; /* pull down resistor when regulator off */
150 unsigned over_current_protection:1; /* auto disable on over current */
150}; 151};
151 152
152/** 153/**
diff --git a/include/linux/rtc/sirfsoc_rtciobrg.h b/include/linux/rtc/sirfsoc_rtciobrg.h
index 2c92e1c8e055..aefd997262e4 100644
--- a/include/linux/rtc/sirfsoc_rtciobrg.h
+++ b/include/linux/rtc/sirfsoc_rtciobrg.h
@@ -9,10 +9,14 @@
 #ifndef _SIRFSOC_RTC_IOBRG_H_
 #define _SIRFSOC_RTC_IOBRG_H_
 
+struct regmap_config;
+
 extern void sirfsoc_rtc_iobrg_besyncing(void);
 
 extern u32 sirfsoc_rtc_iobrg_readl(u32 addr);
 
 extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr);
+struct regmap *devm_regmap_init_iobg(struct device *dev,
+				     const struct regmap_config *config);
 
 #endif
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 3741ba1a652c..edbfc9a5293e 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -67,10 +67,13 @@ extern void tick_broadcast_control(enum tick_broadcast_mode mode);
67static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { } 67static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
68#endif /* BROADCAST */ 68#endif /* BROADCAST */
69 69
70#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) 70#ifdef CONFIG_GENERIC_CLOCKEVENTS
71extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state); 71extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
72#else 72#else
73static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) { return 0; } 73static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
74{
75 return 0;
76}
74#endif 77#endif
75 78
76static inline void tick_broadcast_enable(void) 79static inline void tick_broadcast_enable(void)
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 3aa72e648650..6e191e4e6ab6 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -145,7 +145,6 @@ static inline void getboottime(struct timespec *ts)
 }
 #endif
 
-#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
 #define ktime_get_real_ts64(ts)	getnstimeofday64(ts)
 
 /*
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 09c65640cad6..e85bdfd15fed 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1021,8 +1021,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
 	 * for strings that are too long, we should not have created
 	 * any.
 	 */
-	if (unlikely((len == 0) || len > MAX_ARG_STRLEN - 1)) {
-		WARN_ON(1);
+	if (WARN_ON_ONCE(len < 0 || len > MAX_ARG_STRLEN - 1)) {
 		send_sig(SIGKILL, current, 0);
 		return -1;
 	}
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 9c9c9fab16cc..6a374544d495 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -21,6 +21,7 @@
 #include <linux/suspend.h>
 #include <linux/lockdep.h>
 #include <linux/tick.h>
+#include <linux/irq.h>
 #include <trace/events/power.h>
 
 #include "smpboot.h"
@@ -392,13 +393,19 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	smpboot_park_threads(cpu);
 
 	/*
-	 * So now all preempt/rcu users must observe !cpu_active().
+	 * Prevent irq alloc/free while the dying cpu reorganizes the
+	 * interrupt affinities.
 	 */
+	irq_lock_sparse();
 
+	/*
+	 * So now all preempt/rcu users must observe !cpu_active().
+	 */
 	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone. Can't complain. */
 		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
+		irq_unlock_sparse();
 		goto out_release;
 	}
 	BUG_ON(cpu_online(cpu));
@@ -415,6 +422,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
 	per_cpu(cpu_dead_idle, cpu) = false;
 
+	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
+	irq_unlock_sparse();
+
 	hotplug_cpu__broadcast_tick_pull(cpu);
 	/* This actually kills the CPU. */
 	__cpu_die(cpu);
@@ -517,8 +527,18 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out_notify;
 	}
 
+	/*
+	 * Some architectures have to walk the irq descriptors to
+	 * setup the vector space for the cpu which comes online.
+	 * Prevent irq alloc/free across the bringup.
+	 */
+	irq_lock_sparse();
+
 	/* Arch-specific enabling code. */
 	ret = __cpu_up(cpu, idle);
+
+	irq_unlock_sparse();
+
 	if (ret != 0)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e965cfae4207..d3dae3419b99 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4358,14 +4358,6 @@ static void ring_buffer_wakeup(struct perf_event *event)
 	rcu_read_unlock();
 }
 
-static void rb_free_rcu(struct rcu_head *rcu_head)
-{
-	struct ring_buffer *rb;
-
-	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
-	rb_free(rb);
-}
-
 struct ring_buffer *ring_buffer_get(struct perf_event *event)
 {
 	struct ring_buffer *rb;
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 2deb24c7a40d..2bbad9c1274c 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -11,6 +11,7 @@
 struct ring_buffer {
 	atomic_t			refcount;
 	struct rcu_head			rcu_head;
+	struct irq_work			irq_work;
 #ifdef CONFIG_PERF_USE_VMALLOC
 	struct work_struct		work;
 	int				page_order;	/* allocation order  */
@@ -55,6 +56,15 @@ struct ring_buffer {
 };
 
 extern void rb_free(struct ring_buffer *rb);
+
+static inline void rb_free_rcu(struct rcu_head *rcu_head)
+{
+	struct ring_buffer *rb;
+
+	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
+	rb_free(rb);
+}
+
 extern struct ring_buffer *
 rb_alloc(int nr_pages, long watermark, int cpu, int flags);
 extern void perf_event_wakeup(struct perf_event *event);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 96472824a752..b2be01b1aa9d 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -221,6 +221,8 @@ void perf_output_end(struct perf_output_handle *handle)
 	rcu_read_unlock();
 }
 
+static void rb_irq_work(struct irq_work *work);
+
 static void
 ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 {
@@ -241,6 +243,16 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 
 	INIT_LIST_HEAD(&rb->event_list);
 	spin_lock_init(&rb->event_lock);
+	init_irq_work(&rb->irq_work, rb_irq_work);
+}
+
+static void ring_buffer_put_async(struct ring_buffer *rb)
+{
+	if (!atomic_dec_and_test(&rb->refcount))
+		return;
+
+	rb->rcu_head.next = (void *)rb;
+	irq_work_queue(&rb->irq_work);
 }
 
 /*
@@ -319,7 +331,7 @@ err_put:
 	rb_free_aux(rb);
 
 err:
-	ring_buffer_put(rb);
+	ring_buffer_put_async(rb);
 	handle->event = NULL;
 
 	return NULL;
@@ -370,7 +382,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
 
 	local_set(&rb->aux_nest, 0);
 	rb_free_aux(rb);
-	ring_buffer_put(rb);
+	ring_buffer_put_async(rb);
 }
 
 /*
@@ -557,7 +569,18 @@ static void __rb_free_aux(struct ring_buffer *rb)
 void rb_free_aux(struct ring_buffer *rb)
 {
 	if (atomic_dec_and_test(&rb->aux_refcount))
+		irq_work_queue(&rb->irq_work);
+}
+
+static void rb_irq_work(struct irq_work *work)
+{
+	struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);
+
+	if (!atomic_read(&rb->aux_refcount))
 		__rb_free_aux(rb);
+
+	if (rb->rcu_head.next == (void *)rb)
+		call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
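
The hunks above defer the final free through an irq_work because the last reference can be dropped from NMI/PMI context, where call_rcu() and the teardown path are unsafe. A stripped-down sketch of the same pattern (names are illustrative, not from the patch):

#include <linux/irq_work.h>

struct obj {
	atomic_t refcount;
	struct irq_work work;	/* init_irq_work(&o->work, obj_irq_work) at setup */
};

static void obj_irq_work(struct irq_work *work)
{
	struct obj *o = container_of(work, struct obj, work);

	/* Runs in hard-irq context: safe to call_rcu(), free, etc. */
}

static void obj_put_async(struct obj *o)
{
	if (atomic_dec_and_test(&o->refcount))
		irq_work_queue(&o->work);	/* callable even from NMI */
}
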
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 4834ee828c41..61008b8433ab 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -76,12 +76,8 @@ extern void unmask_threaded_irq(struct irq_desc *desc);
 
 #ifdef CONFIG_SPARSE_IRQ
 static inline void irq_mark_irq(unsigned int irq) { }
-extern void irq_lock_sparse(void);
-extern void irq_unlock_sparse(void);
 #else
 extern void irq_mark_irq(unsigned int irq);
-static inline void irq_lock_sparse(void) { }
-static inline void irq_unlock_sparse(void) { }
 #endif
 
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
diff --git a/kernel/module.c b/kernel/module.c
index 3e0e19763d24..4d2b82e610e2 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3557,6 +3557,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	mutex_lock(&module_mutex);
 	/* Unlink carefully: kallsyms could be walking list. */
 	list_del_rcu(&mod->list);
+	mod_tree_remove(mod);
 	wake_up_all(&module_wq);
 	/* Wait for RCU-sched synchronizing before releasing mod->list. */
 	synchronize_sched();
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 08ccc3da3ca0..50eb107f1198 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -120,19 +120,25 @@ static int __clockevents_switch_state(struct clock_event_device *dev,
 	/* The clockevent device is getting replaced. Shut it down. */
 
 	case CLOCK_EVT_STATE_SHUTDOWN:
-		return dev->set_state_shutdown(dev);
+		if (dev->set_state_shutdown)
+			return dev->set_state_shutdown(dev);
+		return 0;
 
 	case CLOCK_EVT_STATE_PERIODIC:
 		/* Core internal bug */
 		if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
 			return -ENOSYS;
-		return dev->set_state_periodic(dev);
+		if (dev->set_state_periodic)
+			return dev->set_state_periodic(dev);
+		return 0;
 
 	case CLOCK_EVT_STATE_ONESHOT:
 		/* Core internal bug */
 		if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
 			return -ENOSYS;
-		return dev->set_state_oneshot(dev);
+		if (dev->set_state_oneshot)
+			return dev->set_state_oneshot(dev);
+		return 0;
 
 	case CLOCK_EVT_STATE_ONESHOT_STOPPED:
 		/* Core internal bug */
@@ -471,18 +477,6 @@ static int clockevents_sanity_check(struct clock_event_device *dev)
 	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
 		return 0;
 
-	/* New state-specific callbacks */
-	if (!dev->set_state_shutdown)
-		return -EINVAL;
-
-	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
-	    !dev->set_state_periodic)
-		return -EINVAL;
-
-	if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) &&
-	    !dev->set_state_oneshot)
-		return -EINVAL;
-
 	return 0;
 }
 
488 482
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index d39f32cdd1b5..52b9e199b5ac 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -159,7 +159,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 {
 	struct clock_event_device *bc = tick_broadcast_device.evtdev;
 	unsigned long flags;
-	int ret;
+	int ret = 0;
 
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 
@@ -221,13 +221,14 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 			/*
 			 * If we kept the cpu in the broadcast mask,
 			 * tell the caller to leave the per cpu device
 			 * in shutdown state. The periodic interrupt
-			 * is delivered by the broadcast device.
+			 * is delivered by the broadcast device, if
+			 * the broadcast device exists and is not
+			 * hrtimer based.
 			 */
-			ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
+			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
+				ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
 			break;
 		default:
-			/* Nothing to do */
-			ret = 0;
 			break;
 		}
 	}
@@ -265,8 +266,22 @@ static bool tick_do_broadcast(struct cpumask *mask)
 	 * Check, if the current cpu is in the mask
 	 */
 	if (cpumask_test_cpu(cpu, mask)) {
+		struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
 		cpumask_clear_cpu(cpu, mask);
-		local = true;
+		/*
+		 * We only run the local handler, if the broadcast
+		 * device is not hrtimer based. Otherwise we run into
+		 * a hrtimer recursion.
+		 *
+		 * local timer_interrupt()
+		 *   local_handler()
+		 *     expire_hrtimers()
+		 *       bc_handler()
+		 *         local_handler()
+		 *           expire_hrtimers()
+		 */
+		local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
 	}
 
 	if (!cpumask_empty(mask)) {
@@ -301,6 +316,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 	bool bc_local;
 
 	raw_spin_lock(&tick_broadcast_lock);
+
+	/* Handle spurious interrupts gracefully */
+	if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
+		raw_spin_unlock(&tick_broadcast_lock);
+		return;
+	}
+
 	bc_local = tick_do_periodic_broadcast();
 
 	if (clockevent_state_oneshot(dev)) {
@@ -359,8 +381,16 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
 	case TICK_BROADCAST_ON:
 		cpumask_set_cpu(cpu, tick_broadcast_on);
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
-			if (tick_broadcast_device.mode ==
-			    TICKDEV_MODE_PERIODIC)
+			/*
+			 * Only shutdown the cpu local device, if:
+			 *
+			 * - the broadcast device exists
+			 * - the broadcast device is not a hrtimer based one
+			 * - the broadcast device is in periodic mode to
+			 *   avoid a hiccup during switch to oneshot mode
+			 */
+			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
+			    tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
 				clockevents_shutdown(dev);
 		}
 		break;
@@ -379,14 +409,16 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
 		break;
 	}
 
-	if (cpumask_empty(tick_broadcast_mask)) {
-		if (!bc_stopped)
-			clockevents_shutdown(bc);
-	} else if (bc_stopped) {
-		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
-			tick_broadcast_start_periodic(bc);
-		else
-			tick_broadcast_setup_oneshot(bc);
+	if (bc) {
+		if (cpumask_empty(tick_broadcast_mask)) {
+			if (!bc_stopped)
+				clockevents_shutdown(bc);
+		} else if (bc_stopped) {
+			if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+				tick_broadcast_start_periodic(bc);
+			else
+				tick_broadcast_setup_oneshot(bc);
+		}
 	}
 	raw_spin_unlock(&tick_broadcast_lock);
 }
@@ -662,71 +694,82 @@ static void broadcast_shutdown_local(struct clock_event_device *bc,
 		clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
 }
 
-/**
- * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
- * @state:	The target state (enter/exit)
- *
- * The system enters/leaves a state, where affected devices might stop
- * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
- *
- * Called with interrupts disabled, so clockevents_lock is not
- * required here because the local clock event device cannot go away
- * under us.
- */
-int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
 {
 	struct clock_event_device *bc, *dev;
-	struct tick_device *td;
 	int cpu, ret = 0;
 	ktime_t now;
 
 	/*
-	 * Periodic mode does not care about the enter/exit of power
-	 * states
+	 * If there is no broadcast device, tell the caller not to go
+	 * into deep idle.
 	 */
-	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
-		return 0;
+	if (!tick_broadcast_device.evtdev)
+		return -EBUSY;
 
-	/*
-	 * We are called with preemtion disabled from the depth of the
-	 * idle code, so we can't be moved away.
-	 */
-	td = this_cpu_ptr(&tick_cpu_device);
-	dev = td->evtdev;
-
-	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
-		return 0;
+	dev = this_cpu_ptr(&tick_cpu_device)->evtdev;
 
 	raw_spin_lock(&tick_broadcast_lock);
 	bc = tick_broadcast_device.evtdev;
 	cpu = smp_processor_id();
 
 	if (state == TICK_BROADCAST_ENTER) {
+		/*
+		 * If the current CPU owns the hrtimer broadcast
+		 * mechanism, it cannot go deep idle and we do not add
+		 * the CPU to the broadcast mask. We don't have to go
+		 * through the EXIT path as the local timer is not
+		 * shutdown.
+		 */
+		ret = broadcast_needs_cpu(bc, cpu);
+		if (ret)
+			goto out;
+
+		/*
+		 * If the broadcast device is in periodic mode, we
+		 * return.
+		 */
+		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
+			/* If it is a hrtimer based broadcast, return busy */
+			if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
+				ret = -EBUSY;
+			goto out;
+		}
+
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
+
+			/* Conditionally shut down the local timer. */
 			broadcast_shutdown_local(bc, dev);
+
 			/*
 			 * We only reprogram the broadcast timer if we
 			 * did not mark ourself in the force mask and
 			 * if the cpu local event is earlier than the
 			 * broadcast event. If the current CPU is in
 			 * the force mask, then we are going to be
-			 * woken by the IPI right away.
+			 * woken by the IPI right away; we return
+			 * busy, so the CPU does not try to go deep
+			 * idle.
 			 */
-			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
-			    dev->next_event.tv64 < bc->next_event.tv64)
+			if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
+				ret = -EBUSY;
+			} else if (dev->next_event.tv64 < bc->next_event.tv64) {
 				tick_broadcast_set_event(bc, cpu, dev->next_event);
+				/*
+				 * In case of hrtimer broadcasts the
+				 * programming might have moved the
+				 * timer to this cpu. If yes, remove
+				 * us from the broadcast mask and
+				 * return busy.
+				 */
+				ret = broadcast_needs_cpu(bc, cpu);
+				if (ret) {
+					cpumask_clear_cpu(cpu,
+						tick_broadcast_oneshot_mask);
+				}
+			}
 		}
-		/*
-		 * If the current CPU owns the hrtimer broadcast
-		 * mechanism, it cannot go deep idle and we remove the
-		 * CPU from the broadcast mask. We don't have to go
-		 * through the EXIT path as the local timer is not
-		 * shutdown.
-		 */
-		ret = broadcast_needs_cpu(bc, cpu);
-		if (ret)
-			cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
 	} else {
 		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
@@ -938,6 +981,16 @@ bool tick_broadcast_oneshot_available(void)
 	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
 }
 
+#else
+int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+	struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+	if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
+		return -EBUSY;
+
+	return 0;
+}
 #endif
 
 void __init tick_broadcast_init(void)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 76446cb5dfe1..55e13efff1ab 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -343,6 +343,27 @@ out_bc:
 	tick_install_broadcast_device(newdev);
 }
 
+/**
+ * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
+ * @state:	The target state (enter/exit)
+ *
+ * The system enters/leaves a state, where affected devices might stop
+ * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
+ *
+ * Called with interrupts disabled, so clockevents_lock is not
+ * required here because the local clock event device cannot go away
+ * under us.
+ */
+int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+
+	if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))
+		return 0;
+
+	return __tick_broadcast_oneshot_control(state);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * Transfer the do_timer job away from a dying cpu.
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index 42fdf4958bcc..a4a8d4e9baa1 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -71,4 +71,14 @@ extern void tick_cancel_sched_timer(int cpu);
 static inline void tick_cancel_sched_timer(int cpu) { }
 #endif
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+extern int __tick_broadcast_oneshot_control(enum tick_broadcast_state state);
+#else
+static inline int
+__tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+	return -EBUSY;
+}
+#endif
+
 #endif
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 777eda7d1ab4..39f24d6721e5 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -18,10 +18,6 @@ config KASAN
 	  For better error detection enable CONFIG_STACKTRACE,
 	  and add slub_debug=U to boot cmdline.
 
-config KASAN_SHADOW_OFFSET
-	hex
-	default 0xdffffc0000000000 if X86_64
-
 choice
 	prompt "Instrumentation type"
 	depends on KASAN
diff --git a/mm/memory.c b/mm/memory.c
index a84fbb772034..388dcf9aa283 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2670,6 +2670,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	pte_unmap(page_table);
 
+	/* File mapping without ->vm_ops ? */
+	if (vma->vm_flags & VM_SHARED)
+		return VM_FAULT_SIGBUS;
+
 	/* Check if we need to add a guard page to the stack */
 	if (check_stack_guard_page(vma, address) < 0)
 		return VM_FAULT_SIGSEGV;
@@ -3099,6 +3103,9 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
 	pte_unmap(page_table);
+	/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
+	if (!vma->vm_ops->fault)
+		return VM_FAULT_SIGBUS;
 	if (!(flags & FAULT_FLAG_WRITE))
 		return do_read_fault(mm, vma, address, pmd, pgoff, flags,
 				orig_pte);
@@ -3244,13 +3251,12 @@ static int handle_pte_fault(struct mm_struct *mm,
 	barrier();
 	if (!pte_present(entry)) {
 		if (pte_none(entry)) {
-			if (vma->vm_ops) {
-				if (likely(vma->vm_ops->fault))
-					return do_fault(mm, vma, address, pte,
-							pmd, flags, entry);
-			}
-			return do_anonymous_page(mm, vma, address,
-					pte, pmd, flags);
+			if (vma->vm_ops)
+				return do_fault(mm, vma, address, pte, pmd,
+						flags, entry);
+
+			return do_anonymous_page(mm, vma, address, pte, pmd,
+						 flags);
 		}
 		return do_swap_page(mm, vma, address,
 					pte, pmd, flags, entry);
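
Seen from userspace, the effect of the do_anonymous_page() hunk is that a shared mapping with no usable fault handler now fails loudly rather than silently backing the range with anonymous pages. A hypothetical sketch (broken_fd stands for a driver that set up its vma without a fault handler):

#include <sys/mman.h>

char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	       MAP_SHARED, broken_fd, 0);

p[0] = 1;	/* previously faulted in anonymous memory silently;
		 * with the change above the fault returns
		 * VM_FAULT_SIGBUS and the process gets SIGBUS. */
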
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index cb7db320dd27..f30329f72641 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -9,6 +9,7 @@
 #include <keys/ceph-type.h>
 #include <linux/module.h>
 #include <linux/mount.h>
+#include <linux/nsproxy.h>
 #include <linux/parser.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
@@ -16,8 +17,6 @@
 #include <linux/statfs.h>
 #include <linux/string.h>
 #include <linux/vmalloc.h>
-#include <linux/nsproxy.h>
-#include <net/net_namespace.h>
 
 
 #include <linux/ceph/ceph_features.h>
@@ -131,6 +130,13 @@ int ceph_compare_options(struct ceph_options *new_opt,
 	int i;
 	int ret;
 
+	/*
+	 * Don't bother comparing options if network namespaces don't
+	 * match.
+	 */
+	if (!net_eq(current->nsproxy->net_ns, read_pnet(&client->msgr.net)))
+		return -1;
+
 	ret = memcmp(opt1, opt2, ofs);
 	if (ret)
 		return ret;
@@ -335,9 +341,6 @@ ceph_parse_options(char *options, const char *dev_name,
 	int err = -ENOMEM;
 	substring_t argstr[MAX_OPT_ARGS];
 
-	if (current->nsproxy->net_ns != &init_net)
-		return ERR_PTR(-EINVAL);
-
 	opt = kzalloc(sizeof(*opt), GFP_KERNEL);
 	if (!opt)
 		return ERR_PTR(-ENOMEM);
@@ -608,6 +611,7 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
 fail_monc:
 	ceph_monc_stop(&client->monc);
 fail:
+	ceph_messenger_fini(&client->msgr);
 	kfree(client);
 	return ERR_PTR(err);
 }
@@ -621,8 +625,8 @@ void ceph_destroy_client(struct ceph_client *client)
 
 	/* unmount */
 	ceph_osdc_stop(&client->osdc);
-
 	ceph_monc_stop(&client->monc);
+	ceph_messenger_fini(&client->msgr);
 
 	ceph_debugfs_client_cleanup(client);
 
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 1679f47280e2..e3be1d22a247 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -6,6 +6,7 @@
 #include <linux/inet.h>
 #include <linux/kthread.h>
 #include <linux/net.h>
+#include <linux/nsproxy.h>
 #include <linux/slab.h>
 #include <linux/socket.h>
 #include <linux/string.h>
@@ -479,7 +480,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
 	int ret;
 
 	BUG_ON(con->sock);
-	ret = sock_create_kern(&init_net, con->peer_addr.in_addr.ss_family,
+	ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
 			       SOCK_STREAM, IPPROTO_TCP, &sock);
 	if (ret)
 		return ret;
@@ -1731,17 +1732,17 @@ static int verify_hello(struct ceph_connection *con)
 
 static bool addr_is_blank(struct sockaddr_storage *ss)
 {
+	struct in_addr *addr = &((struct sockaddr_in *)ss)->sin_addr;
+	struct in6_addr *addr6 = &((struct sockaddr_in6 *)ss)->sin6_addr;
+
 	switch (ss->ss_family) {
 	case AF_INET:
-		return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
+		return addr->s_addr == htonl(INADDR_ANY);
 	case AF_INET6:
-		return
-			((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
-			((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
-			((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
-			((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
+		return ipv6_addr_any(addr6);
+	default:
+		return true;
 	}
-	return false;
 }
 
 static int addr_port(struct sockaddr_storage *ss)
@@ -2944,11 +2945,18 @@ void ceph_messenger_init(struct ceph_messenger *msgr,
2944 msgr->tcp_nodelay = tcp_nodelay; 2945 msgr->tcp_nodelay = tcp_nodelay;
2945 2946
2946 atomic_set(&msgr->stopping, 0); 2947 atomic_set(&msgr->stopping, 0);
2948 write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
2947 2949
2948 dout("%s %p\n", __func__, msgr); 2950 dout("%s %p\n", __func__, msgr);
2949} 2951}
2950EXPORT_SYMBOL(ceph_messenger_init); 2952EXPORT_SYMBOL(ceph_messenger_init);
2951 2953
2954void ceph_messenger_fini(struct ceph_messenger *msgr)
2955{
2956 put_net(read_pnet(&msgr->net));
2957}
2958EXPORT_SYMBOL(ceph_messenger_fini);
2959
2952static void clear_standby(struct ceph_connection *con) 2960static void clear_standby(struct ceph_connection *con)
2953{ 2961{
2954 /* come back from STANDBY? */ 2962 /* come back from STANDBY? */
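The rewritten addr_is_blank() replaces the open-coded s6_addr32 word tests with address-family helpers, compares against htonl(INADDR_ANY) instead of raw 0, and now treats unknown families as blank (the old code fell through to false). A userspace re-expression, using the POSIX macro IN6_IS_ADDR_UNSPECIFIED() in place of the kernel-only ipv6_addr_any():

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Behavior matches the new kernel code, including the "unknown family
 * is blank" default case. */
static bool addr_is_blank(const struct sockaddr_storage *ss)
{
        const struct sockaddr_in  *in  = (const struct sockaddr_in *)ss;
        const struct sockaddr_in6 *in6 = (const struct sockaddr_in6 *)ss;

        switch (ss->ss_family) {
        case AF_INET:
                return in->sin_addr.s_addr == htonl(INADDR_ANY);
        case AF_INET6:
                return IN6_IS_ADDR_UNSPECIFIED(&in6->sin6_addr);
        default:
                return true;
        }
}

int main(void)
{
        struct sockaddr_storage ss;
        struct sockaddr_in *in = (struct sockaddr_in *)&ss;

        memset(&ss, 0, sizeof(ss));
        in->sin_family = AF_INET;
        in->sin_addr.s_addr = htonl(INADDR_ANY);
        printf("0.0.0.0   blank: %d\n", addr_is_blank(&ss));

        inet_pton(AF_INET, "192.0.2.1", &in->sin_addr);
        printf("192.0.2.1 blank: %d\n", addr_is_blank(&ss));
        return 0;
}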
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index eff7de1fc82e..e70fcd12eeeb 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -63,6 +63,8 @@ int main(void)
63 63
64 DEVID(acpi_device_id); 64 DEVID(acpi_device_id);
65 DEVID_FIELD(acpi_device_id, id); 65 DEVID_FIELD(acpi_device_id, id);
66 DEVID_FIELD(acpi_device_id, cls);
67 DEVID_FIELD(acpi_device_id, cls_msk);
66 68
67 DEVID(pnp_device_id); 69 DEVID(pnp_device_id);
68 DEVID_FIELD(pnp_device_id, id); 70 DEVID_FIELD(pnp_device_id, id);
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 84c86f3cd6cd..5f2088209132 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -523,12 +523,40 @@ static int do_serio_entry(const char *filename,
523} 523}
524ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry); 524ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry);
525 525
526/* looks like: "acpi:ACPI0003 or acpi:PNP0C0B" or "acpi:LNXVIDEO" */ 526/* looks like: "acpi:ACPI0003" or "acpi:PNP0C0B" or "acpi:LNXVIDEO" or
527 * "acpi:bbsspp" (bb=base-class, ss=sub-class, pp=prog-if)
528 *
529 * NOTE: Each driver should use one of the following: _HID, _CIDs,
530 * or _CLS. Also, bb, ss, and pp can be substituted with ?? as
531 * don't-care bytes.
532 */
527static int do_acpi_entry(const char *filename, 533static int do_acpi_entry(const char *filename,
528 void *symval, char *alias) 534 void *symval, char *alias)
529{ 535{
530 DEF_FIELD_ADDR(symval, acpi_device_id, id); 536 DEF_FIELD_ADDR(symval, acpi_device_id, id);
531 sprintf(alias, "acpi*:%s:*", *id); 537 DEF_FIELD_ADDR(symval, acpi_device_id, cls);
538 DEF_FIELD_ADDR(symval, acpi_device_id, cls_msk);
539
540 if (id && strlen((const char *)*id))
541 sprintf(alias, "acpi*:%s:*", *id);
542 else if (cls) {
543 int i, byte_shift, cnt = 0;
544 unsigned int msk;
545
546 sprintf(&alias[cnt], "acpi*:");
547 cnt = 6;
548 for (i = 1; i <= 3; i++) {
549 byte_shift = 8 * (3-i);
550 msk = (*cls_msk >> byte_shift) & 0xFF;
551 if (msk)
552 sprintf(&alias[cnt], "%02x",
553 (*cls >> byte_shift) & 0xFF);
554 else
555 sprintf(&alias[cnt], "??");
556 cnt += 2;
557 }
558 sprintf(&alias[cnt], ":*");
559 }
532 return 1; 560 return 1;
533} 561}
534ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry); 562ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry);
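For the new _CLS branch above: the 24-bit class value is emitted as three hex byte pairs (base-class, sub-class, prog-if), and any byte whose mask is zero becomes the "??" wildcard. The same logic extracted into a standalone helper (illustrative only; acpi_cls_alias() is not a modpost function):

#include <stdio.h>

static void acpi_cls_alias(char *alias, unsigned int cls, unsigned int cls_msk)
{
        int i, byte_shift, cnt = sprintf(alias, "acpi*:");

        for (i = 1; i <= 3; i++) {
                byte_shift = 8 * (3 - i);
                if ((cls_msk >> byte_shift) & 0xFF)
                        cnt += sprintf(&alias[cnt], "%02x",
                                       (cls >> byte_shift) & 0xFF);
                else
                        cnt += sprintf(&alias[cnt], "??");
        }
        sprintf(&alias[cnt], ":*");
}

int main(void)
{
        char alias[32];

        acpi_cls_alias(alias, 0x010601, 0xFFFFFF);
        puts(alias);    /* acpi*:010601:* -- exact AHCI class match */

        acpi_cls_alias(alias, 0x010600, 0xFFFF00);
        puts(alias);    /* acpi*:0106??:* -- any prog-if accepted */
        return 0;
}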
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 91ee1b2e0f9a..12d3db3bd46b 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -886,7 +886,8 @@ static void check_section(const char *modname, struct elf_info *elf,
886#define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \ 886#define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \
887 ".kprobes.text" 887 ".kprobes.text"
888#define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \ 888#define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
889 ".fixup", ".entry.text", ".exception.text", ".text.*" 889 ".fixup", ".entry.text", ".exception.text", ".text.*", \
890 ".coldtext"
890 891
891#define INIT_SECTIONS ".init.*" 892#define INIT_SECTIONS ".init.*"
892#define MEM_INIT_SECTIONS ".meminit.*" 893#define MEM_INIT_SECTIONS ".meminit.*"
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 623108199641..564079c5c49d 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3283,7 +3283,8 @@ static int file_map_prot_check(struct file *file, unsigned long prot, int shared
3283 int rc = 0; 3283 int rc = 0;
3284 3284
3285 if (default_noexec && 3285 if (default_noexec &&
3286 (prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) { 3286 (prot & PROT_EXEC) && (!file || IS_PRIVATE(file_inode(file)) ||
3287 (!shared && (prot & PROT_WRITE)))) {
3287 /* 3288 /*
3288 * We are making executable an anonymous mapping or a 3289 * We are making executable an anonymous mapping or a
3289 * private file mapping that will also be writable. 3290 * private file mapping that will also be writable.
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index afe6a269ec17..57644b1dc42e 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -153,6 +153,12 @@ int ebitmap_netlbl_import(struct ebitmap *ebmap,
153 if (offset == (u32)-1) 153 if (offset == (u32)-1)
154 return 0; 154 return 0;
155 155
156 /* don't waste ebitmap space if the netlabel bitmap is empty */
157 if (bitmap == 0) {
158 offset += EBITMAP_UNIT_SIZE;
159 continue;
160 }
161
156 if (e_iter == NULL || 162 if (e_iter == NULL ||
157 offset >= e_iter->startbit + EBITMAP_SIZE) { 163 offset >= e_iter->startbit + EBITMAP_SIZE) {
158 e_prev = e_iter; 164 e_prev = e_iter;
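The ebitmap hunk skips all-zero NetLabel chunks before any per-range work, so sparse bitmaps no longer burn ebitmap nodes on empty ranges. A toy reduction of the skip-empty-units pattern (count_bits() and the fixed 32-bit unit are illustrative, not the SELinux types):

#include <stdint.h>
#include <stdio.h>

/* Walk fixed-size units and skip all-zero units up front, before doing
 * any per-unit work (in the SELinux code, before allocating an ebitmap
 * node for the range). */
static unsigned int count_bits(const uint32_t *units, int n)
{
        unsigned int total = 0;
        int i, bit;

        for (i = 0; i < n; i++) {
                if (units[i] == 0)
                        continue;       /* nothing to record here */
                for (bit = 0; bit < 32; bit++)
                        if (units[i] & (1u << bit))
                                total++;
        }
        return total;
}

int main(void)
{
        uint32_t map[] = { 0, 0, 0x5, 0, 0x80000000u };

        printf("%u bits set\n", count_bits(map, 5));    /* 3 */
        return 0;
}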
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index f0e72674c52d..9098083869c8 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -41,4 +41,62 @@
41 41
42#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) 42#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
43 43
44#include <linux/types.h>
45
46static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
47{
48 switch (size) {
49 case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
50 case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
51 case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
52 case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
53 default:
54 barrier();
55 __builtin_memcpy((void *)res, (const void *)p, size);
56 barrier();
57 }
58}
59
60static __always_inline void __write_once_size(volatile void *p, void *res, int size)
61{
62 switch (size) {
63 case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
64 case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
65 case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
66 case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
67 default:
68 barrier();
69 __builtin_memcpy((void *)p, (const void *)res, size);
70 barrier();
71 }
72}
73
74/*
75 * Prevent the compiler from merging or refetching reads or writes. The
76 * compiler is also forbidden from reordering successive instances of
77 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
78 * compiler is aware of some particular ordering. One way to make the
79 * compiler aware of ordering is to put the two invocations of READ_ONCE,
80 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
81 *
82 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
83 * data types like structs or unions. If the size of the accessed data
84 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
85 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
86 * compile-time warning.
87 *
88 * Their two major use cases are: (1) Mediating communication between
89 * process-level code and irq/NMI handlers, all running on the same CPU,
90 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
91 * mutilate accesses that either do not require ordering or that interact
92 * with an explicit memory barrier or atomic instruction that provides the
93 * required ordering.
94 */
95
96#define READ_ONCE(x) \
97 ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
98
99#define WRITE_ONCE(x, val) \
100 ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
101
44#endif /* _TOOLS_LINUX_COMPILER_H */ 102#endif /* _TOOLS_LINUX_COMPILER_H */
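Typical use of the READ_ONCE()/WRITE_ONCE() additions above is flag polling between threads, where a plain load could legally be hoisted out of the loop at -O2. A self-contained demo; the two macros are re-declared here in the simple volatile-cast form (scalars only, like ACCESS_ONCE) so the file compiles on its own, whereas the header's union-based form also handles aggregates:

/* cc -O2 -pthread demo.c */
#include <pthread.h>
#include <stdio.h>

#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

static int ready;

static void *producer(void *arg)
{
        WRITE_ONCE(ready, 1);   /* store can be neither elided nor torn */
        return arg;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, producer, NULL);

        /* Without READ_ONCE() the compiler may hoist the load out of
         * the loop at -O2 and spin forever on a stale register. */
        while (!READ_ONCE(ready))
                ;

        pthread_join(t, NULL);
        puts("saw the flag");
        return 0;
}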
diff --git a/tools/include/linux/export.h b/tools/include/linux/export.h
deleted file mode 100644
index d07e586b9ba0..000000000000
--- a/tools/include/linux/export.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _TOOLS_LINUX_EXPORT_H_
2#define _TOOLS_LINUX_EXPORT_H_
3
4#define EXPORT_SYMBOL(sym)
5#define EXPORT_SYMBOL_GPL(sym)
6#define EXPORT_SYMBOL_GPL_FUTURE(sym)
7#define EXPORT_UNUSED_SYMBOL(sym)
8#define EXPORT_UNUSED_SYMBOL_GPL(sym)
9
10#endif
diff --git a/tools/include/linux/rbtree.h b/tools/include/linux/rbtree.h
new file mode 100644
index 000000000000..112582253dd0
--- /dev/null
+++ b/tools/include/linux/rbtree.h
@@ -0,0 +1,104 @@
1/*
2 Red Black Trees
3 (C) 1999 Andrea Arcangeli <andrea@suse.de>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18
19 linux/include/linux/rbtree.h
20
21 To use rbtrees you'll have to implement your own insert and search cores.
22 This avoids callbacks, which would hurt performance dramatically.
23 It's not the cleanest way, but it's how you get both performance and
24 genericity in C (as opposed to C++)...
25
26 See Documentation/rbtree.txt for documentation and samples.
27*/
28
29#ifndef __TOOLS_LINUX_PERF_RBTREE_H
30#define __TOOLS_LINUX_PERF_RBTREE_H
31
32#include <linux/kernel.h>
33#include <linux/stddef.h>
34
35struct rb_node {
36 unsigned long __rb_parent_color;
37 struct rb_node *rb_right;
38 struct rb_node *rb_left;
39} __attribute__((aligned(sizeof(long))));
40 /* The alignment might seem pointless, but allegedly CRIS needs it */
41
42struct rb_root {
43 struct rb_node *rb_node;
44};
45
46
47#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
48
49#define RB_ROOT (struct rb_root) { NULL, }
50#define rb_entry(ptr, type, member) container_of(ptr, type, member)
51
52#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
53
54/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
55#define RB_EMPTY_NODE(node) \
56 ((node)->__rb_parent_color == (unsigned long)(node))
57#define RB_CLEAR_NODE(node) \
58 ((node)->__rb_parent_color = (unsigned long)(node))
59
60
61extern void rb_insert_color(struct rb_node *, struct rb_root *);
62extern void rb_erase(struct rb_node *, struct rb_root *);
63
64
65/* Find logical next and previous nodes in a tree */
66extern struct rb_node *rb_next(const struct rb_node *);
67extern struct rb_node *rb_prev(const struct rb_node *);
68extern struct rb_node *rb_first(const struct rb_root *);
69extern struct rb_node *rb_last(const struct rb_root *);
70
71/* Postorder iteration - always visit the parent after its children */
72extern struct rb_node *rb_first_postorder(const struct rb_root *);
73extern struct rb_node *rb_next_postorder(const struct rb_node *);
74
75/* Fast replacement of a single node without remove/rebalance/add/rebalance */
76extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
77 struct rb_root *root);
78
79static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
80 struct rb_node **rb_link)
81{
82 node->__rb_parent_color = (unsigned long)parent;
83 node->rb_left = node->rb_right = NULL;
84
85 *rb_link = node;
86}
87
88#define rb_entry_safe(ptr, type, member) \
89 ({ typeof(ptr) ____ptr = (ptr); \
90 ____ptr ? rb_entry(____ptr, type, member) : NULL; \
91 })
92
93
94/*
95 * Handy for checking that we are not deleting an entry that is
96 * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
97 * probably should be moved to lib/rbtree.c...
98 */
99static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
100{
101 rb_erase(n, root);
102 RB_CLEAR_NODE(n);
103}
104#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
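As the header's blurb says, there is no generic insert: each user open-codes the descent with its own comparison and only then calls rb_link_node() plus rb_insert_color(). A minimal sorted-integer example, assuming tools/include is on the include path and the file is linked against tools/lib/rbtree.c (added below in this series):

#include <linux/rbtree.h>
#include <stdio.h>

struct item {
        struct rb_node node;
        int key;
};

static void item_insert(struct rb_root *root, struct item *it)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;

        while (*link) {
                parent = *link;
                if (it->key < rb_entry(parent, struct item, node)->key)
                        link = &parent->rb_left;
                else
                        link = &parent->rb_right;
        }
        rb_link_node(&it->node, parent, link);  /* attach as red leaf */
        rb_insert_color(&it->node, root);       /* rebalance/recolor  */
}

int main(void)
{
        struct rb_root root = RB_ROOT;
        struct item items[] = {
                { .key = 42 }, { .key = 7 }, { .key = 19 },
                { .key = 3 },  { .key = 25 },
        };
        struct rb_node *n;
        unsigned int i;

        for (i = 0; i < sizeof(items) / sizeof(items[0]); i++)
                item_insert(&root, &items[i]);

        /* rb_first()/rb_next() visit keys in ascending order: 3 7 19 25 42 */
        for (n = rb_first(&root); n; n = rb_next(n))
                printf("%d ", rb_entry(n, struct item, node)->key);
        putchar('\n');
        return 0;
}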
diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h
new file mode 100644
index 000000000000..43be941db695
--- /dev/null
+++ b/tools/include/linux/rbtree_augmented.h
@@ -0,0 +1,245 @@
1/*
2 Red Black Trees
3 (C) 1999 Andrea Arcangeli <andrea@suse.de>
4 (C) 2002 David Woodhouse <dwmw2@infradead.org>
5 (C) 2012 Michel Lespinasse <walken@google.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20
21 tools/linux/include/linux/rbtree_augmented.h
22
23 Copied from:
24 linux/include/linux/rbtree_augmented.h
25*/
26
27#ifndef _TOOLS_LINUX_RBTREE_AUGMENTED_H
28#define _TOOLS_LINUX_RBTREE_AUGMENTED_H
29
30#include <linux/compiler.h>
31#include <linux/rbtree.h>
32
33/*
34 * Please note - only struct rb_augment_callbacks and the prototypes for
35 * rb_insert_augmented() and rb_erase_augmented() are intended to be public.
36 * The rest are implementation details you are not expected to depend on.
37 *
38 * See Documentation/rbtree.txt for documentation and samples.
39 */
40
41struct rb_augment_callbacks {
42 void (*propagate)(struct rb_node *node, struct rb_node *stop);
43 void (*copy)(struct rb_node *old, struct rb_node *new);
44 void (*rotate)(struct rb_node *old, struct rb_node *new);
45};
46
47extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
48 void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
49/*
50 * Fixup the rbtree and update the augmented information when rebalancing.
51 *
52 * On insertion, the user must update the augmented information on the path
53 * leading to the inserted node, then call rb_link_node() as usual and
54 * rb_augment_inserted() instead of the usual rb_insert_color() call.
55 * If rb_augment_inserted() rebalances the rbtree, it will callback into
56 * a user provided function to update the augmented information on the
57 * affected subtrees.
58 */
59static inline void
60rb_insert_augmented(struct rb_node *node, struct rb_root *root,
61 const struct rb_augment_callbacks *augment)
62{
63 __rb_insert_augmented(node, root, augment->rotate);
64}
65
66#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
67 rbtype, rbaugmented, rbcompute) \
68static inline void \
69rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \
70{ \
71 while (rb != stop) { \
72 rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
73 rbtype augmented = rbcompute(node); \
74 if (node->rbaugmented == augmented) \
75 break; \
76 node->rbaugmented = augmented; \
77 rb = rb_parent(&node->rbfield); \
78 } \
79} \
80static inline void \
81rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
82{ \
83 rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
84 rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
85 new->rbaugmented = old->rbaugmented; \
86} \
87static void \
88rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
89{ \
90 rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
91 rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
92 new->rbaugmented = old->rbaugmented; \
93 old->rbaugmented = rbcompute(old); \
94} \
95rbstatic const struct rb_augment_callbacks rbname = { \
96 rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
97};
98
99
100#define RB_RED 0
101#define RB_BLACK 1
102
103#define __rb_parent(pc) ((struct rb_node *)(pc & ~3))
104
105#define __rb_color(pc) ((pc) & 1)
106#define __rb_is_black(pc) __rb_color(pc)
107#define __rb_is_red(pc) (!__rb_color(pc))
108#define rb_color(rb) __rb_color((rb)->__rb_parent_color)
109#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color)
110#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color)
111
112static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
113{
114 rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
115}
116
117static inline void rb_set_parent_color(struct rb_node *rb,
118 struct rb_node *p, int color)
119{
120 rb->__rb_parent_color = (unsigned long)p | color;
121}
122
123static inline void
124__rb_change_child(struct rb_node *old, struct rb_node *new,
125 struct rb_node *parent, struct rb_root *root)
126{
127 if (parent) {
128 if (parent->rb_left == old)
129 parent->rb_left = new;
130 else
131 parent->rb_right = new;
132 } else
133 root->rb_node = new;
134}
135
136extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
137 void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
138
139static __always_inline struct rb_node *
140__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
141 const struct rb_augment_callbacks *augment)
142{
143 struct rb_node *child = node->rb_right, *tmp = node->rb_left;
144 struct rb_node *parent, *rebalance;
145 unsigned long pc;
146
147 if (!tmp) {
148 /*
149 * Case 1: node to erase has no more than 1 child (easy!)
150 *
151 * Note that if there is one child it must be red due to 5)
152 * and node must be black due to 4). We adjust colors locally
153 * so as to bypass __rb_erase_color() later on.
154 */
155 pc = node->__rb_parent_color;
156 parent = __rb_parent(pc);
157 __rb_change_child(node, child, parent, root);
158 if (child) {
159 child->__rb_parent_color = pc;
160 rebalance = NULL;
161 } else
162 rebalance = __rb_is_black(pc) ? parent : NULL;
163 tmp = parent;
164 } else if (!child) {
165 /* Still case 1, but this time the child is node->rb_left */
166 tmp->__rb_parent_color = pc = node->__rb_parent_color;
167 parent = __rb_parent(pc);
168 __rb_change_child(node, tmp, parent, root);
169 rebalance = NULL;
170 tmp = parent;
171 } else {
172 struct rb_node *successor = child, *child2;
173 tmp = child->rb_left;
174 if (!tmp) {
175 /*
176 * Case 2: node's successor is its right child
177 *
178 * (n) (s)
179 * / \ / \
180 * (x) (s) -> (x) (c)
181 * \
182 * (c)
183 */
184 parent = successor;
185 child2 = successor->rb_right;
186 augment->copy(node, successor);
187 } else {
188 /*
189 * Case 3: node's successor is leftmost under
190 * node's right child subtree
191 *
192 * (n) (s)
193 * / \ / \
194 * (x) (y) -> (x) (y)
195 * / /
196 * (p) (p)
197 * / /
198 * (s) (c)
199 * \
200 * (c)
201 */
202 do {
203 parent = successor;
204 successor = tmp;
205 tmp = tmp->rb_left;
206 } while (tmp);
207 parent->rb_left = child2 = successor->rb_right;
208 successor->rb_right = child;
209 rb_set_parent(child, successor);
210 augment->copy(node, successor);
211 augment->propagate(parent, successor);
212 }
213
214 successor->rb_left = tmp = node->rb_left;
215 rb_set_parent(tmp, successor);
216
217 pc = node->__rb_parent_color;
218 tmp = __rb_parent(pc);
219 __rb_change_child(node, successor, tmp, root);
220 if (child2) {
221 successor->__rb_parent_color = pc;
222 rb_set_parent_color(child2, parent, RB_BLACK);
223 rebalance = NULL;
224 } else {
225 unsigned long pc2 = successor->__rb_parent_color;
226 successor->__rb_parent_color = pc;
227 rebalance = __rb_is_black(pc2) ? parent : NULL;
228 }
229 tmp = successor;
230 }
231
232 augment->propagate(tmp, NULL);
233 return rebalance;
234}
235
236static __always_inline void
237rb_erase_augmented(struct rb_node *node, struct rb_root *root,
238 const struct rb_augment_callbacks *augment)
239{
240 struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
241 if (rebalance)
242 __rb_erase_color(rebalance, root, augment->rotate);
243}
244
245#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
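A sketch of the augmented API: each node caches the maximum value in its subtree, RB_DECLARE_CALLBACKS() generates the propagate/copy/rotate glue that keeps the cache valid across rebalancing, and the insert path updates the caches on the way down before calling rb_insert_augmented(). This follows the pattern lib/interval_tree.c uses; the aug type and helpers are illustrative, and the file links against tools/lib/rbtree.c as in the previous example:

#include <linux/rbtree_augmented.h>
#include <stdio.h>

struct aug {
        struct rb_node node;
        int val;
        int max;        /* max of ->val over this node's subtree */
};

static int aug_compute_max(struct aug *a)
{
        int m = a->val;

        if (a->node.rb_left) {
                struct aug *l = rb_entry(a->node.rb_left, struct aug, node);

                if (l->max > m)
                        m = l->max;
        }
        if (a->node.rb_right) {
                struct aug *r = rb_entry(a->node.rb_right, struct aug, node);

                if (r->max > m)
                        m = r->max;
        }
        return m;
}

RB_DECLARE_CALLBACKS(static, aug_cb, struct aug, node, int, max,
                     aug_compute_max)

static void aug_insert(struct rb_root *root, struct aug *new)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;

        new->max = new->val;            /* a leaf's subtree is itself */
        while (*link) {
                struct aug *p = rb_entry(*link, struct aug, node);

                parent = *link;
                if (new->val > p->max)  /* grow caches on the way down */
                        p->max = new->val;
                link = new->val < p->val ? &parent->rb_left
                                         : &parent->rb_right;
        }
        rb_link_node(&new->node, parent, link);
        rb_insert_augmented(&new->node, root, &aug_cb);
}

int main(void)
{
        struct rb_root root = RB_ROOT;
        struct aug a = { .val = 10 }, b = { .val = 30 }, c = { .val = 20 };

        aug_insert(&root, &a);
        aug_insert(&root, &b);
        aug_insert(&root, &c);
        printf("tree max: %d\n",
               rb_entry(root.rb_node, struct aug, node)->max); /* 30 */
        return 0;
}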
diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c
new file mode 100644
index 000000000000..17c2b596f043
--- /dev/null
+++ b/tools/lib/rbtree.c
@@ -0,0 +1,548 @@
1/*
2 Red Black Trees
3 (C) 1999 Andrea Arcangeli <andrea@suse.de>
4 (C) 2002 David Woodhouse <dwmw2@infradead.org>
5 (C) 2012 Michel Lespinasse <walken@google.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20
21 linux/lib/rbtree.c
22*/
23
24#include <linux/rbtree_augmented.h>
25
26/*
27 * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
28 *
29 * 1) A node is either red or black
30 * 2) The root is black
31 * 3) All leaves (NULL) are black
32 * 4) Both children of every red node are black
33 * 5) Every simple path from root to leaves contains the same number
34 * of black nodes.
35 *
36 * 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
37 * consecutive red nodes in a path and every red node is therefore followed by
38 * a black. So if B is the number of black nodes on every simple path (as per
39 * 5), then the longest possible path due to 4 is 2B.
40 *
41 * We shall indicate color with case, where black nodes are uppercase and red
42 * nodes will be lowercase. Unknown color nodes shall be drawn as red within
43 * parentheses and have some accompanying text comment.
44 */
45
46static inline void rb_set_black(struct rb_node *rb)
47{
48 rb->__rb_parent_color |= RB_BLACK;
49}
50
51static inline struct rb_node *rb_red_parent(struct rb_node *red)
52{
53 return (struct rb_node *)red->__rb_parent_color;
54}
55
56/*
57 * Helper function for rotations:
58 * - old's parent and color get assigned to new
59 * - old gets assigned new as a parent and 'color' as a color.
60 */
61static inline void
62__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
63 struct rb_root *root, int color)
64{
65 struct rb_node *parent = rb_parent(old);
66 new->__rb_parent_color = old->__rb_parent_color;
67 rb_set_parent_color(old, new, color);
68 __rb_change_child(old, new, parent, root);
69}
70
71static __always_inline void
72__rb_insert(struct rb_node *node, struct rb_root *root,
73 void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
74{
75 struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
76
77 while (true) {
78 /*
79 * Loop invariant: node is red
80 *
81 * If there is a black parent, we are done.
82 * Otherwise, take some corrective action as we don't
83 * want a red root or two consecutive red nodes.
84 */
85 if (!parent) {
86 rb_set_parent_color(node, NULL, RB_BLACK);
87 break;
88 } else if (rb_is_black(parent))
89 break;
90
91 gparent = rb_red_parent(parent);
92
93 tmp = gparent->rb_right;
94 if (parent != tmp) { /* parent == gparent->rb_left */
95 if (tmp && rb_is_red(tmp)) {
96 /*
97 * Case 1 - color flips
98 *
99 * G g
100 * / \ / \
101 * p u --> P U
102 * / /
103 * n n
104 *
105 * However, since g's parent might be red, and
106 * 4) does not allow this, we need to recurse
107 * at g.
108 */
109 rb_set_parent_color(tmp, gparent, RB_BLACK);
110 rb_set_parent_color(parent, gparent, RB_BLACK);
111 node = gparent;
112 parent = rb_parent(node);
113 rb_set_parent_color(node, parent, RB_RED);
114 continue;
115 }
116
117 tmp = parent->rb_right;
118 if (node == tmp) {
119 /*
120 * Case 2 - left rotate at parent
121 *
122 * G G
123 * / \ / \
124 * p U --> n U
125 * \ /
126 * n p
127 *
128 * This still leaves us in violation of 4), the
129 * continuation into Case 3 will fix that.
130 */
131 parent->rb_right = tmp = node->rb_left;
132 node->rb_left = parent;
133 if (tmp)
134 rb_set_parent_color(tmp, parent,
135 RB_BLACK);
136 rb_set_parent_color(parent, node, RB_RED);
137 augment_rotate(parent, node);
138 parent = node;
139 tmp = node->rb_right;
140 }
141
142 /*
143 * Case 3 - right rotate at gparent
144 *
145 * G P
146 * / \ / \
147 * p U --> n g
148 * / \
149 * n U
150 */
151 gparent->rb_left = tmp; /* == parent->rb_right */
152 parent->rb_right = gparent;
153 if (tmp)
154 rb_set_parent_color(tmp, gparent, RB_BLACK);
155 __rb_rotate_set_parents(gparent, parent, root, RB_RED);
156 augment_rotate(gparent, parent);
157 break;
158 } else {
159 tmp = gparent->rb_left;
160 if (tmp && rb_is_red(tmp)) {
161 /* Case 1 - color flips */
162 rb_set_parent_color(tmp, gparent, RB_BLACK);
163 rb_set_parent_color(parent, gparent, RB_BLACK);
164 node = gparent;
165 parent = rb_parent(node);
166 rb_set_parent_color(node, parent, RB_RED);
167 continue;
168 }
169
170 tmp = parent->rb_left;
171 if (node == tmp) {
172 /* Case 2 - right rotate at parent */
173 parent->rb_left = tmp = node->rb_right;
174 node->rb_right = parent;
175 if (tmp)
176 rb_set_parent_color(tmp, parent,
177 RB_BLACK);
178 rb_set_parent_color(parent, node, RB_RED);
179 augment_rotate(parent, node);
180 parent = node;
181 tmp = node->rb_left;
182 }
183
184 /* Case 3 - left rotate at gparent */
185 gparent->rb_right = tmp; /* == parent->rb_left */
186 parent->rb_left = gparent;
187 if (tmp)
188 rb_set_parent_color(tmp, gparent, RB_BLACK);
189 __rb_rotate_set_parents(gparent, parent, root, RB_RED);
190 augment_rotate(gparent, parent);
191 break;
192 }
193 }
194}
195
196/*
197 * Inline version for rb_erase() use - we want to be able to inline
198 * and eliminate the dummy_rotate callback there
199 */
200static __always_inline void
201____rb_erase_color(struct rb_node *parent, struct rb_root *root,
202 void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
203{
204 struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
205
206 while (true) {
207 /*
208 * Loop invariants:
209 * - node is black (or NULL on first iteration)
210 * - node is not the root (parent is not NULL)
211 * - All leaf paths going through parent and node have a
212 * black node count that is 1 lower than other leaf paths.
213 */
214 sibling = parent->rb_right;
215 if (node != sibling) { /* node == parent->rb_left */
216 if (rb_is_red(sibling)) {
217 /*
218 * Case 1 - left rotate at parent
219 *
220 * P S
221 * / \ / \
222 * N s --> p Sr
223 * / \ / \
224 * Sl Sr N Sl
225 */
226 parent->rb_right = tmp1 = sibling->rb_left;
227 sibling->rb_left = parent;
228 rb_set_parent_color(tmp1, parent, RB_BLACK);
229 __rb_rotate_set_parents(parent, sibling, root,
230 RB_RED);
231 augment_rotate(parent, sibling);
232 sibling = tmp1;
233 }
234 tmp1 = sibling->rb_right;
235 if (!tmp1 || rb_is_black(tmp1)) {
236 tmp2 = sibling->rb_left;
237 if (!tmp2 || rb_is_black(tmp2)) {
238 /*
239 * Case 2 - sibling color flip
240 * (p could be either color here)
241 *
242 * (p) (p)
243 * / \ / \
244 * N S --> N s
245 * / \ / \
246 * Sl Sr Sl Sr
247 *
248 * This leaves us violating 5) which
249 * can be fixed by flipping p to black
250 * if it was red, or by recursing at p.
251 * p is red when coming from Case 1.
252 */
253 rb_set_parent_color(sibling, parent,
254 RB_RED);
255 if (rb_is_red(parent))
256 rb_set_black(parent);
257 else {
258 node = parent;
259 parent = rb_parent(node);
260 if (parent)
261 continue;
262 }
263 break;
264 }
265 /*
266 * Case 3 - right rotate at sibling
267 * (p could be either color here)
268 *
269 * (p) (p)
270 * / \ / \
271 * N S --> N Sl
272 * / \ \
273 * sl Sr s
274 * \
275 * Sr
276 */
277 sibling->rb_left = tmp1 = tmp2->rb_right;
278 tmp2->rb_right = sibling;
279 parent->rb_right = tmp2;
280 if (tmp1)
281 rb_set_parent_color(tmp1, sibling,
282 RB_BLACK);
283 augment_rotate(sibling, tmp2);
284 tmp1 = sibling;
285 sibling = tmp2;
286 }
287 /*
288 * Case 4 - left rotate at parent + color flips
289 * (p and sl could be either color here.
290 * After rotation, p becomes black, s acquires
291 * p's color, and sl keeps its color)
292 *
293 * (p) (s)
294 * / \ / \
295 * N S --> P Sr
296 * / \ / \
297 * (sl) sr N (sl)
298 */
299 parent->rb_right = tmp2 = sibling->rb_left;
300 sibling->rb_left = parent;
301 rb_set_parent_color(tmp1, sibling, RB_BLACK);
302 if (tmp2)
303 rb_set_parent(tmp2, parent);
304 __rb_rotate_set_parents(parent, sibling, root,
305 RB_BLACK);
306 augment_rotate(parent, sibling);
307 break;
308 } else {
309 sibling = parent->rb_left;
310 if (rb_is_red(sibling)) {
311 /* Case 1 - right rotate at parent */
312 parent->rb_left = tmp1 = sibling->rb_right;
313 sibling->rb_right = parent;
314 rb_set_parent_color(tmp1, parent, RB_BLACK);
315 __rb_rotate_set_parents(parent, sibling, root,
316 RB_RED);
317 augment_rotate(parent, sibling);
318 sibling = tmp1;
319 }
320 tmp1 = sibling->rb_left;
321 if (!tmp1 || rb_is_black(tmp1)) {
322 tmp2 = sibling->rb_right;
323 if (!tmp2 || rb_is_black(tmp2)) {
324 /* Case 2 - sibling color flip */
325 rb_set_parent_color(sibling, parent,
326 RB_RED);
327 if (rb_is_red(parent))
328 rb_set_black(parent);
329 else {
330 node = parent;
331 parent = rb_parent(node);
332 if (parent)
333 continue;
334 }
335 break;
336 }
337 /* Case 3 - right rotate at sibling */
338 sibling->rb_right = tmp1 = tmp2->rb_left;
339 tmp2->rb_left = sibling;
340 parent->rb_left = tmp2;
341 if (tmp1)
342 rb_set_parent_color(tmp1, sibling,
343 RB_BLACK);
344 augment_rotate(sibling, tmp2);
345 tmp1 = sibling;
346 sibling = tmp2;
347 }
348 /* Case 4 - left rotate at parent + color flips */
349 parent->rb_left = tmp2 = sibling->rb_right;
350 sibling->rb_right = parent;
351 rb_set_parent_color(tmp1, sibling, RB_BLACK);
352 if (tmp2)
353 rb_set_parent(tmp2, parent);
354 __rb_rotate_set_parents(parent, sibling, root,
355 RB_BLACK);
356 augment_rotate(parent, sibling);
357 break;
358 }
359 }
360}
361
362/* Non-inline version for rb_erase_augmented() use */
363void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
364 void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
365{
366 ____rb_erase_color(parent, root, augment_rotate);
367}
368
369/*
370 * Non-augmented rbtree manipulation functions.
371 *
372 * We use dummy augmented callbacks here, and have the compiler optimize them
373 * out of the rb_insert_color() and rb_erase() function definitions.
374 */
375
376static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
377static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
378static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
379
380static const struct rb_augment_callbacks dummy_callbacks = {
381 dummy_propagate, dummy_copy, dummy_rotate
382};
383
384void rb_insert_color(struct rb_node *node, struct rb_root *root)
385{
386 __rb_insert(node, root, dummy_rotate);
387}
388
389void rb_erase(struct rb_node *node, struct rb_root *root)
390{
391 struct rb_node *rebalance;
392 rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
393 if (rebalance)
394 ____rb_erase_color(rebalance, root, dummy_rotate);
395}
396
397/*
398 * Augmented rbtree manipulation functions.
399 *
400 * This instantiates the same __always_inline functions as in the non-augmented
401 * case, but this time with user-defined callbacks.
402 */
403
404void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
405 void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
406{
407 __rb_insert(node, root, augment_rotate);
408}
409
410/*
411 * This function returns the first node (in sort order) of the tree.
412 */
413struct rb_node *rb_first(const struct rb_root *root)
414{
415 struct rb_node *n;
416
417 n = root->rb_node;
418 if (!n)
419 return NULL;
420 while (n->rb_left)
421 n = n->rb_left;
422 return n;
423}
424
425struct rb_node *rb_last(const struct rb_root *root)
426{
427 struct rb_node *n;
428
429 n = root->rb_node;
430 if (!n)
431 return NULL;
432 while (n->rb_right)
433 n = n->rb_right;
434 return n;
435}
436
437struct rb_node *rb_next(const struct rb_node *node)
438{
439 struct rb_node *parent;
440
441 if (RB_EMPTY_NODE(node))
442 return NULL;
443
444 /*
445 * If we have a right-hand child, go down and then left as far
446 * as we can.
447 */
448 if (node->rb_right) {
449 node = node->rb_right;
450 while (node->rb_left)
451 node = node->rb_left;
452 return (struct rb_node *)node;
453 }
454
455 /*
456 * No right-hand children. Everything down and left is smaller than us,
457 * so any 'next' node must be in the general direction of our parent.
458 * Go up the tree; any time the ancestor is a right-hand child of its
459 * parent, keep going up. First time it's a left-hand child of its
460 * parent, said parent is our 'next' node.
461 */
462 while ((parent = rb_parent(node)) && node == parent->rb_right)
463 node = parent;
464
465 return parent;
466}
467
468struct rb_node *rb_prev(const struct rb_node *node)
469{
470 struct rb_node *parent;
471
472 if (RB_EMPTY_NODE(node))
473 return NULL;
474
475 /*
476 * If we have a left-hand child, go down and then right as far
477 * as we can.
478 */
479 if (node->rb_left) {
480 node = node->rb_left;
481 while (node->rb_right)
482 node = node->rb_right;
483 return (struct rb_node *)node;
484 }
485
486 /*
487 * No left-hand children. Go up till we find an ancestor which
488 * is a right-hand child of its parent.
489 */
490 while ((parent = rb_parent(node)) && node == parent->rb_left)
491 node = parent;
492
493 return parent;
494}
495
496void rb_replace_node(struct rb_node *victim, struct rb_node *new,
497 struct rb_root *root)
498{
499 struct rb_node *parent = rb_parent(victim);
500
501 /* Set the surrounding nodes to point to the replacement */
502 __rb_change_child(victim, new, parent, root);
503 if (victim->rb_left)
504 rb_set_parent(victim->rb_left, new);
505 if (victim->rb_right)
506 rb_set_parent(victim->rb_right, new);
507
508 /* Copy the pointers/colour from the victim to the replacement */
509 *new = *victim;
510}
511
512static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
513{
514 for (;;) {
515 if (node->rb_left)
516 node = node->rb_left;
517 else if (node->rb_right)
518 node = node->rb_right;
519 else
520 return (struct rb_node *)node;
521 }
522}
523
524struct rb_node *rb_next_postorder(const struct rb_node *node)
525{
526 const struct rb_node *parent;
527 if (!node)
528 return NULL;
529 parent = rb_parent(node);
530
531 /* If we're sitting on node, we've already seen our children */
532 if (parent && node == parent->rb_left && parent->rb_right) {
533 /* If we are the parent's left node, go to the parent's right
534 * node then all the way down to the left */
535 return rb_left_deepest_node(parent->rb_right);
536 } else
537 /* Otherwise we are the parent's right node, and the parent
538 * should be next */
539 return (struct rb_node *)parent;
540}
541
542struct rb_node *rb_first_postorder(const struct rb_root *root)
543{
544 if (!root->rb_node)
545 return NULL;
546
547 return rb_left_deepest_node(root->rb_node);
548}
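One thing rb_first_postorder()/rb_next_postorder() buy you that the in-order walk does not: children are always visited before their parent, so a tree can be torn down with free() during the walk, provided the next pointer is fetched before the current node is freed. A sketch under the same build assumptions as the earlier examples:

#include <linux/rbtree.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
        struct rb_node node;
        int key;
};

static void item_insert(struct rb_root *root, struct item *it)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;

        while (*link) {
                parent = *link;
                link = it->key < rb_entry(parent, struct item, node)->key
                        ? &parent->rb_left : &parent->rb_right;
        }
        rb_link_node(&it->node, parent, link);
        rb_insert_color(&it->node, root);
}

/* Postorder teardown: both children are visited before their parent,
 * so each node may be freed as soon as it is visited, as long as the
 * next pointer is fetched before the current node goes away. */
static void tree_free(struct rb_root *root)
{
        struct rb_node *n = rb_first_postorder(root);

        while (n) {
                struct rb_node *next = rb_next_postorder(n);

                free(rb_entry(n, struct item, node));
                n = next;
        }
        root->rb_node = NULL;
}

int main(void)
{
        struct rb_root root = RB_ROOT;
        int i;

        for (i = 0; i < 5; i++) {
                struct item *it = malloc(sizeof(*it));

                it->key = i;
                item_insert(&root, it);
        }
        tree_free(&root);
        puts(RB_EMPTY_ROOT(&root) ? "tree empty" : "leak!");
        return 0;
}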
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index fe50a1b34aa0..09dc0aabb515 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -18,6 +18,7 @@ tools/arch/x86/include/asm/atomic.h
18tools/arch/x86/include/asm/rmwcc.h 18tools/arch/x86/include/asm/rmwcc.h
19tools/lib/traceevent 19tools/lib/traceevent
20tools/lib/api 20tools/lib/api
21tools/lib/rbtree.c
21tools/lib/symbol/kallsyms.c 22tools/lib/symbol/kallsyms.c
22tools/lib/symbol/kallsyms.h 23tools/lib/symbol/kallsyms.h
23tools/lib/util/find_next_bit.c 24tools/lib/util/find_next_bit.c
@@ -44,6 +45,8 @@ tools/include/linux/kernel.h
44tools/include/linux/list.h 45tools/include/linux/list.h
45tools/include/linux/log2.h 46tools/include/linux/log2.h
46tools/include/linux/poison.h 47tools/include/linux/poison.h
48tools/include/linux/rbtree.h
49tools/include/linux/rbtree_augmented.h
47tools/include/linux/types.h 50tools/include/linux/types.h
48include/asm-generic/bitops/arch_hweight.h 51include/asm-generic/bitops/arch_hweight.h
49include/asm-generic/bitops/const_hweight.h 52include/asm-generic/bitops/const_hweight.h
@@ -51,12 +54,10 @@ include/asm-generic/bitops/fls64.h
51include/asm-generic/bitops/__fls.h 54include/asm-generic/bitops/__fls.h
52include/asm-generic/bitops/fls.h 55include/asm-generic/bitops/fls.h
53include/linux/perf_event.h 56include/linux/perf_event.h
54include/linux/rbtree.h
55include/linux/list.h 57include/linux/list.h
56include/linux/hash.h 58include/linux/hash.h
57include/linux/stringify.h 59include/linux/stringify.h
58lib/hweight.c 60lib/hweight.c
59lib/rbtree.c
60include/linux/swab.h 61include/linux/swab.h
61arch/*/include/asm/unistd*.h 62arch/*/include/asm/unistd*.h
62arch/*/include/uapi/asm/unistd*.h 63arch/*/include/uapi/asm/unistd*.h
@@ -65,7 +66,6 @@ arch/*/lib/memcpy*.S
65arch/*/lib/memset*.S 66arch/*/lib/memset*.S
66include/linux/poison.h 67include/linux/poison.h
67include/linux/hw_breakpoint.h 68include/linux/hw_breakpoint.h
68include/linux/rbtree_augmented.h
69include/uapi/linux/perf_event.h 69include/uapi/linux/perf_event.h
70include/uapi/linux/const.h 70include/uapi/linux/const.h
71include/uapi/linux/swab.h 71include/uapi/linux/swab.h
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 586a59d46022..601d11440596 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -139,7 +139,7 @@ $(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c FORCE
139 $(call rule_mkdir) 139 $(call rule_mkdir)
140 $(call if_changed_dep,cc_o_c) 140 $(call if_changed_dep,cc_o_c)
141 141
142$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c FORCE 142$(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE
143 $(call rule_mkdir) 143 $(call rule_mkdir)
144 $(call if_changed_dep,cc_o_c) 144 $(call if_changed_dep,cc_o_c)
145 145
diff --git a/tools/perf/util/include/linux/rbtree.h b/tools/perf/util/include/linux/rbtree.h
deleted file mode 100644
index f06d89f0b867..000000000000
--- a/tools/perf/util/include/linux/rbtree.h
+++ /dev/null
@@ -1,16 +0,0 @@
1#ifndef __TOOLS_LINUX_PERF_RBTREE_H
2#define __TOOLS_LINUX_PERF_RBTREE_H
3#include <stdbool.h>
4#include "../../../../include/linux/rbtree.h"
5
6/*
7 * Handy for checking that we are not deleting an entry that is
8 * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
9 * probably should be moved to lib/rbtree.c...
10 */
11static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
12{
13 rb_erase(n, root);
14 RB_CLEAR_NODE(n);
15}
16#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
diff --git a/tools/perf/util/include/linux/rbtree_augmented.h b/tools/perf/util/include/linux/rbtree_augmented.h
deleted file mode 100644
index 9d6fcdf1788b..000000000000
--- a/tools/perf/util/include/linux/rbtree_augmented.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <stdbool.h>
2#include "../../../../include/linux/rbtree_augmented.h"
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index 8e9b64520ec1..f56914c7929b 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -1,3 +1,6 @@
1ldflags-y += --wrap=ioremap_wt
2ldflags-y += --wrap=ioremap_wc
3ldflags-y += --wrap=devm_ioremap_nocache
1ldflags-y += --wrap=ioremap_cache 4ldflags-y += --wrap=ioremap_cache
2ldflags-y += --wrap=ioremap_nocache 5ldflags-y += --wrap=ioremap_nocache
3ldflags-y += --wrap=iounmap 6ldflags-y += --wrap=iounmap
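Each "ldflags-y += --wrap=SYM" line asks the linker to resolve undefined references to SYM against __wrap_SYM, while __real_SYM still reaches the original definition; that is how test/iomap.c (next hunk) interposes on the ioremap family without touching callers. The same mechanism in a three-file userspace toy (file names hypothetical):

/* real.c -- the original definition */
#include <stdio.h>
void greet(void) { puts("real greet"); }

/* wrap.c -- the interposer; __real_greet links to the original */
#include <stdio.h>
void __real_greet(void);
void __wrap_greet(void)
{
        puts("wrapped greet");
        __real_greet();
}

/* main.c -- an ordinary caller, compiled with no knowledge of the wrap */
void greet(void);
int main(void) { greet(); return 0; }

/* Build and run:
 *   cc -c real.c wrap.c main.c
 *   cc -Wl,--wrap=greet real.o wrap.o main.o -o demo
 *   ./demo           -> "wrapped greet" then "real greet"
 */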
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index c85a6f6ba559..64bfaa50831c 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -65,6 +65,21 @@ void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
65 return fallback_fn(offset, size); 65 return fallback_fn(offset, size);
66} 66}
67 67
68void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
69 resource_size_t offset, unsigned long size)
70{
71 struct nfit_test_resource *nfit_res;
72
73 rcu_read_lock();
74 nfit_res = get_nfit_res(offset);
75 rcu_read_unlock();
76 if (nfit_res)
77 return (void __iomem *) nfit_res->buf + offset
78 - nfit_res->res->start;
79 return devm_ioremap_nocache(dev, offset, size);
80}
81EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
82
68void __iomem *__wrap_ioremap_cache(resource_size_t offset, unsigned long size) 83void __iomem *__wrap_ioremap_cache(resource_size_t offset, unsigned long size)
69{ 84{
70 return __nfit_test_ioremap(offset, size, ioremap_cache); 85 return __nfit_test_ioremap(offset, size, ioremap_cache);
@@ -77,6 +92,18 @@ void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
77} 92}
78EXPORT_SYMBOL(__wrap_ioremap_nocache); 93EXPORT_SYMBOL(__wrap_ioremap_nocache);
79 94
95void __iomem *__wrap_ioremap_wt(resource_size_t offset, unsigned long size)
96{
97 return __nfit_test_ioremap(offset, size, ioremap_wt);
98}
99EXPORT_SYMBOL(__wrap_ioremap_wt);
100
101void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
102{
103 return __nfit_test_ioremap(offset, size, ioremap_wc);
104}
105EXPORT_SYMBOL(__wrap_ioremap_wc);
106
80void __wrap_iounmap(volatile void __iomem *addr) 107void __wrap_iounmap(volatile void __iomem *addr)
81{ 108{
82 struct nfit_test_resource *nfit_res; 109 struct nfit_test_resource *nfit_res;
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 4b69b8368de0..d0bdae40ccc9 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -128,6 +128,8 @@ struct nfit_test {
128 int num_pm; 128 int num_pm;
129 void **dimm; 129 void **dimm;
130 dma_addr_t *dimm_dma; 130 dma_addr_t *dimm_dma;
131 void **flush;
132 dma_addr_t *flush_dma;
131 void **label; 133 void **label;
132 dma_addr_t *label_dma; 134 dma_addr_t *label_dma;
133 void **spa_set; 135 void **spa_set;
@@ -155,7 +157,7 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
155 int i, rc; 157 int i, rc;
156 158
157 if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask)) 159 if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask))
158 return -ENXIO; 160 return -ENOTTY;
159 161
160 /* lookup label space for the given dimm */ 162 /* lookup label space for the given dimm */
161 for (i = 0; i < ARRAY_SIZE(handle); i++) 163 for (i = 0; i < ARRAY_SIZE(handle); i++)
@@ -331,7 +333,8 @@ static int nfit_test0_alloc(struct nfit_test *t)
331 + sizeof(struct acpi_nfit_system_address) * NUM_SPA 333 + sizeof(struct acpi_nfit_system_address) * NUM_SPA
332 + sizeof(struct acpi_nfit_memory_map) * NUM_MEM 334 + sizeof(struct acpi_nfit_memory_map) * NUM_MEM
333 + sizeof(struct acpi_nfit_control_region) * NUM_DCR 335 + sizeof(struct acpi_nfit_control_region) * NUM_DCR
334 + sizeof(struct acpi_nfit_data_region) * NUM_BDW; 336 + sizeof(struct acpi_nfit_data_region) * NUM_BDW
337 + sizeof(struct acpi_nfit_flush_address) * NUM_DCR;
335 int i; 338 int i;
336 339
337 t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma); 340 t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
@@ -356,6 +359,10 @@ static int nfit_test0_alloc(struct nfit_test *t)
356 if (!t->label[i]) 359 if (!t->label[i])
357 return -ENOMEM; 360 return -ENOMEM;
358 sprintf(t->label[i], "label%d", i); 361 sprintf(t->label[i], "label%d", i);
362
363 t->flush[i] = test_alloc(t, 8, &t->flush_dma[i]);
364 if (!t->flush[i])
365 return -ENOMEM;
359 } 366 }
360 367
361 for (i = 0; i < NUM_DCR; i++) { 368 for (i = 0; i < NUM_DCR; i++) {
@@ -408,6 +415,7 @@ static void nfit_test0_setup(struct nfit_test *t)
408 struct acpi_nfit_system_address *spa; 415 struct acpi_nfit_system_address *spa;
409 struct acpi_nfit_control_region *dcr; 416 struct acpi_nfit_control_region *dcr;
410 struct acpi_nfit_data_region *bdw; 417 struct acpi_nfit_data_region *bdw;
418 struct acpi_nfit_flush_address *flush;
411 unsigned int offset; 419 unsigned int offset;
412 420
413 nfit_test_init_header(nfit_buf, size); 421 nfit_test_init_header(nfit_buf, size);
@@ -831,6 +839,39 @@ static void nfit_test0_setup(struct nfit_test *t)
831 bdw->capacity = DIMM_SIZE; 839 bdw->capacity = DIMM_SIZE;
832 bdw->start_address = 0; 840 bdw->start_address = 0;
833 841
842 offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
843 /* flush0 (dimm0) */
844 flush = nfit_buf + offset;
845 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
846 flush->header.length = sizeof(struct acpi_nfit_flush_address);
847 flush->device_handle = handle[0];
848 flush->hint_count = 1;
849 flush->hint_address[0] = t->flush_dma[0];
850
851 /* flush1 (dimm1) */
852 flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 1;
853 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
854 flush->header.length = sizeof(struct acpi_nfit_flush_address);
855 flush->device_handle = handle[1];
856 flush->hint_count = 1;
857 flush->hint_address[0] = t->flush_dma[1];
858
859 /* flush2 (dimm2) */
860 flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 2;
861 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
862 flush->header.length = sizeof(struct acpi_nfit_flush_address);
863 flush->device_handle = handle[2];
864 flush->hint_count = 1;
865 flush->hint_address[0] = t->flush_dma[2];
866
867 /* flush3 (dimm3) */
868 flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 3;
869 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
870 flush->header.length = sizeof(struct acpi_nfit_flush_address);
871 flush->device_handle = handle[3];
872 flush->hint_count = 1;
873 flush->hint_address[0] = t->flush_dma[3];
874
834 acpi_desc = &t->acpi_desc; 875 acpi_desc = &t->acpi_desc;
835 set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en); 876 set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en);
836 set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en); 877 set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
@@ -933,6 +974,10 @@ static int nfit_test_probe(struct platform_device *pdev)
933 GFP_KERNEL); 974 GFP_KERNEL);
934 nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t), 975 nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
935 GFP_KERNEL); 976 GFP_KERNEL);
977 nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
978 GFP_KERNEL);
979 nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
980 GFP_KERNEL);
936 nfit_test->label = devm_kcalloc(dev, num, sizeof(void *), 981 nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
937 GFP_KERNEL); 982 GFP_KERNEL);
938 nfit_test->label_dma = devm_kcalloc(dev, num, 983 nfit_test->label_dma = devm_kcalloc(dev, num,
@@ -943,7 +988,8 @@ static int nfit_test_probe(struct platform_device *pdev)
943 sizeof(dma_addr_t), GFP_KERNEL); 988 sizeof(dma_addr_t), GFP_KERNEL);
944 if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label 989 if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
945 && nfit_test->label_dma && nfit_test->dcr 990 && nfit_test->label_dma && nfit_test->dcr
946 && nfit_test->dcr_dma) 991 && nfit_test->dcr_dma && nfit_test->flush
992 && nfit_test->flush_dma)
947 /* pass */; 993 /* pass */;
948 else 994 else
949 return -ENOMEM; 995 return -ENOMEM;
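The four flush0..flush3 blocks in nfit_test0_setup() differ only in the index, so the same table can be emitted from a loop. A standalone re-expression with stand-in types (struct flush_address and TYPE_FLUSH_ADDRESS below are simplified stand-ins for the acpi_nfit_flush_address layout used above, which lives in include/acpi/actbl1.h):

#include <stdint.h>
#include <stdio.h>

struct header { uint16_t type, length; };
struct flush_address {
        struct header header;
        uint32_t device_handle;
        uint16_t hint_count;
        uint64_t hint_address[1];
};
#define TYPE_FLUSH_ADDRESS 6    /* NFIT Flush Hint Address structure */

int main(void)
{
        struct flush_address table[4];
        uint32_t handle[4] = { 0, 1, 0x100, 0x101 };
        uint64_t flush_dma[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
        int i;

        /* one loop replaces the four copy-pasted flush0..flush3 blocks */
        for (i = 0; i < 4; i++) {
                struct flush_address *flush = &table[i];

                flush->header.type = TYPE_FLUSH_ADDRESS;
                flush->header.length = sizeof(*flush);
                flush->device_handle = handle[i];
                flush->hint_count = 1;
                flush->hint_address[0] = flush_dma[i];
        }

        for (i = 0; i < 4; i++)
                printf("flush%d -> handle %#x hint %#llx\n", i,
                       (unsigned)table[i].device_handle,
                       (unsigned long long)table[i].hint_address[0]);
        return 0;
}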