-rw-r--r--	Documentation/x86/tlb.txt	2
-rw-r--r--	MAINTAINERS	14
-rw-r--r--	arch/arm/boot/dts/exynos4412-odroid-common.dtsi	13
-rw-r--r--	arch/arm/boot/dts/imx53.dtsi	2
-rw-r--r--	arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts	3
-rw-r--r--	arch/arm/boot/dts/imx6sx-pinfunc.h	26
-rw-r--r--	arch/arm/boot/dts/r8a7791-koelsch.dts	7
-rw-r--r--	arch/arm/boot/dts/rk3066a-bqcurie2.dts	2
-rw-r--r--	arch/arm/boot/dts/rk3188-radxarock.dts	2
-rw-r--r--	arch/arm/boot/dts/sun6i-a31.dtsi	8
-rw-r--r--	arch/arm/boot/dts/tegra30-apalis.dtsi	11
-rw-r--r--	arch/arm/boot/dts/tegra30-colibri.dtsi	11
-rw-r--r--	arch/arm/boot/dts/vf610-twr.dts	2
-rw-r--r--	arch/arm/mach-imx/Kconfig	2
-rw-r--r--	arch/arm/mach-imx/Makefile	2
-rw-r--r--	arch/arm/mach-imx/clk-imx6q.c	10
-rw-r--r--	arch/arm/mach-imx/suspend-imx6.S	2
-rw-r--r--	arch/arm/mach-shmobile/Kconfig	2
-rw-r--r--	arch/arm64/kernel/efi.c	2
-rw-r--r--	arch/mips/alchemy/devboards/db1200.c	6
-rw-r--r--	arch/mips/bcm47xx/setup.c	13
-rw-r--r--	arch/mips/cavium-octeon/setup.c	19
-rw-r--r--	arch/mips/include/asm/eva.h	43
-rw-r--r--	arch/mips/include/asm/gic.h	2
-rw-r--r--	arch/mips/include/asm/irq.h	2
-rw-r--r--	arch/mips/include/asm/mach-malta/kernel-entry-init.h	22
-rw-r--r--	arch/mips/include/asm/mach-netlogic/topology.h	7
-rw-r--r--	arch/mips/include/asm/pgtable.h	8
-rw-r--r--	arch/mips/include/asm/syscall.h	8
-rw-r--r--	arch/mips/kernel/cps-vec.S	4
-rw-r--r--	arch/mips/kernel/perf_event_mipsxx.c	2
-rw-r--r--	arch/mips/kernel/scall64-o32.S	12
-rw-r--r--	arch/mips/loongson/loongson-3/cop2-ex.c	8
-rw-r--r--	arch/mips/loongson/loongson-3/numa.c	2
-rw-r--r--	arch/mips/mm/cache.c	27
-rw-r--r--	arch/mips/mti-malta/malta-memory.c	14
-rw-r--r--	arch/mips/pmcs-msp71xx/msp_irq.c	2
-rw-r--r--	arch/sh/Kconfig	3
-rw-r--r--	arch/x86/kernel/entry_32.S	2
-rw-r--r--	arch/x86/mm/tlb.c	10
-rw-r--r--	drivers/bus/arm-ccn.c	2
-rw-r--r--	drivers/firmware/efi/vars.c	8
-rw-r--r--	drivers/gpio/devres.c	2
-rw-r--r--	drivers/gpio/gpio-lynxpoint.c	18
-rw-r--r--	drivers/gpio/gpio-zynq.c	36
-rw-r--r--	drivers/gpio/gpiolib-of.c	4
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c	33
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	3
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c	33
-rw-r--r--	drivers/gpu/drm/i915/intel_crt.c	7
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	39
-rw-r--r--	drivers/gpu/drm/i915/intel_dp.c	33
-rw-r--r--	drivers/gpu/drm/i915/intel_drv.h	9
-rw-r--r--	drivers/gpu/drm/i915/intel_tv.c	7
-rw-r--r--	drivers/gpu/drm/radeon/Makefile	2
-rw-r--r--	drivers/gpu/drm/radeon/ci_dpm.c	3
-rw-r--r--	drivers/gpu/drm/radeon/cik.c	43
-rw-r--r--	drivers/gpu/drm/radeon/cik_sdma.c	6
-rw-r--r--	drivers/gpu/drm/radeon/evergreen.c	4
-rw-r--r--	drivers/gpu/drm/radeon/evergreen_dma.c	2
-rw-r--r--	drivers/gpu/drm/radeon/kv_dpm.c	11
-rw-r--r--	drivers/gpu/drm/radeon/ni.c	4
-rw-r--r--	drivers/gpu/drm/radeon/r100.c	8
-rw-r--r--	drivers/gpu/drm/radeon/r200.c	2
-rw-r--r--	drivers/gpu/drm/radeon/r300.c	2
-rw-r--r--	drivers/gpu/drm/radeon/r420.c	4
-rw-r--r--	drivers/gpu/drm/radeon/r600.c	26
-rw-r--r--	drivers/gpu/drm/radeon/r600_dma.c	6
-rw-r--r--	drivers/gpu/drm/radeon/r600d.h	1
-rw-r--r--	drivers/gpu/drm/radeon/radeon.h	9
-rw-r--r--	drivers/gpu/drm/radeon/radeon_cs.c	9
-rw-r--r--	drivers/gpu/drm/radeon/radeon_device.c	34
-rw-r--r--	drivers/gpu/drm/radeon/radeon_drv.c	4
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ib.c	5
-rw-r--r--	drivers/gpu/drm/radeon/radeon_pm.c	16
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ring.c	20
-rw-r--r--	drivers/gpu/drm/radeon/radeon_semaphore.c	2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_test.c	18
-rw-r--r--	drivers/gpu/drm/radeon/radeon_uvd.c	2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_vce.c	6
-rw-r--r--	drivers/gpu/drm/radeon/radeon_vm.c	10
-rw-r--r--	drivers/gpu/drm/radeon/rv515.c	2
-rw-r--r--	drivers/gpu/drm/radeon/rv770_dma.c	2
-rw-r--r--	drivers/gpu/drm/radeon/si.c	19
-rw-r--r--	drivers/gpu/drm/radeon/si_dma.c	2
-rw-r--r--	drivers/gpu/drm/radeon/trinity_dpm.c	24
-rw-r--r--	drivers/gpu/drm/radeon/uvd_v1_0.c	4
-rw-r--r--	drivers/sh/Makefile	3
-rw-r--r--	drivers/sh/intc/Kconfig	6
-rw-r--r--	fs/aio.c	77
-rw-r--r--	include/drm/drm_pciids.h	7
-rw-r--r--	include/linux/ftrace.h	14
-rw-r--r--	include/linux/gpio/consumer.h	4
-rw-r--r--	include/uapi/drm/radeon_drm.h	1
-rw-r--r--	kernel/events/core.c	23
-rw-r--r--	kernel/kprobes.c	13
-rw-r--r--	kernel/trace/ftrace.c	246
97 files changed, 899 insertions(+), 380 deletions(-)
diff --git a/Documentation/x86/tlb.txt b/Documentation/x86/tlb.txt
index 2b3a82e69151..39d172326703 100644
--- a/Documentation/x86/tlb.txt
+++ b/Documentation/x86/tlb.txt
@@ -35,7 +35,7 @@ invlpg instruction (or instructions _near_ it) show up high in
 profiles. If you believe that individual invalidations being
 called too often, you can lower the tunable:
 
-	/sys/debug/kernel/x86/tlb_single_page_flush_ceiling
+	/sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 
 This will cause us to do the global flush for more cases.
 Lowering it to 0 will disable the use of the individual flushes.
diff --git a/MAINTAINERS b/MAINTAINERS
index f01f54f27750..1ff06dee651d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1277,6 +1277,7 @@ F: drivers/scsi/arm/
 ARM/Rockchip SoC support
 M:	Heiko Stuebner <heiko@sntech.de>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:	linux-rockchip@lists.infradead.org
 S:	Maintained
 F:	arch/arm/mach-rockchip/
 F:	drivers/*/*rockchip*
@@ -2065,7 +2066,7 @@ S: Supported
 F:	drivers/scsi/bnx2i/
 
 BROADCOM KONA GPIO DRIVER
-M:	Markus Mayer <markus.mayer@linaro.org>
+M:	Ray Jui <rjui@broadcom.com>
 L:	bcm-kernel-feedback-list@broadcom.com
 S:	Supported
 F:	drivers/gpio/gpio-bcm-kona.c
@@ -3121,6 +3122,17 @@ F: include/linux/host1x.h
 F:	include/uapi/drm/tegra_drm.h
 F:	Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
 
+DRM DRIVERS FOR RENESAS
+M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L:	dri-devel@lists.freedesktop.org
+L:	linux-sh@vger.kernel.org
+T:	git git://people.freedesktop.org/~airlied/linux
+S:	Supported
+F:	drivers/gpu/drm/rcar-du/
+F:	drivers/gpu/drm/shmobile/
+F:	include/linux/platform_data/rcar-du.h
+F:	include/linux/platform_data/shmob_drm.h
+
 DSBR100 USB FM RADIO DRIVER
 M:	Alexey Klimov <klimov.linux@gmail.com>
 L:	linux-media@vger.kernel.org
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 6d6d23c83d30..adadaf97ac01 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -134,6 +134,8 @@
 	i2c@13860000 {
 		pinctrl-0 = <&i2c0_bus>;
 		pinctrl-names = "default";
+		samsung,i2c-sda-delay = <100>;
+		samsung,i2c-max-bus-freq = <400000>;
 		status = "okay";
 
 		usb3503: usb3503@08 {
@@ -148,6 +150,10 @@
 
 		max77686: pmic@09 {
 			compatible = "maxim,max77686";
+			interrupt-parent = <&gpx3>;
+			interrupts = <2 0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&max77686_irq>;
 			reg = <0x09>;
 			#clock-cells = <1>;
 
@@ -368,4 +374,11 @@
 		samsung,pins = "gpx1-3";
 		samsung,pin-pud = <0>;
 	};
+
+	max77686_irq: max77686-irq {
+		samsung,pins = "gpx3-2";
+		samsung,pin-function = <0>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
 };
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 64fa27b36be0..c6c58c1c00e3 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -731,7 +731,7 @@
 			compatible = "fsl,imx53-vpu";
 			reg = <0x63ff4000 0x1000>;
 			interrupts = <9>;
-			clocks = <&clks IMX5_CLK_VPU_GATE>,
+			clocks = <&clks IMX5_CLK_VPU_REFERENCE_GATE>,
 				 <&clks IMX5_CLK_VPU_GATE>;
 			clock-names = "per", "ahb";
 			resets = <&src 1>;
diff --git a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
index 8c1cb53464a0..4fa254347798 100644
--- a/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
+++ b/arch/arm/boot/dts/imx6q-dmo-edmqmx6.dts
@@ -119,7 +119,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
 	phy-mode = "rgmii";
-	phy-reset-gpios = <&gpio3 23 0>;
+	phy-reset-gpios = <&gpio1 25 0>;
 	phy-supply = <&vgen2_1v2_eth>;
 	status = "okay";
 };
@@ -339,6 +339,7 @@
 				MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK	0x1b0b0
 				MX6QDL_PAD_ENET_MDIO__ENET_MDIO		0x1b0b0
 				MX6QDL_PAD_ENET_MDC__ENET_MDC		0x1b0b0
+				MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25	0x1b0b0
 				MX6QDL_PAD_GPIO_16__ENET_REF_CLK	0x4001b0a8
 			>;
 		};
diff --git a/arch/arm/boot/dts/imx6sx-pinfunc.h b/arch/arm/boot/dts/imx6sx-pinfunc.h
index 3e0b816dac08..bb9c6b78cb97 100644
--- a/arch/arm/boot/dts/imx6sx-pinfunc.h
+++ b/arch/arm/boot/dts/imx6sx-pinfunc.h
@@ -78,7 +78,7 @@
 #define MX6SX_PAD_GPIO1_IO07__USDHC2_WP		0x0030 0x0378 0x0870 0x1 0x1
 #define MX6SX_PAD_GPIO1_IO07__ENET2_MDIO	0x0030 0x0378 0x0770 0x2 0x0
 #define MX6SX_PAD_GPIO1_IO07__AUDMUX_MCLK	0x0030 0x0378 0x0000 0x3 0x0
-#define MX6SX_PAD_GPIO1_IO07__UART1_CTS_B	0x0030 0x0378 0x082C 0x4 0x1
+#define MX6SX_PAD_GPIO1_IO07__UART1_CTS_B	0x0030 0x0378 0x0000 0x4 0x0
 #define MX6SX_PAD_GPIO1_IO07__GPIO1_IO_7	0x0030 0x0378 0x0000 0x5 0x0
 #define MX6SX_PAD_GPIO1_IO07__SRC_EARLY_RESET	0x0030 0x0378 0x0000 0x6 0x0
 #define MX6SX_PAD_GPIO1_IO07__DCIC2_OUT		0x0030 0x0378 0x0000 0x7 0x0
@@ -96,7 +96,7 @@
 #define MX6SX_PAD_GPIO1_IO09__WDOG2_WDOG_B	0x0038 0x0380 0x0000 0x1 0x0
 #define MX6SX_PAD_GPIO1_IO09__SDMA_EXT_EVENT_1	0x0038 0x0380 0x0820 0x2 0x0
 #define MX6SX_PAD_GPIO1_IO09__CCM_OUT0		0x0038 0x0380 0x0000 0x3 0x0
-#define MX6SX_PAD_GPIO1_IO09__UART2_CTS_B	0x0038 0x0380 0x0834 0x4 0x1
+#define MX6SX_PAD_GPIO1_IO09__UART2_CTS_B	0x0038 0x0380 0x0000 0x4 0x0
 #define MX6SX_PAD_GPIO1_IO09__GPIO1_IO_9	0x0038 0x0380 0x0000 0x5 0x0
 #define MX6SX_PAD_GPIO1_IO09__SRC_INT_BOOT	0x0038 0x0380 0x0000 0x6 0x0
 #define MX6SX_PAD_GPIO1_IO09__OBSERVE_MUX_OUT_4	0x0038 0x0380 0x0000 0x7 0x0
@@ -213,7 +213,7 @@
 #define MX6SX_PAD_CSI_DATA07__ESAI_TX3_RX2	0x0068 0x03B0 0x079C 0x1 0x1
 #define MX6SX_PAD_CSI_DATA07__I2C4_SDA		0x0068 0x03B0 0x07C4 0x2 0x2
 #define MX6SX_PAD_CSI_DATA07__KPP_ROW_7		0x0068 0x03B0 0x07DC 0x3 0x0
-#define MX6SX_PAD_CSI_DATA07__UART6_CTS_B	0x0068 0x03B0 0x0854 0x4 0x1
+#define MX6SX_PAD_CSI_DATA07__UART6_CTS_B	0x0068 0x03B0 0x0000 0x4 0x0
 #define MX6SX_PAD_CSI_DATA07__GPIO1_IO_21	0x0068 0x03B0 0x0000 0x5 0x0
 #define MX6SX_PAD_CSI_DATA07__WEIM_DATA_16	0x0068 0x03B0 0x0000 0x6 0x0
 #define MX6SX_PAD_CSI_DATA07__DCIC1_OUT		0x0068 0x03B0 0x0000 0x7 0x0
@@ -254,7 +254,7 @@
 #define MX6SX_PAD_CSI_VSYNC__CSI1_VSYNC		0x0078 0x03C0 0x0708 0x0 0x0
 #define MX6SX_PAD_CSI_VSYNC__ESAI_TX5_RX0	0x0078 0x03C0 0x07A4 0x1 0x1
 #define MX6SX_PAD_CSI_VSYNC__AUDMUX_AUD6_RXD	0x0078 0x03C0 0x0674 0x2 0x1
-#define MX6SX_PAD_CSI_VSYNC__UART4_CTS_B	0x0078 0x03C0 0x0844 0x3 0x3
+#define MX6SX_PAD_CSI_VSYNC__UART4_CTS_B	0x0078 0x03C0 0x0000 0x3 0x0
 #define MX6SX_PAD_CSI_VSYNC__MQS_RIGHT		0x0078 0x03C0 0x0000 0x4 0x0
 #define MX6SX_PAD_CSI_VSYNC__GPIO1_IO_25	0x0078 0x03C0 0x0000 0x5 0x0
 #define MX6SX_PAD_CSI_VSYNC__WEIM_DATA_24	0x0078 0x03C0 0x0000 0x6 0x0
@@ -352,7 +352,7 @@
 #define MX6SX_PAD_ENET2_TX_CLK__ENET2_TX_CLK	0x00A0 0x03E8 0x0000 0x0 0x0
 #define MX6SX_PAD_ENET2_TX_CLK__ENET2_REF_CLK2	0x00A0 0x03E8 0x076C 0x1 0x1
 #define MX6SX_PAD_ENET2_TX_CLK__I2C3_SDA	0x00A0 0x03E8 0x07BC 0x2 0x1
-#define MX6SX_PAD_ENET2_TX_CLK__UART1_CTS_B	0x00A0 0x03E8 0x082C 0x3 0x3
+#define MX6SX_PAD_ENET2_TX_CLK__UART1_CTS_B	0x00A0 0x03E8 0x0000 0x3 0x0
 #define MX6SX_PAD_ENET2_TX_CLK__MLB_CLK		0x00A0 0x03E8 0x07E8 0x4 0x1
 #define MX6SX_PAD_ENET2_TX_CLK__GPIO2_IO_9	0x00A0 0x03E8 0x0000 0x5 0x0
 #define MX6SX_PAD_ENET2_TX_CLK__USB_OTG2_PWR	0x00A0 0x03E8 0x0000 0x6 0x0
@@ -404,7 +404,7 @@
 #define MX6SX_PAD_KEY_COL4__SAI2_RX_BCLK	0x00B4 0x03FC 0x0808 0x7 0x0
 #define MX6SX_PAD_KEY_ROW0__KPP_ROW_0		0x00B8 0x0400 0x0000 0x0 0x0
 #define MX6SX_PAD_KEY_ROW0__USDHC3_WP		0x00B8 0x0400 0x0000 0x1 0x0
-#define MX6SX_PAD_KEY_ROW0__UART6_CTS_B		0x00B8 0x0400 0x0854 0x2 0x3
+#define MX6SX_PAD_KEY_ROW0__UART6_CTS_B		0x00B8 0x0400 0x0000 0x2 0x0
 #define MX6SX_PAD_KEY_ROW0__ECSPI1_MOSI		0x00B8 0x0400 0x0718 0x3 0x0
 #define MX6SX_PAD_KEY_ROW0__AUDMUX_AUD5_TXD	0x00B8 0x0400 0x0660 0x4 0x0
 #define MX6SX_PAD_KEY_ROW0__GPIO2_IO_15		0x00B8 0x0400 0x0000 0x5 0x0
@@ -423,7 +423,7 @@
 #define MX6SX_PAD_KEY_ROW1__M4_NMI		0x00BC 0x0404 0x0000 0x8 0x0
 #define MX6SX_PAD_KEY_ROW2__KPP_ROW_2		0x00C0 0x0408 0x0000 0x0 0x0
 #define MX6SX_PAD_KEY_ROW2__USDHC4_WP		0x00C0 0x0408 0x0878 0x1 0x1
-#define MX6SX_PAD_KEY_ROW2__UART5_CTS_B		0x00C0 0x0408 0x084C 0x2 0x3
+#define MX6SX_PAD_KEY_ROW2__UART5_CTS_B		0x00C0 0x0408 0x0000 0x2 0x0
 #define MX6SX_PAD_KEY_ROW2__CAN1_RX		0x00C0 0x0408 0x068C 0x3 0x1
 #define MX6SX_PAD_KEY_ROW2__CANFD_RX1		0x00C0 0x0408 0x0694 0x4 0x1
 #define MX6SX_PAD_KEY_ROW2__GPIO2_IO_17		0x00C0 0x0408 0x0000 0x5 0x0
@@ -815,7 +815,7 @@
 #define MX6SX_PAD_NAND_DATA05__RAWNAND_DATA05	0x0164 0x04AC 0x0000 0x0 0x0
 #define MX6SX_PAD_NAND_DATA05__USDHC2_DATA5	0x0164 0x04AC 0x0000 0x1 0x0
 #define MX6SX_PAD_NAND_DATA05__QSPI2_B_DQS	0x0164 0x04AC 0x0000 0x2 0x0
-#define MX6SX_PAD_NAND_DATA05__UART3_CTS_B	0x0164 0x04AC 0x083C 0x3 0x1
+#define MX6SX_PAD_NAND_DATA05__UART3_CTS_B	0x0164 0x04AC 0x0000 0x3 0x0
 #define MX6SX_PAD_NAND_DATA05__AUDMUX_AUD4_RXC	0x0164 0x04AC 0x064C 0x4 0x0
 #define MX6SX_PAD_NAND_DATA05__GPIO4_IO_9	0x0164 0x04AC 0x0000 0x5 0x0
 #define MX6SX_PAD_NAND_DATA05__WEIM_AD_5	0x0164 0x04AC 0x0000 0x6 0x0
@@ -957,7 +957,7 @@
 #define MX6SX_PAD_QSPI1A_SS1_B__SIM_M_HADDR_12	0x019C 0x04E4 0x0000 0x7 0x0
 #define MX6SX_PAD_QSPI1A_SS1_B__SDMA_DEBUG_PC_3	0x019C 0x04E4 0x0000 0x9 0x0
 #define MX6SX_PAD_QSPI1B_DATA0__QSPI1_B_DATA_0	0x01A0 0x04E8 0x0000 0x0 0x0
-#define MX6SX_PAD_QSPI1B_DATA0__UART3_CTS_B	0x01A0 0x04E8 0x083C 0x1 0x4
+#define MX6SX_PAD_QSPI1B_DATA0__UART3_CTS_B	0x01A0 0x04E8 0x0000 0x1 0x0
 #define MX6SX_PAD_QSPI1B_DATA0__ECSPI3_MOSI	0x01A0 0x04E8 0x0738 0x2 0x1
 #define MX6SX_PAD_QSPI1B_DATA0__ESAI_RX_FS	0x01A0 0x04E8 0x0778 0x3 0x2
 #define MX6SX_PAD_QSPI1B_DATA0__CSI1_DATA_22	0x01A0 0x04E8 0x06F4 0x4 0x1
@@ -1236,7 +1236,7 @@
 #define MX6SX_PAD_SD1_DATA2__AUDMUX_AUD5_TXFS	0x0230 0x0578 0x0670 0x1 0x1
 #define MX6SX_PAD_SD1_DATA2__PWM3_OUT		0x0230 0x0578 0x0000 0x2 0x0
 #define MX6SX_PAD_SD1_DATA2__GPT_COMPARE2	0x0230 0x0578 0x0000 0x3 0x0
-#define MX6SX_PAD_SD1_DATA2__UART2_CTS_B	0x0230 0x0578 0x0834 0x4 0x2
+#define MX6SX_PAD_SD1_DATA2__UART2_CTS_B	0x0230 0x0578 0x0000 0x4 0x0
 #define MX6SX_PAD_SD1_DATA2__GPIO6_IO_4		0x0230 0x0578 0x0000 0x5 0x0
 #define MX6SX_PAD_SD1_DATA2__ECSPI4_RDY		0x0230 0x0578 0x0000 0x6 0x0
 #define MX6SX_PAD_SD1_DATA2__CCM_OUT0		0x0230 0x0578 0x0000 0x7 0x0
@@ -1315,7 +1315,7 @@
 #define MX6SX_PAD_SD2_DATA3__VADC_CLAMP_CURRENT_3	0x024C 0x0594 0x0000 0x8 0x0
 #define MX6SX_PAD_SD2_DATA3__MMDC_DEBUG_31	0x024C 0x0594 0x0000 0x9 0x0
 #define MX6SX_PAD_SD3_CLK__USDHC3_CLK		0x0250 0x0598 0x0000 0x0 0x0
-#define MX6SX_PAD_SD3_CLK__UART4_CTS_B		0x0250 0x0598 0x0844 0x1 0x0
+#define MX6SX_PAD_SD3_CLK__UART4_CTS_B		0x0250 0x0598 0x0000 0x1 0x0
 #define MX6SX_PAD_SD3_CLK__ECSPI4_SCLK		0x0250 0x0598 0x0740 0x2 0x0
 #define MX6SX_PAD_SD3_CLK__AUDMUX_AUD6_RXFS	0x0250 0x0598 0x0680 0x3 0x0
 #define MX6SX_PAD_SD3_CLK__LCDIF2_VSYNC		0x0250 0x0598 0x0000 0x4 0x0
@@ -1409,7 +1409,7 @@
 #define MX6SX_PAD_SD3_DATA7__USDHC3_DATA7	0x0274 0x05BC 0x0000 0x0 0x0
 #define MX6SX_PAD_SD3_DATA7__CAN1_RX		0x0274 0x05BC 0x068C 0x1 0x0
 #define MX6SX_PAD_SD3_DATA7__CANFD_RX1		0x0274 0x05BC 0x0694 0x2 0x0
-#define MX6SX_PAD_SD3_DATA7__UART3_CTS_B	0x0274 0x05BC 0x083C 0x3 0x3
+#define MX6SX_PAD_SD3_DATA7__UART3_CTS_B	0x0274 0x05BC 0x0000 0x3 0x0
 #define MX6SX_PAD_SD3_DATA7__LCDIF2_DATA_5	0x0274 0x05BC 0x0000 0x4 0x0
 #define MX6SX_PAD_SD3_DATA7__GPIO7_IO_9		0x0274 0x05BC 0x0000 0x5 0x0
 #define MX6SX_PAD_SD3_DATA7__ENET1_1588_EVENT0_IN	0x0274 0x05BC 0x0000 0x6 0x0
@@ -1510,7 +1510,7 @@
 #define MX6SX_PAD_SD4_DATA6__SDMA_DEBUG_EVENT_CHANNEL_1	0x0298 0x05E0 0x0000 0x9 0x0
 #define MX6SX_PAD_SD4_DATA7__USDHC4_DATA7	0x029C 0x05E4 0x0000 0x0 0x0
 #define MX6SX_PAD_SD4_DATA7__RAWNAND_DATA08	0x029C 0x05E4 0x0000 0x1 0x0
-#define MX6SX_PAD_SD4_DATA7__UART5_CTS_B	0x029C 0x05E4 0x084C 0x2 0x1
+#define MX6SX_PAD_SD4_DATA7__UART5_CTS_B	0x029C 0x05E4 0x0000 0x2 0x0
 #define MX6SX_PAD_SD4_DATA7__ECSPI3_SS0		0x029C 0x05E4 0x073C 0x3 0x0
 #define MX6SX_PAD_SD4_DATA7__LCDIF2_DATA_15	0x029C 0x05E4 0x0000 0x4 0x0
 #define MX6SX_PAD_SD4_DATA7__GPIO6_IO_21	0x029C 0x05E4 0x0000 0x5 0x0
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index 23486c081a69..be59014474b2 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -275,11 +275,6 @@
 		renesas,function = "msiof0";
 	};
 
-	i2c6_pins: i2c6 {
-		renesas,groups = "i2c6";
-		renesas,function = "i2c6";
-	};
-
 	usb0_pins: usb0 {
 		renesas,groups = "usb0";
 		renesas,function = "usb0";
@@ -420,8 +415,6 @@
 };
 
 &i2c6 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&i2c6_pins>;
 	status = "okay";
 	clock-frequency = <100000>;
 
diff --git a/arch/arm/boot/dts/rk3066a-bqcurie2.dts b/arch/arm/boot/dts/rk3066a-bqcurie2.dts
index 042f821d9e4d..c9d912da6141 100644
--- a/arch/arm/boot/dts/rk3066a-bqcurie2.dts
+++ b/arch/arm/boot/dts/rk3066a-bqcurie2.dts
@@ -149,6 +149,8 @@
 &mmc0 { /* sdmmc */
 	num-slots = <1>;
 	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
 	vmmc-supply = <&vcc_sd0>;
 
 	slot@0 {
diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts
index 171b610db709..5e4e3c238b2d 100644
--- a/arch/arm/boot/dts/rk3188-radxarock.dts
+++ b/arch/arm/boot/dts/rk3188-radxarock.dts
@@ -179,6 +179,8 @@
 &mmc0 {
 	num-slots = <1>;
 	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
 	vmmc-supply = <&vcc_sd0>;
 
 	slot@0 {
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 44b07e512c24..e06fbfc55bb7 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -660,6 +660,8 @@
 			clock-frequency = <100000>;
 			resets = <&apb2_rst 0>;
 			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
 		};
 
 		i2c1: i2c@01c2b000 {
@@ -670,6 +672,8 @@
 			clock-frequency = <100000>;
 			resets = <&apb2_rst 1>;
 			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
 		};
 
 		i2c2: i2c@01c2b400 {
@@ -680,6 +684,8 @@
 			clock-frequency = <100000>;
 			resets = <&apb2_rst 2>;
 			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
 		};
 
 		i2c3: i2c@01c2b800 {
@@ -690,6 +696,8 @@
 			clock-frequency = <100000>;
 			resets = <&apb2_rst 3>;
 			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
 		};
 
 		gmac: ethernet@01c30000 {
diff --git a/arch/arm/boot/dts/tegra30-apalis.dtsi b/arch/arm/boot/dts/tegra30-apalis.dtsi
index 8adaa7871dd3..a5446cba9804 100644
--- a/arch/arm/boot/dts/tegra30-apalis.dtsi
+++ b/arch/arm/boot/dts/tegra30-apalis.dtsi
@@ -423,7 +423,7 @@
 			vcc4-supply = <&sys_3v3_reg>;
 			vcc5-supply = <&sys_3v3_reg>;
 			vcc6-supply = <&vio_reg>;
-			vcc7-supply = <&sys_5v0_reg>;
+			vcc7-supply = <&charge_pump_5v0_reg>;
 			vccio-supply = <&sys_3v3_reg>;
 
 			regulators {
@@ -674,5 +674,14 @@
 			regulator-max-microvolt = <3300000>;
 			regulator-always-on;
 		};
+
+		charge_pump_5v0_reg: regulator@101 {
+			compatible = "regulator-fixed";
+			reg = <101>;
+			regulator-name = "5v0";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			regulator-always-on;
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/tegra30-colibri.dtsi b/arch/arm/boot/dts/tegra30-colibri.dtsi
index bf16f8e65627..c4ed1bec4d92 100644
--- a/arch/arm/boot/dts/tegra30-colibri.dtsi
+++ b/arch/arm/boot/dts/tegra30-colibri.dtsi
@@ -201,7 +201,7 @@
 			vcc4-supply = <&sys_3v3_reg>;
 			vcc5-supply = <&sys_3v3_reg>;
 			vcc6-supply = <&vio_reg>;
-			vcc7-supply = <&sys_5v0_reg>;
+			vcc7-supply = <&charge_pump_5v0_reg>;
 			vccio-supply = <&sys_3v3_reg>;
 
 			regulators {
@@ -373,5 +373,14 @@
 			regulator-max-microvolt = <3300000>;
 			regulator-always-on;
 		};
+
+		charge_pump_5v0_reg: regulator@101 {
+			compatible = "regulator-fixed";
+			reg = <101>;
+			regulator-name = "5v0";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			regulator-always-on;
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/vf610-twr.dts b/arch/arm/boot/dts/vf610-twr.dts
index 11d733406c7e..b8a5e8c68f06 100644
--- a/arch/arm/boot/dts/vf610-twr.dts
+++ b/arch/arm/boot/dts/vf610-twr.dts
@@ -168,7 +168,7 @@
 		};
 
 		pinctrl_esdhc1: esdhc1grp {
-			fsl,fsl,pins = <
+			fsl,pins = <
 				VF610_PAD_PTA24__ESDHC1_CLK	0x31ef
 				VF610_PAD_PTA25__ESDHC1_CMD	0x31ef
 				VF610_PAD_PTA26__ESDHC1_DAT0	0x31ef
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 9de84a215abd..be9a51afe05a 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -85,7 +85,6 @@ config SOC_IMX25
 
 config SOC_IMX27
 	bool
-	select ARCH_HAS_OPP
 	select CPU_ARM926T
 	select IMX_HAVE_IOMUX_V1
 	select MXC_AVIC
@@ -659,7 +658,6 @@ comment "Device tree only"
 
 config SOC_IMX5
 	bool
-	select ARCH_HAS_OPP
 	select HAVE_IMX_SRC
 	select MXC_TZIC
 
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index ac88599ca080..23c02932bf84 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -93,9 +93,11 @@ obj-$(CONFIG_HAVE_IMX_ANATOP) += anatop.o
 obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
 obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o
 obj-$(CONFIG_HAVE_IMX_SRC) += src.o
+ifdef CONFIG_SOC_IMX6
 AFLAGS_headsmp.o :=-Wa,-march=armv7-a
 obj-$(CONFIG_SMP) += headsmp.o platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+endif
 obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
 obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o
 obj-$(CONFIG_SOC_IMX6SX) += clk-imx6sx.o mach-imx6sx.o
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 6cceb7765c14..29d412975aff 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -194,6 +194,10 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
 	clk[IMX6QDL_CLK_PLL3_80M]  = imx_clk_fixed_factor("pll3_80m",  "pll3_usb_otg",   1, 6);
 	clk[IMX6QDL_CLK_PLL3_60M]  = imx_clk_fixed_factor("pll3_60m",  "pll3_usb_otg",   1, 8);
 	clk[IMX6QDL_CLK_TWD]       = imx_clk_fixed_factor("twd",       "arm",            1, 2);
+	if (cpu_is_imx6dl()) {
+		clk[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_fixed_factor("gpu2d_axi", "mmdc_ch0_axi_podf", 1, 1);
+		clk[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_fixed_factor("gpu3d_axi", "mmdc_ch0_axi_podf", 1, 1);
+	}
 
 	clk[IMX6QDL_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
 	clk[IMX6QDL_CLK_PLL4_AUDIO_DIV] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock);
@@ -217,8 +221,10 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
 	clk[IMX6QDL_CLK_ESAI_SEL]         = imx_clk_mux("esai_sel",         base + 0x20, 19, 2, audio_sels,        ARRAY_SIZE(audio_sels));
 	clk[IMX6QDL_CLK_ASRC_SEL]         = imx_clk_mux("asrc_sel",         base + 0x30, 7,  2, audio_sels,        ARRAY_SIZE(audio_sels));
 	clk[IMX6QDL_CLK_SPDIF_SEL]        = imx_clk_mux("spdif_sel",        base + 0x30, 20, 2, audio_sels,        ARRAY_SIZE(audio_sels));
-	clk[IMX6QDL_CLK_GPU2D_AXI]        = imx_clk_mux("gpu2d_axi",        base + 0x18, 0,  1, gpu_axi_sels,      ARRAY_SIZE(gpu_axi_sels));
-	clk[IMX6QDL_CLK_GPU3D_AXI]        = imx_clk_mux("gpu3d_axi",        base + 0x18, 1,  1, gpu_axi_sels,      ARRAY_SIZE(gpu_axi_sels));
+	if (cpu_is_imx6q()) {
+		clk[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_mux("gpu2d_axi", base + 0x18, 0, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels));
+		clk[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_mux("gpu3d_axi", base + 0x18, 1, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels));
+	}
 	clk[IMX6QDL_CLK_GPU2D_CORE_SEL]   = imx_clk_mux("gpu2d_core_sel",   base + 0x18, 16, 2, gpu2d_core_sels,   ARRAY_SIZE(gpu2d_core_sels));
 	clk[IMX6QDL_CLK_GPU3D_CORE_SEL]   = imx_clk_mux("gpu3d_core_sel",   base + 0x18, 4,  2, gpu3d_core_sels,   ARRAY_SIZE(gpu3d_core_sels));
 	clk[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8,  2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
index 74b50f1982db..ca4ea2daf25b 100644
--- a/arch/arm/mach-imx/suspend-imx6.S
+++ b/arch/arm/mach-imx/suspend-imx6.S
@@ -173,6 +173,8 @@ ENTRY(imx6_suspend)
 	ldr	r6, [r11, #0x0]
 	ldr	r11, [r0, #PM_INFO_MX6Q_GPC_V_OFFSET]
 	ldr	r6, [r11, #0x0]
+	ldr	r11, [r0, #PM_INFO_MX6Q_IOMUXC_V_OFFSET]
+	ldr	r6, [r11, #0x0]
 
 	/* use r11 to store the IO address */
 	ldr	r11, [r0, #PM_INFO_MX6Q_SRC_V_OFFSET]
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index e15dff790dbb..1e6c51c7c2d5 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -75,6 +75,7 @@ config ARCH_SH7372
 	select ARM_CPU_SUSPEND if PM || CPU_IDLE
 	select CPU_V7
 	select SH_CLK_CPG
+	select SH_INTC
 	select SYS_SUPPORTS_SH_CMT
 	select SYS_SUPPORTS_SH_TMU
 
@@ -85,6 +86,7 @@ config ARCH_SH73A0
 	select CPU_V7
 	select I2C
 	select SH_CLK_CPG
+	select SH_INTC
 	select RENESAS_INTC_IRQPIN
 	select SYS_SUPPORTS_SH_CMT
 	select SYS_SUPPORTS_SH_TMU
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 24f0c6fb61d8..03aaa99e1ea0 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -465,6 +465,8 @@ static int __init arm64_enter_virtual_mode(void)
 	efi_native_runtime_setup();
 	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 
+	efi.runtime_version = efi.systab->hdr.revision;
+
 	return 0;
 
 err_unmap:
diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
index 776188908dfc..8c13675a12e7 100644
--- a/arch/mips/alchemy/devboards/db1200.c
+++ b/arch/mips/alchemy/devboards/db1200.c
@@ -847,6 +847,7 @@ int __init db1200_dev_setup(void)
 			pr_warn("DB1200: cant get I2C close to 50MHz\n");
 		else
 			clk_set_rate(c, pfc);
+		clk_prepare_enable(c);
 		clk_put(c);
 	}
 
@@ -922,11 +923,6 @@ int __init db1200_dev_setup(void)
 	}
 
 	/* Audio PSC clock is supplied externally. (FIXME: platdata!!) */
-	c = clk_get(NULL, "psc1_intclk");
-	if (!IS_ERR(c)) {
-		clk_prepare_enable(c);
-		clk_put(c);
-	}
 	__raw_writel(PSC_SEL_CLK_SERCLK,
 		(void __iomem *)KSEG1ADDR(AU1550_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET);
 	wmb();
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 2b63e7e7d3d3..ad439c273003 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -59,12 +59,21 @@ static void bcm47xx_machine_restart(char *command)
 	switch (bcm47xx_bus_type) {
 #ifdef CONFIG_BCM47XX_SSB
 	case BCM47XX_BUS_TYPE_SSB:
-		ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 3);
+		if (bcm47xx_bus.ssb.chip_id == 0x4785)
+			write_c0_diag4(1 << 22);
+		ssb_watchdog_timer_set(&bcm47xx_bus.ssb, 1);
+		if (bcm47xx_bus.ssb.chip_id == 0x4785) {
+			__asm__ __volatile__(
+				".set\tmips3\n\t"
+				"sync\n\t"
+				"wait\n\t"
+				".set\tmips0");
+		}
 		break;
 #endif
 #ifdef CONFIG_BCM47XX_BCMA
 	case BCM47XX_BUS_TYPE_BCMA:
-		bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc, 3);
+		bcma_chipco_watchdog_timer_set(&bcm47xx_bus.bcma.bus.drv_cc, 1);
 		break;
 #endif
 	}
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 008e9c8b8eac..38f4c32e2816 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -263,7 +263,6 @@ static uint64_t crashk_size, crashk_base;
 static int octeon_uart;
 
 extern asmlinkage void handle_int(void);
-extern asmlinkage void plat_irq_dispatch(void);
 
 /**
  * Return non zero if we are currently running in the Octeon simulator
@@ -458,6 +457,18 @@ static void octeon_halt(void)
 	octeon_kill_core(NULL);
 }
 
+static char __read_mostly octeon_system_type[80];
+
+static int __init init_octeon_system_type(void)
+{
+	snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
+		cvmx_board_type_to_string(octeon_bootinfo->board_type),
+		octeon_model_get_string(read_c0_prid()));
+
+	return 0;
+}
+early_initcall(init_octeon_system_type);
+
 /**
  * Return a string representing the system type
  *
@@ -465,11 +476,7 @@ static void octeon_halt(void)
465 */ 476 */
466const char *octeon_board_type_string(void) 477const char *octeon_board_type_string(void)
467{ 478{
468 static char name[80]; 479 return octeon_system_type;
469 sprintf(name, "%s (%s)",
470 cvmx_board_type_to_string(octeon_bootinfo->board_type),
471 octeon_model_get_string(read_c0_prid()));
472 return name;
473} 480}
474 481
475const char *get_system_type(void) 482const char *get_system_type(void)
diff --git a/arch/mips/include/asm/eva.h b/arch/mips/include/asm/eva.h
new file mode 100644
index 000000000000..a3d1807f227c
--- /dev/null
+++ b/arch/mips/include/asm/eva.h
@@ -0,0 +1,43 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ *
+ * EVA functions for generic code
+ */
+
+#ifndef _ASM_EVA_H
+#define _ASM_EVA_H
+
+#include <kernel-entry-init.h>
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_EVA
+
+/*
+ * EVA early init code
+ *
+ * Platforms must define their own 'platform_eva_init' macro in
+ * their kernel-entry-init.h header. This macro usually does the
+ * platform specific configuration of the segmentation registers,
+ * and it is normally called from assembly code.
+ *
+ */
+
+.macro eva_init
+platform_eva_init
+.endm
+
+#else
+
+.macro eva_init
+.endm
+
+#endif /* CONFIG_EVA */
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index 3f20b2111d56..d7699cf7e135 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -49,7 +49,7 @@
 #endif
 #define GICBIS(reg, mask, bits)			\
 	do { u32 data;				\
-		GICREAD((reg), data);		\
+		GICREAD(reg, data);		\
 		data &= ~(mask);		\
 		data |= ((bits) & (mask));	\
 		GICWRITE((reg), data);		\
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index ae1f7b24dd1a..39f07aec640c 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -26,6 +26,8 @@ static inline int irq_canonicalize(int irq)
 #define irq_canonicalize(irq) (irq)	/* Sane hardware, sane code ... */
 #endif
 
+asmlinkage void plat_irq_dispatch(void);
+
 extern void do_IRQ(unsigned int irq);
 
 extern void arch_init_irq(void);
diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
index 77eeda77e73c..0cf8622db27f 100644
--- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
@@ -10,14 +10,15 @@
 #ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
 #define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
 
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+
 	/*
 	 * Prepare segments for EVA boot:
 	 *
 	 * This is in case the processor boots in legacy configuration
 	 * (SI_EVAReset is de-asserted and CONFIG5.K == 0)
 	 *
-	 * On entry, t1 is loaded with CP0_CONFIG
-	 *
 	 * ========================= Mappings =============================
 	 * Virtual memory           Physical memory           Mapping
 	 * 0x00000000 - 0x7fffffff  0x80000000 - 0xfffffffff  MUSUK (kuseg)
@@ -30,12 +31,20 @@
 	 *
 	 *
 	 * Lowmem is expanded to 2GB
+	 *
+	 * The following code uses the t0, t1, t2 and ra registers without
+	 * previously preserving them.
+	 *
 	 */
-	.macro	eva_entry
+	.macro	platform_eva_init
+
+	.set	push
+	.set	reorder
 	/*
 	 * Get Config.K0 value and use it to program
 	 * the segmentation registers
 	 */
+	mfc0	t1, CP0_CONFIG
 	andi	t1, 0x7 /* CCA */
 	move	t2, t1
 	ins	t2, t1, 16, 3
@@ -77,6 +86,8 @@
 	mtc0	t0, $16, 5
 	sync
 	jal	mips_ihb
+
+	.set	pop
 	.endm
 
 	.macro	kernel_entry_setup
@@ -95,7 +106,7 @@
 	sll	t0, t0, 6   /* SC bit */
 	bgez	t0, 9f
 
-	eva_entry
+	platform_eva_init
 	b	0f
 9:
 	/* Assume we came from YAMON... */
@@ -127,8 +138,7 @@ nonsc_processor:
 #ifdef CONFIG_EVA
 	sync
 	ehb
-	mfc0	t1, CP0_CONFIG
-	eva_entry
+	platform_eva_init
 #endif
 	.endm
 
diff --git a/arch/mips/include/asm/mach-netlogic/topology.h b/arch/mips/include/asm/mach-netlogic/topology.h
index ceeb1f5e7129..0eb43c832b25 100644
--- a/arch/mips/include/asm/mach-netlogic/topology.h
+++ b/arch/mips/include/asm/mach-netlogic/topology.h
@@ -10,13 +10,6 @@
 
 #include <asm/mach-netlogic/multi-node.h>
 
-#ifdef CONFIG_SMP
-#define topology_physical_package_id(cpu)	cpu_to_node(cpu)
-#define topology_core_id(cpu)	(cpu_logical_map(cpu) / NLM_THREADS_PER_CORE)
-#define topology_thread_cpumask(cpu)		(&cpu_sibling_map[cpu])
-#define topology_core_cpumask(cpu)	cpumask_of_node(cpu_to_node(cpu))
-#endif
-
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_MACH_NETLOGIC_TOPOLOGY_H */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 027c74db13f9..df49a308085c 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -122,6 +122,9 @@ do { \
 	}							\
 } while(0)
 
+extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+	pte_t pteval);
+
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 
 #define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
@@ -145,7 +148,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 		}
 	}
 }
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
@@ -183,7 +185,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 	}
 #endif
 }
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
@@ -390,15 +391,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
 	pte_t pte);
-extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
-	pte_t pte);
 
 static inline void update_mmu_cache(struct vm_area_struct *vma,
 	unsigned long address, pte_t *ptep)
 {
 	pte_t pte = *ptep;
 	__update_tlb(vma, address, pte);
-	__update_cache(vma, address, pte);
 }
 
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 17960fe7a8ce..cdf68b33bd65 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -131,10 +131,12 @@ static inline int syscall_get_arch(void)
 {
 	int arch = EM_MIPS;
 #ifdef CONFIG_64BIT
-	if (!test_thread_flag(TIF_32BIT_REGS))
+	if (!test_thread_flag(TIF_32BIT_REGS)) {
 		arch |= __AUDIT_ARCH_64BIT;
-	if (test_thread_flag(TIF_32BIT_ADDR))
-		arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
+		/* N32 sets only TIF_32BIT_ADDR */
+		if (test_thread_flag(TIF_32BIT_ADDR))
+			arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
+	}
 #endif
 #if defined(__LITTLE_ENDIAN)
 	arch |= __AUDIT_ARCH_LE;
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 6f4f739dad96..e6e97d2a5c9e 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -13,6 +13,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/asmmacro.h>
 #include <asm/cacheops.h>
+#include <asm/eva.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/pm.h>
@@ -166,6 +167,9 @@ dcache_done:
 1:	jal	mips_cps_core_init
 	 nop
 
+	/* Do any EVA initialization if necessary */
+	eva_init
+
 	/*
 	 * Boot any other VPEs within this core that should be online, and
 	 * deactivate this VPE if it should be offline.
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 14bf74b0f51c..b63f2482f288 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -558,7 +558,7 @@ static int mipspmu_get_irq(void)
 	if (mipspmu.irq >= 0) {
 		/* Request my own irq handler. */
 		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
-			IRQF_PERCPU | IRQF_NOBALANCING,
+			IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD,
 			"mips_perf_pmu", NULL);
 		if (err) {
 			pr_warning("Unable to request IRQ%d for MIPS "
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 13b964fddc4a..25bb8400156d 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -113,15 +113,19 @@ trace_a_syscall:
 	move	s0, t2			# Save syscall pointer
 	move	a0, sp
 	/*
-	 * syscall number is in v0 unless we called syscall(__NR_###)
+	 * absolute syscall number is in v0 unless we called syscall(__NR_###)
 	 * where the real syscall number is in a0
 	 * note: NR_syscall is the first O32 syscall but the macro is
 	 * only defined when compiling with -mabi=32 (CONFIG_32BIT)
 	 * therefore __NR_O32_Linux is used (4000)
 	 */
-	addiu	a1, v0, __NR_O32_Linux
-	bnez	v0, 1f /* __NR_syscall at offset 0 */
-	lw	a1, PT_R4(sp)
+	.set	push
+	.set	reorder
+	subu	t1, v0, __NR_O32_Linux
+	move	a1, v0
+	bnez	t1, 1f /* __NR_syscall at offset 0 */
+	lw	a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
+	.set	pop
 
 1:	jal	syscall_trace_enter
 
diff --git a/arch/mips/loongson/loongson-3/cop2-ex.c b/arch/mips/loongson/loongson-3/cop2-ex.c
index 9182e8d2967c..b03e37d2071a 100644
--- a/arch/mips/loongson/loongson-3/cop2-ex.c
+++ b/arch/mips/loongson/loongson-3/cop2-ex.c
@@ -22,13 +22,13 @@
 static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
 	void *data)
 {
-	int fpu_enabled;
+	int fpu_owned;
 	int fr = !test_thread_flag(TIF_32BIT_FPREGS);
 
 	switch (action) {
 	case CU2_EXCEPTION:
 		preempt_disable();
-		fpu_enabled = read_c0_status() & ST0_CU1;
+		fpu_owned = __is_fpu_owner();
 		if (!fr)
 			set_c0_status(ST0_CU1 | ST0_CU2);
 		else
@@ -39,8 +39,8 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
 			KSTK_STATUS(current) |= ST0_FR;
 		else
 			KSTK_STATUS(current) &= ~ST0_FR;
-		/* If FPU is enabled, we needn't init or restore fp */
-		if(!fpu_enabled) {
+		/* If FPU is owned, we needn't init or restore fp */
+		if (!fpu_owned) {
 			set_thread_flag(TIF_USEDFPU);
 			if (!used_math()) {
 				_init_fpu();
diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c
index ca025a6ba559..37ed184398c6 100644
--- a/arch/mips/loongson/loongson-3/numa.c
+++ b/arch/mips/loongson/loongson-3/numa.c
@@ -24,8 +24,6 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
-#include <linux/bootmem.h>
-#include <linux/init.h>
 #include <linux/irq.h>
 #include <asm/bootinfo.h>
 #include <asm/mc146818-time.h>
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index f7b91d3a371d..7e3ea7766822 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -119,25 +119,36 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
 EXPORT_SYMBOL(__flush_anon_page);
 
-void __update_cache(struct vm_area_struct *vma, unsigned long address,
-	pte_t pte)
+static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
 {
 	struct page *page;
-	unsigned long pfn, addr;
-	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
+	unsigned long pfn = pte_pfn(pteval);
 
-	pfn = pte_pfn(pte);
 	if (unlikely(!pfn_valid(pfn)))
 		return;
+
 	page = pfn_to_page(pfn);
 	if (page_mapping(page) && Page_dcache_dirty(page)) {
-		addr = (unsigned long) page_address(page);
-		if (exec || pages_do_alias(addr, address & PAGE_MASK))
-			flush_data_cache_page(addr);
+		unsigned long page_addr = (unsigned long) page_address(page);
+
+		if (!cpu_has_ic_fills_f_dc ||
+		    pages_do_alias(page_addr, address & PAGE_MASK))
+			flush_data_cache_page(page_addr);
 		ClearPageDcacheDirty(page);
 	}
 }
 
+void set_pte_at(struct mm_struct *mm, unsigned long addr,
+	pte_t *ptep, pte_t pteval)
+{
+	if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {
+		if (pte_present(pteval))
+			mips_flush_dcache_from_pte(pteval, addr);
+	}
+
+	set_pte(ptep, pteval);
+}
+
 unsigned long _page_cachable_default;
 EXPORT_SYMBOL(_page_cachable_default);
 
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index 0c35dee0a215..8fddd2cdbff7 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -35,13 +35,19 @@ fw_memblock_t * __init fw_getmdesc(int eva)
35 /* otherwise look in the environment */ 35 /* otherwise look in the environment */
36 36
37 memsize_str = fw_getenv("memsize"); 37 memsize_str = fw_getenv("memsize");
38 if (memsize_str) 38 if (memsize_str) {
39 tmp = kstrtol(memsize_str, 0, &memsize); 39 tmp = kstrtoul(memsize_str, 0, &memsize);
40 if (tmp)
41 pr_warn("Failed to read the 'memsize' env variable.\n");
42 }
40 if (eva) { 43 if (eva) {
41 /* Look for ememsize for EVA */ 44 /* Look for ememsize for EVA */
42 ememsize_str = fw_getenv("ememsize"); 45 ememsize_str = fw_getenv("ememsize");
43 if (ememsize_str) 46 if (ememsize_str) {
44 tmp = kstrtol(ememsize_str, 0, &ememsize); 47 tmp = kstrtoul(ememsize_str, 0, &ememsize);
48 if (tmp)
49 pr_warn("Failed to read the 'ememsize' env variable.\n");
50 }
45 } 51 }
46 if (!memsize && !ememsize) { 52 if (!memsize && !ememsize) {
47 pr_warn("memsize not set in YAMON, set to default (32Mb)\n"); 53 pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
diff --git a/arch/mips/pmcs-msp71xx/msp_irq.c b/arch/mips/pmcs-msp71xx/msp_irq.c
index 941744aabb51..f914c753de21 100644
--- a/arch/mips/pmcs-msp71xx/msp_irq.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq.c
@@ -51,7 +51,7 @@ static inline void sec_int_dispatch(void) { do_IRQ(MSP_INT_SEC); }
51 * the range 40-71. 51 * the range 40-71.
52 */ 52 */
53 53
54asmlinkage void plat_irq_dispatch(struct pt_regs *regs) 54asmlinkage void plat_irq_dispatch(void)
55{ 55{
56 u32 pending; 56 u32 pending;
57 57
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 453fa5c09550..b319846ad97f 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -172,6 +172,7 @@ menu "System type"
172# 172#
173config CPU_SH2 173config CPU_SH2
174 bool 174 bool
175 select SH_INTC
175 176
176config CPU_SH2A 177config CPU_SH2A
177 bool 178 bool
@@ -182,6 +183,7 @@ config CPU_SH3
182 bool 183 bool
183 select CPU_HAS_INTEVT 184 select CPU_HAS_INTEVT
184 select CPU_HAS_SR_RB 185 select CPU_HAS_SR_RB
186 select SH_INTC
185 select SYS_SUPPORTS_SH_TMU 187 select SYS_SUPPORTS_SH_TMU
186 188
187config CPU_SH4 189config CPU_SH4
@@ -189,6 +191,7 @@ config CPU_SH4
189 select CPU_HAS_INTEVT 191 select CPU_HAS_INTEVT
190 select CPU_HAS_SR_RB 192 select CPU_HAS_SR_RB
191 select CPU_HAS_FPU if !CPU_SH4AL_DSP 193 select CPU_HAS_FPU if !CPU_SH4AL_DSP
194 select SH_INTC
192 select SYS_SUPPORTS_SH_TMU 195 select SYS_SUPPORTS_SH_TMU
193 select SYS_SUPPORTS_HUGETLBFS if MMU 196 select SYS_SUPPORTS_HUGETLBFS if MMU
194 197
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 47c410d99f5d..4b0e1dfa2226 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -683,7 +683,7 @@ END(syscall_badsys)
683sysenter_badsys: 683sysenter_badsys:
684 movl $-ENOSYS,%eax 684 movl $-ENOSYS,%eax
685 jmp sysenter_after_call 685 jmp sysenter_after_call
686END(syscall_badsys) 686END(sysenter_badsys)
687 CFI_ENDPROC 687 CFI_ENDPROC
688 688
689.macro FIXUP_ESPFIX_STACK 689.macro FIXUP_ESPFIX_STACK
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 1fe33987de02..ee61c36d64f8 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -49,7 +49,13 @@ void leave_mm(int cpu)
49 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { 49 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
50 cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); 50 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
51 load_cr3(swapper_pg_dir); 51 load_cr3(swapper_pg_dir);
52 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); 52 /*
53 * This gets called in the idle path where RCU
54 * functions differently. Tracing normally
55 * uses RCU, so we have to call the tracepoint
56 * specially here.
57 */
58 trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
53 } 59 }
54} 60}
55EXPORT_SYMBOL_GPL(leave_mm); 61EXPORT_SYMBOL_GPL(leave_mm);
@@ -174,7 +180,7 @@ void flush_tlb_current_task(void)
174 * 180 *
175 * This is in units of pages. 181 * This is in units of pages.
176 */ 182 */
177unsigned long tlb_single_page_flush_ceiling = 33; 183static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
178 184
179void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, 185void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
180 unsigned long end, unsigned long vmflag) 186 unsigned long end, unsigned long vmflag)
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index 3266f8ff9311..6f550d9e7a2d 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -662,7 +662,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
662 } 662 }
663 if (e->num_vcs && vc >= e->num_vcs) { 663 if (e->num_vcs && vc >= e->num_vcs) {
664 dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n", 664 dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n",
665 port, node_xp); 665 vc, node_xp);
666 return -EINVAL; 666 return -EINVAL;
667 } 667 }
668 valid = 1; 668 valid = 1;
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index f0a43646a2f3..5abe943e3404 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -481,7 +481,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_remove);
481 */ 481 */
482static void efivar_entry_list_del_unlock(struct efivar_entry *entry) 482static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
483{ 483{
484 WARN_ON(!spin_is_locked(&__efivars->lock)); 484 lockdep_assert_held(&__efivars->lock);
485 485
486 list_del(&entry->list); 486 list_del(&entry->list);
487 spin_unlock_irq(&__efivars->lock); 487 spin_unlock_irq(&__efivars->lock);
@@ -507,7 +507,7 @@ int __efivar_entry_delete(struct efivar_entry *entry)
507 const struct efivar_operations *ops = __efivars->ops; 507 const struct efivar_operations *ops = __efivars->ops;
508 efi_status_t status; 508 efi_status_t status;
509 509
510 WARN_ON(!spin_is_locked(&__efivars->lock)); 510 lockdep_assert_held(&__efivars->lock);
511 511
512 status = ops->set_variable(entry->var.VariableName, 512 status = ops->set_variable(entry->var.VariableName,
513 &entry->var.VendorGuid, 513 &entry->var.VendorGuid,
@@ -667,7 +667,7 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
667 int strsize1, strsize2; 667 int strsize1, strsize2;
668 bool found = false; 668 bool found = false;
669 669
670 WARN_ON(!spin_is_locked(&__efivars->lock)); 670 lockdep_assert_held(&__efivars->lock);
671 671
672 list_for_each_entry_safe(entry, n, head, list) { 672 list_for_each_entry_safe(entry, n, head, list) {
673 strsize1 = ucs2_strsize(name, 1024); 673 strsize1 = ucs2_strsize(name, 1024);
@@ -739,7 +739,7 @@ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
739 const struct efivar_operations *ops = __efivars->ops; 739 const struct efivar_operations *ops = __efivars->ops;
740 efi_status_t status; 740 efi_status_t status;
741 741
742 WARN_ON(!spin_is_locked(&__efivars->lock)); 742 lockdep_assert_held(&__efivars->lock);
743 743
744 status = ops->get_variable(entry->var.VariableName, 744 status = ops->get_variable(entry->var.VariableName,
745 &entry->var.VendorGuid, 745 &entry->var.VendorGuid,
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 41b2f40578d5..954b9f6b0ef8 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -90,7 +90,7 @@ struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
90 struct gpio_desc **dr; 90 struct gpio_desc **dr;
91 struct gpio_desc *desc; 91 struct gpio_desc *desc;
92 92
93 dr = devres_alloc(devm_gpiod_release, sizeof(struct gpiod_desc *), 93 dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
94 GFP_KERNEL); 94 GFP_KERNEL);
95 if (!dr) 95 if (!dr)
96 return ERR_PTR(-ENOMEM); 96 return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index ff9eb911b5e4..fa945ec9ccff 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -407,9 +407,27 @@ static int lp_gpio_runtime_resume(struct device *dev)
407 return 0; 407 return 0;
408} 408}
409 409
410static int lp_gpio_resume(struct device *dev)
411{
412 struct platform_device *pdev = to_platform_device(dev);
413 struct lp_gpio *lg = platform_get_drvdata(pdev);
414 unsigned long reg;
415 int i;
416
417 /* on some hardware suspend clears input sensing, re-enable it here */
418 for (i = 0; i < lg->chip.ngpio; i++) {
419 if (gpiochip_is_requested(&lg->chip, i) != NULL) {
420 reg = lp_gpio_reg(&lg->chip, i, LP_CONFIG2);
421 outl(inl(reg) & ~GPINDIS_BIT, reg);
422 }
423 }
424 return 0;
425}
426
410static const struct dev_pm_ops lp_gpio_pm_ops = { 427static const struct dev_pm_ops lp_gpio_pm_ops = {
411 .runtime_suspend = lp_gpio_runtime_suspend, 428 .runtime_suspend = lp_gpio_runtime_suspend,
412 .runtime_resume = lp_gpio_runtime_resume, 429 .runtime_resume = lp_gpio_runtime_resume,
430 .resume = lp_gpio_resume,
413}; 431};
414 432
415static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = { 433static const struct acpi_device_id lynxpoint_gpio_acpi_match[] = {
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index c3145f91fda3..31ad5df5dbc9 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -95,6 +95,9 @@ struct zynq_gpio {
95 struct clk *clk; 95 struct clk *clk;
96}; 96};
97 97
98static struct irq_chip zynq_gpio_level_irqchip;
99static struct irq_chip zynq_gpio_edge_irqchip;
100
98/** 101/**
99 * zynq_gpio_get_bank_pin - Get the bank number and pin number within that bank 102 * zynq_gpio_get_bank_pin - Get the bank number and pin number within that bank
100 * for a given pin in the GPIO device 103 * for a given pin in the GPIO device
@@ -410,6 +413,15 @@ static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
410 gpio->base_addr + ZYNQ_GPIO_INTPOL_OFFSET(bank_num)); 413 gpio->base_addr + ZYNQ_GPIO_INTPOL_OFFSET(bank_num));
411 writel_relaxed(int_any, 414 writel_relaxed(int_any,
412 gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num)); 415 gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num));
416
417 if (type & IRQ_TYPE_LEVEL_MASK) {
418 __irq_set_chip_handler_name_locked(irq_data->irq,
419 &zynq_gpio_level_irqchip, handle_fasteoi_irq, NULL);
420 } else {
421 __irq_set_chip_handler_name_locked(irq_data->irq,
422 &zynq_gpio_edge_irqchip, handle_level_irq, NULL);
423 }
424
413 return 0; 425 return 0;
414} 426}
415 427
@@ -424,9 +436,21 @@ static int zynq_gpio_set_wake(struct irq_data *data, unsigned int on)
424} 436}
425 437
426/* irq chip descriptor */ 438/* irq chip descriptor */
427static struct irq_chip zynq_gpio_irqchip = { 439static struct irq_chip zynq_gpio_level_irqchip = {
428 .name = DRIVER_NAME, 440 .name = DRIVER_NAME,
429 .irq_enable = zynq_gpio_irq_enable, 441 .irq_enable = zynq_gpio_irq_enable,
442 .irq_eoi = zynq_gpio_irq_ack,
443 .irq_mask = zynq_gpio_irq_mask,
444 .irq_unmask = zynq_gpio_irq_unmask,
445 .irq_set_type = zynq_gpio_set_irq_type,
446 .irq_set_wake = zynq_gpio_set_wake,
447 .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED,
448};
449
450static struct irq_chip zynq_gpio_edge_irqchip = {
451 .name = DRIVER_NAME,
452 .irq_enable = zynq_gpio_irq_enable,
453 .irq_ack = zynq_gpio_irq_ack,
430 .irq_mask = zynq_gpio_irq_mask, 454 .irq_mask = zynq_gpio_irq_mask,
431 .irq_unmask = zynq_gpio_irq_unmask, 455 .irq_unmask = zynq_gpio_irq_unmask,
432 .irq_set_type = zynq_gpio_set_irq_type, 456 .irq_set_type = zynq_gpio_set_irq_type,
@@ -469,10 +493,6 @@ static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc)
469 offset); 493 offset);
470 generic_handle_irq(gpio_irq); 494 generic_handle_irq(gpio_irq);
471 } 495 }
472
473 /* clear IRQ in HW */
474 writel_relaxed(int_sts, gpio->base_addr +
475 ZYNQ_GPIO_INTSTS_OFFSET(bank_num));
476 } 496 }
477 } 497 }
478 498
@@ -610,14 +630,14 @@ static int zynq_gpio_probe(struct platform_device *pdev)
610 writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr + 630 writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr +
611 ZYNQ_GPIO_INTDIS_OFFSET(bank_num)); 631 ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
612 632
613 ret = gpiochip_irqchip_add(chip, &zynq_gpio_irqchip, 0, 633 ret = gpiochip_irqchip_add(chip, &zynq_gpio_edge_irqchip, 0,
614 handle_simple_irq, IRQ_TYPE_NONE); 634 handle_level_irq, IRQ_TYPE_NONE);
615 if (ret) { 635 if (ret) {
616 dev_err(&pdev->dev, "Failed to add irq chip\n"); 636 dev_err(&pdev->dev, "Failed to add irq chip\n");
617 goto err_rm_gpiochip; 637 goto err_rm_gpiochip;
618 } 638 }
619 639
620 gpiochip_set_chained_irqchip(chip, &zynq_gpio_irqchip, irq, 640 gpiochip_set_chained_irqchip(chip, &zynq_gpio_edge_irqchip, irq,
621 zynq_gpio_irqhandler); 641 zynq_gpio_irqhandler);
622 642
623 pm_runtime_set_active(&pdev->dev); 643 pm_runtime_set_active(&pdev->dev);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 7cfdc2278905..604dbe60bdee 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -307,7 +307,5 @@ void of_gpiochip_add(struct gpio_chip *chip)
307void of_gpiochip_remove(struct gpio_chip *chip) 307void of_gpiochip_remove(struct gpio_chip *chip)
308{ 308{
309 gpiochip_remove_pin_ranges(chip); 309 gpiochip_remove_pin_ranges(chip);
310 310 of_node_put(chip->of_node);
311 if (chip->of_node)
312 of_node_put(chip->of_node);
313} 311}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ec96f9a9724c..e27cdbe9d524 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -494,6 +494,36 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
494 return true; 494 return true;
495} 495}
496 496
497void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
498{
499 spin_lock_irq(&dev_priv->irq_lock);
500
501 dev_priv->long_hpd_port_mask = 0;
502 dev_priv->short_hpd_port_mask = 0;
503 dev_priv->hpd_event_bits = 0;
504
505 spin_unlock_irq(&dev_priv->irq_lock);
506
507 cancel_work_sync(&dev_priv->dig_port_work);
508 cancel_work_sync(&dev_priv->hotplug_work);
509 cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
510}
511
512static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
513{
514 struct drm_device *dev = dev_priv->dev;
515 struct drm_encoder *encoder;
516
517 drm_modeset_lock_all(dev);
518 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
519 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
520
521 if (intel_encoder->suspend)
522 intel_encoder->suspend(intel_encoder);
523 }
524 drm_modeset_unlock_all(dev);
525}
526
497static int i915_drm_freeze(struct drm_device *dev) 527static int i915_drm_freeze(struct drm_device *dev)
498{ 528{
499 struct drm_i915_private *dev_priv = dev->dev_private; 529 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -538,6 +568,9 @@ static int i915_drm_freeze(struct drm_device *dev)
538 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 568 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
539 569
540 intel_runtime_pm_disable_interrupts(dev); 570 intel_runtime_pm_disable_interrupts(dev);
571 intel_hpd_cancel_work(dev_priv);
572
573 intel_suspend_encoders(dev_priv);
541 574
542 intel_suspend_gt_powersave(dev); 575 intel_suspend_gt_powersave(dev);
543 576
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4412f6a4383b..7a830eac5ba3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1458,7 +1458,7 @@ struct drm_i915_private {
1458 } hpd_mark; 1458 } hpd_mark;
1459 } hpd_stats[HPD_NUM_PINS]; 1459 } hpd_stats[HPD_NUM_PINS];
1460 u32 hpd_event_bits; 1460 u32 hpd_event_bits;
1461 struct timer_list hotplug_reenable_timer; 1461 struct delayed_work hotplug_reenable_work;
1462 1462
1463 struct i915_fbc fbc; 1463 struct i915_fbc fbc;
1464 struct i915_drrs drrs; 1464 struct i915_drrs drrs;
@@ -2178,6 +2178,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
2178extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 2178extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
2179extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 2179extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2180int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2180int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
2181void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
2181 2182
2182extern void intel_console_resume(struct work_struct *work); 2183extern void intel_console_resume(struct work_struct *work);
2183 2184
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 390ccc2a3096..0050ee9470f1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1189,8 +1189,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
1189 * some connectors */ 1189 * some connectors */
1190 if (hpd_disabled) { 1190 if (hpd_disabled) {
1191 drm_kms_helper_poll_enable(dev); 1191 drm_kms_helper_poll_enable(dev);
1192 mod_timer(&dev_priv->hotplug_reenable_timer, 1192 mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
1193 jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); 1193 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
1194 } 1194 }
1195 1195
1196 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1196 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -1213,11 +1213,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
1213 drm_kms_helper_hotplug_event(dev); 1213 drm_kms_helper_hotplug_event(dev);
1214} 1214}
1215 1215
1216static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
1217{
1218 del_timer_sync(&dev_priv->hotplug_reenable_timer);
1219}
1220
1221static void ironlake_rps_change_irq_handler(struct drm_device *dev) 1216static void ironlake_rps_change_irq_handler(struct drm_device *dev)
1222{ 1217{
1223 struct drm_i915_private *dev_priv = dev->dev_private; 1218 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3892,8 +3887,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
3892 if (!dev_priv) 3887 if (!dev_priv)
3893 return; 3888 return;
3894 3889
3895 intel_hpd_irq_uninstall(dev_priv);
3896
3897 gen8_irq_reset(dev); 3890 gen8_irq_reset(dev);
3898} 3891}
3899 3892
@@ -3908,8 +3901,6 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
3908 3901
3909 I915_WRITE(VLV_MASTER_IER, 0); 3902 I915_WRITE(VLV_MASTER_IER, 0);
3910 3903
3911 intel_hpd_irq_uninstall(dev_priv);
3912
3913 for_each_pipe(pipe) 3904 for_each_pipe(pipe)
3914 I915_WRITE(PIPESTAT(pipe), 0xffff); 3905 I915_WRITE(PIPESTAT(pipe), 0xffff);
3915 3906
@@ -3988,8 +3979,6 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
3988 if (!dev_priv) 3979 if (!dev_priv)
3989 return; 3980 return;
3990 3981
3991 intel_hpd_irq_uninstall(dev_priv);
3992
3993 ironlake_irq_reset(dev); 3982 ironlake_irq_reset(dev);
3994} 3983}
3995 3984
@@ -4360,8 +4349,6 @@ static void i915_irq_uninstall(struct drm_device * dev)
4360 struct drm_i915_private *dev_priv = dev->dev_private; 4349 struct drm_i915_private *dev_priv = dev->dev_private;
4361 int pipe; 4350 int pipe;
4362 4351
4363 intel_hpd_irq_uninstall(dev_priv);
4364
4365 if (I915_HAS_HOTPLUG(dev)) { 4352 if (I915_HAS_HOTPLUG(dev)) {
4366 I915_WRITE(PORT_HOTPLUG_EN, 0); 4353 I915_WRITE(PORT_HOTPLUG_EN, 0);
4367 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4354 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -4598,8 +4585,6 @@ static void i965_irq_uninstall(struct drm_device * dev)
4598 if (!dev_priv) 4585 if (!dev_priv)
4599 return; 4586 return;
4600 4587
4601 intel_hpd_irq_uninstall(dev_priv);
4602
4603 I915_WRITE(PORT_HOTPLUG_EN, 0); 4588 I915_WRITE(PORT_HOTPLUG_EN, 0);
4604 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4589 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4605 4590
@@ -4615,14 +4600,18 @@ static void i965_irq_uninstall(struct drm_device * dev)
4615 I915_WRITE(IIR, I915_READ(IIR)); 4600 I915_WRITE(IIR, I915_READ(IIR));
4616} 4601}
4617 4602
4618static void intel_hpd_irq_reenable(unsigned long data) 4603static void intel_hpd_irq_reenable(struct work_struct *work)
4619{ 4604{
4620 struct drm_i915_private *dev_priv = (struct drm_i915_private *)data; 4605 struct drm_i915_private *dev_priv =
4606 container_of(work, typeof(*dev_priv),
4607 hotplug_reenable_work.work);
4621 struct drm_device *dev = dev_priv->dev; 4608 struct drm_device *dev = dev_priv->dev;
4622 struct drm_mode_config *mode_config = &dev->mode_config; 4609 struct drm_mode_config *mode_config = &dev->mode_config;
4623 unsigned long irqflags; 4610 unsigned long irqflags;
4624 int i; 4611 int i;
4625 4612
4613 intel_runtime_pm_get(dev_priv);
4614
4626 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4615 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4627 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 4616 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4628 struct drm_connector *connector; 4617 struct drm_connector *connector;
@@ -4648,6 +4637,8 @@ static void intel_hpd_irq_reenable(unsigned long data)
4648 if (dev_priv->display.hpd_irq_setup) 4637 if (dev_priv->display.hpd_irq_setup)
4649 dev_priv->display.hpd_irq_setup(dev); 4638 dev_priv->display.hpd_irq_setup(dev);
4650 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4639 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4640
4641 intel_runtime_pm_put(dev_priv);
4651} 4642}
4652 4643
4653void intel_irq_init(struct drm_device *dev) 4644void intel_irq_init(struct drm_device *dev)
@@ -4670,8 +4661,8 @@ void intel_irq_init(struct drm_device *dev)
4670 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 4661 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
4671 i915_hangcheck_elapsed, 4662 i915_hangcheck_elapsed,
4672 (unsigned long) dev); 4663 (unsigned long) dev);
4673 setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable, 4664 INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
4674 (unsigned long) dev_priv); 4665 intel_hpd_irq_reenable);
4675 4666
4676 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 4667 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4677 4668
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 2efaf8e8d9c4..e8abfce40976 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -699,16 +699,21 @@ intel_crt_detect(struct drm_connector *connector, bool force)
699 goto out; 699 goto out;
700 } 700 }
701 701
702 drm_modeset_acquire_init(&ctx, 0);
703
702 /* for pre-945g platforms use load detect */ 704 /* for pre-945g platforms use load detect */
703 if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) { 705 if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
704 if (intel_crt_detect_ddc(connector)) 706 if (intel_crt_detect_ddc(connector))
705 status = connector_status_connected; 707 status = connector_status_connected;
706 else 708 else
707 status = intel_crt_load_detect(crt); 709 status = intel_crt_load_detect(crt);
708 intel_release_load_detect_pipe(connector, &tmp, &ctx); 710 intel_release_load_detect_pipe(connector, &tmp);
709 } else 711 } else
710 status = connector_status_unknown; 712 status = connector_status_unknown;
711 713
714 drm_modeset_drop_locks(&ctx);
715 drm_modeset_acquire_fini(&ctx);
716
712out: 717out:
713 intel_display_power_put(dev_priv, power_domain); 718 intel_display_power_put(dev_priv, power_domain);
714 return status; 719 return status;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 018fb7222f60..d074d704f458 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8462,8 +8462,6 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
8462 connector->base.id, connector->name, 8462 connector->base.id, connector->name,
8463 encoder->base.id, encoder->name); 8463 encoder->base.id, encoder->name);
8464 8464
8465 drm_modeset_acquire_init(ctx, 0);
8466
8467retry: 8465retry:
8468 ret = drm_modeset_lock(&config->connection_mutex, ctx); 8466 ret = drm_modeset_lock(&config->connection_mutex, ctx);
8469 if (ret) 8467 if (ret)
@@ -8502,10 +8500,14 @@ retry:
8502 i++; 8500 i++;
8503 if (!(encoder->possible_crtcs & (1 << i))) 8501 if (!(encoder->possible_crtcs & (1 << i)))
8504 continue; 8502 continue;
8505 if (!possible_crtc->enabled) { 8503 if (possible_crtc->enabled)
8506 crtc = possible_crtc; 8504 continue;
8507 break; 8505 /* This can occur when applying the pipe A quirk on resume. */
8508 } 8506 if (to_intel_crtc(possible_crtc)->new_enabled)
8507 continue;
8508
8509 crtc = possible_crtc;
8510 break;
8509 } 8511 }
8510 8512
8511 /* 8513 /*
@@ -8574,15 +8576,11 @@ fail_unlock:
8574 goto retry; 8576 goto retry;
8575 } 8577 }
8576 8578
8577 drm_modeset_drop_locks(ctx);
8578 drm_modeset_acquire_fini(ctx);
8579
8580 return false; 8579 return false;
8581} 8580}
8582 8581
8583void intel_release_load_detect_pipe(struct drm_connector *connector, 8582void intel_release_load_detect_pipe(struct drm_connector *connector,
8584 struct intel_load_detect_pipe *old, 8583 struct intel_load_detect_pipe *old)
8585 struct drm_modeset_acquire_ctx *ctx)
8586{ 8584{
8587 struct intel_encoder *intel_encoder = 8585 struct intel_encoder *intel_encoder =
8588 intel_attached_encoder(connector); 8586 intel_attached_encoder(connector);
@@ -8606,17 +8604,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
8606 drm_framebuffer_unreference(old->release_fb); 8604 drm_framebuffer_unreference(old->release_fb);
8607 } 8605 }
8608 8606
8609 goto unlock;
8610 return; 8607 return;
8611 } 8608 }
8612 8609
8613 /* Switch crtc and encoder back off if necessary */ 8610 /* Switch crtc and encoder back off if necessary */
8614 if (old->dpms_mode != DRM_MODE_DPMS_ON) 8611 if (old->dpms_mode != DRM_MODE_DPMS_ON)
8615 connector->funcs->dpms(connector, old->dpms_mode); 8612 connector->funcs->dpms(connector, old->dpms_mode);
8616
8617unlock:
8618 drm_modeset_drop_locks(ctx);
8619 drm_modeset_acquire_fini(ctx);
8620} 8613}
8621 8614
8622static int i9xx_pll_refclk(struct drm_device *dev, 8615static int i9xx_pll_refclk(struct drm_device *dev,
@@ -11700,8 +11693,8 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
11700 }; 11693 };
11701 const struct drm_rect clip = { 11694 const struct drm_rect clip = {
11702 /* integer pixels */ 11695 /* integer pixels */
11703 .x2 = intel_crtc->config.pipe_src_w, 11696 .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
11704 .y2 = intel_crtc->config.pipe_src_h, 11697 .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
11705 }; 11698 };
11706 bool visible; 11699 bool visible;
11707 int ret; 11700 int ret;
@@ -12659,7 +12652,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
12659 struct intel_connector *connector; 12652 struct intel_connector *connector;
12660 struct drm_connector *crt = NULL; 12653 struct drm_connector *crt = NULL;
12661 struct intel_load_detect_pipe load_detect_temp; 12654 struct intel_load_detect_pipe load_detect_temp;
12662 struct drm_modeset_acquire_ctx ctx; 12655 struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
12663 12656
12664 /* We can't just switch on the pipe A, we need to set things up with a 12657 /* We can't just switch on the pipe A, we need to set things up with a
12665 * proper mode and output configuration. As a gross hack, enable pipe A 12658 * proper mode and output configuration. As a gross hack, enable pipe A
@@ -12676,10 +12669,8 @@ static void intel_enable_pipe_a(struct drm_device *dev)
12676 if (!crt) 12669 if (!crt)
12677 return; 12670 return;
12678 12671
12679 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx)) 12672 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
12680 intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx); 12673 intel_release_load_detect_pipe(crt, &load_detect_temp);
12681
12682
12683} 12674}
12684 12675
12685static bool 12676static bool
@@ -13112,7 +13103,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
13112 * experience fancy races otherwise. 13103 * experience fancy races otherwise.
13113 */ 13104 */
13114 drm_irq_uninstall(dev); 13105 drm_irq_uninstall(dev);
13115 cancel_work_sync(&dev_priv->hotplug_work); 13106 intel_hpd_cancel_work(dev_priv);
13116 dev_priv->pm._irqs_disabled = true; 13107 dev_priv->pm._irqs_disabled = true;
13117 13108
13118 /* 13109 /*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index ee3942f0b068..67cfed6d911a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3553,6 +3553,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
3553 if (WARN_ON(!intel_encoder->base.crtc)) 3553 if (WARN_ON(!intel_encoder->base.crtc))
3554 return; 3554 return;
3555 3555
3556 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3557 return;
3558
3556 /* Try to read receiver status if the link appears to be up */ 3559 /* Try to read receiver status if the link appears to be up */
3557 if (!intel_dp_get_link_status(intel_dp, link_status)) { 3560 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3558 return; 3561 return;
@@ -4003,6 +4006,16 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4003 kfree(intel_dig_port); 4006 kfree(intel_dig_port);
4004} 4007}
4005 4008
4009static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4010{
4011 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4012
4013 if (!is_edp(intel_dp))
4014 return;
4015
4016 edp_panel_vdd_off_sync(intel_dp);
4017}
4018
4006static void intel_dp_encoder_reset(struct drm_encoder *encoder) 4019static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4007{ 4020{
4008 intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder)); 4021 intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
@@ -4037,15 +4050,21 @@ bool
4037intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) 4050intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4038{ 4051{
4039 struct intel_dp *intel_dp = &intel_dig_port->dp; 4052 struct intel_dp *intel_dp = &intel_dig_port->dp;
4053 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4040 struct drm_device *dev = intel_dig_port->base.base.dev; 4054 struct drm_device *dev = intel_dig_port->base.base.dev;
4041 struct drm_i915_private *dev_priv = dev->dev_private; 4055 struct drm_i915_private *dev_priv = dev->dev_private;
4042 int ret; 4056 enum intel_display_power_domain power_domain;
4057 bool ret = true;
4058
4043 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) 4059 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4044 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; 4060 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4045 4061
4046 DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port, 4062 DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
4047 long_hpd ? "long" : "short"); 4063 long_hpd ? "long" : "short");
4048 4064
4065 power_domain = intel_display_port_power_domain(intel_encoder);
4066 intel_display_power_get(dev_priv, power_domain);
4067
4049 if (long_hpd) { 4068 if (long_hpd) {
4050 if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) 4069 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4051 goto mst_fail; 4070 goto mst_fail;
@@ -4061,8 +4080,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4061 4080
4062 } else { 4081 } else {
4063 if (intel_dp->is_mst) { 4082 if (intel_dp->is_mst) {
4064 ret = intel_dp_check_mst_status(intel_dp); 4083 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4065 if (ret == -EINVAL)
4066 goto mst_fail; 4084 goto mst_fail;
4067 } 4085 }
4068 4086
@@ -4076,7 +4094,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4076 drm_modeset_unlock(&dev->mode_config.connection_mutex); 4094 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4077 } 4095 }
4078 } 4096 }
4079 return false; 4097 ret = false;
4098 goto put_power;
4080mst_fail: 4099mst_fail:
4081 /* if we were in MST mode, and device is not there get out of MST mode */ 4100 /* if we were in MST mode, and device is not there get out of MST mode */
4082 if (intel_dp->is_mst) { 4101 if (intel_dp->is_mst) {
@@ -4084,7 +4103,10 @@ mst_fail:
4084 intel_dp->is_mst = false; 4103 intel_dp->is_mst = false;
4085 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); 4104 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4086 } 4105 }
4087 return true; 4106put_power:
4107 intel_display_power_put(dev_priv, power_domain);
4108
4109 return ret;
4088} 4110}
4089 4111
4090/* Return which DP Port should be selected for Transcoder DP control */ 4112/* Return which DP Port should be selected for Transcoder DP control */
@@ -4722,6 +4744,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
4722 intel_encoder->disable = intel_disable_dp; 4744 intel_encoder->disable = intel_disable_dp;
4723 intel_encoder->get_hw_state = intel_dp_get_hw_state; 4745 intel_encoder->get_hw_state = intel_dp_get_hw_state;
4724 intel_encoder->get_config = intel_dp_get_config; 4746 intel_encoder->get_config = intel_dp_get_config;
4747 intel_encoder->suspend = intel_dp_encoder_suspend;
4725 if (IS_CHERRYVIEW(dev)) { 4748 if (IS_CHERRYVIEW(dev)) {
4726 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; 4749 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
4727 intel_encoder->pre_enable = chv_pre_enable_dp; 4750 intel_encoder->pre_enable = chv_pre_enable_dp;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4b2664bd5b81..b8c8bbd8e5f9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -153,6 +153,12 @@ struct intel_encoder {
153 * be set correctly before calling this function. */ 153 * be set correctly before calling this function. */
154 void (*get_config)(struct intel_encoder *, 154 void (*get_config)(struct intel_encoder *,
155 struct intel_crtc_config *pipe_config); 155 struct intel_crtc_config *pipe_config);
156 /*
157 * Called during system suspend after all pending requests for the
158 * encoder are flushed (for example for DP AUX transactions) and
159 * device interrupts are disabled.
160 */
161 void (*suspend)(struct intel_encoder *);
156 int crtc_mask; 162 int crtc_mask;
157 enum hpd_pin hpd_pin; 163 enum hpd_pin hpd_pin;
158}; 164};
@@ -830,8 +836,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
830 struct intel_load_detect_pipe *old, 836 struct intel_load_detect_pipe *old,
831 struct drm_modeset_acquire_ctx *ctx); 837 struct drm_modeset_acquire_ctx *ctx);
832void intel_release_load_detect_pipe(struct drm_connector *connector, 838void intel_release_load_detect_pipe(struct drm_connector *connector,
833 struct intel_load_detect_pipe *old, 839 struct intel_load_detect_pipe *old);
834 struct drm_modeset_acquire_ctx *ctx);
835int intel_pin_and_fence_fb_obj(struct drm_device *dev, 840int intel_pin_and_fence_fb_obj(struct drm_device *dev,
836 struct drm_i915_gem_object *obj, 841 struct drm_i915_gem_object *obj,
837 struct intel_engine_cs *pipelined); 842 struct intel_engine_cs *pipelined);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index e211eef4b7e4..32186a656816 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1323,11 +1323,16 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1323 struct intel_load_detect_pipe tmp; 1323 struct intel_load_detect_pipe tmp;
1324 struct drm_modeset_acquire_ctx ctx; 1324 struct drm_modeset_acquire_ctx ctx;
1325 1325
1326 drm_modeset_acquire_init(&ctx, 0);
1327
1326 if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) { 1328 if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
1327 type = intel_tv_detect_type(intel_tv, connector); 1329 type = intel_tv_detect_type(intel_tv, connector);
1328 intel_release_load_detect_pipe(connector, &tmp, &ctx); 1330 intel_release_load_detect_pipe(connector, &tmp);
1329 } else 1331 } else
1330 return connector_status_unknown; 1332 return connector_status_unknown;
1333
1334 drm_modeset_drop_locks(&ctx);
1335 drm_modeset_acquire_fini(&ctx);
1331 } else 1336 } else
1332 return connector->status; 1337 return connector->status;
1333 1338
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 0013ad0db9ef..f77b7135ee4c 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -76,7 +76,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
76 evergreen.o evergreen_cs.o evergreen_blit_shaders.o \ 76 evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
77 evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \ 77 evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
78 atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \ 78 atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
79 si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \ 79 si_blit_shaders.o radeon_prime.o cik.o cik_blit_shaders.o \
80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ 80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ 81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ 82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 022561e28707..d416bb2ff48d 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -869,6 +869,9 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
869 WREG32_SMC(CG_THERMAL_CTRL, tmp); 869 WREG32_SMC(CG_THERMAL_CTRL, tmp);
870#endif 870#endif
871 871
872 rdev->pm.dpm.thermal.min_temp = low_temp;
873 rdev->pm.dpm.thermal.max_temp = high_temp;
874
872 return 0; 875 return 0;
873} 876}
874 877
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b625646bf3e2..79a5a5519bd6 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3483,7 +3483,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
3483 u32 mc_shared_chmap, mc_arb_ramcfg; 3483 u32 mc_shared_chmap, mc_arb_ramcfg;
3484 u32 hdp_host_path_cntl; 3484 u32 hdp_host_path_cntl;
3485 u32 tmp; 3485 u32 tmp;
3486 int i, j, k; 3486 int i, j;
3487 3487
3488 switch (rdev->family) { 3488 switch (rdev->family) {
3489 case CHIP_BONAIRE: 3489 case CHIP_BONAIRE:
@@ -3544,6 +3544,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
3544 (rdev->pdev->device == 0x130B) || 3544 (rdev->pdev->device == 0x130B) ||
3545 (rdev->pdev->device == 0x130E) || 3545 (rdev->pdev->device == 0x130E) ||
3546 (rdev->pdev->device == 0x1315) || 3546 (rdev->pdev->device == 0x1315) ||
3547 (rdev->pdev->device == 0x1318) ||
3547 (rdev->pdev->device == 0x131B)) { 3548 (rdev->pdev->device == 0x131B)) {
3548 rdev->config.cik.max_cu_per_sh = 4; 3549 rdev->config.cik.max_cu_per_sh = 4;
3549 rdev->config.cik.max_backends_per_se = 1; 3550 rdev->config.cik.max_backends_per_se = 1;
@@ -3672,12 +3673,11 @@ static void cik_gpu_init(struct radeon_device *rdev)
3672 rdev->config.cik.max_sh_per_se, 3673 rdev->config.cik.max_sh_per_se,
3673 rdev->config.cik.max_backends_per_se); 3674 rdev->config.cik.max_backends_per_se);
3674 3675
3676 rdev->config.cik.active_cus = 0;
3675 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) { 3677 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
3676 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) { 3678 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
3677 for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) { 3679 rdev->config.cik.active_cus +=
3678 rdev->config.cik.active_cus += 3680 hweight32(cik_get_cu_active_bitmap(rdev, i, j));
3679 hweight32(cik_get_cu_active_bitmap(rdev, i, j));
3680 }
3681 } 3681 }
3682 } 3682 }
3683 3683
@@ -3801,7 +3801,7 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
3801 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 3801 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3802 radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2)); 3802 radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
3803 radeon_ring_write(ring, 0xDEADBEEF); 3803 radeon_ring_write(ring, 0xDEADBEEF);
3804 radeon_ring_unlock_commit(rdev, ring); 3804 radeon_ring_unlock_commit(rdev, ring, false);
3805 3805
3806 for (i = 0; i < rdev->usec_timeout; i++) { 3806 for (i = 0; i < rdev->usec_timeout; i++) {
3807 tmp = RREG32(scratch); 3807 tmp = RREG32(scratch);
@@ -3920,6 +3920,17 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
3920 radeon_ring_write(ring, 0); 3920 radeon_ring_write(ring, 0);
3921} 3921}
3922 3922
3923/**
3924 * cik_semaphore_ring_emit - emit a semaphore on the CP ring
3925 *
3926 * @rdev: radeon_device pointer
3927 * @ring: radeon ring buffer object
3928 * @semaphore: radeon semaphore object
 3929 * @emit_wait: Is this a semaphore wait?
3930 *
3931 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
3932 * from running ahead of semaphore waits.
3933 */
3923bool cik_semaphore_ring_emit(struct radeon_device *rdev, 3934bool cik_semaphore_ring_emit(struct radeon_device *rdev,
3924 struct radeon_ring *ring, 3935 struct radeon_ring *ring,
3925 struct radeon_semaphore *semaphore, 3936 struct radeon_semaphore *semaphore,
@@ -3932,6 +3943,12 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
3932 radeon_ring_write(ring, lower_32_bits(addr)); 3943 radeon_ring_write(ring, lower_32_bits(addr));
3933 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); 3944 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
3934 3945
3946 if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
3947 /* Prevent the PFP from running ahead of the semaphore wait */
3948 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3949 radeon_ring_write(ring, 0x0);
3950 }
3951
3935 return true; 3952 return true;
3936} 3953}
3937 3954
@@ -4004,7 +4021,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
4004 return r; 4021 return r;
4005 } 4022 }
4006 4023
4007 radeon_ring_unlock_commit(rdev, ring); 4024 radeon_ring_unlock_commit(rdev, ring, false);
4008 radeon_semaphore_free(rdev, &sem, *fence); 4025 radeon_semaphore_free(rdev, &sem, *fence);
4009 4026
4010 return r; 4027 return r;
@@ -4103,7 +4120,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
4103 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2); 4120 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
4104 ib.ptr[2] = 0xDEADBEEF; 4121 ib.ptr[2] = 0xDEADBEEF;
4105 ib.length_dw = 3; 4122 ib.length_dw = 3;
4106 r = radeon_ib_schedule(rdev, &ib, NULL); 4123 r = radeon_ib_schedule(rdev, &ib, NULL, false);
4107 if (r) { 4124 if (r) {
4108 radeon_scratch_free(rdev, scratch); 4125 radeon_scratch_free(rdev, scratch);
4109 radeon_ib_free(rdev, &ib); 4126 radeon_ib_free(rdev, &ib);
@@ -4324,7 +4341,7 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
4324 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 4341 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
4325 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ 4342 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
4326 4343
4327 radeon_ring_unlock_commit(rdev, ring); 4344 radeon_ring_unlock_commit(rdev, ring, false);
4328 4345
4329 return 0; 4346 return 0;
4330} 4347}
@@ -5958,14 +5975,14 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
5958 5975
5959 /* update SH_MEM_* regs */ 5976 /* update SH_MEM_* regs */
5960 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5977 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5961 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 5978 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
5962 WRITE_DATA_DST_SEL(0))); 5979 WRITE_DATA_DST_SEL(0)));
5963 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); 5980 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
5964 radeon_ring_write(ring, 0); 5981 radeon_ring_write(ring, 0);
5965 radeon_ring_write(ring, VMID(vm->id)); 5982 radeon_ring_write(ring, VMID(vm->id));
5966 5983
5967 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6)); 5984 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
5968 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 5985 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
5969 WRITE_DATA_DST_SEL(0))); 5986 WRITE_DATA_DST_SEL(0)));
5970 radeon_ring_write(ring, SH_MEM_BASES >> 2); 5987 radeon_ring_write(ring, SH_MEM_BASES >> 2);
5971 radeon_ring_write(ring, 0); 5988 radeon_ring_write(ring, 0);
@@ -5976,7 +5993,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
5976 radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */ 5993 radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
5977 5994
5978 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5995 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5979 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 5996 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
5980 WRITE_DATA_DST_SEL(0))); 5997 WRITE_DATA_DST_SEL(0)));
5981 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); 5998 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
5982 radeon_ring_write(ring, 0); 5999 radeon_ring_write(ring, 0);
@@ -5987,7 +6004,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
5987 6004
5988 /* bits 0-15 are the VM contexts0-15 */ 6005 /* bits 0-15 are the VM contexts0-15 */
5989 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 6006 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5990 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 6007 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
5991 WRITE_DATA_DST_SEL(0))); 6008 WRITE_DATA_DST_SEL(0)));
5992 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); 6009 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
5993 radeon_ring_write(ring, 0); 6010 radeon_ring_write(ring, 0);
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index bcf480510ac2..192278bc993c 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -596,7 +596,7 @@ int cik_copy_dma(struct radeon_device *rdev,
596 return r; 596 return r;
597 } 597 }
598 598
599 radeon_ring_unlock_commit(rdev, ring); 599 radeon_ring_unlock_commit(rdev, ring, false);
600 radeon_semaphore_free(rdev, &sem, *fence); 600 radeon_semaphore_free(rdev, &sem, *fence);
601 601
602 return r; 602 return r;
@@ -638,7 +638,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
638 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr)); 638 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
639 radeon_ring_write(ring, 1); /* number of DWs to follow */ 639 radeon_ring_write(ring, 1); /* number of DWs to follow */
640 radeon_ring_write(ring, 0xDEADBEEF); 640 radeon_ring_write(ring, 0xDEADBEEF);
641 radeon_ring_unlock_commit(rdev, ring); 641 radeon_ring_unlock_commit(rdev, ring, false);
642 642
643 for (i = 0; i < rdev->usec_timeout; i++) { 643 for (i = 0; i < rdev->usec_timeout; i++) {
644 tmp = readl(ptr); 644 tmp = readl(ptr);
@@ -695,7 +695,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
695 ib.ptr[4] = 0xDEADBEEF; 695 ib.ptr[4] = 0xDEADBEEF;
696 ib.length_dw = 5; 696 ib.length_dw = 5;
697 697
698 r = radeon_ib_schedule(rdev, &ib, NULL); 698 r = radeon_ib_schedule(rdev, &ib, NULL, false);
699 if (r) { 699 if (r) {
700 radeon_ib_free(rdev, &ib); 700 radeon_ib_free(rdev, &ib);
701 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 701 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 4fedd14e670a..dbca60c7d097 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2869,7 +2869,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
2869 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 2869 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2870 radeon_ring_write(ring, 0); 2870 radeon_ring_write(ring, 0);
2871 radeon_ring_write(ring, 0); 2871 radeon_ring_write(ring, 0);
2872 radeon_ring_unlock_commit(rdev, ring); 2872 radeon_ring_unlock_commit(rdev, ring, false);
2873 2873
2874 cp_me = 0xff; 2874 cp_me = 0xff;
2875 WREG32(CP_ME_CNTL, cp_me); 2875 WREG32(CP_ME_CNTL, cp_me);
@@ -2912,7 +2912,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
2912 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 2912 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
2913 radeon_ring_write(ring, 0x00000010); /* */ 2913 radeon_ring_write(ring, 0x00000010); /* */
2914 2914
2915 radeon_ring_unlock_commit(rdev, ring); 2915 radeon_ring_unlock_commit(rdev, ring, false);
2916 2916
2917 return 0; 2917 return 0;
2918} 2918}
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 478caefe0fef..afaba388c36d 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -155,7 +155,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
155 return r; 155 return r;
156 } 156 }
157 157
158 radeon_ring_unlock_commit(rdev, ring); 158 radeon_ring_unlock_commit(rdev, ring, false);
159 radeon_semaphore_free(rdev, &sem, *fence); 159 radeon_semaphore_free(rdev, &sem, *fence);
160 160
161 return r; 161 return r;
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 9ef8c38f2d66..8b58e11b64fa 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1438,14 +1438,14 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
1438 return kv_enable_uvd_dpm(rdev, !gate); 1438 return kv_enable_uvd_dpm(rdev, !gate);
1439} 1439}
1440 1440
1441static u8 kv_get_vce_boot_level(struct radeon_device *rdev) 1441static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
1442{ 1442{
1443 u8 i; 1443 u8 i;
1444 struct radeon_vce_clock_voltage_dependency_table *table = 1444 struct radeon_vce_clock_voltage_dependency_table *table =
1445 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1445 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1446 1446
1447 for (i = 0; i < table->count; i++) { 1447 for (i = 0; i < table->count; i++) {
1448 if (table->entries[i].evclk >= 0) /* XXX */ 1448 if (table->entries[i].evclk >= evclk)
1449 break; 1449 break;
1450 } 1450 }
1451 1451
@@ -1468,7 +1468,7 @@ static int kv_update_vce_dpm(struct radeon_device *rdev,
1468 if (pi->caps_stable_p_state) 1468 if (pi->caps_stable_p_state)
1469 pi->vce_boot_level = table->count - 1; 1469 pi->vce_boot_level = table->count - 1;
1470 else 1470 else
1471 pi->vce_boot_level = kv_get_vce_boot_level(rdev); 1471 pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);
1472 1472
1473 ret = kv_copy_bytes_to_smc(rdev, 1473 ret = kv_copy_bytes_to_smc(rdev,
1474 pi->dpm_table_start + 1474 pi->dpm_table_start +
@@ -2726,7 +2726,10 @@ int kv_dpm_init(struct radeon_device *rdev)
2726 pi->caps_sclk_ds = true; 2726 pi->caps_sclk_ds = true;
2727 pi->enable_auto_thermal_throttling = true; 2727 pi->enable_auto_thermal_throttling = true;
2728 pi->disable_nb_ps3_in_battery = false; 2728 pi->disable_nb_ps3_in_battery = false;
2729 pi->bapm_enable = true; 2729 if (radeon_bapm == 0)
2730 pi->bapm_enable = false;
2731 else
2732 pi->bapm_enable = true;
2730 pi->voltage_drop_t = 0; 2733 pi->voltage_drop_t = 0;
2731 pi->caps_sclk_throttle_low_notification = false; 2734 pi->caps_sclk_throttle_low_notification = false;
2732 pi->caps_fps = false; /* true? */ 2735 pi->caps_fps = false; /* true? */
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 327b85f7fd0d..ba89375f197f 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1505,7 +1505,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
1505 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 1505 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1506 radeon_ring_write(ring, 0); 1506 radeon_ring_write(ring, 0);
1507 radeon_ring_write(ring, 0); 1507 radeon_ring_write(ring, 0);
1508 radeon_ring_unlock_commit(rdev, ring); 1508 radeon_ring_unlock_commit(rdev, ring, false);
1509 1509
1510 cayman_cp_enable(rdev, true); 1510 cayman_cp_enable(rdev, true);
1511 1511
@@ -1547,7 +1547,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
1547 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 1547 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1548 radeon_ring_write(ring, 0x00000010); /* */ 1548 radeon_ring_write(ring, 0x00000010); /* */
1549 1549
1550 radeon_ring_unlock_commit(rdev, ring); 1550 radeon_ring_unlock_commit(rdev, ring, false);
1551 1551
1552 /* XXX init other rings */ 1552 /* XXX init other rings */
1553 1553
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 04b5940b8923..4c5ec44ff328 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -925,7 +925,7 @@ int r100_copy_blit(struct radeon_device *rdev,
925 if (fence) { 925 if (fence) {
926 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); 926 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
927 } 927 }
928 radeon_ring_unlock_commit(rdev, ring); 928 radeon_ring_unlock_commit(rdev, ring, false);
929 return r; 929 return r;
930} 930}
931 931
@@ -958,7 +958,7 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
958 RADEON_ISYNC_ANY3D_IDLE2D | 958 RADEON_ISYNC_ANY3D_IDLE2D |
959 RADEON_ISYNC_WAIT_IDLEGUI | 959 RADEON_ISYNC_WAIT_IDLEGUI |
960 RADEON_ISYNC_CPSCRATCH_IDLEGUI); 960 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
961 radeon_ring_unlock_commit(rdev, ring); 961 radeon_ring_unlock_commit(rdev, ring, false);
962} 962}
963 963
964 964
@@ -3638,7 +3638,7 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
3638 } 3638 }
3639 radeon_ring_write(ring, PACKET0(scratch, 0)); 3639 radeon_ring_write(ring, PACKET0(scratch, 0));
3640 radeon_ring_write(ring, 0xDEADBEEF); 3640 radeon_ring_write(ring, 0xDEADBEEF);
3641 radeon_ring_unlock_commit(rdev, ring); 3641 radeon_ring_unlock_commit(rdev, ring, false);
3642 for (i = 0; i < rdev->usec_timeout; i++) { 3642 for (i = 0; i < rdev->usec_timeout; i++) {
3643 tmp = RREG32(scratch); 3643 tmp = RREG32(scratch);
3644 if (tmp == 0xDEADBEEF) { 3644 if (tmp == 0xDEADBEEF) {
@@ -3700,7 +3700,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3700 ib.ptr[6] = PACKET2(0); 3700 ib.ptr[6] = PACKET2(0);
3701 ib.ptr[7] = PACKET2(0); 3701 ib.ptr[7] = PACKET2(0);
3702 ib.length_dw = 8; 3702 ib.length_dw = 8;
3703 r = radeon_ib_schedule(rdev, &ib, NULL); 3703 r = radeon_ib_schedule(rdev, &ib, NULL, false);
3704 if (r) { 3704 if (r) {
3705 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 3705 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3706 goto free_ib; 3706 goto free_ib;
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 58f0473aa73f..67780374a652 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -121,7 +121,7 @@ int r200_copy_dma(struct radeon_device *rdev,
121 if (fence) { 121 if (fence) {
122 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); 122 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
123 } 123 }
124 radeon_ring_unlock_commit(rdev, ring); 124 radeon_ring_unlock_commit(rdev, ring, false);
125 return r; 125 return r;
126} 126}
127 127
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 75b30338c226..1bc4704034ce 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -295,7 +295,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
295 radeon_ring_write(ring, 295 radeon_ring_write(ring,
296 R300_GEOMETRY_ROUND_NEAREST | 296 R300_GEOMETRY_ROUND_NEAREST |
297 R300_COLOR_ROUND_NEAREST); 297 R300_COLOR_ROUND_NEAREST);
298 radeon_ring_unlock_commit(rdev, ring); 298 radeon_ring_unlock_commit(rdev, ring, false);
299} 299}
300 300
301static void r300_errata(struct radeon_device *rdev) 301static void r300_errata(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 802b19220a21..2828605aef3f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -219,7 +219,7 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
219 radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1)); 219 radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
220 radeon_ring_write(ring, rdev->config.r300.resync_scratch); 220 radeon_ring_write(ring, rdev->config.r300.resync_scratch);
221 radeon_ring_write(ring, 0xDEADBEEF); 221 radeon_ring_write(ring, 0xDEADBEEF);
222 radeon_ring_unlock_commit(rdev, ring); 222 radeon_ring_unlock_commit(rdev, ring, false);
223} 223}
224 224
225static void r420_cp_errata_fini(struct radeon_device *rdev) 225static void r420_cp_errata_fini(struct radeon_device *rdev)
@@ -232,7 +232,7 @@ static void r420_cp_errata_fini(struct radeon_device *rdev)
232 radeon_ring_lock(rdev, ring, 8); 232 radeon_ring_lock(rdev, ring, 8);
233 radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); 233 radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
234 radeon_ring_write(ring, R300_RB3D_DC_FINISH); 234 radeon_ring_write(ring, R300_RB3D_DC_FINISH);
235 radeon_ring_unlock_commit(rdev, ring); 235 radeon_ring_unlock_commit(rdev, ring, false);
236 radeon_scratch_free(rdev, rdev->config.r300.resync_scratch); 236 radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
237} 237}
238 238
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c70a504d96af..e8bf0ea2dade 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2547,7 +2547,7 @@ int r600_cp_start(struct radeon_device *rdev)
2547 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 2547 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2548 radeon_ring_write(ring, 0); 2548 radeon_ring_write(ring, 0);
2549 radeon_ring_write(ring, 0); 2549 radeon_ring_write(ring, 0);
2550 radeon_ring_unlock_commit(rdev, ring); 2550 radeon_ring_unlock_commit(rdev, ring, false);
2551 2551
2552 cp_me = 0xff; 2552 cp_me = 0xff;
2553 WREG32(R_0086D8_CP_ME_CNTL, cp_me); 2553 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2683,7 +2683,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2683 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2683 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2684 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 2684 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2685 radeon_ring_write(ring, 0xDEADBEEF); 2685 radeon_ring_write(ring, 0xDEADBEEF);
2686 radeon_ring_unlock_commit(rdev, ring); 2686 radeon_ring_unlock_commit(rdev, ring, false);
2687 for (i = 0; i < rdev->usec_timeout; i++) { 2687 for (i = 0; i < rdev->usec_timeout; i++) {
2688 tmp = RREG32(scratch); 2688 tmp = RREG32(scratch);
2689 if (tmp == 0xDEADBEEF) 2689 if (tmp == 0xDEADBEEF)
@@ -2753,6 +2753,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
2753 } 2753 }
2754} 2754}
2755 2755
2756/**
2757 * r600_semaphore_ring_emit - emit a semaphore on the CP ring
2758 *
2759 * @rdev: radeon_device pointer
2760 * @ring: radeon ring buffer object
2761 * @semaphore: radeon semaphore object
 2762 * @emit_wait: Is this a semaphore wait?
2763 *
2764 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
2765 * from running ahead of semaphore waits.
2766 */
2756bool r600_semaphore_ring_emit(struct radeon_device *rdev, 2767bool r600_semaphore_ring_emit(struct radeon_device *rdev,
2757 struct radeon_ring *ring, 2768 struct radeon_ring *ring,
2758 struct radeon_semaphore *semaphore, 2769 struct radeon_semaphore *semaphore,
@@ -2768,6 +2779,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
2768 radeon_ring_write(ring, lower_32_bits(addr)); 2779 radeon_ring_write(ring, lower_32_bits(addr));
2769 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); 2780 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2770 2781
2782 /* PFP_SYNC_ME packet only exists on 7xx+ */
2783 if (emit_wait && (rdev->family >= CHIP_RV770)) {
2784 /* Prevent the PFP from running ahead of the semaphore wait */
2785 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2786 radeon_ring_write(ring, 0x0);
2787 }
2788
2771 return true; 2789 return true;
2772} 2790}
2773 2791
@@ -2845,7 +2863,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
2845 return r; 2863 return r;
2846 } 2864 }
2847 2865
2848 radeon_ring_unlock_commit(rdev, ring); 2866 radeon_ring_unlock_commit(rdev, ring, false);
2849 radeon_semaphore_free(rdev, &sem, *fence); 2867 radeon_semaphore_free(rdev, &sem, *fence);
2850 2868
2851 return r; 2869 return r;
@@ -3165,7 +3183,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3165 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 3183 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3166 ib.ptr[2] = 0xDEADBEEF; 3184 ib.ptr[2] = 0xDEADBEEF;
3167 ib.length_dw = 3; 3185 ib.length_dw = 3;
3168 r = radeon_ib_schedule(rdev, &ib, NULL); 3186 r = radeon_ib_schedule(rdev, &ib, NULL, false);
3169 if (r) { 3187 if (r) {
3170 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 3188 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3171 goto free_ib; 3189 goto free_ib;
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 4969cef44a19..51fd98553eaf 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -261,7 +261,7 @@ int r600_dma_ring_test(struct radeon_device *rdev,
261 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); 261 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
262 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); 262 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
263 radeon_ring_write(ring, 0xDEADBEEF); 263 radeon_ring_write(ring, 0xDEADBEEF);
264 radeon_ring_unlock_commit(rdev, ring); 264 radeon_ring_unlock_commit(rdev, ring, false);
265 265
266 for (i = 0; i < rdev->usec_timeout; i++) { 266 for (i = 0; i < rdev->usec_timeout; i++) {
267 tmp = readl(ptr); 267 tmp = readl(ptr);
@@ -368,7 +368,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
368 ib.ptr[3] = 0xDEADBEEF; 368 ib.ptr[3] = 0xDEADBEEF;
369 ib.length_dw = 4; 369 ib.length_dw = 4;
370 370
371 r = radeon_ib_schedule(rdev, &ib, NULL); 371 r = radeon_ib_schedule(rdev, &ib, NULL, false);
372 if (r) { 372 if (r) {
373 radeon_ib_free(rdev, &ib); 373 radeon_ib_free(rdev, &ib);
374 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 374 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
@@ -493,7 +493,7 @@ int r600_copy_dma(struct radeon_device *rdev,
493 return r; 493 return r;
494 } 494 }
495 495
496 radeon_ring_unlock_commit(rdev, ring); 496 radeon_ring_unlock_commit(rdev, ring, false);
497 radeon_semaphore_free(rdev, &sem, *fence); 497 radeon_semaphore_free(rdev, &sem, *fence);
498 498
499 return r; 499 return r;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index f94e7a9afe75..0c4a7d8d93e0 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1597,6 +1597,7 @@
1597 */ 1597 */
1598# define PACKET3_CP_DMA_CMD_SAIC (1 << 28) 1598# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
1599# define PACKET3_CP_DMA_CMD_DAIC (1 << 29) 1599# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
1600#define PACKET3_PFP_SYNC_ME 0x42 /* r7xx+ only */
1600#define PACKET3_SURFACE_SYNC 0x43 1601#define PACKET3_SURFACE_SYNC 0x43
1601# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) 1602# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
1602# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */ 1603# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9e1732eb402c..b281886f6f51 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -105,6 +105,7 @@ extern int radeon_vm_size;
105extern int radeon_vm_block_size; 105extern int radeon_vm_block_size;
106extern int radeon_deep_color; 106extern int radeon_deep_color;
107extern int radeon_use_pflipirq; 107extern int radeon_use_pflipirq;
108extern int radeon_bapm;
108 109
109/* 110/*
110 * Copy from radeon_drv.h so we don't have to include both and have conflicting 111 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -967,7 +968,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
967 unsigned size); 968 unsigned size);
968void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); 969void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
969int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, 970int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
970 struct radeon_ib *const_ib); 971 struct radeon_ib *const_ib, bool hdp_flush);
971int radeon_ib_pool_init(struct radeon_device *rdev); 972int radeon_ib_pool_init(struct radeon_device *rdev);
972void radeon_ib_pool_fini(struct radeon_device *rdev); 973void radeon_ib_pool_fini(struct radeon_device *rdev);
973int radeon_ib_ring_tests(struct radeon_device *rdev); 974int radeon_ib_ring_tests(struct radeon_device *rdev);
@@ -977,8 +978,10 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
977void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); 978void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
978int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); 979int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
979int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); 980int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
980void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp); 981void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp,
981void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp); 982 bool hdp_flush);
983void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp,
984 bool hdp_flush);
982void radeon_ring_undo(struct radeon_ring *ring); 985void radeon_ring_undo(struct radeon_ring *ring);
983void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp); 986void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
984int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); 987int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ee712c199b25..83f382e8e40e 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -132,7 +132,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
132 * the buffers used for read only, which doubles the range 132 * the buffers used for read only, which doubles the range
133 * to 0 to 31. 32 is reserved for the kernel driver. 133 * to 0 to 31. 32 is reserved for the kernel driver.
134 */ 134 */
135 priority = (r->flags & 0xf) * 2 + !!r->write_domain; 135 priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
136 + !!r->write_domain;
136 137
137 /* the first reloc of an UVD job is the msg and that must be in 138 /* the first reloc of an UVD job is the msg and that must be in
138 VRAM, also but everything into VRAM on AGP cards to avoid 139 VRAM, also but everything into VRAM on AGP cards to avoid
@@ -450,7 +451,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
450 radeon_vce_note_usage(rdev); 451 radeon_vce_note_usage(rdev);
451 452
452 radeon_cs_sync_rings(parser); 453 radeon_cs_sync_rings(parser);
453 r = radeon_ib_schedule(rdev, &parser->ib, NULL); 454 r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
454 if (r) { 455 if (r) {
455 DRM_ERROR("Failed to schedule IB !\n"); 456 DRM_ERROR("Failed to schedule IB !\n");
456 } 457 }
@@ -541,9 +542,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
541 542
542 if ((rdev->family >= CHIP_TAHITI) && 543 if ((rdev->family >= CHIP_TAHITI) &&
543 (parser->chunk_const_ib_idx != -1)) { 544 (parser->chunk_const_ib_idx != -1)) {
544 r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib); 545 r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
545 } else { 546 } else {
546 r = radeon_ib_schedule(rdev, &parser->ib, NULL); 547 r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
547 } 548 }
548 549
549out: 550out:
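The reloc priority above is plain integer arithmetic over the new RADEON_RELOC_PRIO_MASK (0xf, added to radeon_drm.h further down in this series). A minimal userspace sketch of the resulting 0-31 range follows; reloc_priority is an illustrative name, not a driver function:

#include <stdio.h>
#include <stdint.h>

#define RADEON_RELOC_PRIO_MASK (0xf << 0) /* value as added to radeon_drm.h */

/* Mirrors the computation in radeon_cs_parser_relocs(): userspace
 * priorities 0-15 are doubled and write relocs get +1, giving 0-31;
 * 32 stays reserved for the kernel driver. */
static unsigned reloc_priority(uint32_t flags, uint32_t write_domain)
{
        return (flags & RADEON_RELOC_PRIO_MASK) * 2 + !!write_domain;
}

int main(void)
{
        printf("read-only reloc, prio 15 -> %u\n", reloc_priority(0xff, 0)); /* 30 */
        printf("write reloc,     prio 15 -> %u\n", reloc_priority(0x0f, 1)); /* 31 */
        return 0;
}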
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c8ea050c8fa4..6a219bcee66d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1680,8 +1680,8 @@ int radeon_gpu_reset(struct radeon_device *rdev)
1680 radeon_save_bios_scratch_regs(rdev); 1680 radeon_save_bios_scratch_regs(rdev);
1681 /* block TTM */ 1681 /* block TTM */
1682 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); 1682 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1683 radeon_pm_suspend(rdev);
1684 radeon_suspend(rdev); 1683 radeon_suspend(rdev);
1684 radeon_hpd_fini(rdev);
1685 1685
1686 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1686 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1687 ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i], 1687 ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
@@ -1726,9 +1726,39 @@ retry:
1726 } 1726 }
1727 } 1727 }
1728 1728
1729 radeon_pm_resume(rdev); 1729 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
1730 /* do dpm late init */
1731 r = radeon_pm_late_init(rdev);
1732 if (r) {
1733 rdev->pm.dpm_enabled = false;
1734 DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1735 }
1736 } else {
1737 /* resume old pm late */
1738 radeon_pm_resume(rdev);
1739 }
1740
1741 /* init dig PHYs, disp eng pll */
1742 if (rdev->is_atom_bios) {
1743 radeon_atom_encoder_init(rdev);
1744 radeon_atom_disp_eng_pll_init(rdev);
1745 /* turn on the BL */
1746 if (rdev->mode_info.bl_encoder) {
1747 u8 bl_level = radeon_get_backlight_level(rdev,
1748 rdev->mode_info.bl_encoder);
1749 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1750 bl_level);
1751 }
1752 }
1753 /* reset hpd state */
1754 radeon_hpd_init(rdev);
1755
1730 drm_helper_resume_force_mode(rdev->ddev); 1756 drm_helper_resume_force_mode(rdev->ddev);
1731 1757
1758 /* set the power state here in case we are a PX system or headless */
1759 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
1760 radeon_pm_compute_clocks(rdev);
1761
1732 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 1762 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1733 if (r) { 1763 if (r) {
1734 /* bad news, how to tell it to userspace ? */ 1764 /* bad news, how to tell it to userspace ? */
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 092d067f93e1..8df888908833 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -180,6 +180,7 @@ int radeon_vm_size = 8;
180int radeon_vm_block_size = -1; 180int radeon_vm_block_size = -1;
181int radeon_deep_color = 0; 181int radeon_deep_color = 0;
182int radeon_use_pflipirq = 2; 182int radeon_use_pflipirq = 2;
183int radeon_bapm = -1;
183 184
184MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 185MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
185module_param_named(no_wb, radeon_no_wb, int, 0444); 186module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -259,6 +260,9 @@ module_param_named(deep_color, radeon_deep_color, int, 0444);
259MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))"); 260MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))");
260module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444); 261module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
261 262
263MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
264module_param_named(bapm, radeon_bapm, int, 0444);
265
262static struct pci_device_id pciidlist[] = { 266static struct pci_device_id pciidlist[] = {
263 radeon_PCI_IDS 267 radeon_PCI_IDS
264}; 268};
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index 65b0c213488d..5bf2c0a05827 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -107,6 +107,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
107 * @rdev: radeon_device pointer 107 * @rdev: radeon_device pointer
108 * @ib: IB object to schedule 108 * @ib: IB object to schedule
109 * @const_ib: Const IB to schedule (SI only) 109 * @const_ib: Const IB to schedule (SI only)
110 * @hdp_flush: Whether or not to perform an HDP cache flush
110 * 111 *
111 * Schedule an IB on the associated ring (all asics). 112 * Schedule an IB on the associated ring (all asics).
112 * Returns 0 on success, error on failure. 113 * Returns 0 on success, error on failure.
@@ -122,7 +123,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
122 * to SI there was just a DE IB. 123 * to SI there was just a DE IB.
123 */ 124 */
124int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, 125int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
125 struct radeon_ib *const_ib) 126 struct radeon_ib *const_ib, bool hdp_flush)
126{ 127{
127 struct radeon_ring *ring = &rdev->ring[ib->ring]; 128 struct radeon_ring *ring = &rdev->ring[ib->ring];
128 int r = 0; 129 int r = 0;
@@ -176,7 +177,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
176 if (ib->vm) 177 if (ib->vm)
177 radeon_vm_fence(rdev, ib->vm, ib->fence); 178 radeon_vm_fence(rdev, ib->vm, ib->fence);
178 179
179 radeon_ring_unlock_commit(rdev, ring); 180 radeon_ring_unlock_commit(rdev, ring, hdp_flush);
180 return 0; 181 return 0;
181} 182}
182 183
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 23314be49480..164898b0010c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -460,10 +460,6 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
460 struct radeon_device *rdev = ddev->dev_private; 460 struct radeon_device *rdev = ddev->dev_private;
461 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; 461 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
462 462
463 if ((rdev->flags & RADEON_IS_PX) &&
464 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
465 return snprintf(buf, PAGE_SIZE, "off\n");
466
467 return snprintf(buf, PAGE_SIZE, "%s\n", 463 return snprintf(buf, PAGE_SIZE, "%s\n",
468 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 464 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
469 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 465 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -477,11 +473,6 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
477 struct drm_device *ddev = dev_get_drvdata(dev); 473 struct drm_device *ddev = dev_get_drvdata(dev);
478 struct radeon_device *rdev = ddev->dev_private; 474 struct radeon_device *rdev = ddev->dev_private;
479 475
480 /* Can't set dpm state when the card is off */
481 if ((rdev->flags & RADEON_IS_PX) &&
482 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
483 return -EINVAL;
484
485 mutex_lock(&rdev->pm.mutex); 476 mutex_lock(&rdev->pm.mutex);
486 if (strncmp("battery", buf, strlen("battery")) == 0) 477 if (strncmp("battery", buf, strlen("battery")) == 0)
487 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; 478 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -495,7 +486,12 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
495 goto fail; 486 goto fail;
496 } 487 }
497 mutex_unlock(&rdev->pm.mutex); 488 mutex_unlock(&rdev->pm.mutex);
498 radeon_pm_compute_clocks(rdev); 489
490 /* Can't set dpm state when the card is off */
491 if (!(rdev->flags & RADEON_IS_PX) ||
492 (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
493 radeon_pm_compute_clocks(rdev);
494
499fail: 495fail:
500 return count; 496 return count;
501} 497}
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 5b4e0cf231a0..d65607902537 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -177,16 +177,18 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
177 * 177 *
178 * @rdev: radeon_device pointer 178 * @rdev: radeon_device pointer
179 * @ring: radeon_ring structure holding ring information 179 * @ring: radeon_ring structure holding ring information
180 * @hdp_flush: Whether or not to perform an HDP cache flush
180 * 181 *
181 * Update the wptr (write pointer) to tell the GPU to 182 * Update the wptr (write pointer) to tell the GPU to
182 * execute new commands on the ring buffer (all asics). 183 * execute new commands on the ring buffer (all asics).
183 */ 184 */
184void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring) 185void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
186 bool hdp_flush)
185{ 187{
186 /* If we are emitting the HDP flush via the ring buffer, we need to 188 /* If we are emitting the HDP flush via the ring buffer, we need to
187 * do it before padding. 189 * do it before padding.
188 */ 190 */
189 if (rdev->asic->ring[ring->idx]->hdp_flush) 191 if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)
190 rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring); 192 rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
191 /* We pad to match fetch size */ 193 /* We pad to match fetch size */
192 while (ring->wptr & ring->align_mask) { 194 while (ring->wptr & ring->align_mask) {
@@ -196,7 +198,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
196 /* If we are emitting the HDP flush via MMIO, we need to do it after 198 /* If we are emitting the HDP flush via MMIO, we need to do it after
197 * all CPU writes to VRAM finished. 199 * all CPU writes to VRAM finished.
198 */ 200 */
199 if (rdev->asic->mmio_hdp_flush) 201 if (hdp_flush && rdev->asic->mmio_hdp_flush)
200 rdev->asic->mmio_hdp_flush(rdev); 202 rdev->asic->mmio_hdp_flush(rdev);
201 radeon_ring_set_wptr(rdev, ring); 203 radeon_ring_set_wptr(rdev, ring);
202} 204}
@@ -207,12 +209,14 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
207 * 209 *
208 * @rdev: radeon_device pointer 210 * @rdev: radeon_device pointer
209 * @ring: radeon_ring structure holding ring information 211 * @ring: radeon_ring structure holding ring information
212 * @hdp_flush: Whether or not to perform an HDP cache flush
210 * 213 *
211 * Call radeon_ring_commit() then unlock the ring (all asics). 214 * Call radeon_ring_commit() then unlock the ring (all asics).
212 */ 215 */
213void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring) 216void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring,
217 bool hdp_flush)
214{ 218{
215 radeon_ring_commit(rdev, ring); 219 radeon_ring_commit(rdev, ring, hdp_flush);
216 mutex_unlock(&rdev->ring_lock); 220 mutex_unlock(&rdev->ring_lock);
217} 221}
218 222
@@ -372,7 +376,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
372 radeon_ring_write(ring, data[i]); 376 radeon_ring_write(ring, data[i]);
373 } 377 }
374 378
375 radeon_ring_unlock_commit(rdev, ring); 379 radeon_ring_unlock_commit(rdev, ring, false);
376 kfree(data); 380 kfree(data);
377 return 0; 381 return 0;
378} 382}
@@ -400,9 +404,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
400 /* Allocate ring buffer */ 404 /* Allocate ring buffer */
401 if (ring->ring_obj == NULL) { 405 if (ring->ring_obj == NULL) {
402 r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true, 406 r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
403 RADEON_GEM_DOMAIN_GTT, 407 RADEON_GEM_DOMAIN_GTT, 0,
404 (rdev->flags & RADEON_IS_PCIE) ?
405 RADEON_GEM_GTT_WC : 0,
406 NULL, &ring->ring_obj); 408 NULL, &ring->ring_obj);
407 if (r) { 409 if (r) {
408 dev_err(rdev->dev, "(%d) ring create failed\n", r); 410 dev_err(rdev->dev, "(%d) ring create failed\n", r);
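With this change the HDP cache is flushed only when the caller passes hdp_flush, and the two flush paths keep their ordering: the ring-emitted flush lands before the padding NOPs, the MMIO flush after all CPU writes. A standalone sketch of just that control flow; mock_ring, flush_via_ring and flush_via_mmio are illustrative stand-ins, not driver symbols:

#include <stdbool.h>
#include <stdio.h>

struct mock_ring {
        unsigned wptr;
        unsigned align_mask;            /* fetch size - 1 */
        void (*ring_hdp_flush)(void);   /* flush emitted via ring packets */
        void (*mmio_hdp_flush)(void);   /* flush done through MMIO */
};

static void flush_via_ring(void) { puts("HDP flush via ring (before padding)"); }
static void flush_via_mmio(void) { puts("HDP flush via MMIO (after padding)"); }

static void mock_ring_commit(struct mock_ring *ring, bool hdp_flush)
{
        /* Ring-buffer flush must land before the padding NOPs. */
        if (hdp_flush && ring->ring_hdp_flush)
                ring->ring_hdp_flush();

        /* Pad to match fetch size, as in radeon_ring_commit(). */
        while (ring->wptr & ring->align_mask)
                ring->wptr++;           /* stands in for a NOP dword */

        /* MMIO flush happens after all CPU writes are done. */
        if (hdp_flush && ring->mmio_hdp_flush)
                ring->mmio_hdp_flush();

        printf("wptr committed at %u\n", ring->wptr);
}

int main(void)
{
        struct mock_ring r = { 13, 15, flush_via_ring, flush_via_mmio };

        mock_ring_commit(&r, false);    /* e.g. ring tests: skip the flushes */
        r.wptr = 21;
        mock_ring_commit(&r, true);     /* e.g. CS submission: flush HDP cache */
        return 0;
}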
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index dbd6bcde92de..56d9fd66d8ae 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -179,7 +179,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
179 continue; 179 continue;
180 } 180 }
181 181
182 radeon_ring_commit(rdev, &rdev->ring[i]); 182 radeon_ring_commit(rdev, &rdev->ring[i], false);
183 radeon_fence_note_sync(fence, ring); 183 radeon_fence_note_sync(fence, ring);
184 184
185 semaphore->gpu_addr += 8; 185 semaphore->gpu_addr += 8;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 5adf4207453d..17bc3dced9f1 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -288,7 +288,7 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
288 return r; 288 return r;
289 } 289 }
290 radeon_fence_emit(rdev, fence, ring->idx); 290 radeon_fence_emit(rdev, fence, ring->idx);
291 radeon_ring_unlock_commit(rdev, ring); 291 radeon_ring_unlock_commit(rdev, ring, false);
292 } 292 }
293 return 0; 293 return 0;
294} 294}
@@ -313,7 +313,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
313 goto out_cleanup; 313 goto out_cleanup;
314 } 314 }
315 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); 315 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
316 radeon_ring_unlock_commit(rdev, ringA); 316 radeon_ring_unlock_commit(rdev, ringA, false);
317 317
318 r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1); 318 r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
319 if (r) 319 if (r)
@@ -325,7 +325,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
325 goto out_cleanup; 325 goto out_cleanup;
326 } 326 }
327 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); 327 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
328 radeon_ring_unlock_commit(rdev, ringA); 328 radeon_ring_unlock_commit(rdev, ringA, false);
329 329
330 r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2); 330 r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
331 if (r) 331 if (r)
@@ -344,7 +344,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
344 goto out_cleanup; 344 goto out_cleanup;
345 } 345 }
346 radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore); 346 radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
347 radeon_ring_unlock_commit(rdev, ringB); 347 radeon_ring_unlock_commit(rdev, ringB, false);
348 348
349 r = radeon_fence_wait(fence1, false); 349 r = radeon_fence_wait(fence1, false);
350 if (r) { 350 if (r) {
@@ -365,7 +365,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
365 goto out_cleanup; 365 goto out_cleanup;
366 } 366 }
367 radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore); 367 radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
368 radeon_ring_unlock_commit(rdev, ringB); 368 radeon_ring_unlock_commit(rdev, ringB, false);
369 369
370 r = radeon_fence_wait(fence2, false); 370 r = radeon_fence_wait(fence2, false);
371 if (r) { 371 if (r) {
@@ -408,7 +408,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
408 goto out_cleanup; 408 goto out_cleanup;
409 } 409 }
410 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); 410 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
411 radeon_ring_unlock_commit(rdev, ringA); 411 radeon_ring_unlock_commit(rdev, ringA, false);
412 412
413 r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA); 413 r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
414 if (r) 414 if (r)
@@ -420,7 +420,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
420 goto out_cleanup; 420 goto out_cleanup;
421 } 421 }
422 radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore); 422 radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
423 radeon_ring_unlock_commit(rdev, ringB); 423 radeon_ring_unlock_commit(rdev, ringB, false);
424 r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB); 424 r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
425 if (r) 425 if (r)
426 goto out_cleanup; 426 goto out_cleanup;
@@ -442,7 +442,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
442 goto out_cleanup; 442 goto out_cleanup;
443 } 443 }
444 radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); 444 radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
445 radeon_ring_unlock_commit(rdev, ringC); 445 radeon_ring_unlock_commit(rdev, ringC, false);
446 446
447 for (i = 0; i < 30; ++i) { 447 for (i = 0; i < 30; ++i) {
448 mdelay(100); 448 mdelay(100);
@@ -468,7 +468,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
468 goto out_cleanup; 468 goto out_cleanup;
469 } 469 }
470 radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); 470 radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
471 radeon_ring_unlock_commit(rdev, ringC); 471 radeon_ring_unlock_commit(rdev, ringC, false);
472 472
473 mdelay(1000); 473 mdelay(1000);
474 474
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 6bf55ec85b62..341848a14376 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -646,7 +646,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
646 ib.ptr[i] = PACKET2(0); 646 ib.ptr[i] = PACKET2(0);
647 ib.length_dw = 16; 647 ib.length_dw = 16;
648 648
649 r = radeon_ib_schedule(rdev, &ib, NULL); 649 r = radeon_ib_schedule(rdev, &ib, NULL, false);
650 if (r) 650 if (r)
651 goto err; 651 goto err;
652 ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); 652 ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index f9b70a43aa52..c7190aadbd89 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -368,7 +368,7 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
368 for (i = ib.length_dw; i < ib_size_dw; ++i) 368 for (i = ib.length_dw; i < ib_size_dw; ++i)
369 ib.ptr[i] = 0x0; 369 ib.ptr[i] = 0x0;
370 370
371 r = radeon_ib_schedule(rdev, &ib, NULL); 371 r = radeon_ib_schedule(rdev, &ib, NULL, false);
372 if (r) { 372 if (r) {
373 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 373 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
374 } 374 }
@@ -425,7 +425,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
425 for (i = ib.length_dw; i < ib_size_dw; ++i) 425 for (i = ib.length_dw; i < ib_size_dw; ++i)
426 ib.ptr[i] = 0x0; 426 ib.ptr[i] = 0x0;
427 427
428 r = radeon_ib_schedule(rdev, &ib, NULL); 428 r = radeon_ib_schedule(rdev, &ib, NULL, false);
429 if (r) { 429 if (r) {
430 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 430 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
431 } 431 }
@@ -715,7 +715,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
715 return r; 715 return r;
716 } 716 }
717 radeon_ring_write(ring, VCE_CMD_END); 717 radeon_ring_write(ring, VCE_CMD_END);
718 radeon_ring_unlock_commit(rdev, ring); 718 radeon_ring_unlock_commit(rdev, ring, false);
719 719
720 for (i = 0; i < rdev->usec_timeout; i++) { 720 for (i = 0; i < rdev->usec_timeout; i++) {
721 if (vce_v1_0_get_rptr(rdev, ring) != rptr) 721 if (vce_v1_0_get_rptr(rdev, ring) != rptr)
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index ccae4d9dc3de..088ffdc2f577 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -420,7 +420,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
420 radeon_asic_vm_pad_ib(rdev, &ib); 420 radeon_asic_vm_pad_ib(rdev, &ib);
421 WARN_ON(ib.length_dw > 64); 421 WARN_ON(ib.length_dw > 64);
422 422
423 r = radeon_ib_schedule(rdev, &ib, NULL); 423 r = radeon_ib_schedule(rdev, &ib, NULL, false);
424 if (r) 424 if (r)
425 goto error; 425 goto error;
426 426
@@ -483,6 +483,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
483 /* add a clone of the bo_va to clear the old address */ 483 /* add a clone of the bo_va to clear the old address */
484 struct radeon_bo_va *tmp; 484 struct radeon_bo_va *tmp;
485 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); 485 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
486 if (!tmp) {
487 mutex_unlock(&vm->mutex);
488 return -ENOMEM;
489 }
486 tmp->it.start = bo_va->it.start; 490 tmp->it.start = bo_va->it.start;
487 tmp->it.last = bo_va->it.last; 491 tmp->it.last = bo_va->it.last;
488 tmp->vm = vm; 492 tmp->vm = vm;
@@ -693,7 +697,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
693 radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj); 697 radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
694 radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); 698 radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
695 WARN_ON(ib.length_dw > ndw); 699 WARN_ON(ib.length_dw > ndw);
696 r = radeon_ib_schedule(rdev, &ib, NULL); 700 r = radeon_ib_schedule(rdev, &ib, NULL, false);
697 if (r) { 701 if (r) {
698 radeon_ib_free(rdev, &ib); 702 radeon_ib_free(rdev, &ib);
699 return r; 703 return r;
@@ -957,7 +961,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
957 WARN_ON(ib.length_dw > ndw); 961 WARN_ON(ib.length_dw > ndw);
958 962
959 radeon_semaphore_sync_to(ib.semaphore, vm->fence); 963 radeon_semaphore_sync_to(ib.semaphore, vm->fence);
960 r = radeon_ib_schedule(rdev, &ib, NULL); 964 r = radeon_ib_schedule(rdev, &ib, NULL, false);
961 if (r) { 965 if (r) {
962 radeon_ib_free(rdev, &ib); 966 radeon_ib_free(rdev, &ib);
963 return r; 967 return r;
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 3e21e869015f..8a477bf1fdb3 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -124,7 +124,7 @@ void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
124 radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST); 124 radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
125 radeon_ring_write(ring, PACKET0(0x20C8, 0)); 125 radeon_ring_write(ring, PACKET0(0x20C8, 0));
126 radeon_ring_write(ring, 0); 126 radeon_ring_write(ring, 0);
127 radeon_ring_unlock_commit(rdev, ring); 127 radeon_ring_unlock_commit(rdev, ring, false);
128} 128}
129 129
130int rv515_mc_wait_for_idle(struct radeon_device *rdev) 130int rv515_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index bbf2e076ee45..74426ac2bb5c 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -90,7 +90,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
90 return r; 90 return r;
91 } 91 }
92 92
93 radeon_ring_unlock_commit(rdev, ring); 93 radeon_ring_unlock_commit(rdev, ring, false);
94 radeon_semaphore_free(rdev, &sem, *fence); 94 radeon_semaphore_free(rdev, &sem, *fence);
95 95
96 return r; 96 return r;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 011779bd2b3d..a1274a31405c 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3057,7 +3057,7 @@ static void si_gpu_init(struct radeon_device *rdev)
3057 u32 sx_debug_1; 3057 u32 sx_debug_1;
3058 u32 hdp_host_path_cntl; 3058 u32 hdp_host_path_cntl;
3059 u32 tmp; 3059 u32 tmp;
3060 int i, j, k; 3060 int i, j;
3061 3061
3062 switch (rdev->family) { 3062 switch (rdev->family) {
3063 case CHIP_TAHITI: 3063 case CHIP_TAHITI:
@@ -3255,12 +3255,11 @@ static void si_gpu_init(struct radeon_device *rdev)
3255 rdev->config.si.max_sh_per_se, 3255 rdev->config.si.max_sh_per_se,
3256 rdev->config.si.max_cu_per_sh); 3256 rdev->config.si.max_cu_per_sh);
3257 3257
3258 rdev->config.si.active_cus = 0;
3258 for (i = 0; i < rdev->config.si.max_shader_engines; i++) { 3259 for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
3259 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) { 3260 for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
3260 for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) { 3261 rdev->config.si.active_cus +=
3261 rdev->config.si.active_cus += 3262 hweight32(si_get_cu_active_bitmap(rdev, i, j));
3262 hweight32(si_get_cu_active_bitmap(rdev, i, j));
3263 }
3264 } 3263 }
3265 } 3264 }
3266 3265
@@ -3541,7 +3540,7 @@ static int si_cp_start(struct radeon_device *rdev)
3541 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); 3540 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3542 radeon_ring_write(ring, 0xc000); 3541 radeon_ring_write(ring, 0xc000);
3543 radeon_ring_write(ring, 0xe000); 3542 radeon_ring_write(ring, 0xe000);
3544 radeon_ring_unlock_commit(rdev, ring); 3543 radeon_ring_unlock_commit(rdev, ring, false);
3545 3544
3546 si_cp_enable(rdev, true); 3545 si_cp_enable(rdev, true);
3547 3546
@@ -3570,7 +3569,7 @@ static int si_cp_start(struct radeon_device *rdev)
3570 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 3569 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
3571 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ 3570 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
3572 3571
3573 radeon_ring_unlock_commit(rdev, ring); 3572 radeon_ring_unlock_commit(rdev, ring, false);
3574 3573
3575 for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) { 3574 for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
3576 ring = &rdev->ring[i]; 3575 ring = &rdev->ring[i];
@@ -3580,7 +3579,7 @@ static int si_cp_start(struct radeon_device *rdev)
3580 radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0)); 3579 radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
3581 radeon_ring_write(ring, 0); 3580 radeon_ring_write(ring, 0);
3582 3581
3583 radeon_ring_unlock_commit(rdev, ring); 3582 radeon_ring_unlock_commit(rdev, ring, false);
3584 } 3583 }
3585 3584
3586 return 0; 3585 return 0;
@@ -5028,7 +5027,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
5028 5027
5029 /* flush hdp cache */ 5028 /* flush hdp cache */
5030 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5029 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5031 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 5030 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5032 WRITE_DATA_DST_SEL(0))); 5031 WRITE_DATA_DST_SEL(0)));
5033 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); 5032 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
5034 radeon_ring_write(ring, 0); 5033 radeon_ring_write(ring, 0);
@@ -5036,7 +5035,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
5036 5035
5037 /* bits 0-15 are the VM contexts0-15 */ 5036 /* bits 0-15 are the VM contexts0-15 */
5038 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5037 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5039 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 5038 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5040 WRITE_DATA_DST_SEL(0))); 5039 WRITE_DATA_DST_SEL(0)));
5041 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); 5040 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
5042 radeon_ring_write(ring, 0); 5041 radeon_ring_write(ring, 0);
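The si_gpu_init() hunk above removes the inner loop over max_cu_per_sh, which added the same per-SH popcount once per CU and overcounted active_cus; one hweight32() per SE/SH pair is enough. A tiny userspace illustration of the corrected summation, with made-up bitmap values standing in for si_get_cu_active_bitmap():

#include <stdio.h>
#include <stdint.h>

static uint32_t cu_bitmap[2][2] = {
        { 0x3ff, 0x3ff },   /* 10 CUs active in each SH of SE0 */
        { 0x1ff, 0x0ff },   /*  9 and 8 CUs active in SE1 */
};

int main(void)
{
        unsigned active_cus = 0;

        /* One population count per SE/SH pair, as in the patched loop. */
        for (int i = 0; i < 2; i++)
                for (int j = 0; j < 2; j++)
                        active_cus += __builtin_popcount(cu_bitmap[i][j]);

        printf("active CUs: %u\n", active_cus);   /* 37 */
        return 0;
}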
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index 716505129450..7c22baaf94db 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -275,7 +275,7 @@ int si_copy_dma(struct radeon_device *rdev,
275 return r; 275 return r;
276 } 276 }
277 277
278 radeon_ring_unlock_commit(rdev, ring); 278 radeon_ring_unlock_commit(rdev, ring, false);
279 radeon_semaphore_free(rdev, &sem, *fence); 279 radeon_semaphore_free(rdev, &sem, *fence);
280 280
281 return r; 281 return r;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 32e50be9c4ac..57f780053b3e 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,16 +1874,22 @@ int trinity_dpm_init(struct radeon_device *rdev)
1874 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 1874 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
1875 pi->at[i] = TRINITY_AT_DFLT; 1875 pi->at[i] = TRINITY_AT_DFLT;
1876 1876
1877 /* There are stability issues reported on with 1877 if (radeon_bapm == -1) {
1878 * bapm enabled when switching between AC and battery 1878 /* There are stability issues reported on with
1879 * power. At the same time, some MSI boards hang 1879 * bapm enabled when switching between AC and battery
1880 * if it's not enabled and dpm is enabled. Just enable 1880 * power. At the same time, some MSI boards hang
1881 * it for MSI boards right now. 1881 * if it's not enabled and dpm is enabled. Just enable
1882 */ 1882 * it for MSI boards right now.
1883 if (rdev->pdev->subsystem_vendor == 0x1462) 1883 */
1884 pi->enable_bapm = true; 1884 if (rdev->pdev->subsystem_vendor == 0x1462)
1885 else 1885 pi->enable_bapm = true;
1886 else
1887 pi->enable_bapm = false;
1888 } else if (radeon_bapm == 0) {
1886 pi->enable_bapm = false; 1889 pi->enable_bapm = false;
1890 } else {
1891 pi->enable_bapm = true;
1892 }
1887 pi->enable_nbps_policy = true; 1893 pi->enable_nbps_policy = true;
1888 pi->enable_sclk_ds = true; 1894 pi->enable_sclk_ds = true;
1889 pi->enable_gfx_power_gating = true; 1895 pi->enable_gfx_power_gating = true;
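The new radeon_bapm module parameter (declared earlier in radeon_drv.c as 1 = enable, 0 = disable, -1 = auto) overrides the MSI-only heuristic. The resulting decision, condensed into a small standalone helper; want_bapm is an illustrative name, not a driver function:

#include <stdbool.h>
#include <stdio.h>

/* -1 = auto (enable only on MSI boards, PCI subsystem vendor 0x1462),
 *  0 = force off, anything else = force on. */
static bool want_bapm(int radeon_bapm, unsigned short subsystem_vendor)
{
        if (radeon_bapm == -1)
                return subsystem_vendor == 0x1462;
        return radeon_bapm != 0;
}

int main(void)
{
        printf("auto on MSI board:   %d\n", want_bapm(-1, 0x1462)); /* 1 */
        printf("auto on other board: %d\n", want_bapm(-1, 0x1043)); /* 0 */
        printf("forced off:          %d\n", want_bapm(0, 0x1462));  /* 0 */
        printf("forced on:           %d\n", want_bapm(1, 0x1043));  /* 1 */
        return 0;
}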
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index be42c8125203..cda391347286 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -124,7 +124,7 @@ int uvd_v1_0_init(struct radeon_device *rdev)
124 radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0)); 124 radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
125 radeon_ring_write(ring, 3); 125 radeon_ring_write(ring, 3);
126 126
127 radeon_ring_unlock_commit(rdev, ring); 127 radeon_ring_unlock_commit(rdev, ring, false);
128 128
129done: 129done:
130 /* lower clocks again */ 130 /* lower clocks again */
@@ -331,7 +331,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
331 } 331 }
332 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); 332 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
333 radeon_ring_write(ring, 0xDEADBEEF); 333 radeon_ring_write(ring, 0xDEADBEEF);
334 radeon_ring_unlock_commit(rdev, ring); 334 radeon_ring_unlock_commit(rdev, ring, false);
335 for (i = 0; i < rdev->usec_timeout; i++) { 335 for (i = 0; i < rdev->usec_timeout; i++) {
336 tmp = RREG32(UVD_CONTEXT_ID); 336 tmp = RREG32(UVD_CONTEXT_ID);
337 if (tmp == 0xDEADBEEF) 337 if (tmp == 0xDEADBEEF)
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 788ed9b59b4e..114203f32843 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -1,8 +1,7 @@
1# 1#
2# Makefile for the SuperH specific drivers. 2# Makefile for the SuperH specific drivers.
3# 3#
4obj-$(CONFIG_SUPERH) += intc/ 4obj-$(CONFIG_SH_INTC) += intc/
5obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += intc/
6ifneq ($(CONFIG_COMMON_CLK),y) 5ifneq ($(CONFIG_COMMON_CLK),y)
7obj-$(CONFIG_HAVE_CLK) += clk/ 6obj-$(CONFIG_HAVE_CLK) += clk/
8endif 7endif
diff --git a/drivers/sh/intc/Kconfig b/drivers/sh/intc/Kconfig
index 60228fae943f..6a1b05ddc8c9 100644
--- a/drivers/sh/intc/Kconfig
+++ b/drivers/sh/intc/Kconfig
@@ -1,7 +1,9 @@
1config SH_INTC 1config SH_INTC
2 def_bool y 2 bool
3 select IRQ_DOMAIN 3 select IRQ_DOMAIN
4 4
5if SH_INTC
6
5comment "Interrupt controller options" 7comment "Interrupt controller options"
6 8
7config INTC_USERIMASK 9config INTC_USERIMASK
@@ -37,3 +39,5 @@ config INTC_MAPPING_DEBUG
37 between system IRQs and the per-controller id tables. 39 between system IRQs and the per-controller id tables.
38 40
39 If in doubt, say N. 41 If in doubt, say N.
42
43endif
diff --git a/fs/aio.c b/fs/aio.c
index ae635872affb..97bc62cbe2da 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -141,6 +141,7 @@ struct kioctx {
141 141
142 struct { 142 struct {
143 unsigned tail; 143 unsigned tail;
144 unsigned completed_events;
144 spinlock_t completion_lock; 145 spinlock_t completion_lock;
145 } ____cacheline_aligned_in_smp; 146 } ____cacheline_aligned_in_smp;
146 147
@@ -857,6 +858,68 @@ out:
857 return ret; 858 return ret;
858} 859}
859 860
861/* refill_reqs_available
862 * Updates the reqs_available reference counts used for tracking the
863 * number of free slots in the completion ring. This can be called
864 * from aio_complete() (to optimistically update reqs_available) or
 865 * from aio_get_req() (the out-of-events case). It must be
866 * called holding ctx->completion_lock.
867 */
868static void refill_reqs_available(struct kioctx *ctx, unsigned head,
869 unsigned tail)
870{
871 unsigned events_in_ring, completed;
872
873 /* Clamp head since userland can write to it. */
874 head %= ctx->nr_events;
875 if (head <= tail)
876 events_in_ring = tail - head;
877 else
878 events_in_ring = ctx->nr_events - (head - tail);
879
880 completed = ctx->completed_events;
881 if (events_in_ring < completed)
882 completed -= events_in_ring;
883 else
884 completed = 0;
885
886 if (!completed)
887 return;
888
889 ctx->completed_events -= completed;
890 put_reqs_available(ctx, completed);
891}
892
893/* user_refill_reqs_available
894 * Called to refill reqs_available when aio_get_req() encounters an
895 * out of space in the completion ring.
896 */
897static void user_refill_reqs_available(struct kioctx *ctx)
898{
899 spin_lock_irq(&ctx->completion_lock);
900 if (ctx->completed_events) {
901 struct aio_ring *ring;
902 unsigned head;
903
904 /* Access of ring->head may race with aio_read_events_ring()
905 * here, but that's okay since whether we read the old version
 906 * or the new version, either will be valid. The important
907 * part is that head cannot pass tail since we prevent
908 * aio_complete() from updating tail by holding
909 * ctx->completion_lock. Even if head is invalid, the check
910 * against ctx->completed_events below will make sure we do the
911 * safe/right thing.
912 */
913 ring = kmap_atomic(ctx->ring_pages[0]);
914 head = ring->head;
915 kunmap_atomic(ring);
916
917 refill_reqs_available(ctx, head, ctx->tail);
918 }
919
920 spin_unlock_irq(&ctx->completion_lock);
921}
922
860/* aio_get_req 923/* aio_get_req
861 * Allocate a slot for an aio request. 924 * Allocate a slot for an aio request.
862 * Returns NULL if no requests are free. 925 * Returns NULL if no requests are free.
@@ -865,8 +928,11 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
865{ 928{
866 struct kiocb *req; 929 struct kiocb *req;
867 930
868 if (!get_reqs_available(ctx)) 931 if (!get_reqs_available(ctx)) {
869 return NULL; 932 user_refill_reqs_available(ctx);
933 if (!get_reqs_available(ctx))
934 return NULL;
935 }
870 936
871 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO); 937 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
872 if (unlikely(!req)) 938 if (unlikely(!req))
@@ -925,8 +991,8 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
925 struct kioctx *ctx = iocb->ki_ctx; 991 struct kioctx *ctx = iocb->ki_ctx;
926 struct aio_ring *ring; 992 struct aio_ring *ring;
927 struct io_event *ev_page, *event; 993 struct io_event *ev_page, *event;
994 unsigned tail, pos, head;
928 unsigned long flags; 995 unsigned long flags;
929 unsigned tail, pos;
930 996
931 /* 997 /*
932 * Special case handling for sync iocbs: 998 * Special case handling for sync iocbs:
@@ -987,10 +1053,14 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
987 ctx->tail = tail; 1053 ctx->tail = tail;
988 1054
989 ring = kmap_atomic(ctx->ring_pages[0]); 1055 ring = kmap_atomic(ctx->ring_pages[0]);
1056 head = ring->head;
990 ring->tail = tail; 1057 ring->tail = tail;
991 kunmap_atomic(ring); 1058 kunmap_atomic(ring);
992 flush_dcache_page(ctx->ring_pages[0]); 1059 flush_dcache_page(ctx->ring_pages[0]);
993 1060
1061 ctx->completed_events++;
1062 if (ctx->completed_events > 1)
1063 refill_reqs_available(ctx, head, tail);
994 spin_unlock_irqrestore(&ctx->completion_lock, flags); 1064 spin_unlock_irqrestore(&ctx->completion_lock, flags);
995 1065
996 pr_debug("added to ring %p at [%u]\n", iocb, tail); 1066 pr_debug("added to ring %p at [%u]\n", iocb, tail);
@@ -1005,7 +1075,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
1005 1075
1006 /* everything turned out well, dispose of the aiocb. */ 1076 /* everything turned out well, dispose of the aiocb. */
1007 kiocb_free(iocb); 1077 kiocb_free(iocb);
1008 put_reqs_available(ctx, 1);
1009 1078
1010 /* 1079 /*
1011 * We have to order our ring_info tail store above and test 1080 * We have to order our ring_info tail store above and test
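The refill_reqs_available() logic added above reduces to ring-occupancy arithmetic: completion slots still visible in the ring cannot be handed back yet. A self-contained rendering of that arithmetic; reclaimable is an illustrative name and the numbers are examples:

#include <stdio.h>

static unsigned reclaimable(unsigned head, unsigned tail,
                            unsigned nr_events, unsigned completed_events)
{
        unsigned events_in_ring;

        head %= nr_events;              /* clamp: userland can write head */
        if (head <= tail)
                events_in_ring = tail - head;
        else
                events_in_ring = nr_events - (head - tail);

        /* Only completions already consumed by userspace free up slots. */
        return completed_events > events_in_ring
                ? completed_events - events_in_ring : 0;
}

int main(void)
{
        /* 128-entry ring, 10 completions queued, user has read 6 of them */
        printf("%u slots can be refilled\n", reclaimable(6, 10, 128, 10));   /* 6 */
        /* wrapped ring: head 120, tail 4 -> 12 events still unread */
        printf("%u slots can be refilled\n", reclaimable(120, 4, 128, 20));  /* 8 */
        return 0;
}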
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 6dfd64b3a604..e973540cd15b 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -17,6 +17,7 @@
17 {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 17 {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
18 {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 18 {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
19 {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 19 {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
20 {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
20 {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 21 {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
21 {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 22 {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
22 {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 23 {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
@@ -164,8 +165,11 @@
164 {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 165 {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
165 {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 166 {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
166 {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 167 {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
168 {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
169 {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
167 {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 170 {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
168 {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 171 {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
172 {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
169 {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 173 {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
170 {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 174 {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
171 {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 175 {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
@@ -175,6 +179,8 @@
175 {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 179 {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
176 {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 180 {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
177 {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 181 {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
182 {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
183 {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
178 {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ 184 {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
179 {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ 185 {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
180 {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ 186 {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
@@ -297,6 +303,7 @@
297 {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 303 {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
298 {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 304 {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
299 {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 305 {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
306 {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
300 {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 307 {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
301 {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 308 {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
302 {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 309 {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 6bb5e3f2a3b4..f0b0edbf55a9 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -102,6 +102,15 @@ enum {
102 FTRACE_OPS_FL_DELETED = 1 << 8, 102 FTRACE_OPS_FL_DELETED = 1 << 8,
103}; 103};
104 104
105#ifdef CONFIG_DYNAMIC_FTRACE
106/* The hash used to know what functions callbacks trace */
107struct ftrace_ops_hash {
108 struct ftrace_hash *notrace_hash;
109 struct ftrace_hash *filter_hash;
110 struct mutex regex_lock;
111};
112#endif
113
105/* 114/*
106 * Note, ftrace_ops can be referenced outside of RCU protection. 115 * Note, ftrace_ops can be referenced outside of RCU protection.
107 * (Although, for perf, the control ops prevent that). If ftrace_ops is 116 * (Although, for perf, the control ops prevent that). If ftrace_ops is
@@ -121,10 +130,9 @@ struct ftrace_ops {
121 int __percpu *disabled; 130 int __percpu *disabled;
122#ifdef CONFIG_DYNAMIC_FTRACE 131#ifdef CONFIG_DYNAMIC_FTRACE
123 int nr_trampolines; 132 int nr_trampolines;
124 struct ftrace_hash *notrace_hash; 133 struct ftrace_ops_hash local_hash;
125 struct ftrace_hash *filter_hash; 134 struct ftrace_ops_hash *func_hash;
126 struct ftrace_hash *tramp_hash; 135 struct ftrace_hash *tramp_hash;
127 struct mutex regex_lock;
128 unsigned long trampoline; 136 unsigned long trampoline;
129#endif 137#endif
130}; 138};
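This header change is the core of the ftrace rework: filter_hash, notrace_hash and regex_lock move into struct ftrace_ops_hash, and every ftrace_ops now carries an owned local_hash plus a func_hash pointer to whichever hash set it actually uses, so two ops can share one set by pointing func_hash at the same object. A stripped-down userspace sketch of that ownership model follows; the types are simplified stand-ins for the kernel structures.

#include <stdio.h>

/* Simplified stand-ins for ftrace_hash / ftrace_ops_hash; illustration only. */
struct hash { int nr_entries; };

struct ops_hash {
	struct hash *notrace_hash;
	struct hash *filter_hash;
};

struct ops {
	const char *name;
	struct ops_hash local_hash;	/* owned by this ops */
	struct ops_hash *func_hash;	/* hash set the ops actually uses */
};

int main(void)
{
	static struct hash filter = { .nr_entries = 3 };
	static struct ops global = { .name = "global" };
	static struct ops graph  = { .name = "graph" };

	global.local_hash.filter_hash = &filter;
	global.func_hash = &global.local_hash;	/* normal case: use own hash */
	graph.func_hash  = &global.local_hash;	/* share global's hash set */

	printf("%s sees %d filter entries via %s's hash\n",
	       graph.name, graph.func_hash->filter_hash->nr_entries,
	       global.name);
	return 0;
}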
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index b7ce0c64c6f3..c7e17de732f3 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -16,8 +16,6 @@ struct device;
16 */ 16 */
17struct gpio_desc; 17struct gpio_desc;
18 18
19#ifdef CONFIG_GPIOLIB
20
21#define GPIOD_FLAGS_BIT_DIR_SET BIT(0) 19#define GPIOD_FLAGS_BIT_DIR_SET BIT(0)
22#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1) 20#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
23#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2) 21#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
@@ -34,6 +32,8 @@ enum gpiod_flags {
34 GPIOD_FLAGS_BIT_DIR_VAL, 32 GPIOD_FLAGS_BIT_DIR_VAL,
35}; 33};
36 34
35#ifdef CONFIG_GPIOLIB
36
37/* Acquire and dispose GPIOs */ 37/* Acquire and dispose GPIOs */
38struct gpio_desc *__must_check __gpiod_get(struct device *dev, 38struct gpio_desc *__must_check __gpiod_get(struct device *dev,
39 const char *con_id, 39 const char *con_id,
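Moving the GPIOD_* bit definitions and enum gpiod_flags above the #ifdef CONFIG_GPIOLIB matters because the !GPIOLIB half of this header declares inline stubs whose prototypes still mention enum gpiod_flags; with the enum hidden inside the #ifdef, those stubs cannot compile. The standalone sketch below reproduces the same pitfall with generic names (nothing gpio-specific): the type must stay visible to both branches.

#include <stdio.h>

/* The type must be visible regardless of the config option, because the
 * stub below still names it in its prototype. */
enum mode { MODE_IN, MODE_OUT };

#ifdef CONFIG_FEATURE
int acquire(enum mode m)		/* real implementation */
{
	(void)m;
	return 42;
}
#else
static inline int acquire(enum mode m)	/* stub: feature disabled */
{
	(void)m;
	return -1;			/* report "not supported" */
}
#endif

int main(void)
{
	printf("acquire() = %d\n", acquire(MODE_OUT));
	return 0;
}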
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 509b2d7a41b7..fea6099608ef 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -944,6 +944,7 @@ struct drm_radeon_cs_chunk {
944}; 944};
945 945
946/* drm_radeon_cs_reloc.flags */ 946/* drm_radeon_cs_reloc.flags */
947#define RADEON_RELOC_PRIO_MASK (0xf << 0)
947 948
948struct drm_radeon_cs_reloc { 949struct drm_radeon_cs_reloc {
949 uint32_t handle; 950 uint32_t handle;
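RADEON_RELOC_PRIO_MASK reserves the low four bits of drm_radeon_cs_reloc.flags for a per-buffer priority that userspace can hand to the command-stream parser. The sketch below shows one way a consumer might pack and read those bits; everything around the mask itself is illustrative.

#include <stdint.h>
#include <stdio.h>

#define RADEON_RELOC_PRIO_MASK	(0xf << 0)	/* low 4 bits: priority 0..15 */

int main(void)
{
	uint32_t flags = 0;
	uint32_t prio = 12;

	/* pack: store the priority in the reserved bits */
	flags = (flags & ~RADEON_RELOC_PRIO_MASK) |
		(prio & RADEON_RELOC_PRIO_MASK);

	/* unpack: what the CS parser would read back */
	printf("flags=0x%x priority=%u\n", (unsigned)flags,
	       (unsigned)(flags & RADEON_RELOC_PRIO_MASK));
	return 0;
}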
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1cf24b3e42ec..f9c1ed002dbc 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -41,6 +41,7 @@
41#include <linux/cgroup.h> 41#include <linux/cgroup.h>
42#include <linux/module.h> 42#include <linux/module.h>
43#include <linux/mman.h> 43#include <linux/mman.h>
44#include <linux/compat.h>
44 45
45#include "internal.h" 46#include "internal.h"
46 47
@@ -3717,6 +3718,26 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3717 return 0; 3718 return 0;
3718} 3719}
3719 3720
3721#ifdef CONFIG_COMPAT
3722static long perf_compat_ioctl(struct file *file, unsigned int cmd,
3723 unsigned long arg)
3724{
3725 switch (_IOC_NR(cmd)) {
3726 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
3727 case _IOC_NR(PERF_EVENT_IOC_ID):
3728 /* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
3729 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
3730 cmd &= ~IOCSIZE_MASK;
3731 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
3732 }
3733 break;
3734 }
3735 return perf_ioctl(file, cmd, arg);
3736}
3737#else
3738# define perf_compat_ioctl NULL
3739#endif
3740
3720int perf_event_task_enable(void) 3741int perf_event_task_enable(void)
3721{ 3742{
3722 struct perf_event *event; 3743 struct perf_event *event;
@@ -4222,7 +4243,7 @@ static const struct file_operations perf_fops = {
4222 .read = perf_read, 4243 .read = perf_read,
4223 .poll = perf_poll, 4244 .poll = perf_poll,
4224 .unlocked_ioctl = perf_ioctl, 4245 .unlocked_ioctl = perf_ioctl,
4225 .compat_ioctl = perf_ioctl, 4246 .compat_ioctl = perf_compat_ioctl,
4226 .mmap = perf_mmap, 4247 .mmap = perf_mmap,
4227 .fasync = perf_fasync, 4248 .fasync = perf_fasync,
4228}; 4249};
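perf_compat_ioctl() rewrites the size field of the command for PERF_EVENT_IOC_SET_FILTER and PERF_EVENT_IOC_ID because a 32-bit caller encodes sizeof(compat_uptr_t) (4) where the 64-bit kernel expects sizeof(void *) (8). The same bit surgery can be reproduced in plain userspace C; the macros below mirror the asm-generic/ioctl.h layout (8 bits nr, 8 bits type, 14 bits size, 2 bits dir) rather than pulling in the kernel header, and the sample command is encoded the way a 32-bit process would encode PERF_EVENT_IOC_SET_FILTER.

#include <stdio.h>
#include <stdint.h>

/* Layout mirrored from asm-generic/ioctl.h: nr[0:7] type[8:15] size[16:29] dir[30:31] */
#define IOC_SIZEBITS	14
#define IOC_SIZESHIFT	16
#define IOCSIZE_MASK	(((1u << IOC_SIZEBITS) - 1) << IOC_SIZESHIFT)
#define IOCSIZE_SHIFT	IOC_SIZESHIFT
#define IOC_SIZE(cmd)	(((cmd) >> IOC_SIZESHIFT) & ((1u << IOC_SIZEBITS) - 1))

int main(void)
{
	/* dir=_IOC_WRITE, type='$', nr=6, size=4: the 32-bit encoding */
	uint32_t cmd = (1u << 30) | ('$' << 8) | 6 | (4u << IOC_SIZESHIFT);

	printf("compat cmd: 0x%08x (size %u)\n",
	       (unsigned)cmd, (unsigned)IOC_SIZE(cmd));

	/* The fixup from perf_compat_ioctl(): replace the size with sizeof(void *) */
	if (IOC_SIZE(cmd) == 4) {
		cmd &= ~IOCSIZE_MASK;
		cmd |= (uint32_t)sizeof(void *) << IOCSIZE_SHIFT;
	}

	printf("native cmd: 0x%08x (size %u)\n",
	       (unsigned)cmd, (unsigned)IOC_SIZE(cmd));
	return 0;
}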
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 734e9a7d280b..3995f546d0f3 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1778,7 +1778,18 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
1778 unsigned long hash, flags = 0; 1778 unsigned long hash, flags = 0;
1779 struct kretprobe_instance *ri; 1779 struct kretprobe_instance *ri;
1780 1780
1781 /*TODO: consider to only swap the RA after the last pre_handler fired */ 1781 /*
1782 * To avoid deadlocks, prohibit return probing in NMI contexts,
1783 * just skip the probe and increase the (inexact) 'nmissed'
1784 * statistical counter, so that the user is informed that
1785 * something happened:
1786 */
1787 if (unlikely(in_nmi())) {
1788 rp->nmissed++;
1789 return 0;
1790 }
1791
1792 /* TODO: consider to only swap the RA after the last pre_handler fired */
1782 hash = hash_ptr(current, KPROBE_HASH_BITS); 1793 hash = hash_ptr(current, KPROBE_HASH_BITS);
1783 raw_spin_lock_irqsave(&rp->lock, flags); 1794 raw_spin_lock_irqsave(&rp->lock, flags);
1784 if (!hlist_empty(&rp->free_instances)) { 1795 if (!hlist_empty(&rp->free_instances)) {
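The added check follows a common pattern: when a handler fires in a context where taking its locks could deadlock (here, NMI), it skips the work and bumps a "missed" counter rather than failing silently. A deliberately trivial userspace illustration of that skip-and-count idea, with a stand-in for in_nmi():

#include <stdbool.h>
#include <stdio.h>

static unsigned long nmissed;	/* inexact counter, like rp->nmissed */

/* Stand-in for in_nmi(): pretend we can ask whether locking is unsafe. */
static bool unsafe_context(void)
{
	return true;
}

static int handler(void)
{
	if (unsafe_context()) {
		nmissed++;	/* record the skip so the user can see it */
		return 0;	/* do nothing rather than risk a deadlock */
	}
	/* ... normal path would take a spinlock and do the real work ... */
	return 0;
}

int main(void)
{
	handler();
	printf("missed: %lu\n", nmissed);
	return 0;
}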
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1654b12c891a..5916a8e59e87 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -65,15 +65,21 @@
65#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL) 65#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
66 66
67#ifdef CONFIG_DYNAMIC_FTRACE 67#ifdef CONFIG_DYNAMIC_FTRACE
68#define INIT_REGEX_LOCK(opsname) \ 68#define INIT_OPS_HASH(opsname) \
69 .regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock), 69 .func_hash = &opsname.local_hash, \
70 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
71#define ASSIGN_OPS_HASH(opsname, val) \
72 .func_hash = val, \
73 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
70#else 74#else
71#define INIT_REGEX_LOCK(opsname) 75#define INIT_OPS_HASH(opsname)
76#define ASSIGN_OPS_HASH(opsname, val)
72#endif 77#endif
73 78
74static struct ftrace_ops ftrace_list_end __read_mostly = { 79static struct ftrace_ops ftrace_list_end __read_mostly = {
75 .func = ftrace_stub, 80 .func = ftrace_stub,
76 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, 81 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
82 INIT_OPS_HASH(ftrace_list_end)
77}; 83};
78 84
79/* ftrace_enabled is a method to turn ftrace on or off */ 85/* ftrace_enabled is a method to turn ftrace on or off */
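INIT_OPS_HASH() and ASSIGN_OPS_HASH() exist so that statically defined ftrace_ops get a valid func_hash pointer and an initialized regex_lock at build time, using designated initializers that may reference the object being defined. The same trick is shown below with a pthread mutex standing in for the kernel mutex; the struct and macro names are simplified.

#include <pthread.h>
#include <stdio.h>

struct ops_hash {
	pthread_mutex_t regex_lock;
};

struct my_ops {
	struct ops_hash local_hash;	/* owned storage */
	struct ops_hash *func_hash;	/* pointer actually used */
};

/* Mirrors INIT_OPS_HASH(): point func_hash at our own local_hash and
 * statically initialize the lock inside it. */
#define INIT_OPS_HASH(opsname)						\
	.func_hash = &(opsname).local_hash,				\
	.local_hash.regex_lock = PTHREAD_MUTEX_INITIALIZER,

static struct my_ops ops = {
	INIT_OPS_HASH(ops)
};

int main(void)
{
	pthread_mutex_lock(&ops.func_hash->regex_lock);
	puts("locked via func_hash -> local_hash");
	pthread_mutex_unlock(&ops.func_hash->regex_lock);
	return 0;
}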
@@ -140,7 +146,8 @@ static inline void ftrace_ops_init(struct ftrace_ops *ops)
140{ 146{
141#ifdef CONFIG_DYNAMIC_FTRACE 147#ifdef CONFIG_DYNAMIC_FTRACE
142 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { 148 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
143 mutex_init(&ops->regex_lock); 149 mutex_init(&ops->local_hash.regex_lock);
150 ops->func_hash = &ops->local_hash;
144 ops->flags |= FTRACE_OPS_FL_INITIALIZED; 151 ops->flags |= FTRACE_OPS_FL_INITIALIZED;
145 } 152 }
146#endif 153#endif
@@ -899,7 +906,7 @@ static void unregister_ftrace_profiler(void)
899static struct ftrace_ops ftrace_profile_ops __read_mostly = { 906static struct ftrace_ops ftrace_profile_ops __read_mostly = {
900 .func = function_profile_call, 907 .func = function_profile_call,
901 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, 908 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
902 INIT_REGEX_LOCK(ftrace_profile_ops) 909 INIT_OPS_HASH(ftrace_profile_ops)
903}; 910};
904 911
905static int register_ftrace_profiler(void) 912static int register_ftrace_profiler(void)
@@ -1081,11 +1088,12 @@ static const struct ftrace_hash empty_hash = {
1081#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) 1088#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
1082 1089
1083static struct ftrace_ops global_ops = { 1090static struct ftrace_ops global_ops = {
1084 .func = ftrace_stub, 1091 .func = ftrace_stub,
1085 .notrace_hash = EMPTY_HASH, 1092 .local_hash.notrace_hash = EMPTY_HASH,
1086 .filter_hash = EMPTY_HASH, 1093 .local_hash.filter_hash = EMPTY_HASH,
1087 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, 1094 INIT_OPS_HASH(global_ops)
1088 INIT_REGEX_LOCK(global_ops) 1095 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
1096 FTRACE_OPS_FL_INITIALIZED,
1089}; 1097};
1090 1098
1091struct ftrace_page { 1099struct ftrace_page {
@@ -1226,8 +1234,8 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1226void ftrace_free_filter(struct ftrace_ops *ops) 1234void ftrace_free_filter(struct ftrace_ops *ops)
1227{ 1235{
1228 ftrace_ops_init(ops); 1236 ftrace_ops_init(ops);
1229 free_ftrace_hash(ops->filter_hash); 1237 free_ftrace_hash(ops->func_hash->filter_hash);
1230 free_ftrace_hash(ops->notrace_hash); 1238 free_ftrace_hash(ops->func_hash->notrace_hash);
1231} 1239}
1232 1240
1233static struct ftrace_hash *alloc_ftrace_hash(int size_bits) 1241static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
@@ -1288,9 +1296,9 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1288} 1296}
1289 1297
1290static void 1298static void
1291ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash); 1299ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
1292static void 1300static void
1293ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash); 1301ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
1294 1302
1295static int 1303static int
1296ftrace_hash_move(struct ftrace_ops *ops, int enable, 1304ftrace_hash_move(struct ftrace_ops *ops, int enable,
@@ -1342,13 +1350,13 @@ update:
1342 * Remove the current set, update the hash and add 1350 * Remove the current set, update the hash and add
1343 * them back. 1351 * them back.
1344 */ 1352 */
1345 ftrace_hash_rec_disable(ops, enable); 1353 ftrace_hash_rec_disable_modify(ops, enable);
1346 1354
1347 old_hash = *dst; 1355 old_hash = *dst;
1348 rcu_assign_pointer(*dst, new_hash); 1356 rcu_assign_pointer(*dst, new_hash);
1349 free_ftrace_hash_rcu(old_hash); 1357 free_ftrace_hash_rcu(old_hash);
1350 1358
1351 ftrace_hash_rec_enable(ops, enable); 1359 ftrace_hash_rec_enable_modify(ops, enable);
1352 1360
1353 return 0; 1361 return 0;
1354} 1362}
@@ -1382,8 +1390,8 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1382 return 0; 1390 return 0;
1383#endif 1391#endif
1384 1392
1385 filter_hash = rcu_dereference_raw_notrace(ops->filter_hash); 1393 filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
1386 notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash); 1394 notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
1387 1395
1388 if ((ftrace_hash_empty(filter_hash) || 1396 if ((ftrace_hash_empty(filter_hash) ||
1389 ftrace_lookup_ip(filter_hash, ip)) && 1397 ftrace_lookup_ip(filter_hash, ip)) &&
@@ -1503,25 +1511,38 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1503static void ftrace_remove_tramp(struct ftrace_ops *ops, 1511static void ftrace_remove_tramp(struct ftrace_ops *ops,
1504 struct dyn_ftrace *rec) 1512 struct dyn_ftrace *rec)
1505{ 1513{
1506 struct ftrace_func_entry *entry; 1514 /* If TRAMP is not set, no ops should have a trampoline for this */
1507 1515 if (!(rec->flags & FTRACE_FL_TRAMP))
1508 entry = ftrace_lookup_ip(ops->tramp_hash, rec->ip);
1509 if (!entry)
1510 return; 1516 return;
1511 1517
1518 rec->flags &= ~FTRACE_FL_TRAMP;
1519
1520 if ((!ftrace_hash_empty(ops->func_hash->filter_hash) &&
1521 !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) ||
1522 ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
1523 return;
1512 /* 1524 /*
1513 * The tramp_hash entry will be removed at time 1525 * The tramp_hash entry will be removed at time
1514 * of update. 1526 * of update.
1515 */ 1527 */
1516 ops->nr_trampolines--; 1528 ops->nr_trampolines--;
1517 rec->flags &= ~FTRACE_FL_TRAMP;
1518} 1529}
1519 1530
1520static void ftrace_clear_tramps(struct dyn_ftrace *rec) 1531static void ftrace_clear_tramps(struct dyn_ftrace *rec, struct ftrace_ops *ops)
1521{ 1532{
1522 struct ftrace_ops *op; 1533 struct ftrace_ops *op;
1523 1534
1535 /* If TRAMP is not set, no ops should have a trampoline for this */
1536 if (!(rec->flags & FTRACE_FL_TRAMP))
1537 return;
1538
1524 do_for_each_ftrace_op(op, ftrace_ops_list) { 1539 do_for_each_ftrace_op(op, ftrace_ops_list) {
1540 /*
1541 * This function is called to clear other tramps
1542 * not the one that is being updated.
1543 */
1544 if (op == ops)
1545 continue;
1525 if (op->nr_trampolines) 1546 if (op->nr_trampolines)
1526 ftrace_remove_tramp(op, rec); 1547 ftrace_remove_tramp(op, rec);
1527 } while_for_each_ftrace_op(op); 1548 } while_for_each_ftrace_op(op);
@@ -1554,14 +1575,14 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1554 * gets inversed. 1575 * gets inversed.
1555 */ 1576 */
1556 if (filter_hash) { 1577 if (filter_hash) {
1557 hash = ops->filter_hash; 1578 hash = ops->func_hash->filter_hash;
1558 other_hash = ops->notrace_hash; 1579 other_hash = ops->func_hash->notrace_hash;
1559 if (ftrace_hash_empty(hash)) 1580 if (ftrace_hash_empty(hash))
1560 all = 1; 1581 all = 1;
1561 } else { 1582 } else {
1562 inc = !inc; 1583 inc = !inc;
1563 hash = ops->notrace_hash; 1584 hash = ops->func_hash->notrace_hash;
1564 other_hash = ops->filter_hash; 1585 other_hash = ops->func_hash->filter_hash;
1565 /* 1586 /*
1566 * If the notrace hash has no items, 1587 * If the notrace hash has no items,
1567 * then there's nothing to do. 1588 * then there's nothing to do.
@@ -1622,13 +1643,10 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1622 /* 1643 /*
1623 * If we are adding another function callback 1644 * If we are adding another function callback
1624 * to this function, and the previous had a 1645 * to this function, and the previous had a
1625 * trampoline used, then we need to go back to 1646 * custom trampoline in use, then we need to go
1626 * the default trampoline. 1647 * back to the default trampoline.
1627 */ 1648 */
1628 rec->flags &= ~FTRACE_FL_TRAMP; 1649 ftrace_clear_tramps(rec, ops);
1629
1630 /* remove trampolines from any ops for this rec */
1631 ftrace_clear_tramps(rec);
1632 } 1650 }
1633 1651
1634 /* 1652 /*
@@ -1682,6 +1700,41 @@ static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1682 __ftrace_hash_rec_update(ops, filter_hash, 1); 1700 __ftrace_hash_rec_update(ops, filter_hash, 1);
1683} 1701}
1684 1702
1703static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1704 int filter_hash, int inc)
1705{
1706 struct ftrace_ops *op;
1707
1708 __ftrace_hash_rec_update(ops, filter_hash, inc);
1709
1710 if (ops->func_hash != &global_ops.local_hash)
1711 return;
1712
1713 /*
1714 * If the ops shares the global_ops hash, then we need to update
1715 * all ops that are enabled and use this hash.
1716 */
1717 do_for_each_ftrace_op(op, ftrace_ops_list) {
1718 /* Already done */
1719 if (op == ops)
1720 continue;
1721 if (op->func_hash == &global_ops.local_hash)
1722 __ftrace_hash_rec_update(op, filter_hash, inc);
1723 } while_for_each_ftrace_op(op);
1724}
1725
1726static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1727 int filter_hash)
1728{
1729 ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1730}
1731
1732static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1733 int filter_hash)
1734{
1735 ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1736}
1737
1685static void print_ip_ins(const char *fmt, unsigned char *p) 1738static void print_ip_ins(const char *fmt, unsigned char *p)
1686{ 1739{
1687 int i; 1740 int i;
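The new _modify helpers cover the case where several ops share global_ops' hash: after updating the ops whose filter actually changed, they walk the ops list and repeat the update for every other ops whose func_hash points at global_ops.local_hash. The propagation loop reduces to the pattern below, using a plain linked list in place of the RCU-protected ftrace_ops_list.

#include <stdio.h>
#include <stddef.h>

struct shared_hash { int generation; };

struct ops {
	const char *name;
	struct shared_hash *func_hash;
	struct ops *next;
};

static struct shared_hash global_hash;
static struct ops graph  = { "graph",  &global_hash, NULL };
static struct ops global = { "global", &global_hash, &graph };
static struct ops *ops_list = &global;

static void update_one(struct ops *op)
{
	printf("update %s\n", op->name);
}

/* Mirrors ftrace_hash_rec_update_modify(): update 'ops', then every other
 * ops on the list that shares the same hash. */
static void update_modify(struct ops *ops)
{
	update_one(ops);
	if (ops->func_hash != &global_hash)
		return;
	for (struct ops *op = ops_list; op; op = op->next) {
		if (op == ops)
			continue;	/* already done */
		if (op->func_hash == &global_hash)
			update_one(op);
	}
}

int main(void)
{
	update_modify(&global);
	return 0;
}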
@@ -1896,8 +1949,8 @@ unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
1896 if (rec->flags & FTRACE_FL_TRAMP) { 1949 if (rec->flags & FTRACE_FL_TRAMP) {
1897 ops = ftrace_find_tramp_ops_new(rec); 1950 ops = ftrace_find_tramp_ops_new(rec);
1898 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { 1951 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
1899 pr_warning("Bad trampoline accounting at: %p (%pS)\n", 1952 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
1900 (void *)rec->ip, (void *)rec->ip); 1953 (void *)rec->ip, (void *)rec->ip, rec->flags);
1901 /* Ftrace is shutting down, return anything */ 1954 /* Ftrace is shutting down, return anything */
1902 return (unsigned long)FTRACE_ADDR; 1955 return (unsigned long)FTRACE_ADDR;
1903 } 1956 }
@@ -1964,7 +2017,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1964 return ftrace_make_call(rec, ftrace_addr); 2017 return ftrace_make_call(rec, ftrace_addr);
1965 2018
1966 case FTRACE_UPDATE_MAKE_NOP: 2019 case FTRACE_UPDATE_MAKE_NOP:
1967 return ftrace_make_nop(NULL, rec, ftrace_addr); 2020 return ftrace_make_nop(NULL, rec, ftrace_old_addr);
1968 2021
1969 case FTRACE_UPDATE_MODIFY_CALL: 2022 case FTRACE_UPDATE_MODIFY_CALL:
1970 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); 2023 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
@@ -2227,7 +2280,10 @@ static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops)
2227 } while_for_each_ftrace_rec(); 2280 } while_for_each_ftrace_rec();
2228 2281
2229 /* The number of recs in the hash must match nr_trampolines */ 2282 /* The number of recs in the hash must match nr_trampolines */
2230 FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines); 2283 if (FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines))
2284 pr_warn("count=%ld trampolines=%d\n",
2285 ops->tramp_hash->count,
2286 ops->nr_trampolines);
2231 2287
2232 return 0; 2288 return 0;
2233} 2289}
@@ -2436,8 +2492,8 @@ static inline int ops_traces_mod(struct ftrace_ops *ops)
2436 * Filter_hash being empty will default to trace module. 2492 * Filter_hash being empty will default to trace module.
2437 * But notrace hash requires a test of individual module functions. 2493 * But notrace hash requires a test of individual module functions.
2438 */ 2494 */
2439 return ftrace_hash_empty(ops->filter_hash) && 2495 return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2440 ftrace_hash_empty(ops->notrace_hash); 2496 ftrace_hash_empty(ops->func_hash->notrace_hash);
2441} 2497}
2442 2498
2443/* 2499/*
@@ -2459,12 +2515,12 @@ ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2459 return 0; 2515 return 0;
2460 2516
2461 /* The function must be in the filter */ 2517 /* The function must be in the filter */
2462 if (!ftrace_hash_empty(ops->filter_hash) && 2518 if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2463 !ftrace_lookup_ip(ops->filter_hash, rec->ip)) 2519 !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2464 return 0; 2520 return 0;
2465 2521
2466 /* If in notrace hash, we ignore it too */ 2522 /* If in notrace hash, we ignore it too */
2467 if (ftrace_lookup_ip(ops->notrace_hash, rec->ip)) 2523 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
2468 return 0; 2524 return 0;
2469 2525
2470 return 1; 2526 return 1;
@@ -2785,10 +2841,10 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
2785 } else { 2841 } else {
2786 rec = &iter->pg->records[iter->idx++]; 2842 rec = &iter->pg->records[iter->idx++];
2787 if (((iter->flags & FTRACE_ITER_FILTER) && 2843 if (((iter->flags & FTRACE_ITER_FILTER) &&
2788 !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) || 2844 !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
2789 2845
2790 ((iter->flags & FTRACE_ITER_NOTRACE) && 2846 ((iter->flags & FTRACE_ITER_NOTRACE) &&
2791 !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) || 2847 !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
2792 2848
2793 ((iter->flags & FTRACE_ITER_ENABLED) && 2849 ((iter->flags & FTRACE_ITER_ENABLED) &&
2794 !(rec->flags & FTRACE_FL_ENABLED))) { 2850 !(rec->flags & FTRACE_FL_ENABLED))) {
@@ -2837,9 +2893,9 @@ static void *t_start(struct seq_file *m, loff_t *pos)
2837 * functions are enabled. 2893 * functions are enabled.
2838 */ 2894 */
2839 if ((iter->flags & FTRACE_ITER_FILTER && 2895 if ((iter->flags & FTRACE_ITER_FILTER &&
2840 ftrace_hash_empty(ops->filter_hash)) || 2896 ftrace_hash_empty(ops->func_hash->filter_hash)) ||
2841 (iter->flags & FTRACE_ITER_NOTRACE && 2897 (iter->flags & FTRACE_ITER_NOTRACE &&
2842 ftrace_hash_empty(ops->notrace_hash))) { 2898 ftrace_hash_empty(ops->func_hash->notrace_hash))) {
2843 if (*pos > 0) 2899 if (*pos > 0)
2844 return t_hash_start(m, pos); 2900 return t_hash_start(m, pos);
2845 iter->flags |= FTRACE_ITER_PRINTALL; 2901 iter->flags |= FTRACE_ITER_PRINTALL;
@@ -3001,12 +3057,12 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
3001 iter->ops = ops; 3057 iter->ops = ops;
3002 iter->flags = flag; 3058 iter->flags = flag;
3003 3059
3004 mutex_lock(&ops->regex_lock); 3060 mutex_lock(&ops->func_hash->regex_lock);
3005 3061
3006 if (flag & FTRACE_ITER_NOTRACE) 3062 if (flag & FTRACE_ITER_NOTRACE)
3007 hash = ops->notrace_hash; 3063 hash = ops->func_hash->notrace_hash;
3008 else 3064 else
3009 hash = ops->filter_hash; 3065 hash = ops->func_hash->filter_hash;
3010 3066
3011 if (file->f_mode & FMODE_WRITE) { 3067 if (file->f_mode & FMODE_WRITE) {
3012 const int size_bits = FTRACE_HASH_DEFAULT_BITS; 3068 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
@@ -3041,7 +3097,7 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
3041 file->private_data = iter; 3097 file->private_data = iter;
3042 3098
3043 out_unlock: 3099 out_unlock:
3044 mutex_unlock(&ops->regex_lock); 3100 mutex_unlock(&ops->func_hash->regex_lock);
3045 3101
3046 return ret; 3102 return ret;
3047} 3103}
@@ -3279,7 +3335,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
3279{ 3335{
3280 .func = function_trace_probe_call, 3336 .func = function_trace_probe_call,
3281 .flags = FTRACE_OPS_FL_INITIALIZED, 3337 .flags = FTRACE_OPS_FL_INITIALIZED,
3282 INIT_REGEX_LOCK(trace_probe_ops) 3338 INIT_OPS_HASH(trace_probe_ops)
3283}; 3339};
3284 3340
3285static int ftrace_probe_registered; 3341static int ftrace_probe_registered;
@@ -3342,7 +3398,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3342 void *data) 3398 void *data)
3343{ 3399{
3344 struct ftrace_func_probe *entry; 3400 struct ftrace_func_probe *entry;
3345 struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash; 3401 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3346 struct ftrace_hash *hash; 3402 struct ftrace_hash *hash;
3347 struct ftrace_page *pg; 3403 struct ftrace_page *pg;
3348 struct dyn_ftrace *rec; 3404 struct dyn_ftrace *rec;
@@ -3359,7 +3415,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3359 if (WARN_ON(not)) 3415 if (WARN_ON(not))
3360 return -EINVAL; 3416 return -EINVAL;
3361 3417
3362 mutex_lock(&trace_probe_ops.regex_lock); 3418 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3363 3419
3364 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 3420 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3365 if (!hash) { 3421 if (!hash) {
@@ -3428,7 +3484,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3428 out_unlock: 3484 out_unlock:
3429 mutex_unlock(&ftrace_lock); 3485 mutex_unlock(&ftrace_lock);
3430 out: 3486 out:
3431 mutex_unlock(&trace_probe_ops.regex_lock); 3487 mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3432 free_ftrace_hash(hash); 3488 free_ftrace_hash(hash);
3433 3489
3434 return count; 3490 return count;
@@ -3446,7 +3502,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3446 struct ftrace_func_entry *rec_entry; 3502 struct ftrace_func_entry *rec_entry;
3447 struct ftrace_func_probe *entry; 3503 struct ftrace_func_probe *entry;
3448 struct ftrace_func_probe *p; 3504 struct ftrace_func_probe *p;
3449 struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash; 3505 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3450 struct list_head free_list; 3506 struct list_head free_list;
3451 struct ftrace_hash *hash; 3507 struct ftrace_hash *hash;
3452 struct hlist_node *tmp; 3508 struct hlist_node *tmp;
@@ -3468,7 +3524,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3468 return; 3524 return;
3469 } 3525 }
3470 3526
3471 mutex_lock(&trace_probe_ops.regex_lock); 3527 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3472 3528
3473 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 3529 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3474 if (!hash) 3530 if (!hash)
@@ -3521,7 +3577,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3521 mutex_unlock(&ftrace_lock); 3577 mutex_unlock(&ftrace_lock);
3522 3578
3523 out_unlock: 3579 out_unlock:
3524 mutex_unlock(&trace_probe_ops.regex_lock); 3580 mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3525 free_ftrace_hash(hash); 3581 free_ftrace_hash(hash);
3526} 3582}
3527 3583
@@ -3717,12 +3773,12 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3717 if (unlikely(ftrace_disabled)) 3773 if (unlikely(ftrace_disabled))
3718 return -ENODEV; 3774 return -ENODEV;
3719 3775
3720 mutex_lock(&ops->regex_lock); 3776 mutex_lock(&ops->func_hash->regex_lock);
3721 3777
3722 if (enable) 3778 if (enable)
3723 orig_hash = &ops->filter_hash; 3779 orig_hash = &ops->func_hash->filter_hash;
3724 else 3780 else
3725 orig_hash = &ops->notrace_hash; 3781 orig_hash = &ops->func_hash->notrace_hash;
3726 3782
3727 if (reset) 3783 if (reset)
3728 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 3784 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
@@ -3752,7 +3808,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3752 mutex_unlock(&ftrace_lock); 3808 mutex_unlock(&ftrace_lock);
3753 3809
3754 out_regex_unlock: 3810 out_regex_unlock:
3755 mutex_unlock(&ops->regex_lock); 3811 mutex_unlock(&ops->func_hash->regex_lock);
3756 3812
3757 free_ftrace_hash(hash); 3813 free_ftrace_hash(hash);
3758 return ret; 3814 return ret;
@@ -3975,15 +4031,15 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
3975 4031
3976 trace_parser_put(parser); 4032 trace_parser_put(parser);
3977 4033
3978 mutex_lock(&iter->ops->regex_lock); 4034 mutex_lock(&iter->ops->func_hash->regex_lock);
3979 4035
3980 if (file->f_mode & FMODE_WRITE) { 4036 if (file->f_mode & FMODE_WRITE) {
3981 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); 4037 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3982 4038
3983 if (filter_hash) 4039 if (filter_hash)
3984 orig_hash = &iter->ops->filter_hash; 4040 orig_hash = &iter->ops->func_hash->filter_hash;
3985 else 4041 else
3986 orig_hash = &iter->ops->notrace_hash; 4042 orig_hash = &iter->ops->func_hash->notrace_hash;
3987 4043
3988 mutex_lock(&ftrace_lock); 4044 mutex_lock(&ftrace_lock);
3989 ret = ftrace_hash_move(iter->ops, filter_hash, 4045 ret = ftrace_hash_move(iter->ops, filter_hash,
@@ -3994,7 +4050,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
3994 mutex_unlock(&ftrace_lock); 4050 mutex_unlock(&ftrace_lock);
3995 } 4051 }
3996 4052
3997 mutex_unlock(&iter->ops->regex_lock); 4053 mutex_unlock(&iter->ops->func_hash->regex_lock);
3998 free_ftrace_hash(iter->hash); 4054 free_ftrace_hash(iter->hash);
3999 kfree(iter); 4055 kfree(iter);
4000 4056
@@ -4611,7 +4667,6 @@ void __init ftrace_init(void)
4611static struct ftrace_ops global_ops = { 4667static struct ftrace_ops global_ops = {
4612 .func = ftrace_stub, 4668 .func = ftrace_stub,
4613 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, 4669 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4614 INIT_REGEX_LOCK(global_ops)
4615}; 4670};
4616 4671
4617static int __init ftrace_nodyn_init(void) 4672static int __init ftrace_nodyn_init(void)
@@ -4713,7 +4768,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4713static struct ftrace_ops control_ops = { 4768static struct ftrace_ops control_ops = {
4714 .func = ftrace_ops_control_func, 4769 .func = ftrace_ops_control_func,
4715 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, 4770 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4716 INIT_REGEX_LOCK(control_ops) 4771 INIT_OPS_HASH(control_ops)
4717}; 4772};
4718 4773
4719static inline void 4774static inline void
@@ -5145,6 +5200,17 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
5145 5200
5146#ifdef CONFIG_FUNCTION_GRAPH_TRACER 5201#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5147 5202
5203static struct ftrace_ops graph_ops = {
5204 .func = ftrace_stub,
5205 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5206 FTRACE_OPS_FL_INITIALIZED |
5207 FTRACE_OPS_FL_STUB,
5208#ifdef FTRACE_GRAPH_TRAMP_ADDR
5209 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
5210#endif
5211 ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
5212};
5213
5148static int ftrace_graph_active; 5214static int ftrace_graph_active;
5149 5215
5150int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) 5216int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
@@ -5307,12 +5373,28 @@ static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5307 */ 5373 */
5308static void update_function_graph_func(void) 5374static void update_function_graph_func(void)
5309{ 5375{
5310 if (ftrace_ops_list == &ftrace_list_end || 5376 struct ftrace_ops *op;
5311 (ftrace_ops_list == &global_ops && 5377 bool do_test = false;
5312 global_ops.next == &ftrace_list_end)) 5378
5313 ftrace_graph_entry = __ftrace_graph_entry; 5379 /*
5314 else 5380 * The graph and global ops share the same set of functions
5381 * to test. If any other ops is on the list, then
5382 * the graph tracing needs to test if its the function
5383 * it should call.
5384 */
5385 do_for_each_ftrace_op(op, ftrace_ops_list) {
5386 if (op != &global_ops && op != &graph_ops &&
5387 op != &ftrace_list_end) {
5388 do_test = true;
5389 /* in double loop, break out with goto */
5390 goto out;
5391 }
5392 } while_for_each_ftrace_op(op);
5393 out:
5394 if (do_test)
5315 ftrace_graph_entry = ftrace_graph_entry_test; 5395 ftrace_graph_entry = ftrace_graph_entry_test;
5396 else
5397 ftrace_graph_entry = __ftrace_graph_entry;
5316} 5398}
5317 5399
5318static struct notifier_block ftrace_suspend_notifier = { 5400static struct notifier_block ftrace_suspend_notifier = {
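The rewritten update_function_graph_func() scans the ops list inside do_for_each_ftrace_op()/while_for_each_ftrace_op(), which expands to a nested loop, so the early exit uses the classic C goto-out-of-a-double-loop idiom rather than break. A minimal standalone example of that idiom:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool found = false;

	for (int page = 0; page < 4; page++) {
		for (int idx = 0; idx < 8; idx++) {
			if (page == 2 && idx == 5) {
				found = true;
				/* break would only leave the inner loop;
				 * goto exits both levels at once */
				goto out;
			}
		}
	}
out:
	printf("found = %s\n", found ? "true" : "false");
	return 0;
}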
@@ -5353,16 +5435,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5353 ftrace_graph_entry = ftrace_graph_entry_test; 5435 ftrace_graph_entry = ftrace_graph_entry_test;
5354 update_function_graph_func(); 5436 update_function_graph_func();
5355 5437
5356 /* Function graph doesn't use the .func field of global_ops */ 5438 ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
5357 global_ops.flags |= FTRACE_OPS_FL_STUB;
5358
5359#ifdef CONFIG_DYNAMIC_FTRACE
5360 /* Optimize function graph calling (if implemented by arch) */
5361 if (FTRACE_GRAPH_TRAMP_ADDR != 0)
5362 global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR;
5363#endif
5364
5365 ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
5366 5439
5367out: 5440out:
5368 mutex_unlock(&ftrace_lock); 5441 mutex_unlock(&ftrace_lock);
@@ -5380,12 +5453,7 @@ void unregister_ftrace_graph(void)
5380 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; 5453 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5381 ftrace_graph_entry = ftrace_graph_entry_stub; 5454 ftrace_graph_entry = ftrace_graph_entry_stub;
5382 __ftrace_graph_entry = ftrace_graph_entry_stub; 5455 __ftrace_graph_entry = ftrace_graph_entry_stub;
5383 ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET); 5456 ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
5384 global_ops.flags &= ~FTRACE_OPS_FL_STUB;
5385#ifdef CONFIG_DYNAMIC_FTRACE
5386 if (FTRACE_GRAPH_TRAMP_ADDR != 0)
5387 global_ops.trampoline = 0;
5388#endif
5389 unregister_pm_notifier(&ftrace_suspend_notifier); 5457 unregister_pm_notifier(&ftrace_suspend_notifier);
5390 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); 5458 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5391 5459