author    Dave Airlie <airlied@redhat.com>    2015-03-23 21:12:20 -0400
committer Dave Airlie <airlied@redhat.com>    2015-03-23 21:12:20 -0400
commit    74ccbff99787b68e4eb01ef8cf29789229ab0f5d (patch)
tree      d4e322105618e5b3887f7dc6b54e5d2346974675
parent    ae10c2248593fb84c6951d67c98c9c934997e56a (diff)
parent    0f9e9cd61f46c07246e30871fd638ffeaca3c576 (diff)
Merge tag 'drm-intel-next-2015-03-13-merge' of git://anongit.freedesktop.org/drm-intel into drm-next
drm-intel-next-2015-03-13-rebased:
- EU count report param for gen9+ (Jeff McGee)
- piles of pll/wm/... fixes for chv, finally out of preliminary hw support
(Ville, Vijay)
- gen9 rps support from Akash
- more work to move towards atomic from Matt, Ander and others
- runtime pm support for skl (Damien)
- edp1.4 intermediate link clock support (Sonika)
- use frontbuffer tracking for fbc (Paulo)
- remove ilk rc6 (John Harrison)
- a bunch of smaller things and fixes all over
Includes backmerge because git rerere couldn't keep up any more.
* tag 'drm-intel-next-2015-03-13-merge' of git://anongit.freedesktop.org/drm-intel: (366 commits)
drm/i915: Make sure the primary plane is enabled before reading out the fb state
drm/i915: Update DRIVER_DATE to 20150313
drm/i915: Fix vmap_batch page iterator overrun
drm/i915: Export total subslice and EU counts
drm/i915: redefine WARN_ON_ONCE to include the condition
drm/i915/skl: Implement WaDisableHBR2
drm/i915: Remove the preliminary_hw_support shackles from CHV
drm/i915: Read CHV_PLL_DW8 from the correct offset
drm/i915: Rewrite IVB FDI bifurcation conflict checks
drm/i915: Rewrite some of the FDI lane checks
drm/i915/skl: Enable the RPS interrupts programming
drm/i915/skl: Enabling processing of Turbo interrupts
drm/i915/skl: Updated the i915_frequency_info debugfs function
drm/i915: Simplify the way BC bifurcation state consistency is kept
drm/i915/skl: Updated the act_freq_mhz_show sysfs function
drm/i915/skl: Updated the gen9_enable_rps function
drm/i915/skl: Updated the gen6_rps_limits function
drm/i915/skl: Restructured the gen6_set_rps_thresholds function
drm/i915/skl: Updated the gen6_set_rps function
drm/i915/skl: Updated the gen6_init_rps_frequencies function
...
290 files changed, 4289 insertions, 2440 deletions
diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
index f4445e5a2bbb..1e097037349c 100644
--- a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
+++ b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
@@ -22,6 +22,8 @@ Optional Properties: | |||
22 | - pclkN, clkN: Pairs of parent of input clock and input clock to the | 22 | - pclkN, clkN: Pairs of parent of input clock and input clock to the |
23 | devices in this power domain. Maximum of 4 pairs (N = 0 to 3) | 23 | devices in this power domain. Maximum of 4 pairs (N = 0 to 3) |
24 | are supported currently. | 24 | are supported currently. |
25 | - power-domains: phandle pointing to the parent power domain, for more details | ||
26 | see Documentation/devicetree/bindings/power/power_domain.txt | ||
25 | 27 | ||
26 | Node of a device using power domains must have a power-domains property | 28 | Node of a device using power domains must have a power-domains property |
27 | defined with a phandle to respective power domain. | 29 | defined with a phandle to respective power domain. |
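
Note: the new optional property is just the generic PM domain consumer binding applied to a provider, so an Exynos power domain that is itself powered from another domain only gains a power-domains phandle. A rough sketch, with register offsets that are illustrative rather than taken from this patch:

    pd_lcd0: lcd0-power-domain@10023C80 {
            compatible = "samsung,exynos4210-pd";
            reg = <0x10023C80 0x20>;
            #power-domain-cells = <0>;
    };

    pd_tv: tv-power-domain@10023C20 {
            compatible = "samsung,exynos4210-pd";
            reg = <0x10023C20 0x20>;
            #power-domain-cells = <0>;
            /* the TV domain is a child of the LCD0 domain */
            power-domains = <&pd_lcd0>;
    };
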
diff --git a/Documentation/devicetree/bindings/arm/sti.txt b/Documentation/devicetree/bindings/arm/sti.txt
index d70ec358736c..8d27f6b084c7 100644
--- a/Documentation/devicetree/bindings/arm/sti.txt
+++ b/Documentation/devicetree/bindings/arm/sti.txt
@@ -13,6 +13,10 @@ Boards with the ST STiH407 SoC shall have the following properties: | |||
13 | Required root node property: | 13 | Required root node property: |
14 | compatible = "st,stih407"; | 14 | compatible = "st,stih407"; |
15 | 15 | ||
16 | Boards with the ST STiH410 SoC shall have the following properties: | ||
17 | Required root node property: | ||
18 | compatible = "st,stih410"; | ||
19 | |||
16 | Boards with the ST STiH418 SoC shall have the following properties: | 20 | Boards with the ST STiH418 SoC shall have the following properties: |
17 | Required root node property: | 21 | Required root node property: |
18 | compatible = "st,stih418"; | 22 | compatible = "st,stih418"; |
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
index cfcc52705ed8..6151999c5dca 100644
--- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
+++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
@@ -4,7 +4,10 @@ Ethernet nodes are defined to describe on-chip ethernet interfaces in | |||
4 | APM X-Gene SoC. | 4 | APM X-Gene SoC. |
5 | 5 | ||
6 | Required properties for all the ethernet interfaces: | 6 | Required properties for all the ethernet interfaces: |
7 | - compatible: Should be "apm,xgene-enet" | 7 | - compatible: Should state binding information from the following list, |
8 | - "apm,xgene-enet": RGMII based 1G interface | ||
9 | - "apm,xgene1-sgenet": SGMII based 1G interface | ||
10 | - "apm,xgene1-xgenet": XFI based 10G interface | ||
8 | - reg: Address and length of the register set for the device. It contains the | 11 | - reg: Address and length of the register set for the device. It contains the |
9 | information of registers in the same order as described by reg-names | 12 | information of registers in the same order as described by reg-names |
10 | - reg-names: Should contain the register set names | 13 | - reg-names: Should contain the register set names |
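
Note: a node selecting one of the new compatibles might look roughly like the sketch below. The addresses, interrupt and MAC address are placeholders, and the reg-names values follow the usual X-Gene layout rather than being quoted from this hunk:

    sgenet0: ethernet@1f210000 {
            compatible = "apm,xgene1-sgenet";
            reg = <0x0 0x1f210000 0x0 0xd100>,
                  <0x0 0x1f200000 0x0 0xc300>,
                  <0x0 0x1b000000 0x0 0x200000>;
            reg-names = "enet_csr", "ring_csr", "ring_cmd";
            interrupts = <0x0 0xa0 0x4>;
            local-mac-address = [00 01 73 00 00 01];
            phy-connection-type = "sgmii";
    };
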
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 98c16672ab5f..0f8ed3710c66 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -19,6 +19,16 @@ Required properties: | |||
19 | providing multiple PM domains (e.g. power controllers), but can be any value | 19 | providing multiple PM domains (e.g. power controllers), but can be any value |
20 | as specified by device tree binding documentation of particular provider. | 20 | as specified by device tree binding documentation of particular provider. |
21 | 21 | ||
22 | Optional properties: | ||
23 | - power-domains : A phandle and PM domain specifier as defined by bindings of | ||
24 | the power controller specified by phandle. | ||
25 | Some power domains might be powered from another power domain (or have | ||
26 | other hardware specific dependencies). For representing such dependency | ||
27 | a standard PM domain consumer binding is used. When provided, all domains | ||
28 | created by the given provider should be subdomains of the domain | ||
29 | specified by this binding. More details about power domain specifier are | ||
30 | available in the next section. | ||
31 | |||
22 | Example: | 32 | Example: |
23 | 33 | ||
24 | power: power-controller@12340000 { | 34 | power: power-controller@12340000 { |
@@ -30,6 +40,25 @@ Example: | |||
30 | The node above defines a power controller that is a PM domain provider and | 40 | The node above defines a power controller that is a PM domain provider and |
31 | expects one cell as its phandle argument. | 41 | expects one cell as its phandle argument. |
32 | 42 | ||
43 | Example 2: | ||
44 | |||
45 | parent: power-controller@12340000 { | ||
46 | compatible = "foo,power-controller"; | ||
47 | reg = <0x12340000 0x1000>; | ||
48 | #power-domain-cells = <1>; | ||
49 | }; | ||
50 | |||
51 | child: power-controller@12340000 { | ||
52 | compatible = "foo,power-controller"; | ||
53 | reg = <0x12341000 0x1000>; | ||
54 | power-domains = <&parent 0>; | ||
55 | #power-domain-cells = <1>; | ||
56 | }; | ||
57 | |||
58 | The nodes above define two power controllers: 'parent' and 'child'. | ||
59 | Domains created by the 'child' power controller are subdomains of '0' power | ||
60 | domain provided by the 'parent' power controller. | ||
61 | |||
33 | ==PM domain consumers== | 62 | ==PM domain consumers== |
34 | 63 | ||
35 | Required properties: | 64 | Required properties: |
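
Note: the consumer side is unchanged by this patch; a device sitting in domain 0 of the 'parent' controller from Example 2 would reference it as before. A minimal sketch, with a placeholder node and compatible:

    leaky-device@12350000 {
            compatible = "foo,i-leak-current";
            reg = <0x12350000 0x1000>;
            power-domains = <&parent 0>;
    };
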
diff --git a/Documentation/devicetree/bindings/serial/of-serial.txt b/Documentation/devicetree/bindings/serial/8250.txt
index 91d5ab0e60fc..91d5ab0e60fc 100644
--- a/Documentation/devicetree/bindings/serial/of-serial.txt
+++ b/Documentation/devicetree/bindings/serial/8250.txt
diff --git a/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt
new file mode 100644
index 000000000000..ebcbb62c0a76
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt
@@ -0,0 +1,19 @@ | |||
1 | ETRAX FS UART | ||
2 | |||
3 | Required properties: | ||
4 | - compatible : "axis,etraxfs-uart" | ||
5 | - reg: offset and length of the register set for the device. | ||
6 | - interrupts: device interrupt | ||
7 | |||
8 | Optional properties: | ||
9 | - {dtr,dsr,ri,cd}-gpios: specify a GPIO for DTR/DSR/RI/CD | ||
10 | line respectively. | ||
11 | |||
12 | Example: | ||
13 | |||
14 | serial@b0026000 { ||
15 | compatible = "axis,etraxfs-uart"; | ||
16 | reg = <0xb0026000 0x1000>; | ||
17 | interrupts = <68>; | ||
18 | status = "disabled"; | ||
19 | }; | ||
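
Note: the example in the new binding leaves out the optional modem-control lines; with them the node might look roughly like this, where the GPIO controller phandle, pin numbers and flags are placeholders:

    serial@b0026000 {
            compatible = "axis,etraxfs-uart";
            reg = <0xb0026000 0x1000>;
            interrupts = <68>;
            dtr-gpios = <&gpio 24 1>;
            dsr-gpios = <&gpio 25 1>;
            ri-gpios = <&gpio 26 1>;
            cd-gpios = <&gpio 27 1>;
    };
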
diff --git a/Documentation/devicetree/bindings/submitting-patches.txt b/Documentation/devicetree/bindings/submitting-patches.txt
index 56742bc70218..7d44eae7ab0b 100644
--- a/Documentation/devicetree/bindings/submitting-patches.txt
+++ b/Documentation/devicetree/bindings/submitting-patches.txt
@@ -12,6 +12,9 @@ I. For patch submitters | |||
12 | 12 | ||
13 | devicetree@vger.kernel.org | 13 | devicetree@vger.kernel.org |
14 | 14 | ||
15 | and Cc: the DT maintainers. Use scripts/get_maintainer.pl to identify | ||
16 | all of the DT maintainers. | ||
17 | |||
15 | 3) The Documentation/ portion of the patch should come in the series before | 18 | 3) The Documentation/ portion of the patch should come in the series before |
16 | the code implementing the binding. | 19 | the code implementing the binding. |
17 | 20 | ||
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 389ca1347a77..fae26d014aaf 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -20,6 +20,7 @@ amlogic Amlogic, Inc. | |||
20 | ams AMS AG | 20 | ams AMS AG |
21 | amstaos AMS-Taos Inc. | 21 | amstaos AMS-Taos Inc. |
22 | apm Applied Micro Circuits Corporation (APM) | 22 | apm Applied Micro Circuits Corporation (APM) |
23 | arasan Arasan Chip Systems | ||
23 | arm ARM Ltd. | 24 | arm ARM Ltd. |
24 | armadeus ARMadeus Systems SARL | 25 | armadeus ARMadeus Systems SARL |
25 | asahi-kasei Asahi Kasei Corp. | 26 | asahi-kasei Asahi Kasei Corp. |
@@ -27,6 +28,7 @@ atmel Atmel Corporation | |||
27 | auo AU Optronics Corporation | 28 | auo AU Optronics Corporation |
28 | avago Avago Technologies | 29 | avago Avago Technologies |
29 | avic Shanghai AVIC Optoelectronics Co., Ltd. | 30 | avic Shanghai AVIC Optoelectronics Co., Ltd. |
31 | axis Axis Communications AB | ||
30 | bosch Bosch Sensortec GmbH | 32 | bosch Bosch Sensortec GmbH |
31 | brcm Broadcom Corporation | 33 | brcm Broadcom Corporation |
32 | buffalo Buffalo, Inc. | 34 | buffalo Buffalo, Inc. |
diff --git a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
index f90e294d7631..a4d869744f59 100644
--- a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
@@ -26,6 +26,11 @@ Optional properties: | |||
26 | - atmel,disable : Should be present if you want to disable the watchdog. | 26 | - atmel,disable : Should be present if you want to disable the watchdog. |
27 | - atmel,idle-halt : Should be present if you want to stop the watchdog when | 27 | - atmel,idle-halt : Should be present if you want to stop the watchdog when |
28 | entering idle state. | 28 | entering idle state. |
29 | CAUTION: Use this property with care: it stops the watchdog from | ||
30 | counting while the CPU is in idle state, so the effective watchdog | ||
31 | timeout depends on mean CPU usage, and the watchdog will never fire | ||
32 | if the CPU stops working while it is idle, which is probably | ||
33 | not what you want. | ||
29 | - atmel,dbg-halt : Should be present if you want to stop the watchdog when | 34 | - atmel,dbg-halt : Should be present if you want to stop the watchdog when |
30 | entering debug state. | 35 | entering debug state. |
31 | 36 | ||
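
Note: given that caution, boards normally leave atmel,idle-halt out, which is exactly what the at91 device-tree hunks further down in this patch do. A minimal node under that assumption, with an illustrative address:

    watchdog@fffffd40 {
            compatible = "atmel,at91sam9260-wdt";
            reg = <0xfffffd40 0x10>;
            atmel,watchdog-type = "hardware";
            atmel,reset-type = "all";
            atmel,dbg-halt;
            status = "okay";
    };
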
diff --git a/MAINTAINERS b/MAINTAINERS
index ab44a48d53bb..74778886321e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1030,6 +1030,16 @@ F: arch/arm/mach-mxs/ | |||
1030 | F: arch/arm/boot/dts/imx* | 1030 | F: arch/arm/boot/dts/imx* |
1031 | F: arch/arm/configs/imx*_defconfig | 1031 | F: arch/arm/configs/imx*_defconfig |
1032 | 1032 | ||
1033 | ARM/FREESCALE VYBRID ARM ARCHITECTURE | ||
1034 | M: Shawn Guo <shawn.guo@linaro.org> | ||
1035 | M: Sascha Hauer <kernel@pengutronix.de> | ||
1036 | R: Stefan Agner <stefan@agner.ch> | ||
1037 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
1038 | S: Maintained | ||
1039 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git | ||
1040 | F: arch/arm/mach-imx/*vf610* | ||
1041 | F: arch/arm/boot/dts/vf* | ||
1042 | |||
1033 | ARM/GLOMATION GESBC9312SX MACHINE SUPPORT | 1043 | ARM/GLOMATION GESBC9312SX MACHINE SUPPORT |
1034 | M: Lennert Buytenhek <kernel@wantstofly.org> | 1044 | M: Lennert Buytenhek <kernel@wantstofly.org> |
1035 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1045 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
@@ -1188,6 +1198,7 @@ ARM/Marvell Dove/MV78xx0/Orion SOC support | |||
1188 | M: Jason Cooper <jason@lakedaemon.net> | 1198 | M: Jason Cooper <jason@lakedaemon.net> |
1189 | M: Andrew Lunn <andrew@lunn.ch> | 1199 | M: Andrew Lunn <andrew@lunn.ch> |
1190 | M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> | 1200 | M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> |
1201 | M: Gregory Clement <gregory.clement@free-electrons.com> | ||
1191 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1202 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
1192 | S: Maintained | 1203 | S: Maintained |
1193 | F: arch/arm/mach-dove/ | 1204 | F: arch/arm/mach-dove/ |
@@ -2107,7 +2118,6 @@ F: drivers/net/ethernet/broadcom/bnx2x/ | |||
2107 | 2118 | ||
2108 | BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE | 2119 | BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE |
2109 | M: Christian Daudt <bcm@fixthebug.org> | 2120 | M: Christian Daudt <bcm@fixthebug.org> |
2110 | M: Matt Porter <mporter@linaro.org> | ||
2111 | M: Florian Fainelli <f.fainelli@gmail.com> | 2121 | M: Florian Fainelli <f.fainelli@gmail.com> |
2112 | L: bcm-kernel-feedback-list@broadcom.com | 2122 | L: bcm-kernel-feedback-list@broadcom.com |
2113 | T: git git://github.com/broadcom/mach-bcm | 2123 | T: git git://github.com/broadcom/mach-bcm |
@@ -2369,8 +2379,9 @@ F: arch/x86/include/asm/tce.h | |||
2369 | 2379 | ||
2370 | CAN NETWORK LAYER | 2380 | CAN NETWORK LAYER |
2371 | M: Oliver Hartkopp <socketcan@hartkopp.net> | 2381 | M: Oliver Hartkopp <socketcan@hartkopp.net> |
2382 | M: Marc Kleine-Budde <mkl@pengutronix.de> | ||
2372 | L: linux-can@vger.kernel.org | 2383 | L: linux-can@vger.kernel.org |
2373 | W: http://gitorious.org/linux-can | 2384 | W: https://github.com/linux-can |
2374 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git | 2385 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git |
2375 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git | 2386 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git |
2376 | S: Maintained | 2387 | S: Maintained |
@@ -2386,7 +2397,7 @@ CAN NETWORK DRIVERS | |||
2386 | M: Wolfgang Grandegger <wg@grandegger.com> | 2397 | M: Wolfgang Grandegger <wg@grandegger.com> |
2387 | M: Marc Kleine-Budde <mkl@pengutronix.de> | 2398 | M: Marc Kleine-Budde <mkl@pengutronix.de> |
2388 | L: linux-can@vger.kernel.org | 2399 | L: linux-can@vger.kernel.org |
2389 | W: http://gitorious.org/linux-can | 2400 | W: https://github.com/linux-can |
2390 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git | 2401 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git |
2391 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git | 2402 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git |
2392 | S: Maintained | 2403 | S: Maintained |
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 0 | 2 | PATCHLEVEL = 0 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc3 | 4 | EXTRAVERSION = -rc4 |
5 | NAME = Hurr durr I'ma sheep | 5 | NAME = Hurr durr I'ma sheep |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 7f99cd652203..eb7bb511f853 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -150,6 +150,7 @@ machine-$(CONFIG_ARCH_BERLIN) += berlin | |||
150 | machine-$(CONFIG_ARCH_CLPS711X) += clps711x | 150 | machine-$(CONFIG_ARCH_CLPS711X) += clps711x |
151 | machine-$(CONFIG_ARCH_CNS3XXX) += cns3xxx | 151 | machine-$(CONFIG_ARCH_CNS3XXX) += cns3xxx |
152 | machine-$(CONFIG_ARCH_DAVINCI) += davinci | 152 | machine-$(CONFIG_ARCH_DAVINCI) += davinci |
153 | machine-$(CONFIG_ARCH_DIGICOLOR) += digicolor | ||
153 | machine-$(CONFIG_ARCH_DOVE) += dove | 154 | machine-$(CONFIG_ARCH_DOVE) += dove |
154 | machine-$(CONFIG_ARCH_EBSA110) += ebsa110 | 155 | machine-$(CONFIG_ARCH_EBSA110) += ebsa110 |
155 | machine-$(CONFIG_ARCH_EFM32) += efm32 | 156 | machine-$(CONFIG_ARCH_EFM32) += efm32 |
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index 2c6248d9a9ef..c3255e0c90aa 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -301,3 +301,11 @@ | |||
301 | cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; | 301 | cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; |
302 | cd-inverted; | 302 | cd-inverted; |
303 | }; | 303 | }; |
304 | |||
305 | &aes { | ||
306 | status = "okay"; | ||
307 | }; | ||
308 | |||
309 | &sham { | ||
310 | status = "okay"; | ||
311 | }; | ||
diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts
index 83d40f7655e5..6b8493720424 100644
--- a/arch/arm/boot/dts/am335x-bone.dts
+++ b/arch/arm/boot/dts/am335x-bone.dts
@@ -24,11 +24,3 @@ | |||
24 | &mmc1 { | 24 | &mmc1 { |
25 | vmmc-supply = <&ldo3_reg>; | 25 | vmmc-supply = <&ldo3_reg>; |
26 | }; | 26 | }; |
27 | |||
28 | &sham { | ||
29 | status = "okay"; | ||
30 | }; | ||
31 | |||
32 | &aes { | ||
33 | status = "okay"; | ||
34 | }; | ||
diff --git a/arch/arm/boot/dts/am335x-lxm.dts b/arch/arm/boot/dts/am335x-lxm.dts
index 7266a00aab2e..5c5667a3624d 100644
--- a/arch/arm/boot/dts/am335x-lxm.dts
+++ b/arch/arm/boot/dts/am335x-lxm.dts
@@ -328,6 +328,10 @@ | |||
328 | dual_emac_res_vlan = <3>; | 328 | dual_emac_res_vlan = <3>; |
329 | }; | 329 | }; |
330 | 330 | ||
331 | &phy_sel { | ||
332 | rmii-clock-ext; | ||
333 | }; | ||
334 | |||
331 | &mac { | 335 | &mac { |
332 | pinctrl-names = "default", "sleep"; | 336 | pinctrl-names = "default", "sleep"; |
333 | pinctrl-0 = <&cpsw_default>; | 337 | pinctrl-0 = <&cpsw_default>; |
diff --git a/arch/arm/boot/dts/am33xx-clocks.dtsi b/arch/arm/boot/dts/am33xx-clocks.dtsi
index 712edce7d6fb..071b56aa0c7e 100644
--- a/arch/arm/boot/dts/am33xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am33xx-clocks.dtsi
@@ -99,7 +99,7 @@ | |||
99 | ehrpwm0_tbclk: ehrpwm0_tbclk@44e10664 { | 99 | ehrpwm0_tbclk: ehrpwm0_tbclk@44e10664 { |
100 | #clock-cells = <0>; | 100 | #clock-cells = <0>; |
101 | compatible = "ti,gate-clock"; | 101 | compatible = "ti,gate-clock"; |
102 | clocks = <&dpll_per_m2_ck>; | 102 | clocks = <&l4ls_gclk>; |
103 | ti,bit-shift = <0>; | 103 | ti,bit-shift = <0>; |
104 | reg = <0x0664>; | 104 | reg = <0x0664>; |
105 | }; | 105 | }; |
@@ -107,7 +107,7 @@ | |||
107 | ehrpwm1_tbclk: ehrpwm1_tbclk@44e10664 { | 107 | ehrpwm1_tbclk: ehrpwm1_tbclk@44e10664 { |
108 | #clock-cells = <0>; | 108 | #clock-cells = <0>; |
109 | compatible = "ti,gate-clock"; | 109 | compatible = "ti,gate-clock"; |
110 | clocks = <&dpll_per_m2_ck>; | 110 | clocks = <&l4ls_gclk>; |
111 | ti,bit-shift = <1>; | 111 | ti,bit-shift = <1>; |
112 | reg = <0x0664>; | 112 | reg = <0x0664>; |
113 | }; | 113 | }; |
@@ -115,7 +115,7 @@ | |||
115 | ehrpwm2_tbclk: ehrpwm2_tbclk@44e10664 { | 115 | ehrpwm2_tbclk: ehrpwm2_tbclk@44e10664 { |
116 | #clock-cells = <0>; | 116 | #clock-cells = <0>; |
117 | compatible = "ti,gate-clock"; | 117 | compatible = "ti,gate-clock"; |
118 | clocks = <&dpll_per_m2_ck>; | 118 | clocks = <&l4ls_gclk>; |
119 | ti,bit-shift = <2>; | 119 | ti,bit-shift = <2>; |
120 | reg = <0x0664>; | 120 | reg = <0x0664>; |
121 | }; | 121 | }; |
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
index c7dc9dab93a4..cfb49686ab6a 100644
--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
@@ -107,7 +107,7 @@ | |||
107 | ehrpwm0_tbclk: ehrpwm0_tbclk { | 107 | ehrpwm0_tbclk: ehrpwm0_tbclk { |
108 | #clock-cells = <0>; | 108 | #clock-cells = <0>; |
109 | compatible = "ti,gate-clock"; | 109 | compatible = "ti,gate-clock"; |
110 | clocks = <&dpll_per_m2_ck>; | 110 | clocks = <&l4ls_gclk>; |
111 | ti,bit-shift = <0>; | 111 | ti,bit-shift = <0>; |
112 | reg = <0x0664>; | 112 | reg = <0x0664>; |
113 | }; | 113 | }; |
@@ -115,7 +115,7 @@ | |||
115 | ehrpwm1_tbclk: ehrpwm1_tbclk { | 115 | ehrpwm1_tbclk: ehrpwm1_tbclk { |
116 | #clock-cells = <0>; | 116 | #clock-cells = <0>; |
117 | compatible = "ti,gate-clock"; | 117 | compatible = "ti,gate-clock"; |
118 | clocks = <&dpll_per_m2_ck>; | 118 | clocks = <&l4ls_gclk>; |
119 | ti,bit-shift = <1>; | 119 | ti,bit-shift = <1>; |
120 | reg = <0x0664>; | 120 | reg = <0x0664>; |
121 | }; | 121 | }; |
@@ -123,7 +123,7 @@ | |||
123 | ehrpwm2_tbclk: ehrpwm2_tbclk { | 123 | ehrpwm2_tbclk: ehrpwm2_tbclk { |
124 | #clock-cells = <0>; | 124 | #clock-cells = <0>; |
125 | compatible = "ti,gate-clock"; | 125 | compatible = "ti,gate-clock"; |
126 | clocks = <&dpll_per_m2_ck>; | 126 | clocks = <&l4ls_gclk>; |
127 | ti,bit-shift = <2>; | 127 | ti,bit-shift = <2>; |
128 | reg = <0x0664>; | 128 | reg = <0x0664>; |
129 | }; | 129 | }; |
@@ -131,7 +131,7 @@ | |||
131 | ehrpwm3_tbclk: ehrpwm3_tbclk { | 131 | ehrpwm3_tbclk: ehrpwm3_tbclk { |
132 | #clock-cells = <0>; | 132 | #clock-cells = <0>; |
133 | compatible = "ti,gate-clock"; | 133 | compatible = "ti,gate-clock"; |
134 | clocks = <&dpll_per_m2_ck>; | 134 | clocks = <&l4ls_gclk>; |
135 | ti,bit-shift = <4>; | 135 | ti,bit-shift = <4>; |
136 | reg = <0x0664>; | 136 | reg = <0x0664>; |
137 | }; | 137 | }; |
@@ -139,7 +139,7 @@ | |||
139 | ehrpwm4_tbclk: ehrpwm4_tbclk { | 139 | ehrpwm4_tbclk: ehrpwm4_tbclk { |
140 | #clock-cells = <0>; | 140 | #clock-cells = <0>; |
141 | compatible = "ti,gate-clock"; | 141 | compatible = "ti,gate-clock"; |
142 | clocks = <&dpll_per_m2_ck>; | 142 | clocks = <&l4ls_gclk>; |
143 | ti,bit-shift = <5>; | 143 | ti,bit-shift = <5>; |
144 | reg = <0x0664>; | 144 | reg = <0x0664>; |
145 | }; | 145 | }; |
@@ -147,7 +147,7 @@ | |||
147 | ehrpwm5_tbclk: ehrpwm5_tbclk { | 147 | ehrpwm5_tbclk: ehrpwm5_tbclk { |
148 | #clock-cells = <0>; | 148 | #clock-cells = <0>; |
149 | compatible = "ti,gate-clock"; | 149 | compatible = "ti,gate-clock"; |
150 | clocks = <&dpll_per_m2_ck>; | 150 | clocks = <&l4ls_gclk>; |
151 | ti,bit-shift = <6>; | 151 | ti,bit-shift = <6>; |
152 | reg = <0x0664>; | 152 | reg = <0x0664>; |
153 | }; | 153 | }; |
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index fff0ee69aab4..e7f0a4ae271c 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -494,12 +494,12 @@ | |||
494 | 494 | ||
495 | pinctrl_usart3_rts: usart3_rts-0 { | 495 | pinctrl_usart3_rts: usart3_rts-0 { |
496 | atmel,pins = | 496 | atmel,pins = |
497 | <AT91_PIOB 8 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC8 periph B */ | 497 | <AT91_PIOC 8 AT91_PERIPH_B AT91_PINCTRL_NONE>; |
498 | }; | 498 | }; |
499 | 499 | ||
500 | pinctrl_usart3_cts: usart3_cts-0 { | 500 | pinctrl_usart3_cts: usart3_cts-0 { |
501 | atmel,pins = | 501 | atmel,pins = |
502 | <AT91_PIOB 10 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* PC10 periph B */ | 502 | <AT91_PIOC 10 AT91_PERIPH_B AT91_PINCTRL_NONE>; |
503 | }; | 503 | }; |
504 | }; | 504 | }; |
505 | 505 | ||
@@ -853,7 +853,7 @@ | |||
853 | }; | 853 | }; |
854 | 854 | ||
855 | usb1: gadget@fffa4000 { | 855 | usb1: gadget@fffa4000 { |
856 | compatible = "atmel,at91rm9200-udc"; | 856 | compatible = "atmel,at91sam9260-udc"; |
857 | reg = <0xfffa4000 0x4000>; | 857 | reg = <0xfffa4000 0x4000>; |
858 | interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>; | 858 | interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>; |
859 | clocks = <&udc_clk>, <&udpck>; | 859 | clocks = <&udc_clk>, <&udpck>; |
@@ -976,7 +976,6 @@ | |||
976 | atmel,watchdog-type = "hardware"; | 976 | atmel,watchdog-type = "hardware"; |
977 | atmel,reset-type = "all"; | 977 | atmel,reset-type = "all"; |
978 | atmel,dbg-halt; | 978 | atmel,dbg-halt; |
979 | atmel,idle-halt; | ||
980 | status = "disabled"; | 979 | status = "disabled"; |
981 | }; | 980 | }; |
982 | 981 | ||
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
index e247b0b5fdab..d55fdf2487ef 100644
--- a/arch/arm/boot/dts/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/at91sam9261.dtsi
@@ -124,11 +124,12 @@ | |||
124 | }; | 124 | }; |
125 | 125 | ||
126 | usb1: gadget@fffa4000 { | 126 | usb1: gadget@fffa4000 { |
127 | compatible = "atmel,at91rm9200-udc"; | 127 | compatible = "atmel,at91sam9261-udc"; |
128 | reg = <0xfffa4000 0x4000>; | 128 | reg = <0xfffa4000 0x4000>; |
129 | interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>; | 129 | interrupts = <10 IRQ_TYPE_LEVEL_HIGH 2>; |
130 | clocks = <&usb>, <&udc_clk>, <&udpck>; | 130 | clocks = <&udc_clk>, <&udpck>; |
131 | clock-names = "usb_clk", "udc_clk", "udpck"; | 131 | clock-names = "pclk", "hclk"; |
132 | atmel,matrix = <&matrix>; | ||
132 | status = "disabled"; | 133 | status = "disabled"; |
133 | }; | 134 | }; |
134 | 135 | ||
@@ -262,7 +263,7 @@ | |||
262 | }; | 263 | }; |
263 | 264 | ||
264 | matrix: matrix@ffffee00 { | 265 | matrix: matrix@ffffee00 { |
265 | compatible = "atmel,at91sam9260-bus-matrix"; | 266 | compatible = "atmel,at91sam9260-bus-matrix", "syscon"; |
266 | reg = <0xffffee00 0x200>; | 267 | reg = <0xffffee00 0x200>; |
267 | }; | 268 | }; |
268 | 269 | ||
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 1f67bb4c144e..fce301c4e9d6 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -69,7 +69,7 @@ | |||
69 | 69 | ||
70 | sram1: sram@00500000 { | 70 | sram1: sram@00500000 { |
71 | compatible = "mmio-sram"; | 71 | compatible = "mmio-sram"; |
72 | reg = <0x00300000 0x4000>; | 72 | reg = <0x00500000 0x4000>; |
73 | }; | 73 | }; |
74 | 74 | ||
75 | ahb { | 75 | ahb { |
@@ -856,7 +856,7 @@ | |||
856 | }; | 856 | }; |
857 | 857 | ||
858 | usb1: gadget@fff78000 { | 858 | usb1: gadget@fff78000 { |
859 | compatible = "atmel,at91rm9200-udc"; | 859 | compatible = "atmel,at91sam9263-udc"; |
860 | reg = <0xfff78000 0x4000>; | 860 | reg = <0xfff78000 0x4000>; |
861 | interrupts = <24 IRQ_TYPE_LEVEL_HIGH 2>; | 861 | interrupts = <24 IRQ_TYPE_LEVEL_HIGH 2>; |
862 | clocks = <&udc_clk>, <&udpck>; | 862 | clocks = <&udc_clk>, <&udpck>; |
@@ -905,7 +905,6 @@ | |||
905 | atmel,watchdog-type = "hardware"; | 905 | atmel,watchdog-type = "hardware"; |
906 | atmel,reset-type = "all"; | 906 | atmel,reset-type = "all"; |
907 | atmel,dbg-halt; | 907 | atmel,dbg-halt; |
908 | atmel,idle-halt; | ||
909 | status = "disabled"; | 908 | status = "disabled"; |
910 | }; | 909 | }; |
911 | 910 | ||
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index ee80aa9c0759..488af63d5174 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -1116,7 +1116,6 @@ | |||
1116 | atmel,watchdog-type = "hardware"; | 1116 | atmel,watchdog-type = "hardware"; |
1117 | atmel,reset-type = "all"; | 1117 | atmel,reset-type = "all"; |
1118 | atmel,dbg-halt; | 1118 | atmel,dbg-halt; |
1119 | atmel,idle-halt; | ||
1120 | status = "disabled"; | 1119 | status = "disabled"; |
1121 | }; | 1120 | }; |
1122 | 1121 | ||
@@ -1301,7 +1300,7 @@ | |||
1301 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; | 1300 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; |
1302 | reg = <0x00800000 0x100000>; | 1301 | reg = <0x00800000 0x100000>; |
1303 | interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; | 1302 | interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; |
1304 | clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; | 1303 | clocks = <&utmi>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; |
1305 | clock-names = "usb_clk", "ehci_clk", "hclk", "uhpck"; | 1304 | clock-names = "usb_clk", "ehci_clk", "hclk", "uhpck"; |
1306 | status = "disabled"; | 1305 | status = "disabled"; |
1307 | }; | 1306 | }; |
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index c2666a7cb5b1..0c53a375ba99 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -894,7 +894,6 @@ | |||
894 | atmel,watchdog-type = "hardware"; | 894 | atmel,watchdog-type = "hardware"; |
895 | atmel,reset-type = "all"; | 895 | atmel,reset-type = "all"; |
896 | atmel,dbg-halt; | 896 | atmel,dbg-halt; |
897 | atmel,idle-halt; | ||
898 | status = "disabled"; | 897 | status = "disabled"; |
899 | }; | 898 | }; |
900 | 899 | ||
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 818dabdd8c0e..d221179d0f1a 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -1066,7 +1066,7 @@ | |||
1066 | reg = <0x00500000 0x80000 | 1066 | reg = <0x00500000 0x80000 |
1067 | 0xf803c000 0x400>; | 1067 | 0xf803c000 0x400>; |
1068 | interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>; | 1068 | interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>; |
1069 | clocks = <&usb>, <&udphs_clk>; | 1069 | clocks = <&utmi>, <&udphs_clk>; |
1070 | clock-names = "hclk", "pclk"; | 1070 | clock-names = "hclk", "pclk"; |
1071 | status = "disabled"; | 1071 | status = "disabled"; |
1072 | 1072 | ||
@@ -1130,7 +1130,6 @@ | |||
1130 | atmel,watchdog-type = "hardware"; | 1130 | atmel,watchdog-type = "hardware"; |
1131 | atmel,reset-type = "all"; | 1131 | atmel,reset-type = "all"; |
1132 | atmel,dbg-halt; | 1132 | atmel,dbg-halt; |
1133 | atmel,idle-halt; | ||
1134 | status = "disabled"; | 1133 | status = "disabled"; |
1135 | }; | 1134 | }; |
1136 | 1135 | ||
@@ -1186,7 +1185,7 @@ | |||
1186 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; | 1185 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; |
1187 | reg = <0x00700000 0x100000>; | 1186 | reg = <0x00700000 0x100000>; |
1188 | interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; | 1187 | interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; |
1189 | clocks = <&usb>, <&uhphs_clk>, <&uhpck>; | 1188 | clocks = <&utmi>, <&uhphs_clk>, <&uhpck>; |
1190 | clock-names = "usb_clk", "ehci_clk", "uhpck"; | 1189 | clock-names = "usb_clk", "ehci_clk", "uhpck"; |
1191 | status = "disabled"; | 1190 | status = "disabled"; |
1192 | }; | 1191 | }; |
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 3290a96ba586..7563d7ce01bb 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -263,17 +263,15 @@ | |||
263 | 263 | ||
264 | dcan1_pins_default: dcan1_pins_default { | 264 | dcan1_pins_default: dcan1_pins_default { |
265 | pinctrl-single,pins = < | 265 | pinctrl-single,pins = < |
266 | 0x3d0 (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */ | 266 | 0x3d0 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */ |
267 | 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ | 267 | 0x418 (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */ |
268 | 0x418 (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */ | ||
269 | >; | 268 | >; |
270 | }; | 269 | }; |
271 | 270 | ||
272 | dcan1_pins_sleep: dcan1_pins_sleep { | 271 | dcan1_pins_sleep: dcan1_pins_sleep { |
273 | pinctrl-single,pins = < | 272 | pinctrl-single,pins = < |
274 | 0x3d0 (MUX_MODE15) /* dcan1_tx.off */ | 273 | 0x3d0 (MUX_MODE15 | PULL_UP) /* dcan1_tx.off */ |
275 | 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ | 274 | 0x418 (MUX_MODE15 | PULL_UP) /* wakeup0.off */ |
276 | 0x418 (MUX_MODE15) /* wakeup0.off */ | ||
277 | >; | 275 | >; |
278 | }; | 276 | }; |
279 | }; | 277 | }; |
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index e0264d0bf7b9..40ed539ce474 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -119,17 +119,15 @@ | |||
119 | 119 | ||
120 | dcan1_pins_default: dcan1_pins_default { | 120 | dcan1_pins_default: dcan1_pins_default { |
121 | pinctrl-single,pins = < | 121 | pinctrl-single,pins = < |
122 | 0x3d0 (PIN_OUTPUT | MUX_MODE0) /* dcan1_tx */ | 122 | 0x3d0 (PIN_OUTPUT_PULLUP | MUX_MODE0) /* dcan1_tx */ |
123 | 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ | 123 | 0x418 (PULL_UP | MUX_MODE1) /* wakeup0.dcan1_rx */ |
124 | 0x418 (PULL_DIS | MUX_MODE1) /* wakeup0.dcan1_rx */ | ||
125 | >; | 124 | >; |
126 | }; | 125 | }; |
127 | 126 | ||
128 | dcan1_pins_sleep: dcan1_pins_sleep { | 127 | dcan1_pins_sleep: dcan1_pins_sleep { |
129 | pinctrl-single,pins = < | 128 | pinctrl-single,pins = < |
130 | 0x3d0 (MUX_MODE15) /* dcan1_tx.off */ | 129 | 0x3d0 (MUX_MODE15 | PULL_UP) /* dcan1_tx.off */ |
131 | 0x3d4 (MUX_MODE15) /* dcan1_rx.off */ | 130 | 0x418 (MUX_MODE15 | PULL_UP) /* wakeup0.off */ |
132 | 0x418 (MUX_MODE15) /* wakeup0.off */ | ||
133 | >; | 131 | >; |
134 | }; | 132 | }; |
135 | 133 | ||
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index 4bdcbd61ce47..99b09a44e269 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -243,10 +243,18 @@ | |||
243 | ti,invert-autoidle-bit; | 243 | ti,invert-autoidle-bit; |
244 | }; | 244 | }; |
245 | 245 | ||
246 | dpll_core_byp_mux: dpll_core_byp_mux { | ||
247 | #clock-cells = <0>; | ||
248 | compatible = "ti,mux-clock"; | ||
249 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | ||
250 | ti,bit-shift = <23>; | ||
251 | reg = <0x012c>; | ||
252 | }; | ||
253 | |||
246 | dpll_core_ck: dpll_core_ck { | 254 | dpll_core_ck: dpll_core_ck { |
247 | #clock-cells = <0>; | 255 | #clock-cells = <0>; |
248 | compatible = "ti,omap4-dpll-core-clock"; | 256 | compatible = "ti,omap4-dpll-core-clock"; |
249 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | 257 | clocks = <&sys_clkin1>, <&dpll_core_byp_mux>; |
250 | reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; | 258 | reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; |
251 | }; | 259 | }; |
252 | 260 | ||
@@ -309,10 +317,18 @@ | |||
309 | clock-div = <1>; | 317 | clock-div = <1>; |
310 | }; | 318 | }; |
311 | 319 | ||
320 | dpll_dsp_byp_mux: dpll_dsp_byp_mux { | ||
321 | #clock-cells = <0>; | ||
322 | compatible = "ti,mux-clock"; | ||
323 | clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>; | ||
324 | ti,bit-shift = <23>; | ||
325 | reg = <0x0240>; | ||
326 | }; | ||
327 | |||
312 | dpll_dsp_ck: dpll_dsp_ck { | 328 | dpll_dsp_ck: dpll_dsp_ck { |
313 | #clock-cells = <0>; | 329 | #clock-cells = <0>; |
314 | compatible = "ti,omap4-dpll-clock"; | 330 | compatible = "ti,omap4-dpll-clock"; |
315 | clocks = <&sys_clkin1>, <&dsp_dpll_hs_clk_div>; | 331 | clocks = <&sys_clkin1>, <&dpll_dsp_byp_mux>; |
316 | reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>; | 332 | reg = <0x0234>, <0x0238>, <0x0240>, <0x023c>; |
317 | }; | 333 | }; |
318 | 334 | ||
@@ -335,10 +351,18 @@ | |||
335 | clock-div = <1>; | 351 | clock-div = <1>; |
336 | }; | 352 | }; |
337 | 353 | ||
354 | dpll_iva_byp_mux: dpll_iva_byp_mux { | ||
355 | #clock-cells = <0>; | ||
356 | compatible = "ti,mux-clock"; | ||
357 | clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>; | ||
358 | ti,bit-shift = <23>; | ||
359 | reg = <0x01ac>; | ||
360 | }; | ||
361 | |||
338 | dpll_iva_ck: dpll_iva_ck { | 362 | dpll_iva_ck: dpll_iva_ck { |
339 | #clock-cells = <0>; | 363 | #clock-cells = <0>; |
340 | compatible = "ti,omap4-dpll-clock"; | 364 | compatible = "ti,omap4-dpll-clock"; |
341 | clocks = <&sys_clkin1>, <&iva_dpll_hs_clk_div>; | 365 | clocks = <&sys_clkin1>, <&dpll_iva_byp_mux>; |
342 | reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; | 366 | reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; |
343 | }; | 367 | }; |
344 | 368 | ||
@@ -361,10 +385,18 @@ | |||
361 | clock-div = <1>; | 385 | clock-div = <1>; |
362 | }; | 386 | }; |
363 | 387 | ||
388 | dpll_gpu_byp_mux: dpll_gpu_byp_mux { | ||
389 | #clock-cells = <0>; | ||
390 | compatible = "ti,mux-clock"; | ||
391 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | ||
392 | ti,bit-shift = <23>; | ||
393 | reg = <0x02e4>; | ||
394 | }; | ||
395 | |||
364 | dpll_gpu_ck: dpll_gpu_ck { | 396 | dpll_gpu_ck: dpll_gpu_ck { |
365 | #clock-cells = <0>; | 397 | #clock-cells = <0>; |
366 | compatible = "ti,omap4-dpll-clock"; | 398 | compatible = "ti,omap4-dpll-clock"; |
367 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | 399 | clocks = <&sys_clkin1>, <&dpll_gpu_byp_mux>; |
368 | reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>; | 400 | reg = <0x02d8>, <0x02dc>, <0x02e4>, <0x02e0>; |
369 | }; | 401 | }; |
370 | 402 | ||
@@ -398,10 +430,18 @@ | |||
398 | clock-div = <1>; | 430 | clock-div = <1>; |
399 | }; | 431 | }; |
400 | 432 | ||
433 | dpll_ddr_byp_mux: dpll_ddr_byp_mux { | ||
434 | #clock-cells = <0>; | ||
435 | compatible = "ti,mux-clock"; | ||
436 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | ||
437 | ti,bit-shift = <23>; | ||
438 | reg = <0x021c>; | ||
439 | }; | ||
440 | |||
401 | dpll_ddr_ck: dpll_ddr_ck { | 441 | dpll_ddr_ck: dpll_ddr_ck { |
402 | #clock-cells = <0>; | 442 | #clock-cells = <0>; |
403 | compatible = "ti,omap4-dpll-clock"; | 443 | compatible = "ti,omap4-dpll-clock"; |
404 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | 444 | clocks = <&sys_clkin1>, <&dpll_ddr_byp_mux>; |
405 | reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>; | 445 | reg = <0x0210>, <0x0214>, <0x021c>, <0x0218>; |
406 | }; | 446 | }; |
407 | 447 | ||
@@ -416,10 +456,18 @@ | |||
416 | ti,invert-autoidle-bit; | 456 | ti,invert-autoidle-bit; |
417 | }; | 457 | }; |
418 | 458 | ||
459 | dpll_gmac_byp_mux: dpll_gmac_byp_mux { | ||
460 | #clock-cells = <0>; | ||
461 | compatible = "ti,mux-clock"; | ||
462 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | ||
463 | ti,bit-shift = <23>; | ||
464 | reg = <0x02b4>; | ||
465 | }; | ||
466 | |||
419 | dpll_gmac_ck: dpll_gmac_ck { | 467 | dpll_gmac_ck: dpll_gmac_ck { |
420 | #clock-cells = <0>; | 468 | #clock-cells = <0>; |
421 | compatible = "ti,omap4-dpll-clock"; | 469 | compatible = "ti,omap4-dpll-clock"; |
422 | clocks = <&sys_clkin1>, <&dpll_abe_m3x2_ck>; | 470 | clocks = <&sys_clkin1>, <&dpll_gmac_byp_mux>; |
423 | reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>; | 471 | reg = <0x02a8>, <0x02ac>, <0x02b4>, <0x02b0>; |
424 | }; | 472 | }; |
425 | 473 | ||
@@ -482,10 +530,18 @@ | |||
482 | clock-div = <1>; | 530 | clock-div = <1>; |
483 | }; | 531 | }; |
484 | 532 | ||
533 | dpll_eve_byp_mux: dpll_eve_byp_mux { | ||
534 | #clock-cells = <0>; | ||
535 | compatible = "ti,mux-clock"; | ||
536 | clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>; | ||
537 | ti,bit-shift = <23>; | ||
538 | reg = <0x0290>; | ||
539 | }; | ||
540 | |||
485 | dpll_eve_ck: dpll_eve_ck { | 541 | dpll_eve_ck: dpll_eve_ck { |
486 | #clock-cells = <0>; | 542 | #clock-cells = <0>; |
487 | compatible = "ti,omap4-dpll-clock"; | 543 | compatible = "ti,omap4-dpll-clock"; |
488 | clocks = <&sys_clkin1>, <&eve_dpll_hs_clk_div>; | 544 | clocks = <&sys_clkin1>, <&dpll_eve_byp_mux>; |
489 | reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>; | 545 | reg = <0x0284>, <0x0288>, <0x0290>, <0x028c>; |
490 | }; | 546 | }; |
491 | 547 | ||
@@ -1249,10 +1305,18 @@ | |||
1249 | clock-div = <1>; | 1305 | clock-div = <1>; |
1250 | }; | 1306 | }; |
1251 | 1307 | ||
1308 | dpll_per_byp_mux: dpll_per_byp_mux { | ||
1309 | #clock-cells = <0>; | ||
1310 | compatible = "ti,mux-clock"; | ||
1311 | clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>; | ||
1312 | ti,bit-shift = <23>; | ||
1313 | reg = <0x014c>; | ||
1314 | }; | ||
1315 | |||
1252 | dpll_per_ck: dpll_per_ck { | 1316 | dpll_per_ck: dpll_per_ck { |
1253 | #clock-cells = <0>; | 1317 | #clock-cells = <0>; |
1254 | compatible = "ti,omap4-dpll-clock"; | 1318 | compatible = "ti,omap4-dpll-clock"; |
1255 | clocks = <&sys_clkin1>, <&per_dpll_hs_clk_div>; | 1319 | clocks = <&sys_clkin1>, <&dpll_per_byp_mux>; |
1256 | reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; | 1320 | reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; |
1257 | }; | 1321 | }; |
1258 | 1322 | ||
@@ -1275,10 +1339,18 @@ | |||
1275 | clock-div = <1>; | 1339 | clock-div = <1>; |
1276 | }; | 1340 | }; |
1277 | 1341 | ||
1342 | dpll_usb_byp_mux: dpll_usb_byp_mux { | ||
1343 | #clock-cells = <0>; | ||
1344 | compatible = "ti,mux-clock"; | ||
1345 | clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>; | ||
1346 | ti,bit-shift = <23>; | ||
1347 | reg = <0x018c>; | ||
1348 | }; | ||
1349 | |||
1278 | dpll_usb_ck: dpll_usb_ck { | 1350 | dpll_usb_ck: dpll_usb_ck { |
1279 | #clock-cells = <0>; | 1351 | #clock-cells = <0>; |
1280 | compatible = "ti,omap4-dpll-j-type-clock"; | 1352 | compatible = "ti,omap4-dpll-j-type-clock"; |
1281 | clocks = <&sys_clkin1>, <&usb_dpll_hs_clk_div>; | 1353 | clocks = <&sys_clkin1>, <&dpll_usb_byp_mux>; |
1282 | reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; | 1354 | reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; |
1283 | }; | 1355 | }; |
1284 | 1356 | ||
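
Note: all of the dra7xx-clocks.dtsi hunks above apply one pattern: instead of wiring the DPLL's second (bypass) parent directly, a ti,mux-clock selected by bit 23 of the DPLL control register is inserted, and the DPLL then points at that mux. Schematically, with placeholder names and register offsets standing in for any one of the DPLLs touched here:

    dpll_foo_byp_mux: dpll_foo_byp_mux {
            #clock-cells = <0>;
            compatible = "ti,mux-clock";
            clocks = <&sys_clkin1>, <&alternate_bypass_clk>;
            ti,bit-shift = <23>;
            reg = <0x0aaa>;
    };

    dpll_foo_ck: dpll_foo_ck {
            #clock-cells = <0>;
            compatible = "ti,omap4-dpll-clock";
            clocks = <&sys_clkin1>, <&dpll_foo_byp_mux>;
            reg = <0x0bbb>, <0x0ccc>, <0x0aaa>, <0x0ddd>;
    };
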
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 277b48b0b6f9..ac6b0ae42caf 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include "skeleton.dtsi" | 20 | #include "skeleton.dtsi" |
21 | #include "exynos4-cpu-thermal.dtsi" | ||
21 | #include <dt-bindings/clock/exynos3250.h> | 22 | #include <dt-bindings/clock/exynos3250.h> |
22 | 23 | ||
23 | / { | 24 | / { |
@@ -193,6 +194,7 @@ | |||
193 | interrupts = <0 216 0>; | 194 | interrupts = <0 216 0>; |
194 | clocks = <&cmu CLK_TMU_APBIF>; | 195 | clocks = <&cmu CLK_TMU_APBIF>; |
195 | clock-names = "tmu_apbif"; | 196 | clock-names = "tmu_apbif"; |
197 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
196 | status = "disabled"; | 198 | status = "disabled"; |
197 | }; | 199 | }; |
198 | 200 | ||
diff --git a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
new file mode 100644
index 000000000000..735cb2f10817
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * Device tree sources for Exynos4 thermal zone | ||
3 | * | ||
4 | * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <dt-bindings/thermal/thermal.h> | ||
13 | |||
14 | / { | ||
15 | thermal-zones { | ||
16 | cpu_thermal: cpu-thermal { | ||
17 | thermal-sensors = <&tmu 0>; | ||
18 | polling-delay-passive = <0>; | ||
19 | polling-delay = <0>; | ||
20 | trips { | ||
21 | cpu_alert0: cpu-alert-0 { | ||
22 | temperature = <70000>; /* millicelsius */ | ||
23 | hysteresis = <10000>; /* millicelsius */ | ||
24 | type = "active"; | ||
25 | }; | ||
26 | cpu_alert1: cpu-alert-1 { | ||
27 | temperature = <95000>; /* millicelsius */ | ||
28 | hysteresis = <10000>; /* millicelsius */ | ||
29 | type = "active"; | ||
30 | }; | ||
31 | cpu_alert2: cpu-alert-2 { | ||
32 | temperature = <110000>; /* millicelsius */ | ||
33 | hysteresis = <10000>; /* millicelsius */ | ||
34 | type = "active"; | ||
35 | }; | ||
36 | cpu_crit0: cpu-crit-0 { | ||
37 | temperature = <120000>; /* millicelsius */ | ||
38 | hysteresis = <0>; /* millicelsius */ | ||
39 | type = "critical"; | ||
40 | }; | ||
41 | }; | ||
42 | cooling-maps { | ||
43 | map0 { | ||
44 | trip = <&cpu_alert0>; | ||
45 | }; | ||
46 | map1 { | ||
47 | trip = <&cpu_alert1>; | ||
48 | }; | ||
49 | }; | ||
50 | }; | ||
51 | }; | ||
52 | }; | ||
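
Note: map0 and map1 above carry only the trip points; the cooling-device cells are filled in per board, as the trats, odroid and trats2 hunks below do. A board-side sketch, where the cooling states are examples tied to a particular board's freq_table:

    thermal-zones {
            cpu_thermal: cpu-thermal {
                    cooling-maps {
                            map0 {
                                    /* e.g. limit to the 800MHz OPP */
                                    cooling-device = <&cpu0 2 2>;
                            };
                    };
            };
    };
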
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index 76173cacd450..77ea547768f4 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -38,6 +38,7 @@ | |||
38 | i2c5 = &i2c_5; | 38 | i2c5 = &i2c_5; |
39 | i2c6 = &i2c_6; | 39 | i2c6 = &i2c_6; |
40 | i2c7 = &i2c_7; | 40 | i2c7 = &i2c_7; |
41 | i2c8 = &i2c_8; | ||
41 | csis0 = &csis_0; | 42 | csis0 = &csis_0; |
42 | csis1 = &csis_1; | 43 | csis1 = &csis_1; |
43 | fimc0 = &fimc_0; | 44 | fimc0 = &fimc_0; |
@@ -104,6 +105,7 @@ | |||
104 | compatible = "samsung,exynos4210-pd"; | 105 | compatible = "samsung,exynos4210-pd"; |
105 | reg = <0x10023C20 0x20>; | 106 | reg = <0x10023C20 0x20>; |
106 | #power-domain-cells = <0>; | 107 | #power-domain-cells = <0>; |
108 | power-domains = <&pd_lcd0>; | ||
107 | }; | 109 | }; |
108 | 110 | ||
109 | pd_cam: cam-power-domain@10023C00 { | 111 | pd_cam: cam-power-domain@10023C00 { |
@@ -554,6 +556,22 @@ | |||
554 | status = "disabled"; | 556 | status = "disabled"; |
555 | }; | 557 | }; |
556 | 558 | ||
559 | i2c_8: i2c@138E0000 { | ||
560 | #address-cells = <1>; | ||
561 | #size-cells = <0>; | ||
562 | compatible = "samsung,s3c2440-hdmiphy-i2c"; | ||
563 | reg = <0x138E0000 0x100>; | ||
564 | interrupts = <0 93 0>; | ||
565 | clocks = <&clock CLK_I2C_HDMI>; | ||
566 | clock-names = "i2c"; | ||
567 | status = "disabled"; | ||
568 | |||
569 | hdmi_i2c_phy: hdmiphy@38 { | ||
570 | compatible = "exynos4210-hdmiphy"; | ||
571 | reg = <0x38>; | ||
572 | }; | ||
573 | }; | ||
574 | |||
557 | spi_0: spi@13920000 { | 575 | spi_0: spi@13920000 { |
558 | compatible = "samsung,exynos4210-spi"; | 576 | compatible = "samsung,exynos4210-spi"; |
559 | reg = <0x13920000 0x100>; | 577 | reg = <0x13920000 0x100>; |
@@ -663,6 +681,33 @@ | |||
663 | status = "disabled"; | 681 | status = "disabled"; |
664 | }; | 682 | }; |
665 | 683 | ||
684 | tmu: tmu@100C0000 { | ||
685 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
686 | }; | ||
687 | |||
688 | hdmi: hdmi@12D00000 { | ||
689 | compatible = "samsung,exynos4210-hdmi"; | ||
690 | reg = <0x12D00000 0x70000>; | ||
691 | interrupts = <0 92 0>; | ||
692 | clock-names = "hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy", | ||
693 | "mout_hdmi"; | ||
694 | clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>, | ||
695 | <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>, | ||
696 | <&clock CLK_MOUT_HDMI>; | ||
697 | phy = <&hdmi_i2c_phy>; | ||
698 | power-domains = <&pd_tv>; | ||
699 | samsung,syscon-phandle = <&pmu_system_controller>; | ||
700 | status = "disabled"; | ||
701 | }; | ||
702 | |||
703 | mixer: mixer@12C10000 { | ||
704 | compatible = "samsung,exynos4210-mixer"; | ||
705 | interrupts = <0 91 0>; | ||
706 | reg = <0x12C10000 0x2100>, <0x12c00000 0x300>; | ||
707 | power-domains = <&pd_tv>; | ||
708 | status = "disabled"; | ||
709 | }; | ||
710 | |||
666 | ppmu_dmc0: ppmu_dmc0@106a0000 { | 711 | ppmu_dmc0: ppmu_dmc0@106a0000 { |
667 | compatible = "samsung,exynos-ppmu"; | 712 | compatible = "samsung,exynos-ppmu"; |
668 | reg = <0x106a0000 0x2000>; | 713 | reg = <0x106a0000 0x2000>; |
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 3d6652a4b6cb..32c5fd8f6269 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -426,6 +426,25 @@ | |||
426 | status = "okay"; | 426 | status = "okay"; |
427 | }; | 427 | }; |
428 | 428 | ||
429 | tmu@100C0000 { | ||
430 | status = "okay"; | ||
431 | }; | ||
432 | |||
433 | thermal-zones { | ||
434 | cpu_thermal: cpu-thermal { | ||
435 | cooling-maps { | ||
436 | map0 { | ||
437 | /* Corresponds to 800MHz at freq_table */ | ||
438 | cooling-device = <&cpu0 2 2>; | ||
439 | }; | ||
440 | map1 { | ||
441 | /* Corresponds to 200MHz at freq_table */ | ||
442 | cooling-device = <&cpu0 4 4>; | ||
443 | }; | ||
444 | }; | ||
445 | }; | ||
446 | }; | ||
447 | |||
429 | camera { | 448 | camera { |
430 | pinctrl-names = "default"; | 449 | pinctrl-names = "default"; |
431 | pinctrl-0 = <>; | 450 | pinctrl-0 = <>; |
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index b57e6b82ea20..d4f2b11319dd 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -505,6 +505,63 @@ | |||
505 | assigned-clock-rates = <0>, <160000000>; | 505 | assigned-clock-rates = <0>, <160000000>; |
506 | }; | 506 | }; |
507 | }; | 507 | }; |
508 | |||
509 | hdmi_en: voltage-regulator-hdmi-5v { | ||
510 | compatible = "regulator-fixed"; | ||
511 | regulator-name = "HDMI_5V"; | ||
512 | regulator-min-microvolt = <5000000>; | ||
513 | regulator-max-microvolt = <5000000>; | ||
514 | gpio = <&gpe0 1 0>; | ||
515 | enable-active-high; | ||
516 | }; | ||
517 | |||
518 | hdmi_ddc: i2c-ddc { | ||
519 | compatible = "i2c-gpio"; | ||
520 | gpios = <&gpe4 2 0 &gpe4 3 0>; | ||
521 | i2c-gpio,delay-us = <100>; | ||
522 | #address-cells = <1>; | ||
523 | #size-cells = <0>; | ||
524 | |||
525 | pinctrl-0 = <&i2c_ddc_bus>; | ||
526 | pinctrl-names = "default"; | ||
527 | status = "okay"; | ||
528 | }; | ||
529 | |||
530 | mixer@12C10000 { | ||
531 | status = "okay"; | ||
532 | }; | ||
533 | |||
534 | hdmi@12D00000 { | ||
535 | hpd-gpio = <&gpx3 7 0>; | ||
536 | pinctrl-names = "default"; | ||
537 | pinctrl-0 = <&hdmi_hpd>; | ||
538 | hdmi-en-supply = <&hdmi_en>; | ||
539 | vdd-supply = <&ldo3_reg>; | ||
540 | vdd_osc-supply = <&ldo4_reg>; | ||
541 | vdd_pll-supply = <&ldo3_reg>; | ||
542 | ddc = <&hdmi_ddc>; | ||
543 | status = "okay"; | ||
544 | }; | ||
545 | |||
546 | i2c@138E0000 { | ||
547 | status = "okay"; | ||
548 | }; | ||
549 | }; | ||
550 | |||
551 | &pinctrl_1 { | ||
552 | hdmi_hpd: hdmi-hpd { | ||
553 | samsung,pins = "gpx3-7"; | ||
554 | samsung,pin-pud = <0>; | ||
555 | }; | ||
556 | }; | ||
557 | |||
558 | &pinctrl_0 { | ||
559 | i2c_ddc_bus: i2c-ddc-bus { | ||
560 | samsung,pins = "gpe4-2", "gpe4-3"; | ||
561 | samsung,pin-function = <2>; | ||
562 | samsung,pin-pud = <3>; | ||
563 | samsung,pin-drv = <0>; | ||
564 | }; | ||
508 | }; | 565 | }; |
509 | 566 | ||
510 | &mdma1 { | 567 | &mdma1 { |
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index 67c832c9dcf1..be89f83f70e7 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include "exynos4.dtsi" | 22 | #include "exynos4.dtsi" |
23 | #include "exynos4210-pinctrl.dtsi" | 23 | #include "exynos4210-pinctrl.dtsi" |
24 | #include "exynos4-cpu-thermal.dtsi" | ||
24 | 25 | ||
25 | / { | 26 | / { |
26 | compatible = "samsung,exynos4210", "samsung,exynos4"; | 27 | compatible = "samsung,exynos4210", "samsung,exynos4"; |
@@ -35,10 +36,13 @@ | |||
35 | #address-cells = <1>; | 36 | #address-cells = <1>; |
36 | #size-cells = <0>; | 37 | #size-cells = <0>; |
37 | 38 | ||
38 | cpu@900 { | 39 | cpu0: cpu@900 { |
39 | device_type = "cpu"; | 40 | device_type = "cpu"; |
40 | compatible = "arm,cortex-a9"; | 41 | compatible = "arm,cortex-a9"; |
41 | reg = <0x900>; | 42 | reg = <0x900>; |
43 | cooling-min-level = <4>; | ||
44 | cooling-max-level = <2>; | ||
45 | #cooling-cells = <2>; /* min followed by max */ | ||
42 | }; | 46 | }; |
43 | 47 | ||
44 | cpu@901 { | 48 | cpu@901 { |
@@ -153,16 +157,38 @@ | |||
153 | reg = <0x03860000 0x1000>; | 157 | reg = <0x03860000 0x1000>; |
154 | }; | 158 | }; |
155 | 159 | ||
156 | tmu@100C0000 { | 160 | tmu: tmu@100C0000 { |
157 | compatible = "samsung,exynos4210-tmu"; | 161 | compatible = "samsung,exynos4210-tmu"; |
158 | interrupt-parent = <&combiner>; | 162 | interrupt-parent = <&combiner>; |
159 | reg = <0x100C0000 0x100>; | 163 | reg = <0x100C0000 0x100>; |
160 | interrupts = <2 4>; | 164 | interrupts = <2 4>; |
161 | clocks = <&clock CLK_TMU_APBIF>; | 165 | clocks = <&clock CLK_TMU_APBIF>; |
162 | clock-names = "tmu_apbif"; | 166 | clock-names = "tmu_apbif"; |
167 | samsung,tmu_gain = <15>; | ||
168 | samsung,tmu_reference_voltage = <7>; | ||
163 | status = "disabled"; | 169 | status = "disabled"; |
164 | }; | 170 | }; |
165 | 171 | ||
172 | thermal-zones { | ||
173 | cpu_thermal: cpu-thermal { | ||
174 | polling-delay-passive = <0>; | ||
175 | polling-delay = <0>; | ||
176 | thermal-sensors = <&tmu 0>; | ||
177 | |||
178 | trips { | ||
179 | cpu_alert0: cpu-alert-0 { | ||
180 | temperature = <85000>; /* millicelsius */ | ||
181 | }; | ||
182 | cpu_alert1: cpu-alert-1 { | ||
183 | temperature = <100000>; /* millicelsius */ | ||
184 | }; | ||
185 | cpu_alert2: cpu-alert-2 { | ||
186 | temperature = <110000>; /* millicelsius */ | ||
187 | }; | ||
188 | }; | ||
189 | }; | ||
190 | }; | ||
191 | |||
166 | g2d@12800000 { | 192 | g2d@12800000 { |
167 | compatible = "samsung,s5pv210-g2d"; | 193 | compatible = "samsung,s5pv210-g2d"; |
168 | reg = <0x12800000 0x1000>; | 194 | reg = <0x12800000 0x1000>; |
@@ -203,6 +229,14 @@ | |||
203 | }; | 229 | }; |
204 | }; | 230 | }; |
205 | 231 | ||
232 | mixer: mixer@12C10000 { | ||
233 | clock-names = "mixer", "hdmi", "sclk_hdmi", "vp", "mout_mixer", | ||
234 | "sclk_mixer"; | ||
235 | clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, | ||
236 | <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>, | ||
237 | <&clock CLK_MOUT_MIXER>, <&clock CLK_SCLK_MIXER>; | ||
238 | }; | ||
239 | |||
206 | ppmu_lcd1: ppmu_lcd1@12240000 { | 240 | ppmu_lcd1: ppmu_lcd1@12240000 { |
207 | compatible = "samsung,exynos-ppmu"; | 241 | compatible = "samsung,exynos-ppmu"; |
208 | reg = <0x12240000 0x2000>; | 242 | reg = <0x12240000 0x2000>; |
diff --git a/arch/arm/boot/dts/exynos4212.dtsi b/arch/arm/boot/dts/exynos4212.dtsi
index dd0a43ec56da..5be03288f1ee 100644
--- a/arch/arm/boot/dts/exynos4212.dtsi
+++ b/arch/arm/boot/dts/exynos4212.dtsi
@@ -26,10 +26,13 @@ | |||
26 | #address-cells = <1>; | 26 | #address-cells = <1>; |
27 | #size-cells = <0>; | 27 | #size-cells = <0>; |
28 | 28 | ||
29 | cpu@A00 { | 29 | cpu0: cpu@A00 { |
30 | device_type = "cpu"; | 30 | device_type = "cpu"; |
31 | compatible = "arm,cortex-a9"; | 31 | compatible = "arm,cortex-a9"; |
32 | reg = <0xA00>; | 32 | reg = <0xA00>; |
33 | cooling-min-level = <13>; | ||
34 | cooling-max-level = <7>; | ||
35 | #cooling-cells = <2>; /* min followed by max */ | ||
33 | }; | 36 | }; |
34 | 37 | ||
35 | cpu@A01 { | 38 | cpu@A01 { |
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index de80b5bba204..adb4f6a97a1d 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -249,6 +249,20 @@ | |||
249 | regulator-always-on; | 249 | regulator-always-on; |
250 | }; | 250 | }; |
251 | 251 | ||
252 | ldo8_reg: ldo@8 { | ||
253 | regulator-compatible = "LDO8"; | ||
254 | regulator-name = "VDD10_HDMI_1.0V"; | ||
255 | regulator-min-microvolt = <1000000>; | ||
256 | regulator-max-microvolt = <1000000>; | ||
257 | }; | ||
258 | |||
259 | ldo10_reg: ldo@10 { | ||
260 | regulator-compatible = "LDO10"; | ||
261 | regulator-name = "VDDQ_MIPIHSI_1.8V"; | ||
262 | regulator-min-microvolt = <1800000>; | ||
263 | regulator-max-microvolt = <1800000>; | ||
264 | }; | ||
265 | |||
252 | ldo11_reg: LDO11 { | 266 | ldo11_reg: LDO11 { |
253 | regulator-name = "VDD18_ABB1_1.8V"; | 267 | regulator-name = "VDD18_ABB1_1.8V"; |
254 | regulator-min-microvolt = <1800000>; | 268 | regulator-min-microvolt = <1800000>; |
@@ -411,6 +425,51 @@ | |||
411 | ehci: ehci@12580000 { | 425 | ehci: ehci@12580000 { |
412 | status = "okay"; | 426 | status = "okay"; |
413 | }; | 427 | }; |
428 | |||
429 | tmu@100C0000 { | ||
430 | vtmu-supply = <&ldo10_reg>; | ||
431 | status = "okay"; | ||
432 | }; | ||
433 | |||
434 | thermal-zones { | ||
435 | cpu_thermal: cpu-thermal { | ||
436 | cooling-maps { | ||
437 | map0 { | ||
438 | /* Corresponds to 800MHz at freq_table */ | ||
439 | cooling-device = <&cpu0 7 7>; | ||
440 | }; | ||
441 | map1 { | ||
442 | /* Corresponds to 200MHz at freq_table */ | ||
443 | cooling-device = <&cpu0 13 13>; | ||
444 | }; | ||
445 | }; | ||
446 | }; | ||
447 | }; | ||
448 | |||
449 | mixer: mixer@12C10000 { | ||
450 | status = "okay"; | ||
451 | }; | ||
452 | |||
453 | hdmi@12D00000 { | ||
454 | hpd-gpio = <&gpx3 7 0>; | ||
455 | pinctrl-names = "default"; | ||
456 | pinctrl-0 = <&hdmi_hpd>; | ||
457 | vdd-supply = <&ldo8_reg>; | ||
458 | vdd_osc-supply = <&ldo10_reg>; | ||
459 | vdd_pll-supply = <&ldo8_reg>; | ||
460 | ddc = <&hdmi_ddc>; | ||
461 | status = "okay"; | ||
462 | }; | ||
463 | |||
464 | hdmi_ddc: i2c@13880000 { | ||
465 | status = "okay"; | ||
466 | pinctrl-names = "default"; | ||
467 | pinctrl-0 = <&i2c2_bus>; | ||
468 | }; | ||
469 | |||
470 | i2c@138E0000 { | ||
471 | status = "okay"; | ||
472 | }; | ||
414 | }; | 473 | }; |
415 | 474 | ||
416 | &pinctrl_1 { | 475 | &pinctrl_1 { |
@@ -425,4 +484,9 @@ | |||
425 | samsung,pin-pud = <0>; | 484 | samsung,pin-pud = <0>; |
426 | samsung,pin-drv = <0>; | 485 | samsung,pin-drv = <0>; |
427 | }; | 486 | }; |
487 | |||
488 | hdmi_hpd: hdmi-hpd { | ||
489 | samsung,pins = "gpx3-7"; | ||
490 | samsung,pin-pud = <1>; | ||
491 | }; | ||
428 | }; | 492 | }; |
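Editor's sketch (not part of the commit): the Odroid hunks above only wire existing pieces together. cpu@A00 gains #cooling-cells in exynos4412.dtsi, the TMU at 100C0000 is enabled and given ldo10 as vtmu-supply, and the board-level cooling-maps bind the zone's trips to cpufreq states 7 and 13 of cpu0. Below is a minimal illustration of how the fragments compose once the includes are expanded; the node layout and trip label are illustrative only, while the 85000 trip value and the 7..7 state range are taken from hunks earlier in this diff.

	cpu0: cpu@A00 {
		#cooling-cells = <2>;		/* min followed by max */
	};

	thermal-zones {
		cpu_thermal: cpu-thermal {
			thermal-sensors = <&tmu 0>;
			trips {
				cpu_alert0: cpu-alert-0 {
					temperature = <85000>;	/* millicelsius */
				};
			};
			cooling-maps {
				map0 {
					/* a map names the trip it serves and a
					 * cooling state range; 7..7 corresponds
					 * to the 800MHz cpufreq step here */
					trip = <&cpu_alert0>;
					cooling-device = <&cpu0 7 7>;
				};
			};
		};
	};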
diff --git a/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi new file mode 100644 index 000000000000..e3f7934d19d0 --- /dev/null +++ b/arch/arm/boot/dts/exynos4412-tmu-sensor-conf.dtsi | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Device tree sources for Exynos4412 TMU sensor configuration | ||
3 | * | ||
4 | * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <dt-bindings/thermal/thermal_exynos.h> | ||
13 | |||
14 | #thermal-sensor-cells = <0>; | ||
15 | samsung,tmu_gain = <8>; | ||
16 | samsung,tmu_reference_voltage = <16>; | ||
17 | samsung,tmu_noise_cancel_mode = <4>; | ||
18 | samsung,tmu_efuse_value = <55>; | ||
19 | samsung,tmu_min_efuse_value = <40>; | ||
20 | samsung,tmu_max_efuse_value = <100>; | ||
21 | samsung,tmu_first_point_trim = <25>; | ||
22 | samsung,tmu_second_point_trim = <85>; | ||
23 | samsung,tmu_default_temp_offset = <50>; | ||
24 | samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>; | ||
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts index 21f748083586..173ffa479ad3 100644 --- a/arch/arm/boot/dts/exynos4412-trats2.dts +++ b/arch/arm/boot/dts/exynos4412-trats2.dts | |||
@@ -927,6 +927,21 @@ | |||
927 | pulldown-ohm = <100000>; /* 100K */ | 927 | pulldown-ohm = <100000>; /* 100K */ |
928 | io-channels = <&adc 2>; /* Battery temperature */ | 928 | io-channels = <&adc 2>; /* Battery temperature */ |
929 | }; | 929 | }; |
930 | |||
931 | thermal-zones { | ||
932 | cpu_thermal: cpu-thermal { | ||
933 | cooling-maps { | ||
934 | map0 { | ||
935 | /* Corresponds to 800MHz at freq_table */ | ||
936 | cooling-device = <&cpu0 7 7>; | ||
937 | }; | ||
938 | map1 { | ||
939 | /* Corresponds to 200MHz at freq_table */ | ||
940 | cooling-device = <&cpu0 13 13>; | ||
941 | }; | ||
942 | }; | ||
943 | }; | ||
944 | }; | ||
930 | }; | 945 | }; |
931 | 946 | ||
932 | &pmu_system_controller { | 947 | &pmu_system_controller { |
diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi index 0f6ec93bb1d8..68ad43b391ae 100644 --- a/arch/arm/boot/dts/exynos4412.dtsi +++ b/arch/arm/boot/dts/exynos4412.dtsi | |||
@@ -26,10 +26,13 @@ | |||
26 | #address-cells = <1>; | 26 | #address-cells = <1>; |
27 | #size-cells = <0>; | 27 | #size-cells = <0>; |
28 | 28 | ||
29 | cpu@A00 { | 29 | cpu0: cpu@A00 { |
30 | device_type = "cpu"; | 30 | device_type = "cpu"; |
31 | compatible = "arm,cortex-a9"; | 31 | compatible = "arm,cortex-a9"; |
32 | reg = <0xA00>; | 32 | reg = <0xA00>; |
33 | cooling-min-level = <13>; | ||
34 | cooling-max-level = <7>; | ||
35 | #cooling-cells = <2>; /* min followed by max */ | ||
33 | }; | 36 | }; |
34 | 37 | ||
35 | cpu@A01 { | 38 | cpu@A01 { |
diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi index f5e0ae780d6c..6a6abe14fd9b 100644 --- a/arch/arm/boot/dts/exynos4x12.dtsi +++ b/arch/arm/boot/dts/exynos4x12.dtsi | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include "exynos4.dtsi" | 20 | #include "exynos4.dtsi" |
21 | #include "exynos4x12-pinctrl.dtsi" | 21 | #include "exynos4x12-pinctrl.dtsi" |
22 | #include "exynos4-cpu-thermal.dtsi" | ||
22 | 23 | ||
23 | / { | 24 | / { |
24 | aliases { | 25 | aliases { |
@@ -297,4 +298,15 @@ | |||
297 | clock-names = "tmu_apbif"; | 298 | clock-names = "tmu_apbif"; |
298 | status = "disabled"; | 299 | status = "disabled"; |
299 | }; | 300 | }; |
301 | |||
302 | hdmi: hdmi@12D00000 { | ||
303 | compatible = "samsung,exynos4212-hdmi"; | ||
304 | }; | ||
305 | |||
306 | mixer: mixer@12C10000 { | ||
307 | compatible = "samsung,exynos4212-mixer"; | ||
308 | clock-names = "mixer", "hdmi", "sclk_hdmi", "vp"; | ||
309 | clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, | ||
310 | <&clock CLK_SCLK_HDMI>, <&clock CLK_VP>; | ||
311 | }; | ||
300 | }; | 312 | }; |
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index 9bb1b0b738f5..adbde1adad95 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <dt-bindings/clock/exynos5250.h> | 20 | #include <dt-bindings/clock/exynos5250.h> |
21 | #include "exynos5.dtsi" | 21 | #include "exynos5.dtsi" |
22 | #include "exynos5250-pinctrl.dtsi" | 22 | #include "exynos5250-pinctrl.dtsi" |
23 | 23 | #include "exynos4-cpu-thermal.dtsi" | |
24 | #include <dt-bindings/clock/exynos-audss-clk.h> | 24 | #include <dt-bindings/clock/exynos-audss-clk.h> |
25 | 25 | ||
26 | / { | 26 | / { |
@@ -58,11 +58,14 @@ | |||
58 | #address-cells = <1>; | 58 | #address-cells = <1>; |
59 | #size-cells = <0>; | 59 | #size-cells = <0>; |
60 | 60 | ||
61 | cpu@0 { | 61 | cpu0: cpu@0 { |
62 | device_type = "cpu"; | 62 | device_type = "cpu"; |
63 | compatible = "arm,cortex-a15"; | 63 | compatible = "arm,cortex-a15"; |
64 | reg = <0>; | 64 | reg = <0>; |
65 | clock-frequency = <1700000000>; | 65 | clock-frequency = <1700000000>; |
66 | cooling-min-level = <15>; | ||
67 | cooling-max-level = <9>; | ||
68 | #cooling-cells = <2>; /* min followed by max */ | ||
66 | }; | 69 | }; |
67 | cpu@1 { | 70 | cpu@1 { |
68 | device_type = "cpu"; | 71 | device_type = "cpu"; |
@@ -102,6 +105,12 @@ | |||
102 | #power-domain-cells = <0>; | 105 | #power-domain-cells = <0>; |
103 | }; | 106 | }; |
104 | 107 | ||
108 | pd_disp1: disp1-power-domain@100440A0 { | ||
109 | compatible = "samsung,exynos4210-pd"; | ||
110 | reg = <0x100440A0 0x20>; | ||
111 | #power-domain-cells = <0>; | ||
112 | }; | ||
113 | |||
105 | clock: clock-controller@10010000 { | 114 | clock: clock-controller@10010000 { |
106 | compatible = "samsung,exynos5250-clock"; | 115 | compatible = "samsung,exynos5250-clock"; |
107 | reg = <0x10010000 0x30000>; | 116 | reg = <0x10010000 0x30000>; |
@@ -235,12 +244,32 @@ | |||
235 | status = "disabled"; | 244 | status = "disabled"; |
236 | }; | 245 | }; |
237 | 246 | ||
238 | tmu@10060000 { | 247 | tmu: tmu@10060000 { |
239 | compatible = "samsung,exynos5250-tmu"; | 248 | compatible = "samsung,exynos5250-tmu"; |
240 | reg = <0x10060000 0x100>; | 249 | reg = <0x10060000 0x100>; |
241 | interrupts = <0 65 0>; | 250 | interrupts = <0 65 0>; |
242 | clocks = <&clock CLK_TMU>; | 251 | clocks = <&clock CLK_TMU>; |
243 | clock-names = "tmu_apbif"; | 252 | clock-names = "tmu_apbif"; |
253 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
254 | }; | ||
255 | |||
256 | thermal-zones { | ||
257 | cpu_thermal: cpu-thermal { | ||
258 | polling-delay-passive = <0>; | ||
259 | polling-delay = <0>; | ||
260 | thermal-sensors = <&tmu 0>; | ||
261 | |||
262 | cooling-maps { | ||
263 | map0 { | ||
264 | /* Corresponds to 800MHz at freq_table */ | ||
265 | cooling-device = <&cpu0 9 9>; | ||
266 | }; | ||
267 | map1 { | ||
268 | /* Corresponds to 200MHz at freq_table */ | ||
269 | cooling-device = <&cpu0 15 15>; | ||
270 | }; | ||
271 | }; | ||
272 | }; | ||
244 | }; | 273 | }; |
245 | 274 | ||
246 | serial@12C00000 { | 275 | serial@12C00000 { |
@@ -719,6 +748,7 @@ | |||
719 | hdmi: hdmi { | 748 | hdmi: hdmi { |
720 | compatible = "samsung,exynos4212-hdmi"; | 749 | compatible = "samsung,exynos4212-hdmi"; |
721 | reg = <0x14530000 0x70000>; | 750 | reg = <0x14530000 0x70000>; |
751 | power-domains = <&pd_disp1>; | ||
722 | interrupts = <0 95 0>; | 752 | interrupts = <0 95 0>; |
723 | clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>, | 753 | clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>, |
724 | <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>, | 754 | <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>, |
@@ -731,9 +761,11 @@ | |||
731 | mixer { | 761 | mixer { |
732 | compatible = "samsung,exynos5250-mixer"; | 762 | compatible = "samsung,exynos5250-mixer"; |
733 | reg = <0x14450000 0x10000>; | 763 | reg = <0x14450000 0x10000>; |
764 | power-domains = <&pd_disp1>; | ||
734 | interrupts = <0 94 0>; | 765 | interrupts = <0 94 0>; |
735 | clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>; | 766 | clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, |
736 | clock-names = "mixer", "sclk_hdmi"; | 767 | <&clock CLK_SCLK_HDMI>; |
768 | clock-names = "mixer", "hdmi", "sclk_hdmi"; | ||
737 | }; | 769 | }; |
738 | 770 | ||
739 | dp_phy: video-phy@10040720 { | 771 | dp_phy: video-phy@10040720 { |
@@ -743,6 +775,7 @@ | |||
743 | }; | 775 | }; |
744 | 776 | ||
745 | dp: dp-controller@145B0000 { | 777 | dp: dp-controller@145B0000 { |
778 | power-domains = <&pd_disp1>; | ||
746 | clocks = <&clock CLK_DP>; | 779 | clocks = <&clock CLK_DP>; |
747 | clock-names = "dp"; | 780 | clock-names = "dp"; |
748 | phys = <&dp_phy>; | 781 | phys = <&dp_phy>; |
@@ -750,6 +783,7 @@ | |||
750 | }; | 783 | }; |
751 | 784 | ||
752 | fimd: fimd@14400000 { | 785 | fimd: fimd@14400000 { |
786 | power-domains = <&pd_disp1>; | ||
753 | clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>; | 787 | clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>; |
754 | clock-names = "sclk_fimd", "fimd"; | 788 | clock-names = "sclk_fimd", "fimd"; |
755 | }; | 789 | }; |
diff --git a/arch/arm/boot/dts/exynos5420-trip-points.dtsi b/arch/arm/boot/dts/exynos5420-trip-points.dtsi new file mode 100644 index 000000000000..5d31fc140823 --- /dev/null +++ b/arch/arm/boot/dts/exynos5420-trip-points.dtsi | |||
@@ -0,0 +1,35 @@ | |||
1 | /* | ||
2 | * Device tree sources for default Exynos5420 thermal zone definition | ||
3 | * | ||
4 | * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | polling-delay-passive = <0>; | ||
13 | polling-delay = <0>; | ||
14 | trips { | ||
15 | cpu-alert-0 { | ||
16 | temperature = <85000>; /* millicelsius */ | ||
17 | hysteresis = <10000>; /* millicelsius */ | ||
18 | type = "active"; | ||
19 | }; | ||
20 | cpu-alert-1 { | ||
21 | temperature = <103000>; /* millicelsius */ | ||
22 | hysteresis = <10000>; /* millicelsius */ | ||
23 | type = "active"; | ||
24 | }; | ||
25 | cpu-alert-2 { | ||
26 | temperature = <110000>; /* millicelsius */ | ||
27 | hysteresis = <10000>; /* millicelsius */ | ||
28 | type = "active"; | ||
29 | }; | ||
30 | cpu-crit-0 { | ||
31 | temperature = <1200000>; /* millicelsius */ | ||
32 | hysteresis = <0>; /* millicelsius */ | ||
33 | type = "critical"; | ||
34 | }; | ||
35 | }; | ||
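Editor's sketch (not part of the commit): the fragment above holds bare properties plus a trips node, so it only takes effect when the C preprocessor pastes it inside a thermal-zone child node, which is what the '#include "exynos5420-trip-points.dtsi"' lines in the exynos5420.dtsi hunks further down do. After preprocessing, one zone would expand roughly as below; the values are copied from the fragment and only the first trip is spelled out.

	cpu0_thermal: cpu0-thermal {
		thermal-sensors = <&tmu_cpu0>;
		polling-delay-passive = <0>;
		polling-delay = <0>;
		trips {
			cpu-alert-0 {
				temperature = <85000>;	/* millicelsius */
				hysteresis = <10000>;	/* millicelsius */
				type = "active";
			};
			/* cpu-alert-1, cpu-alert-2 and cpu-crit-0 follow,
			 * exactly as in the fragment above */
		};
	};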
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi index 9dc2e9773b30..c0e98cf3514f 100644 --- a/arch/arm/boot/dts/exynos5420.dtsi +++ b/arch/arm/boot/dts/exynos5420.dtsi | |||
@@ -740,8 +740,9 @@ | |||
740 | compatible = "samsung,exynos5420-mixer"; | 740 | compatible = "samsung,exynos5420-mixer"; |
741 | reg = <0x14450000 0x10000>; | 741 | reg = <0x14450000 0x10000>; |
742 | interrupts = <0 94 0>; | 742 | interrupts = <0 94 0>; |
743 | clocks = <&clock CLK_MIXER>, <&clock CLK_SCLK_HDMI>; | 743 | clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>, |
744 | clock-names = "mixer", "sclk_hdmi"; | 744 | <&clock CLK_SCLK_HDMI>; |
745 | clock-names = "mixer", "hdmi", "sclk_hdmi"; | ||
745 | power-domains = <&disp_pd>; | 746 | power-domains = <&disp_pd>; |
746 | }; | 747 | }; |
747 | 748 | ||
@@ -782,6 +783,7 @@ | |||
782 | interrupts = <0 65 0>; | 783 | interrupts = <0 65 0>; |
783 | clocks = <&clock CLK_TMU>; | 784 | clocks = <&clock CLK_TMU>; |
784 | clock-names = "tmu_apbif"; | 785 | clock-names = "tmu_apbif"; |
786 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
785 | }; | 787 | }; |
786 | 788 | ||
787 | tmu_cpu1: tmu@10064000 { | 789 | tmu_cpu1: tmu@10064000 { |
@@ -790,6 +792,7 @@ | |||
790 | interrupts = <0 183 0>; | 792 | interrupts = <0 183 0>; |
791 | clocks = <&clock CLK_TMU>; | 793 | clocks = <&clock CLK_TMU>; |
792 | clock-names = "tmu_apbif"; | 794 | clock-names = "tmu_apbif"; |
795 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
793 | }; | 796 | }; |
794 | 797 | ||
795 | tmu_cpu2: tmu@10068000 { | 798 | tmu_cpu2: tmu@10068000 { |
@@ -798,6 +801,7 @@ | |||
798 | interrupts = <0 184 0>; | 801 | interrupts = <0 184 0>; |
799 | clocks = <&clock CLK_TMU>, <&clock CLK_TMU>; | 802 | clocks = <&clock CLK_TMU>, <&clock CLK_TMU>; |
800 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; | 803 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; |
804 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
801 | }; | 805 | }; |
802 | 806 | ||
803 | tmu_cpu3: tmu@1006c000 { | 807 | tmu_cpu3: tmu@1006c000 { |
@@ -806,6 +810,7 @@ | |||
806 | interrupts = <0 185 0>; | 810 | interrupts = <0 185 0>; |
807 | clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>; | 811 | clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>; |
808 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; | 812 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; |
813 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
809 | }; | 814 | }; |
810 | 815 | ||
811 | tmu_gpu: tmu@100a0000 { | 816 | tmu_gpu: tmu@100a0000 { |
@@ -814,6 +819,30 @@ | |||
814 | interrupts = <0 215 0>; | 819 | interrupts = <0 215 0>; |
815 | clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>; | 820 | clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>; |
816 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; | 821 | clock-names = "tmu_apbif", "tmu_triminfo_apbif"; |
822 | #include "exynos4412-tmu-sensor-conf.dtsi" | ||
823 | }; | ||
824 | |||
825 | thermal-zones { | ||
826 | cpu0_thermal: cpu0-thermal { | ||
827 | thermal-sensors = <&tmu_cpu0>; | ||
828 | #include "exynos5420-trip-points.dtsi" | ||
829 | }; | ||
830 | cpu1_thermal: cpu1-thermal { | ||
831 | thermal-sensors = <&tmu_cpu1>; | ||
832 | #include "exynos5420-trip-points.dtsi" | ||
833 | }; | ||
834 | cpu2_thermal: cpu2-thermal { | ||
835 | thermal-sensors = <&tmu_cpu2>; | ||
836 | #include "exynos5420-trip-points.dtsi" | ||
837 | }; | ||
838 | cpu3_thermal: cpu3-thermal { | ||
839 | thermal-sensors = <&tmu_cpu3>; | ||
840 | #include "exynos5420-trip-points.dtsi" | ||
841 | }; | ||
842 | gpu_thermal: gpu-thermal { | ||
843 | thermal-sensors = <&tmu_gpu>; | ||
844 | #include "exynos5420-trip-points.dtsi" | ||
845 | }; | ||
817 | }; | 846 | }; |
818 | 847 | ||
819 | watchdog: watchdog@101D0000 { | 848 | watchdog: watchdog@101D0000 { |
diff --git a/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi new file mode 100644 index 000000000000..7b2fba0ae92b --- /dev/null +++ b/arch/arm/boot/dts/exynos5440-tmu-sensor-conf.dtsi | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Device tree sources for Exynos5440 TMU sensor configuration | ||
3 | * | ||
4 | * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <dt-bindings/thermal/thermal_exynos.h> | ||
13 | |||
14 | #thermal-sensor-cells = <0>; | ||
15 | samsung,tmu_gain = <5>; | ||
16 | samsung,tmu_reference_voltage = <16>; | ||
17 | samsung,tmu_noise_cancel_mode = <4>; | ||
18 | samsung,tmu_efuse_value = <0x5d2d>; | ||
19 | samsung,tmu_min_efuse_value = <16>; | ||
20 | samsung,tmu_max_efuse_value = <76>; | ||
21 | samsung,tmu_first_point_trim = <25>; | ||
22 | samsung,tmu_second_point_trim = <70>; | ||
23 | samsung,tmu_default_temp_offset = <25>; | ||
24 | samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>; | ||
diff --git a/arch/arm/boot/dts/exynos5440-trip-points.dtsi b/arch/arm/boot/dts/exynos5440-trip-points.dtsi new file mode 100644 index 000000000000..48adfa8f4300 --- /dev/null +++ b/arch/arm/boot/dts/exynos5440-trip-points.dtsi | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Device tree sources for default Exynos5440 thermal zone definition | ||
3 | * | ||
4 | * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | polling-delay-passive = <0>; | ||
13 | polling-delay = <0>; | ||
14 | trips { | ||
15 | cpu-alert-0 { | ||
16 | temperature = <100000>; /* millicelsius */ | ||
17 | hysteresis = <0>; /* millicelsius */ | ||
18 | type = "active"; | ||
19 | }; | ||
20 | cpu-crit-0 { | ||
21 | temperature = <1050000>; /* millicelsius */ | ||
22 | hysteresis = <0>; /* millicelsius */ | ||
23 | type = "critical"; | ||
24 | }; | ||
25 | }; | ||
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi index 8f3373cd7b87..59d9416b3b03 100644 --- a/arch/arm/boot/dts/exynos5440.dtsi +++ b/arch/arm/boot/dts/exynos5440.dtsi | |||
@@ -219,6 +219,7 @@ | |||
219 | interrupts = <0 58 0>; | 219 | interrupts = <0 58 0>; |
220 | clocks = <&clock CLK_B_125>; | 220 | clocks = <&clock CLK_B_125>; |
221 | clock-names = "tmu_apbif"; | 221 | clock-names = "tmu_apbif"; |
222 | #include "exynos5440-tmu-sensor-conf.dtsi" | ||
222 | }; | 223 | }; |
223 | 224 | ||
224 | tmuctrl_1: tmuctrl@16011C { | 225 | tmuctrl_1: tmuctrl@16011C { |
@@ -227,6 +228,7 @@ | |||
227 | interrupts = <0 58 0>; | 228 | interrupts = <0 58 0>; |
228 | clocks = <&clock CLK_B_125>; | 229 | clocks = <&clock CLK_B_125>; |
229 | clock-names = "tmu_apbif"; | 230 | clock-names = "tmu_apbif"; |
231 | #include "exynos5440-tmu-sensor-conf.dtsi" | ||
230 | }; | 232 | }; |
231 | 233 | ||
232 | tmuctrl_2: tmuctrl@160120 { | 234 | tmuctrl_2: tmuctrl@160120 { |
@@ -235,6 +237,22 @@ | |||
235 | interrupts = <0 58 0>; | 237 | interrupts = <0 58 0>; |
236 | clocks = <&clock CLK_B_125>; | 238 | clocks = <&clock CLK_B_125>; |
237 | clock-names = "tmu_apbif"; | 239 | clock-names = "tmu_apbif"; |
240 | #include "exynos5440-tmu-sensor-conf.dtsi" | ||
241 | }; | ||
242 | |||
243 | thermal-zones { | ||
244 | cpu0_thermal: cpu0-thermal { | ||
245 | thermal-sensors = <&tmuctrl_0>; | ||
246 | #include "exynos5440-trip-points.dtsi" | ||
247 | }; | ||
248 | cpu1_thermal: cpu1-thermal { | ||
249 | thermal-sensors = <&tmuctrl_1>; | ||
250 | #include "exynos5440-trip-points.dtsi" | ||
251 | }; | ||
252 | cpu2_thermal: cpu2-thermal { | ||
253 | thermal-sensors = <&tmuctrl_2>; | ||
254 | #include "exynos5440-trip-points.dtsi" | ||
255 | }; | ||
238 | }; | 256 | }; |
239 | 257 | ||
240 | sata@210000 { | 258 | sata@210000 { |
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi index f1cd2147421d..a626e6dd8022 100644 --- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi | |||
@@ -35,6 +35,7 @@ | |||
35 | regulator-max-microvolt = <5000000>; | 35 | regulator-max-microvolt = <5000000>; |
36 | gpio = <&gpio3 22 0>; | 36 | gpio = <&gpio3 22 0>; |
37 | enable-active-high; | 37 | enable-active-high; |
38 | vin-supply = <&swbst_reg>; | ||
38 | }; | 39 | }; |
39 | 40 | ||
40 | reg_usb_h1_vbus: regulator@1 { | 41 | reg_usb_h1_vbus: regulator@1 { |
@@ -45,6 +46,7 @@ | |||
45 | regulator-max-microvolt = <5000000>; | 46 | regulator-max-microvolt = <5000000>; |
46 | gpio = <&gpio1 29 0>; | 47 | gpio = <&gpio1 29 0>; |
47 | enable-active-high; | 48 | enable-active-high; |
49 | vin-supply = <&swbst_reg>; | ||
48 | }; | 50 | }; |
49 | 51 | ||
50 | reg_audio: regulator@2 { | 52 | reg_audio: regulator@2 { |
diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts index fda4932faefd..945887d3fdb3 100644 --- a/arch/arm/boot/dts/imx6sl-evk.dts +++ b/arch/arm/boot/dts/imx6sl-evk.dts | |||
@@ -52,6 +52,7 @@ | |||
52 | regulator-max-microvolt = <5000000>; | 52 | regulator-max-microvolt = <5000000>; |
53 | gpio = <&gpio4 0 0>; | 53 | gpio = <&gpio4 0 0>; |
54 | enable-active-high; | 54 | enable-active-high; |
55 | vin-supply = <&swbst_reg>; | ||
55 | }; | 56 | }; |
56 | 57 | ||
57 | reg_usb_otg2_vbus: regulator@1 { | 58 | reg_usb_otg2_vbus: regulator@1 { |
@@ -62,6 +63,7 @@ | |||
62 | regulator-max-microvolt = <5000000>; | 63 | regulator-max-microvolt = <5000000>; |
63 | gpio = <&gpio4 2 0>; | 64 | gpio = <&gpio4 2 0>; |
64 | enable-active-high; | 65 | enable-active-high; |
66 | vin-supply = <&swbst_reg>; | ||
65 | }; | 67 | }; |
66 | 68 | ||
67 | reg_aud3v: regulator@2 { | 69 | reg_aud3v: regulator@2 { |
diff --git a/arch/arm/boot/dts/omap5-core-thermal.dtsi b/arch/arm/boot/dts/omap5-core-thermal.dtsi index 19212ac6eef0..de8a3d456cf7 100644 --- a/arch/arm/boot/dts/omap5-core-thermal.dtsi +++ b/arch/arm/boot/dts/omap5-core-thermal.dtsi | |||
@@ -13,7 +13,7 @@ | |||
13 | 13 | ||
14 | core_thermal: core_thermal { | 14 | core_thermal: core_thermal { |
15 | polling-delay-passive = <250>; /* milliseconds */ | 15 | polling-delay-passive = <250>; /* milliseconds */ |
16 | polling-delay = <1000>; /* milliseconds */ | 16 | polling-delay = <500>; /* milliseconds */ |
17 | 17 | ||
18 | /* sensor ID */ | 18 | /* sensor ID */ |
19 | thermal-sensors = <&bandgap 2>; | 19 | thermal-sensors = <&bandgap 2>; |
diff --git a/arch/arm/boot/dts/omap5-gpu-thermal.dtsi b/arch/arm/boot/dts/omap5-gpu-thermal.dtsi index 1b87aca88b77..bc3090f2e84b 100644 --- a/arch/arm/boot/dts/omap5-gpu-thermal.dtsi +++ b/arch/arm/boot/dts/omap5-gpu-thermal.dtsi | |||
@@ -13,7 +13,7 @@ | |||
13 | 13 | ||
14 | gpu_thermal: gpu_thermal { | 14 | gpu_thermal: gpu_thermal { |
15 | polling-delay-passive = <250>; /* milliseconds */ | 15 | polling-delay-passive = <250>; /* milliseconds */ |
16 | polling-delay = <1000>; /* milliseconds */ | 16 | polling-delay = <500>; /* milliseconds */ |
17 | 17 | ||
18 | /* sensor ID */ | 18 | /* sensor ID */ |
19 | thermal-sensors = <&bandgap 1>; | 19 | thermal-sensors = <&bandgap 1>; |
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index ddff674bd05e..4a485b63a141 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi | |||
@@ -1079,4 +1079,8 @@ | |||
1079 | }; | 1079 | }; |
1080 | }; | 1080 | }; |
1081 | 1081 | ||
1082 | &cpu_thermal { | ||
1083 | polling-delay = <500>; /* milliseconds */ | ||
1084 | }; | ||
1085 | |||
1082 | /include/ "omap54xx-clocks.dtsi" | 1086 | /include/ "omap54xx-clocks.dtsi" |
diff --git a/arch/arm/boot/dts/omap54xx-clocks.dtsi b/arch/arm/boot/dts/omap54xx-clocks.dtsi index 58c27466f012..83b425fb3ac2 100644 --- a/arch/arm/boot/dts/omap54xx-clocks.dtsi +++ b/arch/arm/boot/dts/omap54xx-clocks.dtsi | |||
@@ -167,10 +167,18 @@ | |||
167 | ti,index-starts-at-one; | 167 | ti,index-starts-at-one; |
168 | }; | 168 | }; |
169 | 169 | ||
170 | dpll_core_byp_mux: dpll_core_byp_mux { | ||
171 | #clock-cells = <0>; | ||
172 | compatible = "ti,mux-clock"; | ||
173 | clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>; | ||
174 | ti,bit-shift = <23>; | ||
175 | reg = <0x012c>; | ||
176 | }; | ||
177 | |||
170 | dpll_core_ck: dpll_core_ck { | 178 | dpll_core_ck: dpll_core_ck { |
171 | #clock-cells = <0>; | 179 | #clock-cells = <0>; |
172 | compatible = "ti,omap4-dpll-core-clock"; | 180 | compatible = "ti,omap4-dpll-core-clock"; |
173 | clocks = <&sys_clkin>, <&dpll_abe_m3x2_ck>; | 181 | clocks = <&sys_clkin>, <&dpll_core_byp_mux>; |
174 | reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; | 182 | reg = <0x0120>, <0x0124>, <0x012c>, <0x0128>; |
175 | }; | 183 | }; |
176 | 184 | ||
@@ -294,10 +302,18 @@ | |||
294 | clock-div = <1>; | 302 | clock-div = <1>; |
295 | }; | 303 | }; |
296 | 304 | ||
305 | dpll_iva_byp_mux: dpll_iva_byp_mux { | ||
306 | #clock-cells = <0>; | ||
307 | compatible = "ti,mux-clock"; | ||
308 | clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>; | ||
309 | ti,bit-shift = <23>; | ||
310 | reg = <0x01ac>; | ||
311 | }; | ||
312 | |||
297 | dpll_iva_ck: dpll_iva_ck { | 313 | dpll_iva_ck: dpll_iva_ck { |
298 | #clock-cells = <0>; | 314 | #clock-cells = <0>; |
299 | compatible = "ti,omap4-dpll-clock"; | 315 | compatible = "ti,omap4-dpll-clock"; |
300 | clocks = <&sys_clkin>, <&iva_dpll_hs_clk_div>; | 316 | clocks = <&sys_clkin>, <&dpll_iva_byp_mux>; |
301 | reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; | 317 | reg = <0x01a0>, <0x01a4>, <0x01ac>, <0x01a8>; |
302 | }; | 318 | }; |
303 | 319 | ||
@@ -599,10 +615,19 @@ | |||
599 | }; | 615 | }; |
600 | }; | 616 | }; |
601 | &cm_core_clocks { | 617 | &cm_core_clocks { |
618 | |||
619 | dpll_per_byp_mux: dpll_per_byp_mux { | ||
620 | #clock-cells = <0>; | ||
621 | compatible = "ti,mux-clock"; | ||
622 | clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>; | ||
623 | ti,bit-shift = <23>; | ||
624 | reg = <0x014c>; | ||
625 | }; | ||
626 | |||
602 | dpll_per_ck: dpll_per_ck { | 627 | dpll_per_ck: dpll_per_ck { |
603 | #clock-cells = <0>; | 628 | #clock-cells = <0>; |
604 | compatible = "ti,omap4-dpll-clock"; | 629 | compatible = "ti,omap4-dpll-clock"; |
605 | clocks = <&sys_clkin>, <&per_dpll_hs_clk_div>; | 630 | clocks = <&sys_clkin>, <&dpll_per_byp_mux>; |
606 | reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; | 631 | reg = <0x0140>, <0x0144>, <0x014c>, <0x0148>; |
607 | }; | 632 | }; |
608 | 633 | ||
@@ -714,10 +739,18 @@ | |||
714 | ti,index-starts-at-one; | 739 | ti,index-starts-at-one; |
715 | }; | 740 | }; |
716 | 741 | ||
742 | dpll_usb_byp_mux: dpll_usb_byp_mux { | ||
743 | #clock-cells = <0>; | ||
744 | compatible = "ti,mux-clock"; | ||
745 | clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>; | ||
746 | ti,bit-shift = <23>; | ||
747 | reg = <0x018c>; | ||
748 | }; | ||
749 | |||
717 | dpll_usb_ck: dpll_usb_ck { | 750 | dpll_usb_ck: dpll_usb_ck { |
718 | #clock-cells = <0>; | 751 | #clock-cells = <0>; |
719 | compatible = "ti,omap4-dpll-j-type-clock"; | 752 | compatible = "ti,omap4-dpll-j-type-clock"; |
720 | clocks = <&sys_clkin>, <&usb_dpll_hs_clk_div>; | 753 | clocks = <&sys_clkin>, <&dpll_usb_byp_mux>; |
721 | reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; | 754 | reg = <0x0180>, <0x0184>, <0x018c>, <0x0188>; |
722 | }; | 755 | }; |
723 | 756 | ||
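Editor's sketch (not part of the commit): the omap54xx-clocks.dtsi hunks above all apply one pattern. Each DPLL's bypass input, previously wired straight to a derived clock such as dpll_abe_m3x2_ck or a *_dpll_hs_clk_div, now goes through an explicit ti,mux-clock selecting between sys_clkin and that derived clock via bit 23, and each mux's reg (0x012c, 0x01ac, 0x014c, 0x018c) is the register that already appears as the third cell of the matching DPLL node's reg list. A generic sketch of the pattern follows; dpll_x_byp_mux, x_dpll_hs_clk_div and CLKSEL_REG are placeholder names, not nodes from the commit.

	dpll_x_byp_mux: dpll_x_byp_mux {
		#clock-cells = <0>;
		compatible = "ti,mux-clock";
		clocks = <&sys_clkin>, <&x_dpll_hs_clk_div>;
		ti,bit-shift = <23>;	/* bypass-source select bit */
		reg = <CLKSEL_REG>;	/* same register as the DPLL's third reg cell */
	};

	dpll_x_ck: dpll_x_ck {
		#clock-cells = <0>;
		compatible = "ti,omap4-dpll-clock";
		/* the second entry, the bypass clock, now points at the mux */
		clocks = <&sys_clkin>, <&dpll_x_byp_mux>;
	};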
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi index 261311bdf65b..367af53c1b84 100644 --- a/arch/arm/boot/dts/sama5d3.dtsi +++ b/arch/arm/boot/dts/sama5d3.dtsi | |||
@@ -1248,7 +1248,6 @@ | |||
1248 | atmel,watchdog-type = "hardware"; | 1248 | atmel,watchdog-type = "hardware"; |
1249 | atmel,reset-type = "all"; | 1249 | atmel,reset-type = "all"; |
1250 | atmel,dbg-halt; | 1250 | atmel,dbg-halt; |
1251 | atmel,idle-halt; | ||
1252 | status = "disabled"; | 1251 | status = "disabled"; |
1253 | }; | 1252 | }; |
1254 | 1253 | ||
@@ -1416,7 +1415,7 @@ | |||
1416 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; | 1415 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; |
1417 | reg = <0x00700000 0x100000>; | 1416 | reg = <0x00700000 0x100000>; |
1418 | interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>; | 1417 | interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>; |
1419 | clocks = <&usb>, <&uhphs_clk>, <&uhpck>; | 1418 | clocks = <&utmi>, <&uhphs_clk>, <&uhpck>; |
1420 | clock-names = "usb_clk", "ehci_clk", "uhpck"; | 1419 | clock-names = "usb_clk", "ehci_clk", "uhpck"; |
1421 | status = "disabled"; | 1420 | status = "disabled"; |
1422 | }; | 1421 | }; |
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi index d986b41b9654..4303874889c6 100644 --- a/arch/arm/boot/dts/sama5d4.dtsi +++ b/arch/arm/boot/dts/sama5d4.dtsi | |||
@@ -66,6 +66,7 @@ | |||
66 | gpio4 = &pioE; | 66 | gpio4 = &pioE; |
67 | tcb0 = &tcb0; | 67 | tcb0 = &tcb0; |
68 | tcb1 = &tcb1; | 68 | tcb1 = &tcb1; |
69 | i2c0 = &i2c0; | ||
69 | i2c2 = &i2c2; | 70 | i2c2 = &i2c2; |
70 | }; | 71 | }; |
71 | cpus { | 72 | cpus { |
@@ -259,7 +260,7 @@ | |||
259 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; | 260 | compatible = "atmel,at91sam9g45-ehci", "usb-ehci"; |
260 | reg = <0x00600000 0x100000>; | 261 | reg = <0x00600000 0x100000>; |
261 | interrupts = <46 IRQ_TYPE_LEVEL_HIGH 2>; | 262 | interrupts = <46 IRQ_TYPE_LEVEL_HIGH 2>; |
262 | clocks = <&usb>, <&uhphs_clk>, <&uhpck>; | 263 | clocks = <&utmi>, <&uhphs_clk>, <&uhpck>; |
263 | clock-names = "usb_clk", "ehci_clk", "uhpck"; | 264 | clock-names = "usb_clk", "ehci_clk", "uhpck"; |
264 | status = "disabled"; | 265 | status = "disabled"; |
265 | }; | 266 | }; |
@@ -461,8 +462,8 @@ | |||
461 | 462 | ||
462 | lcdck: lcdck { | 463 | lcdck: lcdck { |
463 | #clock-cells = <0>; | 464 | #clock-cells = <0>; |
464 | reg = <4>; | 465 | reg = <3>; |
465 | clocks = <&smd>; | 466 | clocks = <&mck>; |
466 | }; | 467 | }; |
467 | 468 | ||
468 | smdck: smdck { | 469 | smdck: smdck { |
@@ -770,7 +771,7 @@ | |||
770 | reg = <50>; | 771 | reg = <50>; |
771 | }; | 772 | }; |
772 | 773 | ||
773 | lcd_clk: lcd_clk { | 774 | lcdc_clk: lcdc_clk { |
774 | #clock-cells = <0>; | 775 | #clock-cells = <0>; |
775 | reg = <51>; | 776 | reg = <51>; |
776 | }; | 777 | }; |
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 252c3d1bda50..9d8760956752 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi | |||
@@ -713,6 +713,9 @@ | |||
713 | reg-shift = <2>; | 713 | reg-shift = <2>; |
714 | reg-io-width = <4>; | 714 | reg-io-width = <4>; |
715 | clocks = <&l4_sp_clk>; | 715 | clocks = <&l4_sp_clk>; |
716 | dmas = <&pdma 28>, | ||
717 | <&pdma 29>; | ||
718 | dma-names = "tx", "rx"; | ||
716 | }; | 719 | }; |
717 | 720 | ||
718 | uart1: serial1@ffc03000 { | 721 | uart1: serial1@ffc03000 { |
@@ -722,6 +725,9 @@ | |||
722 | reg-shift = <2>; | 725 | reg-shift = <2>; |
723 | reg-io-width = <4>; | 726 | reg-io-width = <4>; |
724 | clocks = <&l4_sp_clk>; | 727 | clocks = <&l4_sp_clk>; |
728 | dmas = <&pdma 30>, | ||
729 | <&pdma 31>; | ||
730 | dma-names = "tx", "rx"; | ||
725 | }; | 731 | }; |
726 | 732 | ||
727 | rst: rstmgr@ffd05000 { | 733 | rst: rstmgr@ffd05000 { |
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig index f2670f638e97..811e72bbe642 100644 --- a/arch/arm/configs/at91_dt_defconfig +++ b/arch/arm/configs/at91_dt_defconfig | |||
@@ -70,6 +70,7 @@ CONFIG_SCSI=y | |||
70 | CONFIG_BLK_DEV_SD=y | 70 | CONFIG_BLK_DEV_SD=y |
71 | # CONFIG_SCSI_LOWLEVEL is not set | 71 | # CONFIG_SCSI_LOWLEVEL is not set |
72 | CONFIG_NETDEVICES=y | 72 | CONFIG_NETDEVICES=y |
73 | CONFIG_ARM_AT91_ETHER=y | ||
73 | CONFIG_MACB=y | 74 | CONFIG_MACB=y |
74 | # CONFIG_NET_VENDOR_BROADCOM is not set | 75 | # CONFIG_NET_VENDOR_BROADCOM is not set |
75 | CONFIG_DM9000=y | 76 | CONFIG_DM9000=y |
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index b7e6b6fba5e0..06075b6d2463 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig | |||
@@ -99,7 +99,7 @@ CONFIG_PCI_RCAR_GEN2=y | |||
99 | CONFIG_PCI_RCAR_GEN2_PCIE=y | 99 | CONFIG_PCI_RCAR_GEN2_PCIE=y |
100 | CONFIG_PCIEPORTBUS=y | 100 | CONFIG_PCIEPORTBUS=y |
101 | CONFIG_SMP=y | 101 | CONFIG_SMP=y |
102 | CONFIG_NR_CPUS=8 | 102 | CONFIG_NR_CPUS=16 |
103 | CONFIG_HIGHPTE=y | 103 | CONFIG_HIGHPTE=y |
104 | CONFIG_CMA=y | 104 | CONFIG_CMA=y |
105 | CONFIG_ARM_APPENDED_DTB=y | 105 | CONFIG_ARM_APPENDED_DTB=y |
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index a097cffa1231..8e108599e1af 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig | |||
@@ -377,6 +377,7 @@ CONFIG_PWM_TWL=m | |||
377 | CONFIG_PWM_TWL_LED=m | 377 | CONFIG_PWM_TWL_LED=m |
378 | CONFIG_OMAP_USB2=m | 378 | CONFIG_OMAP_USB2=m |
379 | CONFIG_TI_PIPE3=y | 379 | CONFIG_TI_PIPE3=y |
380 | CONFIG_TWL4030_USB=m | ||
380 | CONFIG_EXT2_FS=y | 381 | CONFIG_EXT2_FS=y |
381 | CONFIG_EXT3_FS=y | 382 | CONFIG_EXT3_FS=y |
382 | # CONFIG_EXT3_FS_XATTR is not set | 383 | # CONFIG_EXT3_FS_XATTR is not set |
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig index 41d856effe6c..510c747c65b4 100644 --- a/arch/arm/configs/sama5_defconfig +++ b/arch/arm/configs/sama5_defconfig | |||
@@ -3,8 +3,6 @@ | |||
3 | CONFIG_SYSVIPC=y | 3 | CONFIG_SYSVIPC=y |
4 | CONFIG_IRQ_DOMAIN_DEBUG=y | 4 | CONFIG_IRQ_DOMAIN_DEBUG=y |
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_SYSFS_DEPRECATED=y | ||
7 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
8 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
9 | CONFIG_EMBEDDED=y | 7 | CONFIG_EMBEDDED=y |
10 | CONFIG_SLAB=y | 8 | CONFIG_SLAB=y |
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig index 38840a812924..8f6a5702b696 100644 --- a/arch/arm/configs/sunxi_defconfig +++ b/arch/arm/configs/sunxi_defconfig | |||
@@ -4,6 +4,7 @@ CONFIG_BLK_DEV_INITRD=y | |||
4 | CONFIG_PERF_EVENTS=y | 4 | CONFIG_PERF_EVENTS=y |
5 | CONFIG_ARCH_SUNXI=y | 5 | CONFIG_ARCH_SUNXI=y |
6 | CONFIG_SMP=y | 6 | CONFIG_SMP=y |
7 | CONFIG_NR_CPUS=8 | ||
7 | CONFIG_AEABI=y | 8 | CONFIG_AEABI=y |
8 | CONFIG_HIGHMEM=y | 9 | CONFIG_HIGHMEM=y |
9 | CONFIG_HIGHPTE=y | 10 | CONFIG_HIGHPTE=y |
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig index f489fdaa19b8..37fe607a4ede 100644 --- a/arch/arm/configs/vexpress_defconfig +++ b/arch/arm/configs/vexpress_defconfig | |||
@@ -118,8 +118,8 @@ CONFIG_HID_ZEROPLUS=y | |||
118 | CONFIG_USB=y | 118 | CONFIG_USB=y |
119 | CONFIG_USB_ANNOUNCE_NEW_DEVICES=y | 119 | CONFIG_USB_ANNOUNCE_NEW_DEVICES=y |
120 | CONFIG_USB_MON=y | 120 | CONFIG_USB_MON=y |
121 | CONFIG_USB_ISP1760_HCD=y | ||
122 | CONFIG_USB_STORAGE=y | 121 | CONFIG_USB_STORAGE=y |
122 | CONFIG_USB_ISP1760=y | ||
123 | CONFIG_MMC=y | 123 | CONFIG_MMC=y |
124 | CONFIG_MMC_ARMMMCI=y | 124 | CONFIG_MMC_ARMMMCI=y |
125 | CONFIG_NEW_LEDS=y | 125 | CONFIG_NEW_LEDS=y |
diff --git a/arch/arm/include/debug/at91.S b/arch/arm/include/debug/at91.S index 80a6501b4d50..c3c45e628e33 100644 --- a/arch/arm/include/debug/at91.S +++ b/arch/arm/include/debug/at91.S | |||
@@ -18,8 +18,11 @@ | |||
18 | #define AT91_DBGU 0xfc00c000 /* SAMA5D4_BASE_USART3 */ | 18 | #define AT91_DBGU 0xfc00c000 /* SAMA5D4_BASE_USART3 */ |
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | /* Keep in sync with mach-at91/include/mach/hardware.h */ | 21 | #ifdef CONFIG_MMU |
22 | #define AT91_IO_P2V(x) ((x) - 0x01000000) | 22 | #define AT91_IO_P2V(x) ((x) - 0x01000000) |
23 | #else | ||
24 | #define AT91_IO_P2V(x) (x) | ||
25 | #endif | ||
23 | 26 | ||
24 | #define AT91_DBGU_SR (0x14) /* Status Register */ | 27 | #define AT91_DBGU_SR (0x14) /* Status Register */ |
25 | #define AT91_DBGU_THR (0x1c) /* Transmitter Holding Register */ | 28 | #define AT91_DBGU_THR (0x1c) /* Transmitter Holding Register */ |
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 5e34fb143309..aa4116e9452f 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c | |||
@@ -270,37 +270,35 @@ static void __init at91_pm_sram_init(void) | |||
270 | phys_addr_t sram_pbase; | 270 | phys_addr_t sram_pbase; |
271 | unsigned long sram_base; | 271 | unsigned long sram_base; |
272 | struct device_node *node; | 272 | struct device_node *node; |
273 | struct platform_device *pdev; | 273 | struct platform_device *pdev = NULL; |
274 | 274 | ||
275 | node = of_find_compatible_node(NULL, NULL, "mmio-sram"); | 275 | for_each_compatible_node(node, NULL, "mmio-sram") { |
276 | if (!node) { | 276 | pdev = of_find_device_by_node(node); |
277 | pr_warn("%s: failed to find sram node!\n", __func__); | 277 | if (pdev) { |
278 | return; | 278 | of_node_put(node); |
279 | break; | ||
280 | } | ||
279 | } | 281 | } |
280 | 282 | ||
281 | pdev = of_find_device_by_node(node); | ||
282 | if (!pdev) { | 283 | if (!pdev) { |
283 | pr_warn("%s: failed to find sram device!\n", __func__); | 284 | pr_warn("%s: failed to find sram device!\n", __func__); |
284 | goto put_node; | 285 | return; |
285 | } | 286 | } |
286 | 287 | ||
287 | sram_pool = dev_get_gen_pool(&pdev->dev); | 288 | sram_pool = dev_get_gen_pool(&pdev->dev); |
288 | if (!sram_pool) { | 289 | if (!sram_pool) { |
289 | pr_warn("%s: sram pool unavailable!\n", __func__); | 290 | pr_warn("%s: sram pool unavailable!\n", __func__); |
290 | goto put_node; | 291 | return; |
291 | } | 292 | } |
292 | 293 | ||
293 | sram_base = gen_pool_alloc(sram_pool, at91_slow_clock_sz); | 294 | sram_base = gen_pool_alloc(sram_pool, at91_slow_clock_sz); |
294 | if (!sram_base) { | 295 | if (!sram_base) { |
295 | pr_warn("%s: unable to alloc ocram!\n", __func__); | 296 | pr_warn("%s: unable to alloc ocram!\n", __func__); |
296 | goto put_node; | 297 | return; |
297 | } | 298 | } |
298 | 299 | ||
299 | sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base); | 300 | sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base); |
300 | slow_clock = __arm_ioremap_exec(sram_pbase, at91_slow_clock_sz, false); | 301 | slow_clock = __arm_ioremap_exec(sram_pbase, at91_slow_clock_sz, false); |
301 | |||
302 | put_node: | ||
303 | of_node_put(node); | ||
304 | } | 302 | } |
305 | #endif | 303 | #endif |
306 | 304 | ||
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h index d2c89963af2d..86c0aa819d25 100644 --- a/arch/arm/mach-at91/pm.h +++ b/arch/arm/mach-at91/pm.h | |||
@@ -44,7 +44,7 @@ static inline void at91rm9200_standby(void) | |||
44 | " mcr p15, 0, %0, c7, c0, 4\n\t" | 44 | " mcr p15, 0, %0, c7, c0, 4\n\t" |
45 | " str %5, [%1, %2]" | 45 | " str %5, [%1, %2]" |
46 | : | 46 | : |
47 | : "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR), | 47 | : "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR), |
48 | "r" (1), "r" (AT91RM9200_SDRAMC_SRR), | 48 | "r" (1), "r" (AT91RM9200_SDRAMC_SRR), |
49 | "r" (lpr)); | 49 | "r" (lpr)); |
50 | } | 50 | } |
diff --git a/arch/arm/mach-at91/pm_slowclock.S b/arch/arm/mach-at91/pm_slowclock.S index 556151e85ec4..931f0e302c03 100644 --- a/arch/arm/mach-at91/pm_slowclock.S +++ b/arch/arm/mach-at91/pm_slowclock.S | |||
@@ -25,11 +25,6 @@ | |||
25 | */ | 25 | */ |
26 | #undef SLOWDOWN_MASTER_CLOCK | 26 | #undef SLOWDOWN_MASTER_CLOCK |
27 | 27 | ||
28 | #define MCKRDY_TIMEOUT 1000 | ||
29 | #define MOSCRDY_TIMEOUT 1000 | ||
30 | #define PLLALOCK_TIMEOUT 1000 | ||
31 | #define PLLBLOCK_TIMEOUT 1000 | ||
32 | |||
33 | pmc .req r0 | 28 | pmc .req r0 |
34 | sdramc .req r1 | 29 | sdramc .req r1 |
35 | ramc1 .req r2 | 30 | ramc1 .req r2 |
@@ -41,60 +36,42 @@ tmp2 .req r5 | |||
41 | * Wait until master clock is ready (after switching master clock source) | 36 | * Wait until master clock is ready (after switching master clock source) |
42 | */ | 37 | */ |
43 | .macro wait_mckrdy | 38 | .macro wait_mckrdy |
44 | mov tmp2, #MCKRDY_TIMEOUT | 39 | 1: ldr tmp1, [pmc, #AT91_PMC_SR] |
45 | 1: sub tmp2, tmp2, #1 | ||
46 | cmp tmp2, #0 | ||
47 | beq 2f | ||
48 | ldr tmp1, [pmc, #AT91_PMC_SR] | ||
49 | tst tmp1, #AT91_PMC_MCKRDY | 40 | tst tmp1, #AT91_PMC_MCKRDY |
50 | beq 1b | 41 | beq 1b |
51 | 2: | ||
52 | .endm | 42 | .endm |
53 | 43 | ||
54 | /* | 44 | /* |
55 | * Wait until master oscillator has stabilized. | 45 | * Wait until master oscillator has stabilized. |
56 | */ | 46 | */ |
57 | .macro wait_moscrdy | 47 | .macro wait_moscrdy |
58 | mov tmp2, #MOSCRDY_TIMEOUT | 48 | 1: ldr tmp1, [pmc, #AT91_PMC_SR] |
59 | 1: sub tmp2, tmp2, #1 | ||
60 | cmp tmp2, #0 | ||
61 | beq 2f | ||
62 | ldr tmp1, [pmc, #AT91_PMC_SR] | ||
63 | tst tmp1, #AT91_PMC_MOSCS | 49 | tst tmp1, #AT91_PMC_MOSCS |
64 | beq 1b | 50 | beq 1b |
65 | 2: | ||
66 | .endm | 51 | .endm |
67 | 52 | ||
68 | /* | 53 | /* |
69 | * Wait until PLLA has locked. | 54 | * Wait until PLLA has locked. |
70 | */ | 55 | */ |
71 | .macro wait_pllalock | 56 | .macro wait_pllalock |
72 | mov tmp2, #PLLALOCK_TIMEOUT | 57 | 1: ldr tmp1, [pmc, #AT91_PMC_SR] |
73 | 1: sub tmp2, tmp2, #1 | ||
74 | cmp tmp2, #0 | ||
75 | beq 2f | ||
76 | ldr tmp1, [pmc, #AT91_PMC_SR] | ||
77 | tst tmp1, #AT91_PMC_LOCKA | 58 | tst tmp1, #AT91_PMC_LOCKA |
78 | beq 1b | 59 | beq 1b |
79 | 2: | ||
80 | .endm | 60 | .endm |
81 | 61 | ||
82 | /* | 62 | /* |
83 | * Wait until PLLB has locked. | 63 | * Wait until PLLB has locked. |
84 | */ | 64 | */ |
85 | .macro wait_pllblock | 65 | .macro wait_pllblock |
86 | mov tmp2, #PLLBLOCK_TIMEOUT | 66 | 1: ldr tmp1, [pmc, #AT91_PMC_SR] |
87 | 1: sub tmp2, tmp2, #1 | ||
88 | cmp tmp2, #0 | ||
89 | beq 2f | ||
90 | ldr tmp1, [pmc, #AT91_PMC_SR] | ||
91 | tst tmp1, #AT91_PMC_LOCKB | 67 | tst tmp1, #AT91_PMC_LOCKB |
92 | beq 1b | 68 | beq 1b |
93 | 2: | ||
94 | .endm | 69 | .endm |
95 | 70 | ||
96 | .text | 71 | .text |
97 | 72 | ||
73 | .arm | ||
74 | |||
98 | /* void at91_slow_clock(void __iomem *pmc, void __iomem *sdramc, | 75 | /* void at91_slow_clock(void __iomem *pmc, void __iomem *sdramc, |
99 | * void __iomem *ramc1, int memctrl) | 76 | * void __iomem *ramc1, int memctrl) |
100 | */ | 77 | */ |
@@ -134,6 +111,16 @@ ddr_sr_enable: | |||
134 | cmp memctrl, #AT91_MEMCTRL_DDRSDR | 111 | cmp memctrl, #AT91_MEMCTRL_DDRSDR |
135 | bne sdr_sr_enable | 112 | bne sdr_sr_enable |
136 | 113 | ||
114 | /* LPDDR1 --> force DDR2 mode during self-refresh */ | ||
115 | ldr tmp1, [sdramc, #AT91_DDRSDRC_MDR] | ||
116 | str tmp1, .saved_sam9_mdr | ||
117 | bic tmp1, tmp1, #~AT91_DDRSDRC_MD | ||
118 | cmp tmp1, #AT91_DDRSDRC_MD_LOW_POWER_DDR | ||
119 | ldreq tmp1, [sdramc, #AT91_DDRSDRC_MDR] | ||
120 | biceq tmp1, tmp1, #AT91_DDRSDRC_MD | ||
121 | orreq tmp1, tmp1, #AT91_DDRSDRC_MD_DDR2 | ||
122 | streq tmp1, [sdramc, #AT91_DDRSDRC_MDR] | ||
123 | |||
137 | /* prepare for DDRAM self-refresh mode */ | 124 | /* prepare for DDRAM self-refresh mode */ |
138 | ldr tmp1, [sdramc, #AT91_DDRSDRC_LPR] | 125 | ldr tmp1, [sdramc, #AT91_DDRSDRC_LPR] |
139 | str tmp1, .saved_sam9_lpr | 126 | str tmp1, .saved_sam9_lpr |
@@ -142,14 +129,26 @@ ddr_sr_enable: | |||
142 | 129 | ||
143 | /* figure out if we use the second ram controller */ | 130 | /* figure out if we use the second ram controller */ |
144 | cmp ramc1, #0 | 131 | cmp ramc1, #0 |
145 | ldrne tmp2, [ramc1, #AT91_DDRSDRC_LPR] | 132 | beq ddr_no_2nd_ctrl |
146 | strne tmp2, .saved_sam9_lpr1 | 133 | |
147 | bicne tmp2, #AT91_DDRSDRC_LPCB | 134 | ldr tmp2, [ramc1, #AT91_DDRSDRC_MDR] |
148 | orrne tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH | 135 | str tmp2, .saved_sam9_mdr1 |
136 | bic tmp2, tmp2, #~AT91_DDRSDRC_MD | ||
137 | cmp tmp2, #AT91_DDRSDRC_MD_LOW_POWER_DDR | ||
138 | ldreq tmp2, [ramc1, #AT91_DDRSDRC_MDR] | ||
139 | biceq tmp2, tmp2, #AT91_DDRSDRC_MD | ||
140 | orreq tmp2, tmp2, #AT91_DDRSDRC_MD_DDR2 | ||
141 | streq tmp2, [ramc1, #AT91_DDRSDRC_MDR] | ||
142 | |||
143 | ldr tmp2, [ramc1, #AT91_DDRSDRC_LPR] | ||
144 | str tmp2, .saved_sam9_lpr1 | ||
145 | bic tmp2, #AT91_DDRSDRC_LPCB | ||
146 | orr tmp2, #AT91_DDRSDRC_LPCB_SELF_REFRESH | ||
149 | 147 | ||
150 | /* Enable DDRAM self-refresh mode */ | 148 | /* Enable DDRAM self-refresh mode */ |
149 | str tmp2, [ramc1, #AT91_DDRSDRC_LPR] | ||
150 | ddr_no_2nd_ctrl: | ||
151 | str tmp1, [sdramc, #AT91_DDRSDRC_LPR] | 151 | str tmp1, [sdramc, #AT91_DDRSDRC_LPR] |
152 | strne tmp2, [ramc1, #AT91_DDRSDRC_LPR] | ||
153 | 152 | ||
154 | b sdr_sr_done | 153 | b sdr_sr_done |
155 | 154 | ||
@@ -208,6 +207,7 @@ sdr_sr_done: | |||
208 | /* Turn off the main oscillator */ | 207 | /* Turn off the main oscillator */ |
209 | ldr tmp1, [pmc, #AT91_CKGR_MOR] | 208 | ldr tmp1, [pmc, #AT91_CKGR_MOR] |
210 | bic tmp1, tmp1, #AT91_PMC_MOSCEN | 209 | bic tmp1, tmp1, #AT91_PMC_MOSCEN |
210 | orr tmp1, tmp1, #AT91_PMC_KEY | ||
211 | str tmp1, [pmc, #AT91_CKGR_MOR] | 211 | str tmp1, [pmc, #AT91_CKGR_MOR] |
212 | 212 | ||
213 | /* Wait for interrupt */ | 213 | /* Wait for interrupt */ |
@@ -216,6 +216,7 @@ sdr_sr_done: | |||
216 | /* Turn on the main oscillator */ | 216 | /* Turn on the main oscillator */ |
217 | ldr tmp1, [pmc, #AT91_CKGR_MOR] | 217 | ldr tmp1, [pmc, #AT91_CKGR_MOR] |
218 | orr tmp1, tmp1, #AT91_PMC_MOSCEN | 218 | orr tmp1, tmp1, #AT91_PMC_MOSCEN |
219 | orr tmp1, tmp1, #AT91_PMC_KEY | ||
219 | str tmp1, [pmc, #AT91_CKGR_MOR] | 220 | str tmp1, [pmc, #AT91_CKGR_MOR] |
220 | 221 | ||
221 | wait_moscrdy | 222 | wait_moscrdy |
@@ -280,12 +281,17 @@ sdr_sr_done: | |||
280 | */ | 281 | */ |
281 | cmp memctrl, #AT91_MEMCTRL_DDRSDR | 282 | cmp memctrl, #AT91_MEMCTRL_DDRSDR |
282 | bne sdr_en_restore | 283 | bne sdr_en_restore |
284 | /* Restore MDR in case of LPDDR1 */ | ||
285 | ldr tmp1, .saved_sam9_mdr | ||
286 | str tmp1, [sdramc, #AT91_DDRSDRC_MDR] | ||
283 | /* Restore LPR on AT91 with DDRAM */ | 287 | /* Restore LPR on AT91 with DDRAM */ |
284 | ldr tmp1, .saved_sam9_lpr | 288 | ldr tmp1, .saved_sam9_lpr |
285 | str tmp1, [sdramc, #AT91_DDRSDRC_LPR] | 289 | str tmp1, [sdramc, #AT91_DDRSDRC_LPR] |
286 | 290 | ||
287 | /* if we use the second ram controller */ | 291 | /* if we use the second ram controller */ |
288 | cmp ramc1, #0 | 292 | cmp ramc1, #0 |
293 | ldrne tmp2, .saved_sam9_mdr1 | ||
294 | strne tmp2, [ramc1, #AT91_DDRSDRC_MDR] | ||
289 | ldrne tmp2, .saved_sam9_lpr1 | 295 | ldrne tmp2, .saved_sam9_lpr1 |
290 | strne tmp2, [ramc1, #AT91_DDRSDRC_LPR] | 296 | strne tmp2, [ramc1, #AT91_DDRSDRC_LPR] |
291 | 297 | ||
@@ -319,5 +325,11 @@ ram_restored: | |||
319 | .saved_sam9_lpr1: | 325 | .saved_sam9_lpr1: |
320 | .word 0 | 326 | .word 0 |
321 | 327 | ||
328 | .saved_sam9_mdr: | ||
329 | .word 0 | ||
330 | |||
331 | .saved_sam9_mdr1: | ||
332 | .word 0 | ||
333 | |||
322 | ENTRY(at91_slow_clock_sz) | 334 | ENTRY(at91_slow_clock_sz) |
323 | .word .-at91_slow_clock | 335 | .word .-at91_slow_clock |
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index 3f32c47a6d74..d2e9f12d12f1 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c | |||
@@ -126,8 +126,7 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious) | |||
126 | */ | 126 | */ |
127 | void exynos_cpu_power_down(int cpu) | 127 | void exynos_cpu_power_down(int cpu) |
128 | { | 128 | { |
129 | if (cpu == 0 && (of_machine_is_compatible("samsung,exynos5420") || | 129 | if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) { |
130 | of_machine_is_compatible("samsung,exynos5800"))) { | ||
131 | /* | 130 | /* |
132 | * Bypass power down for CPU0 during suspend. Check for | 131 | * Bypass power down for CPU0 during suspend. Check for |
133 | * the SYS_PWR_REG value to decide if we are suspending | 132 | * the SYS_PWR_REG value to decide if we are suspending |
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c index 20f267121b3e..37266a826437 100644 --- a/arch/arm/mach-exynos/pm_domains.c +++ b/arch/arm/mach-exynos/pm_domains.c | |||
@@ -161,6 +161,34 @@ no_clk: | |||
161 | of_genpd_add_provider_simple(np, &pd->pd); | 161 | of_genpd_add_provider_simple(np, &pd->pd); |
162 | } | 162 | } |
163 | 163 | ||
164 | /* Assign the child power domains to their parents */ | ||
165 | for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") { | ||
166 | struct generic_pm_domain *child_domain, *parent_domain; | ||
167 | struct of_phandle_args args; | ||
168 | |||
169 | args.np = np; | ||
170 | args.args_count = 0; | ||
171 | child_domain = of_genpd_get_from_provider(&args); | ||
172 | if (!child_domain) | ||
173 | continue; | ||
174 | |||
175 | if (of_parse_phandle_with_args(np, "power-domains", | ||
176 | "#power-domain-cells", 0, &args) != 0) | ||
177 | continue; | ||
178 | |||
179 | parent_domain = of_genpd_get_from_provider(&args); | ||
180 | if (!parent_domain) | ||
181 | continue; | ||
182 | |||
183 | if (pm_genpd_add_subdomain(parent_domain, child_domain)) | ||
184 | pr_warn("%s failed to add subdomain: %s\n", | ||
185 | parent_domain->name, child_domain->name); | ||
186 | else | ||
187 | pr_info("%s has as child subdomain: %s.\n", | ||
188 | parent_domain->name, child_domain->name); | ||
189 | of_node_put(np); | ||
190 | } | ||
191 | |||
164 | return 0; | 192 | return 0; |
165 | } | 193 | } |
166 | arch_initcall(exynos4_pm_init_power_domain); | 194 | arch_initcall(exynos4_pm_init_power_domain); |
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index 52e2b1a2fddb..318d127df147 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c | |||
@@ -87,8 +87,8 @@ static unsigned int exynos_pmu_spare3; | |||
87 | static u32 exynos_irqwake_intmask = 0xffffffff; | 87 | static u32 exynos_irqwake_intmask = 0xffffffff; |
88 | 88 | ||
89 | static const struct exynos_wkup_irq exynos3250_wkup_irq[] = { | 89 | static const struct exynos_wkup_irq exynos3250_wkup_irq[] = { |
90 | { 73, BIT(1) }, /* RTC alarm */ | 90 | { 105, BIT(1) }, /* RTC alarm */ |
91 | { 74, BIT(2) }, /* RTC tick */ | 91 | { 106, BIT(2) }, /* RTC tick */ |
92 | { /* sentinel */ }, | 92 | { /* sentinel */ }, |
93 | }; | 93 | }; |
94 | 94 | ||
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c index 4ad6e473cf83..9de3412af406 100644 --- a/arch/arm/mach-imx/mach-imx6q.c +++ b/arch/arm/mach-imx/mach-imx6q.c | |||
@@ -211,8 +211,9 @@ static void __init imx6q_1588_init(void) | |||
211 | * set bit IOMUXC_GPR1[21]. Or the PTP clock must be from pad | 211 | * set bit IOMUXC_GPR1[21]. Or the PTP clock must be from pad |
212 | * (external OSC), and we need to clear the bit. | 212 | * (external OSC), and we need to clear the bit. |
213 | */ | 213 | */ |
214 | clksel = ptp_clk == enet_ref ? IMX6Q_GPR1_ENET_CLK_SEL_ANATOP : | 214 | clksel = clk_is_match(ptp_clk, enet_ref) ? |
215 | IMX6Q_GPR1_ENET_CLK_SEL_PAD; | 215 | IMX6Q_GPR1_ENET_CLK_SEL_ANATOP : |
216 | IMX6Q_GPR1_ENET_CLK_SEL_PAD; | ||
216 | gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); | 217 | gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); |
217 | if (!IS_ERR(gpr)) | 218 | if (!IS_ERR(gpr)) |
218 | regmap_update_bits(gpr, IOMUXC_GPR1, | 219 | regmap_update_bits(gpr, IOMUXC_GPR1, |
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 92afb723dcfc..355b08936871 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -1692,16 +1692,15 @@ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name) | |||
1692 | if (ret == -EBUSY) | 1692 | if (ret == -EBUSY) |
1693 | pr_warn("omap_hwmod: %s: failed to hardreset\n", oh->name); | 1693 | pr_warn("omap_hwmod: %s: failed to hardreset\n", oh->name); |
1694 | 1694 | ||
1695 | if (!ret) { | 1695 | if (oh->clkdm) { |
1696 | /* | 1696 | /* |
1697 | * Set the clockdomain to HW_AUTO, assuming that the | 1697 | * Set the clockdomain to HW_AUTO, assuming that the |
1698 | * previous state was HW_AUTO. | 1698 | * previous state was HW_AUTO. |
1699 | */ | 1699 | */ |
1700 | if (oh->clkdm && hwsup) | 1700 | if (hwsup) |
1701 | clkdm_allow_idle(oh->clkdm); | 1701 | clkdm_allow_idle(oh->clkdm); |
1702 | } else { | 1702 | |
1703 | if (oh->clkdm) | 1703 | clkdm_hwmod_disable(oh->clkdm, oh); |
1704 | clkdm_hwmod_disable(oh->clkdm, oh); | ||
1705 | } | 1704 | } |
1706 | 1705 | ||
1707 | return ret; | 1706 | return ret; |
@@ -2698,6 +2697,7 @@ static int __init _register(struct omap_hwmod *oh) | |||
2698 | INIT_LIST_HEAD(&oh->master_ports); | 2697 | INIT_LIST_HEAD(&oh->master_ports); |
2699 | INIT_LIST_HEAD(&oh->slave_ports); | 2698 | INIT_LIST_HEAD(&oh->slave_ports); |
2700 | spin_lock_init(&oh->_lock); | 2699 | spin_lock_init(&oh->_lock); |
2700 | lockdep_set_class(&oh->_lock, &oh->hwmod_key); | ||
2701 | 2701 | ||
2702 | oh->_state = _HWMOD_STATE_REGISTERED; | 2702 | oh->_state = _HWMOD_STATE_REGISTERED; |
2703 | 2703 | ||
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index 9d4bec6ee742..9611c91d9b82 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h | |||
@@ -674,6 +674,7 @@ struct omap_hwmod { | |||
674 | u32 _sysc_cache; | 674 | u32 _sysc_cache; |
675 | void __iomem *_mpu_rt_va; | 675 | void __iomem *_mpu_rt_va; |
676 | spinlock_t _lock; | 676 | spinlock_t _lock; |
677 | struct lock_class_key hwmod_key; /* unique lock class */ | ||
677 | struct list_head node; | 678 | struct list_head node; |
678 | struct omap_hwmod_ocp_if *_mpu_port; | 679 | struct omap_hwmod_ocp_if *_mpu_port; |
679 | unsigned int (*xlate_irq)(unsigned int); | 680 | unsigned int (*xlate_irq)(unsigned int); |
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index e8692e7675b8..16fe7a1b7a35 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c | |||
@@ -1466,55 +1466,18 @@ static struct omap_hwmod dra7xx_ocp2scp3_hwmod = { | |||
1466 | * | 1466 | * |
1467 | */ | 1467 | */ |
1468 | 1468 | ||
1469 | static struct omap_hwmod_class dra7xx_pcie_hwmod_class = { | 1469 | static struct omap_hwmod_class dra7xx_pciess_hwmod_class = { |
1470 | .name = "pcie", | 1470 | .name = "pcie", |
1471 | }; | 1471 | }; |
1472 | 1472 | ||
1473 | /* pcie1 */ | 1473 | /* pcie1 */ |
1474 | static struct omap_hwmod dra7xx_pcie1_hwmod = { | 1474 | static struct omap_hwmod dra7xx_pciess1_hwmod = { |
1475 | .name = "pcie1", | 1475 | .name = "pcie1", |
1476 | .class = &dra7xx_pcie_hwmod_class, | 1476 | .class = &dra7xx_pciess_hwmod_class, |
1477 | .clkdm_name = "pcie_clkdm", | 1477 | .clkdm_name = "pcie_clkdm", |
1478 | .main_clk = "l4_root_clk_div", | 1478 | .main_clk = "l4_root_clk_div", |
1479 | .prcm = { | 1479 | .prcm = { |
1480 | .omap4 = { | 1480 | .omap4 = { |
1481 | .clkctrl_offs = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET, | ||
1482 | .modulemode = MODULEMODE_SWCTRL, | ||
1483 | }, | ||
1484 | }, | ||
1485 | }; | ||
1486 | |||
1487 | /* pcie2 */ | ||
1488 | static struct omap_hwmod dra7xx_pcie2_hwmod = { | ||
1489 | .name = "pcie2", | ||
1490 | .class = &dra7xx_pcie_hwmod_class, | ||
1491 | .clkdm_name = "pcie_clkdm", | ||
1492 | .main_clk = "l4_root_clk_div", | ||
1493 | .prcm = { | ||
1494 | .omap4 = { | ||
1495 | .clkctrl_offs = DRA7XX_CM_PCIE_CLKSTCTRL_OFFSET, | ||
1496 | .modulemode = MODULEMODE_SWCTRL, | ||
1497 | }, | ||
1498 | }, | ||
1499 | }; | ||
1500 | |||
1501 | /* | ||
1502 | * 'PCIE PHY' class | ||
1503 | * | ||
1504 | */ | ||
1505 | |||
1506 | static struct omap_hwmod_class dra7xx_pcie_phy_hwmod_class = { | ||
1507 | .name = "pcie-phy", | ||
1508 | }; | ||
1509 | |||
1510 | /* pcie1 phy */ | ||
1511 | static struct omap_hwmod dra7xx_pcie1_phy_hwmod = { | ||
1512 | .name = "pcie1-phy", | ||
1513 | .class = &dra7xx_pcie_phy_hwmod_class, | ||
1514 | .clkdm_name = "l3init_clkdm", | ||
1515 | .main_clk = "l4_root_clk_div", | ||
1516 | .prcm = { | ||
1517 | .omap4 = { | ||
1518 | .clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS1_CLKCTRL_OFFSET, | 1481 | .clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS1_CLKCTRL_OFFSET, |
1519 | .context_offs = DRA7XX_RM_L3INIT_PCIESS1_CONTEXT_OFFSET, | 1482 | .context_offs = DRA7XX_RM_L3INIT_PCIESS1_CONTEXT_OFFSET, |
1520 | .modulemode = MODULEMODE_SWCTRL, | 1483 | .modulemode = MODULEMODE_SWCTRL, |
@@ -1522,11 +1485,11 @@ static struct omap_hwmod dra7xx_pcie1_phy_hwmod = { | |||
1522 | }, | 1485 | }, |
1523 | }; | 1486 | }; |
1524 | 1487 | ||
1525 | /* pcie2 phy */ | 1488 | /* pcie2 */ |
1526 | static struct omap_hwmod dra7xx_pcie2_phy_hwmod = { | 1489 | static struct omap_hwmod dra7xx_pciess2_hwmod = { |
1527 | .name = "pcie2-phy", | 1490 | .name = "pcie2", |
1528 | .class = &dra7xx_pcie_phy_hwmod_class, | 1491 | .class = &dra7xx_pciess_hwmod_class, |
1529 | .clkdm_name = "l3init_clkdm", | 1492 | .clkdm_name = "pcie_clkdm", |
1530 | .main_clk = "l4_root_clk_div", | 1493 | .main_clk = "l4_root_clk_div", |
1531 | .prcm = { | 1494 | .prcm = { |
1532 | .omap4 = { | 1495 | .omap4 = { |
@@ -2877,50 +2840,34 @@ static struct omap_hwmod_ocp_if dra7xx_l4_cfg__ocp2scp3 = { | |||
2877 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 2840 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
2878 | }; | 2841 | }; |
2879 | 2842 | ||
2880 | /* l3_main_1 -> pcie1 */ | 2843 | /* l3_main_1 -> pciess1 */ |
2881 | static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie1 = { | 2844 | static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess1 = { |
2882 | .master = &dra7xx_l3_main_1_hwmod, | 2845 | .master = &dra7xx_l3_main_1_hwmod, |
2883 | .slave = &dra7xx_pcie1_hwmod, | 2846 | .slave = &dra7xx_pciess1_hwmod, |
2884 | .clk = "l3_iclk_div", | 2847 | .clk = "l3_iclk_div", |
2885 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 2848 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
2886 | }; | 2849 | }; |
2887 | 2850 | ||
2888 | /* l4_cfg -> pcie1 */ | 2851 | /* l4_cfg -> pciess1 */ |
2889 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1 = { | 2852 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess1 = { |
2890 | .master = &dra7xx_l4_cfg_hwmod, | 2853 | .master = &dra7xx_l4_cfg_hwmod, |
2891 | .slave = &dra7xx_pcie1_hwmod, | 2854 | .slave = &dra7xx_pciess1_hwmod, |
2892 | .clk = "l4_root_clk_div", | 2855 | .clk = "l4_root_clk_div", |
2893 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 2856 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
2894 | }; | 2857 | }; |
2895 | 2858 | ||
2896 | /* l3_main_1 -> pcie2 */ | 2859 | /* l3_main_1 -> pciess2 */ |
2897 | static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pcie2 = { | 2860 | static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess2 = { |
2898 | .master = &dra7xx_l3_main_1_hwmod, | 2861 | .master = &dra7xx_l3_main_1_hwmod, |
2899 | .slave = &dra7xx_pcie2_hwmod, | 2862 | .slave = &dra7xx_pciess2_hwmod, |
2900 | .clk = "l3_iclk_div", | 2863 | .clk = "l3_iclk_div", |
2901 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 2864 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
2902 | }; | 2865 | }; |
2903 | 2866 | ||
2904 | /* l4_cfg -> pcie2 */ | 2867 | /* l4_cfg -> pciess2 */ |
2905 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2 = { | 2868 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess2 = { |
2906 | .master = &dra7xx_l4_cfg_hwmod, | ||
2907 | .slave = &dra7xx_pcie2_hwmod, | ||
2908 | .clk = "l4_root_clk_div", | ||
2909 | .user = OCP_USER_MPU | OCP_USER_SDMA, | ||
2910 | }; | ||
2911 | |||
2912 | /* l4_cfg -> pcie1 phy */ | ||
2913 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie1_phy = { | ||
2914 | .master = &dra7xx_l4_cfg_hwmod, | ||
2915 | .slave = &dra7xx_pcie1_phy_hwmod, | ||
2916 | .clk = "l4_root_clk_div", | ||
2917 | .user = OCP_USER_MPU | OCP_USER_SDMA, | ||
2918 | }; | ||
2919 | |||
2920 | /* l4_cfg -> pcie2 phy */ | ||
2921 | static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pcie2_phy = { | ||
2922 | .master = &dra7xx_l4_cfg_hwmod, | 2869 | .master = &dra7xx_l4_cfg_hwmod, |
2923 | .slave = &dra7xx_pcie2_phy_hwmod, | 2870 | .slave = &dra7xx_pciess2_hwmod, |
2924 | .clk = "l4_root_clk_div", | 2871 | .clk = "l4_root_clk_div", |
2925 | .user = OCP_USER_MPU | OCP_USER_SDMA, | 2872 | .user = OCP_USER_MPU | OCP_USER_SDMA, |
2926 | }; | 2873 | }; |
@@ -3327,12 +3274,10 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = { | |||
3327 | &dra7xx_l4_cfg__mpu, | 3274 | &dra7xx_l4_cfg__mpu, |
3328 | &dra7xx_l4_cfg__ocp2scp1, | 3275 | &dra7xx_l4_cfg__ocp2scp1, |
3329 | &dra7xx_l4_cfg__ocp2scp3, | 3276 | &dra7xx_l4_cfg__ocp2scp3, |
3330 | &dra7xx_l3_main_1__pcie1, | 3277 | &dra7xx_l3_main_1__pciess1, |
3331 | &dra7xx_l4_cfg__pcie1, | 3278 | &dra7xx_l4_cfg__pciess1, |
3332 | &dra7xx_l3_main_1__pcie2, | 3279 | &dra7xx_l3_main_1__pciess2, |
3333 | &dra7xx_l4_cfg__pcie2, | 3280 | &dra7xx_l4_cfg__pciess2, |
3334 | &dra7xx_l4_cfg__pcie1_phy, | ||
3335 | &dra7xx_l4_cfg__pcie2_phy, | ||
3336 | &dra7xx_l3_main_1__qspi, | 3281 | &dra7xx_l3_main_1__qspi, |
3337 | &dra7xx_l4_per3__rtcss, | 3282 | &dra7xx_l4_per3__rtcss, |
3338 | &dra7xx_l4_cfg__sata, | 3283 | &dra7xx_l4_cfg__sata, |
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 190fa43e7479..e642b079e9f3 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c | |||
@@ -173,6 +173,7 @@ static void __init omap3_igep0030_rev_g_legacy_init(void) | |||
173 | 173 | ||
174 | static void __init omap3_evm_legacy_init(void) | 174 | static void __init omap3_evm_legacy_init(void) |
175 | { | 175 | { |
176 | hsmmc2_internal_input_clk(); | ||
176 | legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 149); | 177 | legacy_init_wl12xx(WL12XX_REFCLOCK_38, 0, 149); |
177 | } | 178 | } |
178 | 179 | ||
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c index a08a617a6c11..d6d6bc39e05c 100644 --- a/arch/arm/mach-omap2/prm44xx.c +++ b/arch/arm/mach-omap2/prm44xx.c | |||
@@ -252,10 +252,10 @@ static void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask) | |||
252 | { | 252 | { |
253 | saved_mask[0] = | 253 | saved_mask[0] = |
254 | omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, | 254 | omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, |
255 | OMAP4_PRM_IRQSTATUS_MPU_OFFSET); | 255 | OMAP4_PRM_IRQENABLE_MPU_OFFSET); |
256 | saved_mask[1] = | 256 | saved_mask[1] = |
257 | omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, | 257 | omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, |
258 | OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET); | 258 | OMAP4_PRM_IRQENABLE_MPU_2_OFFSET); |
259 | 259 | ||
260 | omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, | 260 | omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, |
261 | OMAP4_PRM_IRQENABLE_MPU_OFFSET); | 261 | OMAP4_PRM_IRQENABLE_MPU_OFFSET); |
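The hunk above corrects omap44xx_prm_save_and_clear_irqen(): it now reads the IRQENABLE registers (not IRQSTATUS) into saved_mask before masking everything, so the later restore writes genuine enable bits back rather than whatever status bits happened to be latched. A minimal sketch of the save/clear half of the pattern, using the accessor and register names from the hunk (an illustration, not the full function):

    /* Save the current MPU IRQ enable mask, then mask all PRM interrupts. */
    static void prm_save_and_clear_irqen(u32 *saved_mask)
    {
            saved_mask[0] = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
                                                    OMAP4_PRM_IRQENABLE_MPU_OFFSET);
            omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST,
                                     OMAP4_PRM_IRQENABLE_MPU_OFFSET);
            /* the matching restore writes saved_mask[0] back to IRQENABLE */
    }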
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c index 7d8eab857a93..f6d02e4cbcda 100644 --- a/arch/arm/mach-pxa/idp.c +++ b/arch/arm/mach-pxa/idp.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/platform_data/video-pxafb.h> | 36 | #include <linux/platform_data/video-pxafb.h> |
37 | #include <mach/bitfield.h> | 37 | #include <mach/bitfield.h> |
38 | #include <linux/platform_data/mmc-pxamci.h> | 38 | #include <linux/platform_data/mmc-pxamci.h> |
39 | #include <linux/smc91x.h> | ||
39 | 40 | ||
40 | #include "generic.h" | 41 | #include "generic.h" |
41 | #include "devices.h" | 42 | #include "devices.h" |
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index 28da319d389f..eaee2c20b189 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c | |||
@@ -195,7 +195,7 @@ static struct resource smc91x_resources[] = { | |||
195 | }; | 195 | }; |
196 | 196 | ||
197 | struct smc91x_platdata smc91x_platdata = { | 197 | struct smc91x_platdata smc91x_platdata = { |
198 | .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT; | 198 | .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, |
199 | }; | 199 | }; |
200 | 200 | ||
201 | static struct platform_device smc91x_device = { | 201 | static struct platform_device smc91x_device = { |
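The lpd270 change is a one-character build fix: members of a designated initializer are separated by commas, and the stray semicolon made the smc91x_platdata definition a syntax error. A self-contained illustration of the corrected form (the flag values below are placeholders, not the ones from <linux/smc91x.h>):

    struct smc91x_platdata {
            unsigned long flags;
    };

    #define SMC91X_USE_16BIT  (1UL << 1)   /* placeholder bit positions */
    #define SMC91X_NOWAIT     (1UL << 3)

    static struct smc91x_platdata smc91x_platdata = {
            .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,   /* comma, not ';' */
    };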
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index 7b0cd3172354..af868d258e66 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c | |||
@@ -268,8 +268,8 @@ static int neponset_probe(struct platform_device *dev) | |||
268 | .id = 0, | 268 | .id = 0, |
269 | .res = smc91x_resources, | 269 | .res = smc91x_resources, |
270 | .num_res = ARRAY_SIZE(smc91x_resources), | 270 | .num_res = ARRAY_SIZE(smc91x_resources), |
271 | .data = &smc91c_platdata, | 271 | .data = &smc91x_platdata, |
272 | .size_data = sizeof(smc91c_platdata), | 272 | .size_data = sizeof(smc91x_platdata), |
273 | }; | 273 | }; |
274 | int ret, irq; | 274 | int ret, irq; |
275 | 275 | ||
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c index 696fd0fe4806..1525d7b5f1b7 100644 --- a/arch/arm/mach-sa1100/pleb.c +++ b/arch/arm/mach-sa1100/pleb.c | |||
@@ -54,7 +54,7 @@ static struct platform_device smc91x_device = { | |||
54 | .num_resources = ARRAY_SIZE(smc91x_resources), | 54 | .num_resources = ARRAY_SIZE(smc91x_resources), |
55 | .resource = smc91x_resources, | 55 | .resource = smc91x_resources, |
56 | .dev = { | 56 | .dev = { |
57 | .platform_data = &smc91c_platdata, | 57 | .platform_data = &smc91x_platdata, |
58 | }, | 58 | }, |
59 | }; | 59 | }; |
60 | 60 | ||
diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h index 483cb467bf65..a0f3b1cd497c 100644 --- a/arch/arm/mach-socfpga/core.h +++ b/arch/arm/mach-socfpga/core.h | |||
@@ -45,6 +45,6 @@ extern char secondary_trampoline, secondary_trampoline_end; | |||
45 | 45 | ||
46 | extern unsigned long socfpga_cpu1start_addr; | 46 | extern unsigned long socfpga_cpu1start_addr; |
47 | 47 | ||
48 | #define SOCFPGA_SCU_VIRT_BASE 0xfffec000 | 48 | #define SOCFPGA_SCU_VIRT_BASE 0xfee00000 |
49 | 49 | ||
50 | #endif | 50 | #endif |
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c index 383d61e138af..f5e597c207b9 100644 --- a/arch/arm/mach-socfpga/socfpga.c +++ b/arch/arm/mach-socfpga/socfpga.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <asm/hardware/cache-l2x0.h> | 23 | #include <asm/hardware/cache-l2x0.h> |
24 | #include <asm/mach/arch.h> | 24 | #include <asm/mach/arch.h> |
25 | #include <asm/mach/map.h> | 25 | #include <asm/mach/map.h> |
26 | #include <asm/cacheflush.h> | ||
26 | 27 | ||
27 | #include "core.h" | 28 | #include "core.h" |
28 | 29 | ||
@@ -73,6 +74,10 @@ void __init socfpga_sysmgr_init(void) | |||
73 | (u32 *) &socfpga_cpu1start_addr)) | 74 | (u32 *) &socfpga_cpu1start_addr)) |
74 | pr_err("SMP: Need cpu1-start-addr in device tree.\n"); | 75 | pr_err("SMP: Need cpu1-start-addr in device tree.\n"); |
75 | 76 | ||
77 | /* Ensure that socfpga_cpu1start_addr is visible to other CPUs */ | ||
78 | smp_wmb(); | ||
79 | sync_cache_w(&socfpga_cpu1start_addr); | ||
80 | |||
76 | sys_manager_base_addr = of_iomap(np, 0); | 81 | sys_manager_base_addr = of_iomap(np, 0); |
77 | 82 | ||
78 | np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr"); | 83 | np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr"); |
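The added smp_wmb()/sync_cache_w() pair publishes socfpga_cpu1start_addr for the secondary CPU, which begins executing with its own caches (and MMU) disabled and therefore reads the variable straight from memory; sync_cache_w() is the ARM helper that cleans the cache line holding a single object out to the point of coherency. A hedged sketch of the pattern (the wrapper function name is invented for the example):

    #include <asm/barrier.h>      /* smp_wmb()      */
    #include <asm/cacheflush.h>   /* sync_cache_w() */

    /* Publish a value that a cache-off secondary CPU must be able to read. */
    static void publish_cpu1_start_addr(u32 entry)
    {
            socfpga_cpu1start_addr = entry;

            smp_wmb();                              /* order the store...            */
            sync_cache_w(&socfpga_cpu1start_addr);  /* ...then clean it out of cache */
    }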
diff --git a/arch/arm/mach-sti/board-dt.c b/arch/arm/mach-sti/board-dt.c index b067390cef4e..b373acade338 100644 --- a/arch/arm/mach-sti/board-dt.c +++ b/arch/arm/mach-sti/board-dt.c | |||
@@ -18,6 +18,7 @@ static const char *stih41x_dt_match[] __initdata = { | |||
18 | "st,stih415", | 18 | "st,stih415", |
19 | "st,stih416", | 19 | "st,stih416", |
20 | "st,stih407", | 20 | "st,stih407", |
21 | "st,stih410", | ||
21 | "st,stih418", | 22 | "st,stih418", |
22 | NULL | 23 | NULL |
23 | }; | 24 | }; |
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index f1ad9c2ab2e9..a857794432d6 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi | |||
@@ -622,7 +622,7 @@ | |||
622 | }; | 622 | }; |
623 | 623 | ||
624 | sgenet0: ethernet@1f210000 { | 624 | sgenet0: ethernet@1f210000 { |
625 | compatible = "apm,xgene-enet"; | 625 | compatible = "apm,xgene1-sgenet"; |
626 | status = "disabled"; | 626 | status = "disabled"; |
627 | reg = <0x0 0x1f210000 0x0 0xd100>, | 627 | reg = <0x0 0x1f210000 0x0 0xd100>, |
628 | <0x0 0x1f200000 0x0 0Xc300>, | 628 | <0x0 0x1f200000 0x0 0Xc300>, |
@@ -636,7 +636,7 @@ | |||
636 | }; | 636 | }; |
637 | 637 | ||
638 | xgenet: ethernet@1f610000 { | 638 | xgenet: ethernet@1f610000 { |
639 | compatible = "apm,xgene-enet"; | 639 | compatible = "apm,xgene1-xgenet"; |
640 | status = "disabled"; | 640 | status = "disabled"; |
641 | reg = <0x0 0x1f610000 0x0 0xd100>, | 641 | reg = <0x0 0x1f610000 0x0 0xd100>, |
642 | <0x0 0x1f600000 0x0 0Xc300>, | 642 | <0x0 0x1f600000 0x0 0Xc300>, |
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index c028fe37456f..53d9c354219f 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h | |||
@@ -48,6 +48,7 @@ static inline void tlb_flush(struct mmu_gather *tlb) | |||
48 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | 48 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, |
49 | unsigned long addr) | 49 | unsigned long addr) |
50 | { | 50 | { |
51 | __flush_tlb_pgtable(tlb->mm, addr); | ||
51 | pgtable_page_dtor(pte); | 52 | pgtable_page_dtor(pte); |
52 | tlb_remove_entry(tlb, pte); | 53 | tlb_remove_entry(tlb, pte); |
53 | } | 54 | } |
@@ -56,6 +57,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | |||
56 | static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, | 57 | static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, |
57 | unsigned long addr) | 58 | unsigned long addr) |
58 | { | 59 | { |
60 | __flush_tlb_pgtable(tlb->mm, addr); | ||
59 | tlb_remove_entry(tlb, virt_to_page(pmdp)); | 61 | tlb_remove_entry(tlb, virt_to_page(pmdp)); |
60 | } | 62 | } |
61 | #endif | 63 | #endif |
@@ -64,6 +66,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, | |||
64 | static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, | 66 | static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, |
65 | unsigned long addr) | 67 | unsigned long addr) |
66 | { | 68 | { |
69 | __flush_tlb_pgtable(tlb->mm, addr); | ||
67 | tlb_remove_entry(tlb, virt_to_page(pudp)); | 70 | tlb_remove_entry(tlb, virt_to_page(pudp)); |
68 | } | 71 | } |
69 | #endif | 72 | #endif |
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 4abe9b945f77..c3bb05b98616 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h | |||
@@ -144,6 +144,19 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end | |||
144 | } | 144 | } |
145 | 145 | ||
146 | /* | 146 | /* |
147 | * Used to invalidate the TLB (walk caches) corresponding to intermediate page | ||
148 | * table levels (pgd/pud/pmd). | ||
149 | */ | ||
150 | static inline void __flush_tlb_pgtable(struct mm_struct *mm, | ||
151 | unsigned long uaddr) | ||
152 | { | ||
153 | unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48); | ||
154 | |||
155 | dsb(ishst); | ||
156 | asm("tlbi vae1is, %0" : : "r" (addr)); | ||
157 | dsb(ish); | ||
158 | } | ||
159 | /* | ||
147 | * On AArch64, the cache coherency is handled via the set_pte_at() function. | 160 | * On AArch64, the cache coherency is handled via the set_pte_at() function. |
148 | */ | 161 | */ |
149 | static inline void update_mmu_cache(struct vm_area_struct *vma, | 162 | static inline void update_mmu_cache(struct vm_area_struct *vma, |
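__flush_tlb_pgtable() above gives the page-table freeing hooks (see the tlb.h hunk) a way to drop cached intermediate walk entries on all CPUs before a pgd/pud/pmd page is returned to the allocator. The operand of "tlbi vae1is" packs the virtual page number into the low bits and the ASID into bits 63:48; a worked example of the address built in the helper above, with the barriers' roles spelled out:

    /*
     * Example operand for __flush_tlb_pgtable(mm, uaddr):
     *   uaddr            = 0x00007f1234567000
     *   ASID(mm)         = 0x42
     *   uaddr >> 12      = 0x00000007f1234567   (virtual page number)
     *   (u64)ASID << 48  = 0x0042000000000000
     *   operand          = 0x00420007f1234567
     *
     * dsb(ishst) orders the preceding page-table update against the TLBI,
     * dsb(ish) waits for the invalidation to complete on every CPU in the
     * inner-shareable domain before the table page is actually freed.
     */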
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index b42c7b480e1e..2b8d70164428 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c | |||
@@ -354,3 +354,12 @@ void efi_virtmap_unload(void) | |||
354 | efi_set_pgd(current->active_mm); | 354 | efi_set_pgd(current->active_mm); |
355 | preempt_enable(); | 355 | preempt_enable(); |
356 | } | 356 | } |
357 | |||
358 | /* | ||
359 | * UpdateCapsule() depends on the system being shutdown via | ||
360 | * ResetSystem(). | ||
361 | */ | ||
362 | bool efi_poweroff_required(void) | ||
363 | { | ||
364 | return efi_enabled(EFI_RUNTIME_SERVICES); | ||
365 | } | ||
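efi_poweroff_required() lets arm64 opt in to powering off through the EFI runtime ResetSystem() service whenever runtime services are alive, for the same UpdateCapsule() reason given in the comment. The consumer lives in the generic EFI code; the sketch below shows the usual shape of that hook, but the function names on the consumer side are recalled from memory and should be treated as illustrative, not as a quote of the tree at this commit:

    /* Generic EFI side (sketch): install an EFI-backed pm_power_off hook. */
    static void efi_power_off(void)
    {
            efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
    }

    static int __init efi_shutdown_init(void)
    {
            if (!efi_enabled(EFI_RUNTIME_SERVICES))
                    return -ENODEV;

            if (efi_poweroff_required())
                    pm_power_off = efi_power_off;

            return 0;
    }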
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 8ce88e08c030..07f930540f4a 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -585,8 +585,8 @@ ENDPROC(set_cpu_boot_mode_flag) | |||
585 | * zeroing of .bss would clobber it. | 585 | * zeroing of .bss would clobber it. |
586 | */ | 586 | */ |
587 | .pushsection .data..cacheline_aligned | 587 | .pushsection .data..cacheline_aligned |
588 | ENTRY(__boot_cpu_mode) | ||
589 | .align L1_CACHE_SHIFT | 588 | .align L1_CACHE_SHIFT |
589 | ENTRY(__boot_cpu_mode) | ||
590 | .long BOOT_CPU_MODE_EL2 | 590 | .long BOOT_CPU_MODE_EL2 |
591 | .long 0 | 591 | .long 0 |
592 | .popsection | 592 | .popsection |
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index fde9923af859..c6b1f3b96f45 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <stdarg.h> | 21 | #include <stdarg.h> |
22 | 22 | ||
23 | #include <linux/compat.h> | 23 | #include <linux/compat.h> |
24 | #include <linux/efi.h> | ||
24 | #include <linux/export.h> | 25 | #include <linux/export.h> |
25 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
26 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
@@ -150,6 +151,13 @@ void machine_restart(char *cmd) | |||
150 | local_irq_disable(); | 151 | local_irq_disable(); |
151 | smp_send_stop(); | 152 | smp_send_stop(); |
152 | 153 | ||
154 | /* | ||
155 | * UpdateCapsule() depends on the system being reset via | ||
156 | * ResetSystem(). | ||
157 | */ | ||
158 | if (efi_enabled(EFI_RUNTIME_SERVICES)) | ||
159 | efi_reboot(reboot_mode, NULL); | ||
160 | |||
153 | /* Now call the architecture specific reboot code. */ | 161 | /* Now call the architecture specific reboot code. */ |
154 | if (arm_pm_restart) | 162 | if (arm_pm_restart) |
155 | arm_pm_restart(reboot_mode, cmd); | 163 | arm_pm_restart(reboot_mode, cmd); |
diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h index 78d4483ba40c..ec4db6df5e0d 100644 --- a/arch/c6x/include/asm/pgtable.h +++ b/arch/c6x/include/asm/pgtable.h | |||
@@ -67,6 +67,11 @@ extern unsigned long empty_zero_page; | |||
67 | */ | 67 | */ |
68 | #define pgtable_cache_init() do { } while (0) | 68 | #define pgtable_cache_init() do { } while (0) |
69 | 69 | ||
70 | /* | ||
71 | * c6x is !MMU, so define the simpliest implementation | ||
72 | */ | ||
73 | #define pgprot_writecombine pgprot_noncached | ||
74 | |||
70 | #include <asm-generic/pgtable.h> | 75 | #include <asm-generic/pgtable.h> |
71 | 76 | ||
72 | #endif /* _ASM_C6X_PGTABLE_H */ | 77 | #endif /* _ASM_C6X_PGTABLE_H */ |
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S index 0536bc021cc6..ef548510b951 100644 --- a/arch/microblaze/kernel/entry.S +++ b/arch/microblaze/kernel/entry.S | |||
@@ -348,8 +348,9 @@ C_ENTRY(_user_exception): | |||
348 | * The LP register should point to the location where the called function | 348 | * The LP register should point to the location where the called function |
349 | * should return. [note that MAKE_SYS_CALL uses label 1] */ | 349 | * should return. [note that MAKE_SYS_CALL uses label 1] */ |
350 | /* See if the system call number is valid */ | 350 | /* See if the system call number is valid */ |
351 | blti r12, 5f | ||
351 | addi r11, r12, -__NR_syscalls; | 352 | addi r11, r12, -__NR_syscalls; |
352 | bgei r11,5f; | 353 | bgei r11, 5f; |
353 | /* Figure out which function to use for this system call. */ | 354 | /* Figure out which function to use for this system call. */ |
354 | /* Note Microblaze barrel shift is optional, so don't rely on it */ | 355 | /* Note Microblaze barrel shift is optional, so don't rely on it */ |
355 | add r12, r12, r12; /* convert num -> ptr */ | 356 | add r12, r12, r12; /* convert num -> ptr */ |
@@ -375,7 +376,7 @@ C_ENTRY(_user_exception): | |||
375 | 376 | ||
376 | /* The syscall number is invalid, return an error. */ | 377 | /* The syscall number is invalid, return an error. */ |
377 | 5: | 378 | 5: |
378 | rtsd r15, 8; /* looks like a normal subroutine return */ | 379 | braid ret_from_trap |
379 | addi r3, r0, -ENOSYS; | 380 | addi r3, r0, -ENOSYS; |
380 | 381 | ||
381 | /* Entry point used to return from a syscall/trap */ | 382 | /* Entry point used to return from a syscall/trap */ |
@@ -411,7 +412,7 @@ C_ENTRY(ret_from_trap): | |||
411 | bri 1b | 412 | bri 1b |
412 | 413 | ||
413 | /* Maybe handle a signal */ | 414 | /* Maybe handle a signal */ |
414 | 5: | 415 | 5: |
415 | andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; | 416 | andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME; |
416 | beqi r11, 4f; /* Signals to handle, handle them */ | 417 | beqi r11, 4f; /* Signals to handle, handle them */ |
417 | 418 | ||
diff --git a/arch/nios2/include/asm/ptrace.h b/arch/nios2/include/asm/ptrace.h index 20fb1cf2dab6..642462144872 100644 --- a/arch/nios2/include/asm/ptrace.h +++ b/arch/nios2/include/asm/ptrace.h | |||
@@ -15,7 +15,54 @@ | |||
15 | 15 | ||
16 | #include <uapi/asm/ptrace.h> | 16 | #include <uapi/asm/ptrace.h> |
17 | 17 | ||
18 | /* This struct defines the way the registers are stored on the | ||
19 | stack during a system call. */ | ||
20 | |||
18 | #ifndef __ASSEMBLY__ | 21 | #ifndef __ASSEMBLY__ |
22 | struct pt_regs { | ||
23 | unsigned long r8; /* r8-r15 Caller-saved GP registers */ | ||
24 | unsigned long r9; | ||
25 | unsigned long r10; | ||
26 | unsigned long r11; | ||
27 | unsigned long r12; | ||
28 | unsigned long r13; | ||
29 | unsigned long r14; | ||
30 | unsigned long r15; | ||
31 | unsigned long r1; /* Assembler temporary */ | ||
32 | unsigned long r2; /* Retval LS 32bits */ | ||
33 | unsigned long r3; /* Retval MS 32bits */ | ||
34 | unsigned long r4; /* r4-r7 Register arguments */ | ||
35 | unsigned long r5; | ||
36 | unsigned long r6; | ||
37 | unsigned long r7; | ||
38 | unsigned long orig_r2; /* Copy of r2 ?? */ | ||
39 | unsigned long ra; /* Return address */ | ||
40 | unsigned long fp; /* Frame pointer */ | ||
41 | unsigned long sp; /* Stack pointer */ | ||
42 | unsigned long gp; /* Global pointer */ | ||
43 | unsigned long estatus; | ||
44 | unsigned long ea; /* Exception return address (pc) */ | ||
45 | unsigned long orig_r7; | ||
46 | }; | ||
47 | |||
48 | /* | ||
49 | * This is the extended stack used by signal handlers and the context | ||
50 | * switcher: it's pushed after the normal "struct pt_regs". | ||
51 | */ | ||
52 | struct switch_stack { | ||
53 | unsigned long r16; /* r16-r23 Callee-saved GP registers */ | ||
54 | unsigned long r17; | ||
55 | unsigned long r18; | ||
56 | unsigned long r19; | ||
57 | unsigned long r20; | ||
58 | unsigned long r21; | ||
59 | unsigned long r22; | ||
60 | unsigned long r23; | ||
61 | unsigned long fp; | ||
62 | unsigned long gp; | ||
63 | unsigned long ra; | ||
64 | }; | ||
65 | |||
19 | #define user_mode(regs) (((regs)->estatus & ESTATUS_EU)) | 66 | #define user_mode(regs) (((regs)->estatus & ESTATUS_EU)) |
20 | 67 | ||
21 | #define instruction_pointer(regs) ((regs)->ra) | 68 | #define instruction_pointer(regs) ((regs)->ra) |
diff --git a/arch/nios2/include/asm/ucontext.h b/arch/nios2/include/asm/ucontext.h deleted file mode 100644 index 2c87614b0f6e..000000000000 --- a/arch/nios2/include/asm/ucontext.h +++ /dev/null | |||
@@ -1,32 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch> | ||
3 | * Copyright (C) 2004 Microtronix Datacom Ltd | ||
4 | * | ||
5 | * This file is subject to the terms and conditions of the GNU General Public | ||
6 | * License. See the file "COPYING" in the main directory of this archive | ||
7 | * for more details. | ||
8 | */ | ||
9 | |||
10 | #ifndef _ASM_NIOS2_UCONTEXT_H | ||
11 | #define _ASM_NIOS2_UCONTEXT_H | ||
12 | |||
13 | typedef int greg_t; | ||
14 | #define NGREG 32 | ||
15 | typedef greg_t gregset_t[NGREG]; | ||
16 | |||
17 | struct mcontext { | ||
18 | int version; | ||
19 | gregset_t gregs; | ||
20 | }; | ||
21 | |||
22 | #define MCONTEXT_VERSION 2 | ||
23 | |||
24 | struct ucontext { | ||
25 | unsigned long uc_flags; | ||
26 | struct ucontext *uc_link; | ||
27 | stack_t uc_stack; | ||
28 | struct mcontext uc_mcontext; | ||
29 | sigset_t uc_sigmask; /* mask last for extensibility */ | ||
30 | }; | ||
31 | |||
32 | #endif | ||
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild index 4f07ca3f8d10..376131194cc3 100644 --- a/arch/nios2/include/uapi/asm/Kbuild +++ b/arch/nios2/include/uapi/asm/Kbuild | |||
@@ -2,3 +2,5 @@ include include/uapi/asm-generic/Kbuild.asm | |||
2 | 2 | ||
3 | header-y += elf.h | 3 | header-y += elf.h |
4 | header-y += ucontext.h | 4 | header-y += ucontext.h |
5 | |||
6 | generic-y += ucontext.h | ||
diff --git a/arch/nios2/include/uapi/asm/elf.h b/arch/nios2/include/uapi/asm/elf.h index a5b91ae5cf56..6f06d3b2949e 100644 --- a/arch/nios2/include/uapi/asm/elf.h +++ b/arch/nios2/include/uapi/asm/elf.h | |||
@@ -50,9 +50,7 @@ | |||
50 | 50 | ||
51 | typedef unsigned long elf_greg_t; | 51 | typedef unsigned long elf_greg_t; |
52 | 52 | ||
53 | #define ELF_NGREG \ | 53 | #define ELF_NGREG 49 |
54 | ((sizeof(struct pt_regs) + sizeof(struct switch_stack)) / \ | ||
55 | sizeof(elf_greg_t)) | ||
56 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | 54 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; |
57 | 55 | ||
58 | typedef unsigned long elf_fpregset_t; | 56 | typedef unsigned long elf_fpregset_t; |
diff --git a/arch/nios2/include/uapi/asm/ptrace.h b/arch/nios2/include/uapi/asm/ptrace.h index e83a7c9d1c36..71a330597adf 100644 --- a/arch/nios2/include/uapi/asm/ptrace.h +++ b/arch/nios2/include/uapi/asm/ptrace.h | |||
@@ -67,53 +67,9 @@ | |||
67 | 67 | ||
68 | #define NUM_PTRACE_REG (PTR_TLBMISC + 1) | 68 | #define NUM_PTRACE_REG (PTR_TLBMISC + 1) |
69 | 69 | ||
70 | /* this struct defines the way the registers are stored on the | 70 | /* User structures for general purpose registers. */ |
71 | stack during a system call. | 71 | struct user_pt_regs { |
72 | 72 | __u32 regs[49]; | |
73 | There is a fake_regs in setup.c that has to match pt_regs.*/ | ||
74 | |||
75 | struct pt_regs { | ||
76 | unsigned long r8; /* r8-r15 Caller-saved GP registers */ | ||
77 | unsigned long r9; | ||
78 | unsigned long r10; | ||
79 | unsigned long r11; | ||
80 | unsigned long r12; | ||
81 | unsigned long r13; | ||
82 | unsigned long r14; | ||
83 | unsigned long r15; | ||
84 | unsigned long r1; /* Assembler temporary */ | ||
85 | unsigned long r2; /* Retval LS 32bits */ | ||
86 | unsigned long r3; /* Retval MS 32bits */ | ||
87 | unsigned long r4; /* r4-r7 Register arguments */ | ||
88 | unsigned long r5; | ||
89 | unsigned long r6; | ||
90 | unsigned long r7; | ||
91 | unsigned long orig_r2; /* Copy of r2 ?? */ | ||
92 | unsigned long ra; /* Return address */ | ||
93 | unsigned long fp; /* Frame pointer */ | ||
94 | unsigned long sp; /* Stack pointer */ | ||
95 | unsigned long gp; /* Global pointer */ | ||
96 | unsigned long estatus; | ||
97 | unsigned long ea; /* Exception return address (pc) */ | ||
98 | unsigned long orig_r7; | ||
99 | }; | ||
100 | |||
101 | /* | ||
102 | * This is the extended stack used by signal handlers and the context | ||
103 | * switcher: it's pushed after the normal "struct pt_regs". | ||
104 | */ | ||
105 | struct switch_stack { | ||
106 | unsigned long r16; /* r16-r23 Callee-saved GP registers */ | ||
107 | unsigned long r17; | ||
108 | unsigned long r18; | ||
109 | unsigned long r19; | ||
110 | unsigned long r20; | ||
111 | unsigned long r21; | ||
112 | unsigned long r22; | ||
113 | unsigned long r23; | ||
114 | unsigned long fp; | ||
115 | unsigned long gp; | ||
116 | unsigned long ra; | ||
117 | }; | 73 | }; |
118 | 74 | ||
119 | #endif /* __ASSEMBLY__ */ | 75 | #endif /* __ASSEMBLY__ */ |
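Moving pt_regs and switch_stack out of the uapi header stops exporting the kernel's internal exception-frame layout; what userspace now sees is the fixed struct user_pt_regs with 49 slots, which also matches the new ELF_NGREG value in the elf.h hunk. Debuggers are expected to fetch it through the regset interface. A hedged userspace sketch (error handling trimmed; the register index printed is arbitrary, and user_pt_regs is re-declared locally only to keep the example self-contained):

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <sys/uio.h>
    #include <elf.h>                 /* NT_PRSTATUS */

    struct user_pt_regs { unsigned int regs[49]; };  /* __u32 regs[49] upstream */

    static void dump_first_reg(pid_t pid)
    {
            struct user_pt_regs uregs;
            struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };

            ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
            printf("regs[0] = 0x%08x\n", uregs.regs[0]);
    }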
diff --git a/arch/nios2/include/uapi/asm/sigcontext.h b/arch/nios2/include/uapi/asm/sigcontext.h index 7b8bb41867d4..b67944a50927 100644 --- a/arch/nios2/include/uapi/asm/sigcontext.h +++ b/arch/nios2/include/uapi/asm/sigcontext.h | |||
@@ -15,14 +15,16 @@ | |||
15 | * details. | 15 | * details. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #ifndef _ASM_NIOS2_SIGCONTEXT_H | 18 | #ifndef _UAPI__ASM_SIGCONTEXT_H |
19 | #define _ASM_NIOS2_SIGCONTEXT_H | 19 | #define _UAPI__ASM_SIGCONTEXT_H |
20 | 20 | ||
21 | #include <asm/ptrace.h> | 21 | #include <linux/types.h> |
22 | |||
23 | #define MCONTEXT_VERSION 2 | ||
22 | 24 | ||
23 | struct sigcontext { | 25 | struct sigcontext { |
24 | struct pt_regs regs; | 26 | int version; |
25 | unsigned long sc_mask; /* old sigmask */ | 27 | unsigned long gregs[32]; |
26 | }; | 28 | }; |
27 | 29 | ||
28 | #endif | 30 | #endif |
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c index 2d0ea25be171..dda41e4fe707 100644 --- a/arch/nios2/kernel/signal.c +++ b/arch/nios2/kernel/signal.c | |||
@@ -39,7 +39,7 @@ static inline int rt_restore_ucontext(struct pt_regs *regs, | |||
39 | struct ucontext *uc, int *pr2) | 39 | struct ucontext *uc, int *pr2) |
40 | { | 40 | { |
41 | int temp; | 41 | int temp; |
42 | greg_t *gregs = uc->uc_mcontext.gregs; | 42 | unsigned long *gregs = uc->uc_mcontext.gregs; |
43 | int err; | 43 | int err; |
44 | 44 | ||
45 | /* Always make any pending restarted system calls return -EINTR */ | 45 | /* Always make any pending restarted system calls return -EINTR */ |
@@ -127,7 +127,7 @@ badframe: | |||
127 | static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs) | 127 | static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs) |
128 | { | 128 | { |
129 | struct switch_stack *sw = (struct switch_stack *)regs - 1; | 129 | struct switch_stack *sw = (struct switch_stack *)regs - 1; |
130 | greg_t *gregs = uc->uc_mcontext.gregs; | 130 | unsigned long *gregs = uc->uc_mcontext.gregs; |
131 | int err = 0; | 131 | int err = 0; |
132 | 132 | ||
133 | err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); | 133 | err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); |
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index d84559e31f32..f407bbf5ee94 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
@@ -515,15 +515,15 @@ struct s390_io_adapter { | |||
515 | #define S390_ARCH_FAC_MASK_SIZE_U64 \ | 515 | #define S390_ARCH_FAC_MASK_SIZE_U64 \ |
516 | (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64)) | 516 | (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64)) |
517 | 517 | ||
518 | struct s390_model_fac { | 518 | struct kvm_s390_fac { |
519 | /* facilities used in SIE context */ | 519 | /* facility list requested by guest */ |
520 | __u64 sie[S390_ARCH_FAC_LIST_SIZE_U64]; | 520 | __u64 list[S390_ARCH_FAC_LIST_SIZE_U64]; |
521 | /* subset enabled by kvm */ | 521 | /* facility mask supported by kvm & hosting machine */ |
522 | __u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64]; | 522 | __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64]; |
523 | }; | 523 | }; |
524 | 524 | ||
525 | struct kvm_s390_cpu_model { | 525 | struct kvm_s390_cpu_model { |
526 | struct s390_model_fac *fac; | 526 | struct kvm_s390_fac *fac; |
527 | struct cpuid cpu_id; | 527 | struct cpuid cpu_id; |
528 | unsigned short ibc; | 528 | unsigned short ibc; |
529 | }; | 529 | }; |
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index f49b71954654..8fb3802f8fad 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h | |||
@@ -62,6 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
62 | { | 62 | { |
63 | int cpu = smp_processor_id(); | 63 | int cpu = smp_processor_id(); |
64 | 64 | ||
65 | S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); | ||
65 | if (prev == next) | 66 | if (prev == next) |
66 | return; | 67 | return; |
67 | if (MACHINE_HAS_TLB_LC) | 68 | if (MACHINE_HAS_TLB_LC) |
@@ -73,7 +74,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
73 | atomic_dec(&prev->context.attach_count); | 74 | atomic_dec(&prev->context.attach_count); |
74 | if (MACHINE_HAS_TLB_LC) | 75 | if (MACHINE_HAS_TLB_LC) |
75 | cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); | 76 | cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); |
76 | S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); | ||
77 | } | 77 | } |
78 | 78 | ||
79 | #define finish_arch_post_lock_switch finish_arch_post_lock_switch | 79 | #define finish_arch_post_lock_switch finish_arch_post_lock_switch |
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 7b2ac6e44166..53eacbd4f09b 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h | |||
@@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end | |||
37 | #endif | 37 | #endif |
38 | } | 38 | } |
39 | 39 | ||
40 | static inline void clear_page(void *page) | 40 | #define clear_page(page) memset((page), 0, PAGE_SIZE) |
41 | { | ||
42 | register unsigned long reg1 asm ("1") = 0; | ||
43 | register void *reg2 asm ("2") = page; | ||
44 | register unsigned long reg3 asm ("3") = 4096; | ||
45 | asm volatile( | ||
46 | " mvcl 2,0" | ||
47 | : "+d" (reg2), "+d" (reg3) : "d" (reg1) | ||
48 | : "memory", "cc"); | ||
49 | } | ||
50 | 41 | ||
51 | /* | 42 | /* |
52 | * copy_page uses the mvcl instruction with 0xb0 padding byte in order to | 43 | * copy_page uses the mvcl instruction with 0xb0 padding byte in order to |
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index cb2d51e779df..830066f936c8 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c | |||
@@ -36,16 +36,20 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn) | |||
36 | insn->offset = (entry->target - entry->code) >> 1; | 36 | insn->offset = (entry->target - entry->code) >> 1; |
37 | } | 37 | } |
38 | 38 | ||
39 | static void jump_label_bug(struct jump_entry *entry, struct insn *insn) | 39 | static void jump_label_bug(struct jump_entry *entry, struct insn *expected, |
40 | struct insn *new) | ||
40 | { | 41 | { |
41 | unsigned char *ipc = (unsigned char *)entry->code; | 42 | unsigned char *ipc = (unsigned char *)entry->code; |
42 | unsigned char *ipe = (unsigned char *)insn; | 43 | unsigned char *ipe = (unsigned char *)expected; |
44 | unsigned char *ipn = (unsigned char *)new; | ||
43 | 45 | ||
44 | pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); | 46 | pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); |
45 | pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n", | 47 | pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n", |
46 | ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]); | 48 | ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]); |
47 | pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n", | 49 | pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n", |
48 | ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]); | 50 | ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]); |
51 | pr_emerg("New: %02x %02x %02x %02x %02x %02x\n", | ||
52 | ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]); | ||
49 | panic("Corrupted kernel text"); | 53 | panic("Corrupted kernel text"); |
50 | } | 54 | } |
51 | 55 | ||
@@ -69,10 +73,10 @@ static void __jump_label_transform(struct jump_entry *entry, | |||
69 | } | 73 | } |
70 | if (init) { | 74 | if (init) { |
71 | if (memcmp((void *)entry->code, &orignop, sizeof(orignop))) | 75 | if (memcmp((void *)entry->code, &orignop, sizeof(orignop))) |
72 | jump_label_bug(entry, &old); | 76 | jump_label_bug(entry, &orignop, &new); |
73 | } else { | 77 | } else { |
74 | if (memcmp((void *)entry->code, &old, sizeof(old))) | 78 | if (memcmp((void *)entry->code, &old, sizeof(old))) |
75 | jump_label_bug(entry, &old); | 79 | jump_label_bug(entry, &old, &new); |
76 | } | 80 | } |
77 | probe_kernel_write((void *)entry->code, &new, sizeof(new)); | 81 | probe_kernel_write((void *)entry->code, &new, sizeof(new)); |
78 | } | 82 | } |
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 36154a2f1814..2ca95862e336 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c | |||
@@ -436,6 +436,7 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
436 | const Elf_Shdr *sechdrs, | 436 | const Elf_Shdr *sechdrs, |
437 | struct module *me) | 437 | struct module *me) |
438 | { | 438 | { |
439 | jump_label_apply_nops(me); | ||
439 | vfree(me->arch.syminfo); | 440 | vfree(me->arch.syminfo); |
440 | me->arch.syminfo = NULL; | 441 | me->arch.syminfo = NULL; |
441 | return 0; | 442 | return 0; |
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 26108232fcaa..dc488e13b7e3 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c | |||
@@ -18,7 +18,7 @@ | |||
18 | 18 | ||
19 | static DEFINE_PER_CPU(struct cpuid, cpu_id); | 19 | static DEFINE_PER_CPU(struct cpuid, cpu_id); |
20 | 20 | ||
21 | void cpu_relax(void) | 21 | void notrace cpu_relax(void) |
22 | { | 22 | { |
23 | if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) | 23 | if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) |
24 | asm volatile("diag 0,0,0x44"); | 24 | asm volatile("diag 0,0,0x44"); |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0c3623927563..f6579cfde2df 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -522,7 +522,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) | |||
522 | memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, | 522 | memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, |
523 | sizeof(struct cpuid)); | 523 | sizeof(struct cpuid)); |
524 | kvm->arch.model.ibc = proc->ibc; | 524 | kvm->arch.model.ibc = proc->ibc; |
525 | memcpy(kvm->arch.model.fac->kvm, proc->fac_list, | 525 | memcpy(kvm->arch.model.fac->list, proc->fac_list, |
526 | S390_ARCH_FAC_LIST_SIZE_BYTE); | 526 | S390_ARCH_FAC_LIST_SIZE_BYTE); |
527 | } else | 527 | } else |
528 | ret = -EFAULT; | 528 | ret = -EFAULT; |
@@ -556,7 +556,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) | |||
556 | } | 556 | } |
557 | memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); | 557 | memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); |
558 | proc->ibc = kvm->arch.model.ibc; | 558 | proc->ibc = kvm->arch.model.ibc; |
559 | memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE); | 559 | memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE); |
560 | if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) | 560 | if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) |
561 | ret = -EFAULT; | 561 | ret = -EFAULT; |
562 | kfree(proc); | 562 | kfree(proc); |
@@ -576,10 +576,10 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) | |||
576 | } | 576 | } |
577 | get_cpu_id((struct cpuid *) &mach->cpuid); | 577 | get_cpu_id((struct cpuid *) &mach->cpuid); |
578 | mach->ibc = sclp_get_ibc(); | 578 | mach->ibc = sclp_get_ibc(); |
579 | memcpy(&mach->fac_mask, kvm_s390_fac_list_mask, | 579 | memcpy(&mach->fac_mask, kvm->arch.model.fac->mask, |
580 | kvm_s390_fac_list_mask_size() * sizeof(u64)); | 580 | S390_ARCH_FAC_LIST_SIZE_BYTE); |
581 | memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, | 581 | memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, |
582 | S390_ARCH_FAC_LIST_SIZE_U64); | 582 | S390_ARCH_FAC_LIST_SIZE_BYTE); |
583 | if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) | 583 | if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) |
584 | ret = -EFAULT; | 584 | ret = -EFAULT; |
585 | kfree(mach); | 585 | kfree(mach); |
@@ -778,15 +778,18 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
778 | static int kvm_s390_query_ap_config(u8 *config) | 778 | static int kvm_s390_query_ap_config(u8 *config) |
779 | { | 779 | { |
780 | u32 fcn_code = 0x04000000UL; | 780 | u32 fcn_code = 0x04000000UL; |
781 | u32 cc; | 781 | u32 cc = 0; |
782 | 782 | ||
783 | memset(config, 0, 128); | ||
783 | asm volatile( | 784 | asm volatile( |
784 | "lgr 0,%1\n" | 785 | "lgr 0,%1\n" |
785 | "lgr 2,%2\n" | 786 | "lgr 2,%2\n" |
786 | ".long 0xb2af0000\n" /* PQAP(QCI) */ | 787 | ".long 0xb2af0000\n" /* PQAP(QCI) */ |
787 | "ipm %0\n" | 788 | "0: ipm %0\n" |
788 | "srl %0,28\n" | 789 | "srl %0,28\n" |
789 | : "=r" (cc) | 790 | "1:\n" |
791 | EX_TABLE(0b, 1b) | ||
792 | : "+r" (cc) | ||
790 | : "r" (fcn_code), "r" (config) | 793 | : "r" (fcn_code), "r" (config) |
791 | : "cc", "0", "2", "memory" | 794 | : "cc", "0", "2", "memory" |
792 | ); | 795 | ); |
@@ -839,9 +842,13 @@ static int kvm_s390_crypto_init(struct kvm *kvm) | |||
839 | 842 | ||
840 | kvm_s390_set_crycb_format(kvm); | 843 | kvm_s390_set_crycb_format(kvm); |
841 | 844 | ||
842 | /* Disable AES/DEA protected key functions by default */ | 845 | /* Enable AES/DEA protected key functions by default */ |
843 | kvm->arch.crypto.aes_kw = 0; | 846 | kvm->arch.crypto.aes_kw = 1; |
844 | kvm->arch.crypto.dea_kw = 0; | 847 | kvm->arch.crypto.dea_kw = 1; |
848 | get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, | ||
849 | sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); | ||
850 | get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, | ||
851 | sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); | ||
845 | 852 | ||
846 | return 0; | 853 | return 0; |
847 | } | 854 | } |
@@ -886,40 +893,29 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
886 | /* | 893 | /* |
887 | * The architectural maximum amount of facilities is 16 kbit. To store | 894 | * The architectural maximum amount of facilities is 16 kbit. To store |
888 | * this amount, 2 kbyte of memory is required. Thus we need a full | 895 | * this amount, 2 kbyte of memory is required. Thus we need a full |
889 | * page to hold the active copy (arch.model.fac->sie) and the current | 896 | * page to hold the guest facility list (arch.model.fac->list) and the |
890 | * facilities set (arch.model.fac->kvm). Its address size has to be | 897 | * facility mask (arch.model.fac->mask). Its address size has to be |
891 | * 31 bits and word aligned. | 898 | * 31 bits and word aligned. |
892 | */ | 899 | */ |
893 | kvm->arch.model.fac = | 900 | kvm->arch.model.fac = |
894 | (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | 901 | (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); |
895 | if (!kvm->arch.model.fac) | 902 | if (!kvm->arch.model.fac) |
896 | goto out_nofac; | 903 | goto out_nofac; |
897 | 904 | ||
898 | memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list, | 905 | /* Populate the facility mask initially. */ |
899 | S390_ARCH_FAC_LIST_SIZE_U64); | 906 | memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, |
900 | 907 | S390_ARCH_FAC_LIST_SIZE_BYTE); | |
901 | /* | ||
902 | * If this KVM host runs *not* in a LPAR, relax the facility bits | ||
903 | * of the kvm facility mask by all missing facilities. This will allow | ||
904 | * to determine the right CPU model by means of the remaining facilities. | ||
905 | * Live guest migration must prohibit the migration of KVMs running in | ||
906 | * a LPAR to non LPAR hosts. | ||
907 | */ | ||
908 | if (!MACHINE_IS_LPAR) | ||
909 | for (i = 0; i < kvm_s390_fac_list_mask_size(); i++) | ||
910 | kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i]; | ||
911 | |||
912 | /* | ||
913 | * Apply the kvm facility mask to limit the kvm supported/tolerated | ||
914 | * facility list. | ||
915 | */ | ||
916 | for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { | 908 | for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { |
917 | if (i < kvm_s390_fac_list_mask_size()) | 909 | if (i < kvm_s390_fac_list_mask_size()) |
918 | kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i]; | 910 | kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i]; |
919 | else | 911 | else |
920 | kvm->arch.model.fac->kvm[i] = 0UL; | 912 | kvm->arch.model.fac->mask[i] = 0UL; |
921 | } | 913 | } |
922 | 914 | ||
915 | /* Populate the facility list initially. */ | ||
916 | memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask, | ||
917 | S390_ARCH_FAC_LIST_SIZE_BYTE); | ||
918 | |||
923 | kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); | 919 | kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); |
924 | kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; | 920 | kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; |
925 | 921 | ||
@@ -1165,8 +1161,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1165 | 1161 | ||
1166 | mutex_lock(&vcpu->kvm->lock); | 1162 | mutex_lock(&vcpu->kvm->lock); |
1167 | vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; | 1163 | vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; |
1168 | memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm, | ||
1169 | S390_ARCH_FAC_LIST_SIZE_BYTE); | ||
1170 | vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; | 1164 | vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; |
1171 | mutex_unlock(&vcpu->kvm->lock); | 1165 | mutex_unlock(&vcpu->kvm->lock); |
1172 | 1166 | ||
@@ -1212,7 +1206,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |||
1212 | vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; | 1206 | vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; |
1213 | set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); | 1207 | set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); |
1214 | } | 1208 | } |
1215 | vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie; | 1209 | vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list; |
1216 | 1210 | ||
1217 | spin_lock_init(&vcpu->arch.local_int.lock); | 1211 | spin_lock_init(&vcpu->arch.local_int.lock); |
1218 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; | 1212 | vcpu->arch.local_int.float_int = &kvm->arch.float_int; |
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 985c2114d7ef..c34109aa552d 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
@@ -128,7 +128,8 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) | |||
128 | /* test availability of facility in a kvm intance */ | 128 | /* test availability of facility in a kvm intance */ |
129 | static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) | 129 | static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) |
130 | { | 130 | { |
131 | return __test_facility(nr, kvm->arch.model.fac->kvm); | 131 | return __test_facility(nr, kvm->arch.model.fac->mask) && |
132 | __test_facility(nr, kvm->arch.model.fac->list); | ||
132 | } | 133 | } |
133 | 134 | ||
134 | /* are cpu states controlled by user space */ | 135 | /* are cpu states controlled by user space */ |
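The facility rework splits the per-VM data into a mask (facilities KVM and the host machine can offer) and a list (facilities the configured guest CPU model actually gets), and test_kvm_facility() now requires a bit to be set in both. The sketch below spells out the bit test that __test_facility() performs, assuming the usual s390 MSB-first numbering of facility bits; it is an illustration of the check, not a copy of the helper:

    /* MSB-first facility bit test over an array of 64-bit doublewords. */
    static inline int fac_bit_set(unsigned long nr, const u64 *fac)
    {
            return (fac[nr >> 6] >> (63 - (nr & 63))) & 1;
    }

    /* A guest may rely on facility nr only if mask AND list both allow it. */
    static inline int guest_has_facility(struct kvm_s390_fac *fac, unsigned long nr)
    {
            return fac_bit_set(nr, fac->mask) && fac_bit_set(nr, fac->list);
    }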
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index bdd9b5b17e03..351116939ea2 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -348,7 +348,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu) | |||
348 | * We need to shift the lower 32 facility bits (bit 0-31) from a u64 | 348 | * We need to shift the lower 32 facility bits (bit 0-31) from a u64 |
349 | * into a u32 memory representation. They will remain bits 0-31. | 349 | * into a u32 memory representation. They will remain bits 0-31. |
350 | */ | 350 | */ |
351 | fac = *vcpu->kvm->arch.model.fac->sie >> 32; | 351 | fac = *vcpu->kvm->arch.model.fac->list >> 32; |
352 | rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), | 352 | rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), |
353 | &fac, sizeof(fac)); | 353 | &fac, sizeof(fac)); |
354 | if (rc) | 354 | if (rc) |
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 753a56731951..f0b85443e060 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c | |||
@@ -287,7 +287,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev, | |||
287 | addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); | 287 | addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); |
288 | return (void __iomem *) addr + offset; | 288 | return (void __iomem *) addr + offset; |
289 | } | 289 | } |
290 | EXPORT_SYMBOL_GPL(pci_iomap_range); | 290 | EXPORT_SYMBOL(pci_iomap_range); |
291 | 291 | ||
292 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 292 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
293 | { | 293 | { |
@@ -309,7 +309,7 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) | |||
309 | } | 309 | } |
310 | spin_unlock(&zpci_iomap_lock); | 310 | spin_unlock(&zpci_iomap_lock); |
311 | } | 311 | } |
312 | EXPORT_SYMBOL_GPL(pci_iounmap); | 312 | EXPORT_SYMBOL(pci_iounmap); |
313 | 313 | ||
314 | static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, | 314 | static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, |
315 | int size, u32 *val) | 315 | int size, u32 *val) |
@@ -483,9 +483,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev) | |||
483 | airq_iv_free_bit(zpci_aisb_iv, zdev->aisb); | 483 | airq_iv_free_bit(zpci_aisb_iv, zdev->aisb); |
484 | } | 484 | } |
485 | 485 | ||
486 | static void zpci_map_resources(struct zpci_dev *zdev) | 486 | static void zpci_map_resources(struct pci_dev *pdev) |
487 | { | 487 | { |
488 | struct pci_dev *pdev = zdev->pdev; | ||
489 | resource_size_t len; | 488 | resource_size_t len; |
490 | int i; | 489 | int i; |
491 | 490 | ||
@@ -499,9 +498,8 @@ static void zpci_map_resources(struct zpci_dev *zdev) | |||
499 | } | 498 | } |
500 | } | 499 | } |
501 | 500 | ||
502 | static void zpci_unmap_resources(struct zpci_dev *zdev) | 501 | static void zpci_unmap_resources(struct pci_dev *pdev) |
503 | { | 502 | { |
504 | struct pci_dev *pdev = zdev->pdev; | ||
505 | resource_size_t len; | 503 | resource_size_t len; |
506 | int i; | 504 | int i; |
507 | 505 | ||
@@ -651,7 +649,7 @@ int pcibios_add_device(struct pci_dev *pdev) | |||
651 | 649 | ||
652 | zdev->pdev = pdev; | 650 | zdev->pdev = pdev; |
653 | pdev->dev.groups = zpci_attr_groups; | 651 | pdev->dev.groups = zpci_attr_groups; |
654 | zpci_map_resources(zdev); | 652 | zpci_map_resources(pdev); |
655 | 653 | ||
656 | for (i = 0; i < PCI_BAR_COUNT; i++) { | 654 | for (i = 0; i < PCI_BAR_COUNT; i++) { |
657 | res = &pdev->resource[i]; | 655 | res = &pdev->resource[i]; |
@@ -663,6 +661,11 @@ int pcibios_add_device(struct pci_dev *pdev) | |||
663 | return 0; | 661 | return 0; |
664 | } | 662 | } |
665 | 663 | ||
664 | void pcibios_release_device(struct pci_dev *pdev) | ||
665 | { | ||
666 | zpci_unmap_resources(pdev); | ||
667 | } | ||
668 | |||
666 | int pcibios_enable_device(struct pci_dev *pdev, int mask) | 669 | int pcibios_enable_device(struct pci_dev *pdev, int mask) |
667 | { | 670 | { |
668 | struct zpci_dev *zdev = get_zdev(pdev); | 671 | struct zpci_dev *zdev = get_zdev(pdev); |
@@ -670,7 +673,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask) | |||
670 | zdev->pdev = pdev; | 673 | zdev->pdev = pdev; |
671 | zpci_debug_init_device(zdev); | 674 | zpci_debug_init_device(zdev); |
672 | zpci_fmb_enable_device(zdev); | 675 | zpci_fmb_enable_device(zdev); |
673 | zpci_map_resources(zdev); | ||
674 | 676 | ||
675 | return pci_enable_resources(pdev, mask); | 677 | return pci_enable_resources(pdev, mask); |
676 | } | 678 | } |
@@ -679,7 +681,6 @@ void pcibios_disable_device(struct pci_dev *pdev) | |||
679 | { | 681 | { |
680 | struct zpci_dev *zdev = get_zdev(pdev); | 682 | struct zpci_dev *zdev = get_zdev(pdev); |
681 | 683 | ||
682 | zpci_unmap_resources(zdev); | ||
683 | zpci_fmb_disable_device(zdev); | 684 | zpci_fmb_disable_device(zdev); |
684 | zpci_debug_exit_device(zdev); | 685 | zpci_debug_exit_device(zdev); |
685 | zdev->pdev = NULL; | 686 | zdev->pdev = NULL; |
@@ -688,7 +689,8 @@ void pcibios_disable_device(struct pci_dev *pdev) | |||
688 | #ifdef CONFIG_HIBERNATE_CALLBACKS | 689 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
689 | static int zpci_restore(struct device *dev) | 690 | static int zpci_restore(struct device *dev) |
690 | { | 691 | { |
691 | struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); | 692 | struct pci_dev *pdev = to_pci_dev(dev); |
693 | struct zpci_dev *zdev = get_zdev(pdev); | ||
692 | int ret = 0; | 694 | int ret = 0; |
693 | 695 | ||
694 | if (zdev->state != ZPCI_FN_STATE_ONLINE) | 696 | if (zdev->state != ZPCI_FN_STATE_ONLINE) |
@@ -698,7 +700,7 @@ static int zpci_restore(struct device *dev) | |||
698 | if (ret) | 700 | if (ret) |
699 | goto out; | 701 | goto out; |
700 | 702 | ||
701 | zpci_map_resources(zdev); | 703 | zpci_map_resources(pdev); |
702 | zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET, | 704 | zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET, |
703 | zdev->start_dma + zdev->iommu_size - 1, | 705 | zdev->start_dma + zdev->iommu_size - 1, |
704 | (u64) zdev->dma_table); | 706 | (u64) zdev->dma_table); |
@@ -709,12 +711,14 @@ out: | |||
709 | 711 | ||
710 | static int zpci_freeze(struct device *dev) | 712 | static int zpci_freeze(struct device *dev) |
711 | { | 713 | { |
712 | struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); | 714 | struct pci_dev *pdev = to_pci_dev(dev); |
715 | struct zpci_dev *zdev = get_zdev(pdev); | ||
713 | 716 | ||
714 | if (zdev->state != ZPCI_FN_STATE_ONLINE) | 717 | if (zdev->state != ZPCI_FN_STATE_ONLINE) |
715 | return 0; | 718 | return 0; |
716 | 719 | ||
717 | zpci_unregister_ioat(zdev, 0); | 720 | zpci_unregister_ioat(zdev, 0); |
721 | zpci_unmap_resources(pdev); | ||
718 | return clp_disable_fh(zdev); | 722 | return clp_disable_fh(zdev); |
719 | } | 723 | } |
720 | 724 | ||
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 8aa271b3d1ad..b1bb2b72302c 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c | |||
@@ -64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, | |||
64 | if (copy_from_user(buf, user_buffer, length)) | 64 | if (copy_from_user(buf, user_buffer, length)) |
65 | goto out; | 65 | goto out; |
66 | 66 | ||
67 | memcpy_toio(io_addr, buf, length); | 67 | ret = zpci_memcpy_toio(io_addr, buf, length); |
68 | ret = 0; | ||
69 | out: | 68 | out: |
70 | if (buf != local_buf) | 69 | if (buf != local_buf) |
71 | kfree(buf); | 70 | kfree(buf); |
@@ -98,16 +97,16 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, | |||
98 | goto out; | 97 | goto out; |
99 | io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); | 98 | io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); |
100 | 99 | ||
101 | ret = -EFAULT; | 100 | if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) { |
102 | if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) | 101 | ret = -EFAULT; |
103 | goto out; | 102 | goto out; |
104 | 103 | } | |
105 | memcpy_fromio(buf, io_addr, length); | 104 | ret = zpci_memcpy_fromio(buf, io_addr, length); |
106 | 105 | if (ret) | |
107 | if (copy_to_user(user_buffer, buf, length)) | ||
108 | goto out; | 106 | goto out; |
107 | if (copy_to_user(user_buffer, buf, length)) | ||
108 | ret = -EFAULT; | ||
109 | 109 | ||
110 | ret = 0; | ||
111 | out: | 110 | out: |
112 | if (buf != local_buf) | 111 | if (buf != local_buf) |
113 | kfree(buf); | 112 | kfree(buf); |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 740ae3026a14..9f93af56a5fc 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
@@ -563,7 +563,7 @@ static bool alloc_p2m(unsigned long pfn) | |||
563 | if (p2m_pfn == PFN_DOWN(__pa(p2m_missing))) | 563 | if (p2m_pfn == PFN_DOWN(__pa(p2m_missing))) |
564 | p2m_init(p2m); | 564 | p2m_init(p2m); |
565 | else | 565 | else |
566 | p2m_init_identity(p2m, pfn); | 566 | p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1)); |
567 | 567 | ||
568 | spin_lock_irqsave(&p2m_update_lock, flags); | 568 | spin_lock_irqsave(&p2m_update_lock, flags); |
569 | 569 | ||
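alloc_p2m() replaces a whole p2m leaf page at once, and p2m_init_identity() fills every one of its P2M_PER_PAGE entries starting from the pfn it is handed; passing the faulting pfn itself therefore skewed the identity mapping for the entire page. Rounding down to the page boundary restores the intended contents, as the small numeric example shows (P2M_PER_PAGE is 512 for 4 KiB pages with 64-bit entries):

    unsigned long pfn  = 0x12345;
    unsigned long base = pfn & ~(512UL - 1);   /* 0x12200, first pfn of the leaf */

    /* p2m_init_identity(p2m, base) stores IDENTITY_FRAME(base + i) in slot i,  */
    /* so slot pfn - base = 0x145 ends up as IDENTITY_FRAME(0x12345), as meant. */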
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 657964e8ab7e..37fb19047603 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c | |||
@@ -65,6 +65,7 @@ struct lpss_private_data; | |||
65 | 65 | ||
66 | struct lpss_device_desc { | 66 | struct lpss_device_desc { |
67 | unsigned int flags; | 67 | unsigned int flags; |
68 | const char *clk_con_id; | ||
68 | unsigned int prv_offset; | 69 | unsigned int prv_offset; |
69 | size_t prv_size_override; | 70 | size_t prv_size_override; |
70 | void (*setup)(struct lpss_private_data *pdata); | 71 | void (*setup)(struct lpss_private_data *pdata); |
@@ -140,6 +141,7 @@ static struct lpss_device_desc lpt_i2c_dev_desc = { | |||
140 | 141 | ||
141 | static struct lpss_device_desc lpt_uart_dev_desc = { | 142 | static struct lpss_device_desc lpt_uart_dev_desc = { |
142 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, | 143 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, |
144 | .clk_con_id = "baudclk", | ||
143 | .prv_offset = 0x800, | 145 | .prv_offset = 0x800, |
144 | .setup = lpss_uart_setup, | 146 | .setup = lpss_uart_setup, |
145 | }; | 147 | }; |
@@ -156,6 +158,7 @@ static struct lpss_device_desc byt_pwm_dev_desc = { | |||
156 | 158 | ||
157 | static struct lpss_device_desc byt_uart_dev_desc = { | 159 | static struct lpss_device_desc byt_uart_dev_desc = { |
158 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, | 160 | .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, |
161 | .clk_con_id = "baudclk", | ||
159 | .prv_offset = 0x800, | 162 | .prv_offset = 0x800, |
160 | .setup = lpss_uart_setup, | 163 | .setup = lpss_uart_setup, |
161 | }; | 164 | }; |
@@ -313,7 +316,7 @@ out: | |||
313 | return PTR_ERR(clk); | 316 | return PTR_ERR(clk); |
314 | 317 | ||
315 | pdata->clk = clk; | 318 | pdata->clk = clk; |
316 | clk_register_clkdev(clk, NULL, devname); | 319 | clk_register_clkdev(clk, dev_desc->clk_con_id, devname); |
317 | return 0; | 320 | return 0; |
318 | } | 321 | } |
319 | 322 | ||
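
Registering the LPSS clock under an explicit connection id means a clkdev lookup that asks for that name on the device now resolves. A hedged consumer-side sketch (not taken from the patch) of how a UART driver could pick up the clock by the "baudclk" con_id; the function name is hypothetical:

/* Sketch only: standard clk consumer calls, error handling trimmed. */
static int example_get_baudclk(struct platform_device *pdev)
{
	struct clk *baud = clk_get(&pdev->dev, "baudclk");

	if (IS_ERR(baud))
		return PTR_ERR(baud);

	return clk_prepare_enable(baud);	/* clock is now running */
}
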
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index f9054cd36a72..5389579c5120 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c | |||
@@ -869,6 +869,8 @@ try_offline_again: | |||
869 | */ | 869 | */ |
870 | ata_msleep(ap, 1); | 870 | ata_msleep(ap, 1); |
871 | 871 | ||
872 | sata_set_spd(link); | ||
873 | |||
872 | /* | 874 | /* |
873 | * Now, bring the host controller online again, this can take time | 875 | * Now, bring the host controller online again, this can take time |
874 | * as PHY reset and communication establishment, 1st D2H FIS and | 876 | * as PHY reset and communication establishment, 1st D2H FIS and |
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 1d278ccd751f..e096e9cddb40 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c | |||
@@ -140,24 +140,24 @@ static int tpm_dev_add_device(struct tpm_chip *chip) | |||
140 | { | 140 | { |
141 | int rc; | 141 | int rc; |
142 | 142 | ||
143 | rc = device_add(&chip->dev); | 143 | rc = cdev_add(&chip->cdev, chip->dev.devt, 1); |
144 | if (rc) { | 144 | if (rc) { |
145 | dev_err(&chip->dev, | 145 | dev_err(&chip->dev, |
146 | "unable to device_register() %s, major %d, minor %d, err=%d\n", | 146 | "unable to cdev_add() %s, major %d, minor %d, err=%d\n", |
147 | chip->devname, MAJOR(chip->dev.devt), | 147 | chip->devname, MAJOR(chip->dev.devt), |
148 | MINOR(chip->dev.devt), rc); | 148 | MINOR(chip->dev.devt), rc); |
149 | 149 | ||
150 | device_unregister(&chip->dev); | ||
150 | return rc; | 151 | return rc; |
151 | } | 152 | } |
152 | 153 | ||
153 | rc = cdev_add(&chip->cdev, chip->dev.devt, 1); | 154 | rc = device_add(&chip->dev); |
154 | if (rc) { | 155 | if (rc) { |
155 | dev_err(&chip->dev, | 156 | dev_err(&chip->dev, |
156 | "unable to cdev_add() %s, major %d, minor %d, err=%d\n", | 157 | "unable to device_register() %s, major %d, minor %d, err=%d\n", |
157 | chip->devname, MAJOR(chip->dev.devt), | 158 | chip->devname, MAJOR(chip->dev.devt), |
158 | MINOR(chip->dev.devt), rc); | 159 | MINOR(chip->dev.devt), rc); |
159 | 160 | ||
160 | device_unregister(&chip->dev); | ||
161 | return rc; | 161 | return rc; |
162 | } | 162 | } |
163 | 163 | ||
@@ -174,27 +174,17 @@ static void tpm_dev_del_device(struct tpm_chip *chip) | |||
174 | * tpm_chip_register() - create a character device for the TPM chip | 174 | * tpm_chip_register() - create a character device for the TPM chip |
175 | * @chip: TPM chip to use. | 175 | * @chip: TPM chip to use. |
176 | * | 176 | * |
177 | * Creates a character device for the TPM chip and adds sysfs interfaces for | 177 | * Creates a character device for the TPM chip and adds sysfs attributes for |
178 | * the device, PPI and TCPA. As the last step this function adds the | 178 | * the device. As the last step this function adds the chip to the list of TPM |
179 | * chip to the list of TPM chips available for use. | 179 | * chips available for in-kernel use. |
180 | * | 180 | * |
181 | * NOTE: This function should be only called after the chip initialization | 181 | * This function should be only called after the chip initialization is |
182 | * is complete. | 182 | * complete. |
183 | * | ||
184 | * Called from tpm_<specific>.c probe function only for devices | ||
185 | * the driver has determined it should claim. Prior to calling | ||
186 | * this function the specific probe function has called pci_enable_device | ||
187 | * upon errant exit from this function specific probe function should call | ||
188 | * pci_disable_device | ||
189 | */ | 183 | */ |
190 | int tpm_chip_register(struct tpm_chip *chip) | 184 | int tpm_chip_register(struct tpm_chip *chip) |
191 | { | 185 | { |
192 | int rc; | 186 | int rc; |
193 | 187 | ||
194 | rc = tpm_dev_add_device(chip); | ||
195 | if (rc) | ||
196 | return rc; | ||
197 | |||
198 | /* Populate sysfs for TPM1 devices. */ | 188 | /* Populate sysfs for TPM1 devices. */ |
199 | if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { | 189 | if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { |
200 | rc = tpm_sysfs_add_device(chip); | 190 | rc = tpm_sysfs_add_device(chip); |
@@ -208,6 +198,10 @@ int tpm_chip_register(struct tpm_chip *chip) | |||
208 | chip->bios_dir = tpm_bios_log_setup(chip->devname); | 198 | chip->bios_dir = tpm_bios_log_setup(chip->devname); |
209 | } | 199 | } |
210 | 200 | ||
201 | rc = tpm_dev_add_device(chip); | ||
202 | if (rc) | ||
203 | return rc; | ||
204 | |||
211 | /* Make the chip available. */ | 205 | /* Make the chip available. */ |
212 | spin_lock(&driver_lock); | 206 | spin_lock(&driver_lock); |
213 | list_add_rcu(&chip->list, &tpm_chip_list); | 207 | list_add_rcu(&chip->list, &tpm_chip_list); |
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index b1e53e3aece5..42ffa5e7a1e0 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c | |||
@@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) | |||
124 | { | 124 | { |
125 | struct ibmvtpm_dev *ibmvtpm; | 125 | struct ibmvtpm_dev *ibmvtpm; |
126 | struct ibmvtpm_crq crq; | 126 | struct ibmvtpm_crq crq; |
127 | u64 *word = (u64 *) &crq; | 127 | __be64 *word = (__be64 *)&crq; |
128 | int rc; | 128 | int rc; |
129 | 129 | ||
130 | ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip); | 130 | ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip); |
@@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) | |||
145 | memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); | 145 | memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); |
146 | crq.valid = (u8)IBMVTPM_VALID_CMD; | 146 | crq.valid = (u8)IBMVTPM_VALID_CMD; |
147 | crq.msg = (u8)VTPM_TPM_COMMAND; | 147 | crq.msg = (u8)VTPM_TPM_COMMAND; |
148 | crq.len = (u16)count; | 148 | crq.len = cpu_to_be16(count); |
149 | crq.data = ibmvtpm->rtce_dma_handle; | 149 | crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle); |
150 | 150 | ||
151 | rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]), | 151 | rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]), |
152 | cpu_to_be64(word[1])); | 152 | be64_to_cpu(word[1])); |
153 | if (rc != H_SUCCESS) { | 153 | if (rc != H_SUCCESS) { |
154 | dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); | 154 | dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); |
155 | rc = 0; | 155 | rc = 0; |
diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h index f595f14426bf..6af92890518f 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.h +++ b/drivers/char/tpm/tpm_ibmvtpm.h | |||
@@ -22,9 +22,9 @@ | |||
22 | struct ibmvtpm_crq { | 22 | struct ibmvtpm_crq { |
23 | u8 valid; | 23 | u8 valid; |
24 | u8 msg; | 24 | u8 msg; |
25 | u16 len; | 25 | __be16 len; |
26 | u32 data; | 26 | __be32 data; |
27 | u64 reserved; | 27 | __be64 reserved; |
28 | } __attribute__((packed, aligned(8))); | 28 | } __attribute__((packed, aligned(8))); |
29 | 29 | ||
30 | struct ibmvtpm_crq_queue { | 30 | struct ibmvtpm_crq_queue { |
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index db7f8bce7467..25006a8bb8e6 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c | |||
@@ -144,12 +144,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, | |||
144 | divider->flags); | 144 | divider->flags); |
145 | } | 145 | } |
146 | 146 | ||
147 | /* | ||
148 | * The reverse of DIV_ROUND_UP: The maximum number which | ||
149 | * divided by m is r | ||
150 | */ | ||
151 | #define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1) | ||
152 | |||
153 | static bool _is_valid_table_div(const struct clk_div_table *table, | 147 | static bool _is_valid_table_div(const struct clk_div_table *table, |
154 | unsigned int div) | 148 | unsigned int div) |
155 | { | 149 | { |
@@ -225,19 +219,24 @@ static int _div_round_closest(const struct clk_div_table *table, | |||
225 | unsigned long parent_rate, unsigned long rate, | 219 | unsigned long parent_rate, unsigned long rate, |
226 | unsigned long flags) | 220 | unsigned long flags) |
227 | { | 221 | { |
228 | int up, down, div; | 222 | int up, down; |
223 | unsigned long up_rate, down_rate; | ||
229 | 224 | ||
230 | up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate); | 225 | up = DIV_ROUND_UP(parent_rate, rate); |
226 | down = parent_rate / rate; | ||
231 | 227 | ||
232 | if (flags & CLK_DIVIDER_POWER_OF_TWO) { | 228 | if (flags & CLK_DIVIDER_POWER_OF_TWO) { |
233 | up = __roundup_pow_of_two(div); | 229 | up = __roundup_pow_of_two(up); |
234 | down = __rounddown_pow_of_two(div); | 230 | down = __rounddown_pow_of_two(down); |
235 | } else if (table) { | 231 | } else if (table) { |
236 | up = _round_up_table(table, div); | 232 | up = _round_up_table(table, up); |
237 | down = _round_down_table(table, div); | 233 | down = _round_down_table(table, down); |
238 | } | 234 | } |
239 | 235 | ||
240 | return (up - div) <= (div - down) ? up : down; | 236 | up_rate = DIV_ROUND_UP(parent_rate, up); |
237 | down_rate = DIV_ROUND_UP(parent_rate, down); | ||
238 | |||
239 | return (rate - up_rate) <= (down_rate - rate) ? up : down; | ||
241 | } | 240 | } |
242 | 241 | ||
243 | static int _div_round(const struct clk_div_table *table, | 242 | static int _div_round(const struct clk_div_table *table, |
@@ -313,7 +312,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, | |||
313 | return i; | 312 | return i; |
314 | } | 313 | } |
315 | parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), | 314 | parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), |
316 | MULT_ROUND_UP(rate, i)); | 315 | rate * i); |
317 | now = DIV_ROUND_UP(parent_rate, i); | 316 | now = DIV_ROUND_UP(parent_rate, i); |
318 | if (_is_best_div(rate, now, best, flags)) { | 317 | if (_is_best_div(rate, now, best, flags)) { |
319 | bestdiv = i; | 318 | bestdiv = i; |
@@ -353,7 +352,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, | |||
353 | bestdiv = readl(divider->reg) >> divider->shift; | 352 | bestdiv = readl(divider->reg) >> divider->shift; |
354 | bestdiv &= div_mask(divider->width); | 353 | bestdiv &= div_mask(divider->width); |
355 | bestdiv = _get_div(divider->table, bestdiv, divider->flags); | 354 | bestdiv = _get_div(divider->table, bestdiv, divider->flags); |
356 | return bestdiv; | 355 | return DIV_ROUND_UP(*prate, bestdiv); |
357 | } | 356 | } |
358 | 357 | ||
359 | return divider_round_rate(hw, rate, prate, divider->table, | 358 | return divider_round_rate(hw, rate, prate, divider->table, |
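
The reworked closest-divider helper above chooses between the dividers immediately above and below the request by comparing the rates they actually produce rather than the divider values; for a 100 MHz parent and a 27 MHz request, up = 4 yields 25 MHz and down = 3 yields about 33.3 MHz, so 4 wins. A standalone sketch of that comparison for the plain case (no divider table, no power-of-two flag, request not above the parent rate):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned long closest_div(unsigned long parent_rate, unsigned long rate)
{
	unsigned long up = DIV_ROUND_UP(parent_rate, rate);
	unsigned long down = parent_rate / rate;
	unsigned long up_rate = DIV_ROUND_UP(parent_rate, up);
	unsigned long down_rate = DIV_ROUND_UP(parent_rate, down);

	/* pick whichever divider lands closer to the requested rate */
	return (rate - up_rate) <= (down_rate - rate) ? up : down;
}

int main(void)
{
	printf("%lu\n", closest_div(100000000UL, 27000000UL));	/* prints 4 */
	return 0;
}
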
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index eb0152961d3c..237f23f68bfc 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c | |||
@@ -1350,7 +1350,6 @@ static unsigned long clk_core_get_rate(struct clk_core *clk) | |||
1350 | 1350 | ||
1351 | return rate; | 1351 | return rate; |
1352 | } | 1352 | } |
1353 | EXPORT_SYMBOL_GPL(clk_core_get_rate); | ||
1354 | 1353 | ||
1355 | /** | 1354 | /** |
1356 | * clk_get_rate - return the rate of clk | 1355 | * clk_get_rate - return the rate of clk |
@@ -2171,6 +2170,32 @@ int clk_get_phase(struct clk *clk) | |||
2171 | } | 2170 | } |
2172 | 2171 | ||
2173 | /** | 2172 | /** |
2173 | * clk_is_match - check if two clk's point to the same hardware clock | ||
2174 | * @p: clk compared against q | ||
2175 | * @q: clk compared against p | ||
2176 | * | ||
2177 | * Returns true if the two struct clk pointers both point to the same hardware | ||
2178 | * clock node. Put differently, returns true if struct clk *p and struct clk *q | ||
2179 | * share the same struct clk_core object. | ||
2180 | * | ||
2181 | * Returns false otherwise. Note that two NULL clks are treated as matching. | ||
2182 | */ | ||
2183 | bool clk_is_match(const struct clk *p, const struct clk *q) | ||
2184 | { | ||
2185 | /* trivial case: identical struct clk's or both NULL */ | ||
2186 | if (p == q) | ||
2187 | return true; | ||
2188 | |||
2189 | /* true if clk->core pointers match. Avoid derefing garbage */ | ||
2190 | if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) | ||
2191 | if (p->core == q->core) | ||
2192 | return true; | ||
2193 | |||
2194 | return false; | ||
2195 | } | ||
2196 | EXPORT_SYMBOL_GPL(clk_is_match); | ||
2197 | |||
2198 | /** | ||
2174 | * __clk_init - initialize the data structures in a struct clk | 2199 | * __clk_init - initialize the data structures in a struct clk |
2175 | * @dev: device initializing this clk, placeholder for now | 2200 | * @dev: device initializing this clk, placeholder for now |
2176 | * @clk: clk being initialized | 2201 | * @clk: clk being initialized |
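
With per-user struct clk handles, two pointers obtained for the same hardware clock are no longer guaranteed to compare equal, so consumers that used to compare handles with == can use the new clk_is_match() helper instead. A hedged sketch of such a check (the wrapper name and its callers are hypothetical):

/* Sketch only: true when both handles wrap the same clk_core,
 * including the case where both are NULL. */
static bool same_hw_clock(struct clk *a, struct clk *b)
{
	return clk_is_match(a, b);
}
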
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c index b0b562b9ce0e..e60feffc10a1 100644 --- a/drivers/clk/qcom/gcc-msm8960.c +++ b/drivers/clk/qcom/gcc-msm8960.c | |||
@@ -48,6 +48,17 @@ static struct clk_pll pll3 = { | |||
48 | }, | 48 | }, |
49 | }; | 49 | }; |
50 | 50 | ||
51 | static struct clk_regmap pll4_vote = { | ||
52 | .enable_reg = 0x34c0, | ||
53 | .enable_mask = BIT(4), | ||
54 | .hw.init = &(struct clk_init_data){ | ||
55 | .name = "pll4_vote", | ||
56 | .parent_names = (const char *[]){ "pll4" }, | ||
57 | .num_parents = 1, | ||
58 | .ops = &clk_pll_vote_ops, | ||
59 | }, | ||
60 | }; | ||
61 | |||
51 | static struct clk_pll pll8 = { | 62 | static struct clk_pll pll8 = { |
52 | .l_reg = 0x3144, | 63 | .l_reg = 0x3144, |
53 | .m_reg = 0x3148, | 64 | .m_reg = 0x3148, |
@@ -3023,6 +3034,7 @@ static struct clk_branch rpm_msg_ram_h_clk = { | |||
3023 | 3034 | ||
3024 | static struct clk_regmap *gcc_msm8960_clks[] = { | 3035 | static struct clk_regmap *gcc_msm8960_clks[] = { |
3025 | [PLL3] = &pll3.clkr, | 3036 | [PLL3] = &pll3.clkr, |
3037 | [PLL4_VOTE] = &pll4_vote, | ||
3026 | [PLL8] = &pll8.clkr, | 3038 | [PLL8] = &pll8.clkr, |
3027 | [PLL8_VOTE] = &pll8_vote, | 3039 | [PLL8_VOTE] = &pll8_vote, |
3028 | [PLL14] = &pll14.clkr, | 3040 | [PLL14] = &pll14.clkr, |
@@ -3247,6 +3259,7 @@ static const struct qcom_reset_map gcc_msm8960_resets[] = { | |||
3247 | 3259 | ||
3248 | static struct clk_regmap *gcc_apq8064_clks[] = { | 3260 | static struct clk_regmap *gcc_apq8064_clks[] = { |
3249 | [PLL3] = &pll3.clkr, | 3261 | [PLL3] = &pll3.clkr, |
3262 | [PLL4_VOTE] = &pll4_vote, | ||
3250 | [PLL8] = &pll8.clkr, | 3263 | [PLL8] = &pll8.clkr, |
3251 | [PLL8_VOTE] = &pll8_vote, | 3264 | [PLL8_VOTE] = &pll8_vote, |
3252 | [PLL14] = &pll14.clkr, | 3265 | [PLL14] = &pll14.clkr, |
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c index 121ffde25dc3..c9ff27b4648b 100644 --- a/drivers/clk/qcom/lcc-ipq806x.c +++ b/drivers/clk/qcom/lcc-ipq806x.c | |||
@@ -462,7 +462,6 @@ static struct platform_driver lcc_ipq806x_driver = { | |||
462 | .remove = lcc_ipq806x_remove, | 462 | .remove = lcc_ipq806x_remove, |
463 | .driver = { | 463 | .driver = { |
464 | .name = "lcc-ipq806x", | 464 | .name = "lcc-ipq806x", |
465 | .owner = THIS_MODULE, | ||
466 | .of_match_table = lcc_ipq806x_match_table, | 465 | .of_match_table = lcc_ipq806x_match_table, |
467 | }, | 466 | }, |
468 | }; | 467 | }; |
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c index a75a408cfccd..e2c863295f00 100644 --- a/drivers/clk/qcom/lcc-msm8960.c +++ b/drivers/clk/qcom/lcc-msm8960.c | |||
@@ -417,8 +417,8 @@ static struct clk_rcg slimbus_src = { | |||
417 | .mnctr_en_bit = 8, | 417 | .mnctr_en_bit = 8, |
418 | .mnctr_reset_bit = 7, | 418 | .mnctr_reset_bit = 7, |
419 | .mnctr_mode_shift = 5, | 419 | .mnctr_mode_shift = 5, |
420 | .n_val_shift = 16, | 420 | .n_val_shift = 24, |
421 | .m_val_shift = 16, | 421 | .m_val_shift = 8, |
422 | .width = 8, | 422 | .width = 8, |
423 | }, | 423 | }, |
424 | .p = { | 424 | .p = { |
@@ -547,7 +547,7 @@ static int lcc_msm8960_probe(struct platform_device *pdev) | |||
547 | return PTR_ERR(regmap); | 547 | return PTR_ERR(regmap); |
548 | 548 | ||
549 | /* Use the correct frequency plan depending on speed of PLL4 */ | 549 | /* Use the correct frequency plan depending on speed of PLL4 */ |
550 | val = regmap_read(regmap, 0x4, &val); | 550 | regmap_read(regmap, 0x4, &val); |
551 | if (val == 0x12) { | 551 | if (val == 0x12) { |
552 | slimbus_src.freq_tbl = clk_tbl_aif_osr_492; | 552 | slimbus_src.freq_tbl = clk_tbl_aif_osr_492; |
553 | mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492; | 553 | mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492; |
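
The probe fix above stops assigning regmap_read()'s return code (0 or a negative errno) to val, which had been overwriting the register value the call just stored through &val before the PLL4 comparison. A standalone illustration of the clobbering pattern, using a stand-in for regmap_read():

#include <stdio.h>

/* Stand-in for regmap_read(): stores the register value through the pointer
 * and returns 0 on success, mirroring the real API's calling convention. */
static int fake_regmap_read(unsigned int reg, unsigned int *val)
{
	(void)reg;
	*val = 0x12;	/* pretend the register reads back 0x12 */
	return 0;
}

int main(void)
{
	unsigned int val;

	val = fake_regmap_read(0x4, &val);	/* bug: return code overwrites the value */
	printf("clobbered: 0x%x\n", val);	/* 0x0 */

	fake_regmap_read(0x4, &val);		/* fixed: register value survives */
	printf("fixed:     0x%x\n", val);	/* 0x12 */
	return 0;
}
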
@@ -574,7 +574,6 @@ static struct platform_driver lcc_msm8960_driver = { | |||
574 | .remove = lcc_msm8960_remove, | 574 | .remove = lcc_msm8960_remove, |
575 | .driver = { | 575 | .driver = { |
576 | .name = "lcc-msm8960", | 576 | .name = "lcc-msm8960", |
577 | .owner = THIS_MODULE, | ||
578 | .of_match_table = lcc_msm8960_match_table, | 577 | .of_match_table = lcc_msm8960_match_table, |
579 | }, | 578 | }, |
580 | }; | 579 | }; |
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c index 6ef89639a9f6..d21640634adf 100644 --- a/drivers/clk/ti/fapll.c +++ b/drivers/clk/ti/fapll.c | |||
@@ -84,7 +84,7 @@ static int ti_fapll_enable(struct clk_hw *hw) | |||
84 | struct fapll_data *fd = to_fapll(hw); | 84 | struct fapll_data *fd = to_fapll(hw); |
85 | u32 v = readl_relaxed(fd->base); | 85 | u32 v = readl_relaxed(fd->base); |
86 | 86 | ||
87 | v |= (1 << FAPLL_MAIN_PLLEN); | 87 | v |= FAPLL_MAIN_PLLEN; |
88 | writel_relaxed(v, fd->base); | 88 | writel_relaxed(v, fd->base); |
89 | 89 | ||
90 | return 0; | 90 | return 0; |
@@ -95,7 +95,7 @@ static void ti_fapll_disable(struct clk_hw *hw) | |||
95 | struct fapll_data *fd = to_fapll(hw); | 95 | struct fapll_data *fd = to_fapll(hw); |
96 | u32 v = readl_relaxed(fd->base); | 96 | u32 v = readl_relaxed(fd->base); |
97 | 97 | ||
98 | v &= ~(1 << FAPLL_MAIN_PLLEN); | 98 | v &= ~FAPLL_MAIN_PLLEN; |
99 | writel_relaxed(v, fd->base); | 99 | writel_relaxed(v, fd->base); |
100 | } | 100 | } |
101 | 101 | ||
@@ -104,7 +104,7 @@ static int ti_fapll_is_enabled(struct clk_hw *hw) | |||
104 | struct fapll_data *fd = to_fapll(hw); | 104 | struct fapll_data *fd = to_fapll(hw); |
105 | u32 v = readl_relaxed(fd->base); | 105 | u32 v = readl_relaxed(fd->base); |
106 | 106 | ||
107 | return v & (1 << FAPLL_MAIN_PLLEN); | 107 | return v & FAPLL_MAIN_PLLEN; |
108 | } | 108 | } |
109 | 109 | ||
110 | static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw, | 110 | static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw, |
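
The fapll hunks above use FAPLL_MAIN_PLLEN directly as a register mask; shifting it again with 1 << FAPLL_MAIN_PLLEN treats a mask as a bit number, so it either targets the wrong bit or shifts by an out-of-range amount. A standalone illustration, assuming for the example that the macro is bit 15 expressed as a mask:

#include <stdio.h>

#define FAPLL_MAIN_PLLEN (1u << 15)	/* assumed: the macro is already a mask */

int main(void)
{
	unsigned int v = 0;

	v |= FAPLL_MAIN_PLLEN;			/* sets bit 15, as intended */
	/* v |= 1u << FAPLL_MAIN_PLLEN;	   would shift by 0x8000: undefined */
	printf("0x%x\n", v);			/* 0x8000 */
	return 0;
}
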
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 111849c4c8c2..d576a4dea64f 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -43,9 +43,10 @@ | |||
43 | #include "drm_crtc_internal.h" | 43 | #include "drm_crtc_internal.h" |
44 | #include "drm_internal.h" | 44 | #include "drm_internal.h" |
45 | 45 | ||
46 | static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | 46 | static struct drm_framebuffer * |
47 | struct drm_mode_fb_cmd2 *r, | 47 | internal_framebuffer_create(struct drm_device *dev, |
48 | struct drm_file *file_priv); | 48 | struct drm_mode_fb_cmd2 *r, |
49 | struct drm_file *file_priv); | ||
49 | 50 | ||
50 | /* Avoid boilerplate. I'm tired of typing. */ | 51 | /* Avoid boilerplate. I'm tired of typing. */ |
51 | #define DRM_ENUM_NAME_FN(fnname, list) \ | 52 | #define DRM_ENUM_NAME_FN(fnname, list) \ |
@@ -2943,13 +2944,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, | |||
2943 | */ | 2944 | */ |
2944 | if (req->flags & DRM_MODE_CURSOR_BO) { | 2945 | if (req->flags & DRM_MODE_CURSOR_BO) { |
2945 | if (req->handle) { | 2946 | if (req->handle) { |
2946 | fb = add_framebuffer_internal(dev, &fbreq, file_priv); | 2947 | fb = internal_framebuffer_create(dev, &fbreq, file_priv); |
2947 | if (IS_ERR(fb)) { | 2948 | if (IS_ERR(fb)) { |
2948 | DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); | 2949 | DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); |
2949 | return PTR_ERR(fb); | 2950 | return PTR_ERR(fb); |
2950 | } | 2951 | } |
2951 | |||
2952 | drm_framebuffer_reference(fb); | ||
2953 | } else { | 2952 | } else { |
2954 | fb = NULL; | 2953 | fb = NULL; |
2955 | } | 2954 | } |
@@ -3308,9 +3307,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) | |||
3308 | return 0; | 3307 | return 0; |
3309 | } | 3308 | } |
3310 | 3309 | ||
3311 | static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | 3310 | static struct drm_framebuffer * |
3312 | struct drm_mode_fb_cmd2 *r, | 3311 | internal_framebuffer_create(struct drm_device *dev, |
3313 | struct drm_file *file_priv) | 3312 | struct drm_mode_fb_cmd2 *r, |
3313 | struct drm_file *file_priv) | ||
3314 | { | 3314 | { |
3315 | struct drm_mode_config *config = &dev->mode_config; | 3315 | struct drm_mode_config *config = &dev->mode_config; |
3316 | struct drm_framebuffer *fb; | 3316 | struct drm_framebuffer *fb; |
@@ -3348,12 +3348,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | |||
3348 | return fb; | 3348 | return fb; |
3349 | } | 3349 | } |
3350 | 3350 | ||
3351 | mutex_lock(&file_priv->fbs_lock); | ||
3352 | r->fb_id = fb->base.id; | ||
3353 | list_add(&fb->filp_head, &file_priv->fbs); | ||
3354 | DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); | ||
3355 | mutex_unlock(&file_priv->fbs_lock); | ||
3356 | |||
3357 | return fb; | 3351 | return fb; |
3358 | } | 3352 | } |
3359 | 3353 | ||
@@ -3375,15 +3369,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, | |||
3375 | int drm_mode_addfb2(struct drm_device *dev, | 3369 | int drm_mode_addfb2(struct drm_device *dev, |
3376 | void *data, struct drm_file *file_priv) | 3370 | void *data, struct drm_file *file_priv) |
3377 | { | 3371 | { |
3372 | struct drm_mode_fb_cmd2 *r = data; | ||
3378 | struct drm_framebuffer *fb; | 3373 | struct drm_framebuffer *fb; |
3379 | 3374 | ||
3380 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 3375 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
3381 | return -EINVAL; | 3376 | return -EINVAL; |
3382 | 3377 | ||
3383 | fb = add_framebuffer_internal(dev, data, file_priv); | 3378 | fb = internal_framebuffer_create(dev, r, file_priv); |
3384 | if (IS_ERR(fb)) | 3379 | if (IS_ERR(fb)) |
3385 | return PTR_ERR(fb); | 3380 | return PTR_ERR(fb); |
3386 | 3381 | ||
3382 | /* Transfer ownership to the filp for reaping on close */ | ||
3383 | |||
3384 | DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); | ||
3385 | mutex_lock(&file_priv->fbs_lock); | ||
3386 | r->fb_id = fb->base.id; | ||
3387 | list_add(&fb->filp_head, &file_priv->fbs); | ||
3388 | mutex_unlock(&file_priv->fbs_lock); | ||
3389 | |||
3387 | return 0; | 3390 | return 0; |
3388 | } | 3391 | } |
3389 | 3392 | ||
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 0d15e6e30732..132581ca4ad8 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c | |||
@@ -733,10 +733,14 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, | |||
733 | struct drm_dp_sideband_msg_tx *txmsg) | 733 | struct drm_dp_sideband_msg_tx *txmsg) |
734 | { | 734 | { |
735 | bool ret; | 735 | bool ret; |
736 | mutex_lock(&mgr->qlock); | 736 | |
737 | /* | ||
738 | * All updates to txmsg->state are protected by mgr->qlock, and the two | ||
739 | * cases we check here are terminal states. For those the barriers | ||
740 | * provided by the wake_up/wait_event pair are enough. | ||
741 | */ | ||
737 | ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || | 742 | ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || |
738 | txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); | 743 | txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); |
739 | mutex_unlock(&mgr->qlock); | ||
740 | return ret; | 744 | return ret; |
741 | } | 745 | } |
742 | 746 | ||
@@ -1363,12 +1367,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, | |||
1363 | return 0; | 1367 | return 0; |
1364 | } | 1368 | } |
1365 | 1369 | ||
1366 | /* must be called holding qlock */ | ||
1367 | static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) | 1370 | static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) |
1368 | { | 1371 | { |
1369 | struct drm_dp_sideband_msg_tx *txmsg; | 1372 | struct drm_dp_sideband_msg_tx *txmsg; |
1370 | int ret; | 1373 | int ret; |
1371 | 1374 | ||
1375 | WARN_ON(!mutex_is_locked(&mgr->qlock)); | ||
1376 | |||
1372 | /* construct a chunk from the first msg in the tx_msg queue */ | 1377 | /* construct a chunk from the first msg in the tx_msg queue */ |
1373 | if (list_empty(&mgr->tx_msg_downq)) { | 1378 | if (list_empty(&mgr->tx_msg_downq)) { |
1374 | mgr->tx_down_in_progress = false; | 1379 | mgr->tx_down_in_progress = false; |
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 7fc6f8bd4821..1134526286c8 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
@@ -403,7 +403,7 @@ static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment) | |||
403 | unsigned rem; | 403 | unsigned rem; |
404 | 404 | ||
405 | rem = do_div(tmp, alignment); | 405 | rem = do_div(tmp, alignment); |
406 | if (tmp) | 406 | if (rem) |
407 | start += alignment - rem; | 407 | start += alignment - rem; |
408 | } | 408 | } |
409 | 409 | ||
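
do_div() divides its first argument in place and returns the remainder, so the alignment adjustment in check_free_hole() has to test the remainder; testing the quotient (tmp) bumps even an already-aligned start. A standalone sketch with a userspace stand-in for do_div():

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's do_div(): divides *n in place and
 * returns the remainder. */
static uint32_t do_div_like(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t start = 4096;	/* already aligned to 1024 */
	uint64_t tmp = start;
	uint32_t rem = do_div_like(&tmp, 1024);

	if (rem)			/* the old code tested tmp (the quotient, 4) */
		start += 1024 - rem;
	printf("%llu\n", (unsigned long long)start);	/* stays 4096 */
	return 0;
}
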
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 9a6da3536ae5..61ae8ff4eaed 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c | |||
@@ -836,8 +836,11 @@ static u32 *vmap_batch(struct drm_i915_gem_object *obj, | |||
836 | } | 836 | } |
837 | 837 | ||
838 | i = 0; | 838 | i = 0; |
839 | for_each_sg_page(obj->pages->sgl, &sg_iter, npages, first_page) | 839 | for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) { |
840 | pages[i++] = sg_page_iter_page(&sg_iter); | 840 | pages[i++] = sg_page_iter_page(&sg_iter); |
841 | if (i == npages) | ||
842 | break; | ||
843 | } | ||
841 | 844 | ||
842 | addr = vmap(pages, i, 0, PAGE_KERNEL); | 845 | addr = vmap(pages, i, 0, PAGE_KERNEL); |
843 | if (addr == NULL) { | 846 | if (addr == NULL) { |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e38f45374d55..1a52d6ab0f80 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -1090,7 +1090,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
1090 | seq_printf(m, "Current P-state: %d\n", | 1090 | seq_printf(m, "Current P-state: %d\n", |
1091 | (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); | 1091 | (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); |
1092 | } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) || | 1092 | } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) || |
1093 | IS_BROADWELL(dev)) { | 1093 | IS_BROADWELL(dev) || IS_GEN9(dev)) { |
1094 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | 1094 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); |
1095 | u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); | 1095 | u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); |
1096 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 1096 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
@@ -1109,11 +1109,15 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
1109 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | 1109 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); |
1110 | 1110 | ||
1111 | reqf = I915_READ(GEN6_RPNSWREQ); | 1111 | reqf = I915_READ(GEN6_RPNSWREQ); |
1112 | reqf &= ~GEN6_TURBO_DISABLE; | 1112 | if (IS_GEN9(dev)) |
1113 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | 1113 | reqf >>= 23; |
1114 | reqf >>= 24; | 1114 | else { |
1115 | else | 1115 | reqf &= ~GEN6_TURBO_DISABLE; |
1116 | reqf >>= 25; | 1116 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
1117 | reqf >>= 24; | ||
1118 | else | ||
1119 | reqf >>= 25; | ||
1120 | } | ||
1117 | reqf = intel_gpu_freq(dev_priv, reqf); | 1121 | reqf = intel_gpu_freq(dev_priv, reqf); |
1118 | 1122 | ||
1119 | rpmodectl = I915_READ(GEN6_RP_CONTROL); | 1123 | rpmodectl = I915_READ(GEN6_RP_CONTROL); |
@@ -1127,7 +1131,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
1127 | rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); | 1131 | rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); |
1128 | rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); | 1132 | rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); |
1129 | rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); | 1133 | rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); |
1130 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | 1134 | if (IS_GEN9(dev)) |
1135 | cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; | ||
1136 | else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | ||
1131 | cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; | 1137 | cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; |
1132 | else | 1138 | else |
1133 | cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; | 1139 | cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; |
@@ -1153,7 +1159,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
1153 | pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); | 1159 | pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); |
1154 | seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); | 1160 | seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); |
1155 | seq_printf(m, "Render p-state ratio: %d\n", | 1161 | seq_printf(m, "Render p-state ratio: %d\n", |
1156 | (gt_perf_status & 0xff00) >> 8); | 1162 | (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8); |
1157 | seq_printf(m, "Render p-state VID: %d\n", | 1163 | seq_printf(m, "Render p-state VID: %d\n", |
1158 | gt_perf_status & 0xff); | 1164 | gt_perf_status & 0xff); |
1159 | seq_printf(m, "Render p-state limit: %d\n", | 1165 | seq_printf(m, "Render p-state limit: %d\n", |
@@ -1178,14 +1184,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
1178 | GEN6_CURBSYTAVG_MASK); | 1184 | GEN6_CURBSYTAVG_MASK); |
1179 | 1185 | ||
1180 | max_freq = (rp_state_cap & 0xff0000) >> 16; | 1186 | max_freq = (rp_state_cap & 0xff0000) >> 16; |
1187 | max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); | ||
1181 | seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", | 1188 | seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", |
1182 | intel_gpu_freq(dev_priv, max_freq)); | 1189 | intel_gpu_freq(dev_priv, max_freq)); |
1183 | 1190 | ||
1184 | max_freq = (rp_state_cap & 0xff00) >> 8; | 1191 | max_freq = (rp_state_cap & 0xff00) >> 8; |
1192 | max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); | ||
1185 | seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", | 1193 | seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", |
1186 | intel_gpu_freq(dev_priv, max_freq)); | 1194 | intel_gpu_freq(dev_priv, max_freq)); |
1187 | 1195 | ||
1188 | max_freq = rp_state_cap & 0xff; | 1196 | max_freq = rp_state_cap & 0xff; |
1197 | max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); | ||
1189 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", | 1198 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", |
1190 | intel_gpu_freq(dev_priv, max_freq)); | 1199 | intel_gpu_freq(dev_priv, max_freq)); |
1191 | 1200 | ||
@@ -1831,18 +1840,6 @@ static int i915_context_status(struct seq_file *m, void *unused) | |||
1831 | if (ret) | 1840 | if (ret) |
1832 | return ret; | 1841 | return ret; |
1833 | 1842 | ||
1834 | if (dev_priv->ips.pwrctx) { | ||
1835 | seq_puts(m, "power context "); | ||
1836 | describe_obj(m, dev_priv->ips.pwrctx); | ||
1837 | seq_putc(m, '\n'); | ||
1838 | } | ||
1839 | |||
1840 | if (dev_priv->ips.renderctx) { | ||
1841 | seq_puts(m, "render context "); | ||
1842 | describe_obj(m, dev_priv->ips.renderctx); | ||
1843 | seq_putc(m, '\n'); | ||
1844 | } | ||
1845 | |||
1846 | list_for_each_entry(ctx, &dev_priv->context_list, link) { | 1843 | list_for_each_entry(ctx, &dev_priv->context_list, link) { |
1847 | if (!i915.enable_execlists && | 1844 | if (!i915.enable_execlists && |
1848 | ctx->legacy_hw_ctx.rcs_state == NULL) | 1845 | ctx->legacy_hw_ctx.rcs_state == NULL) |
@@ -2246,6 +2243,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) | |||
2246 | enum pipe pipe; | 2243 | enum pipe pipe; |
2247 | bool enabled = false; | 2244 | bool enabled = false; |
2248 | 2245 | ||
2246 | if (!HAS_PSR(dev)) { | ||
2247 | seq_puts(m, "PSR not supported\n"); | ||
2248 | return 0; | ||
2249 | } | ||
2250 | |||
2249 | intel_runtime_pm_get(dev_priv); | 2251 | intel_runtime_pm_get(dev_priv); |
2250 | 2252 | ||
2251 | mutex_lock(&dev_priv->psr.lock); | 2253 | mutex_lock(&dev_priv->psr.lock); |
@@ -2258,17 +2260,15 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) | |||
2258 | seq_printf(m, "Re-enable work scheduled: %s\n", | 2260 | seq_printf(m, "Re-enable work scheduled: %s\n", |
2259 | yesno(work_busy(&dev_priv->psr.work.work))); | 2261 | yesno(work_busy(&dev_priv->psr.work.work))); |
2260 | 2262 | ||
2261 | if (HAS_PSR(dev)) { | 2263 | if (HAS_DDI(dev)) |
2262 | if (HAS_DDI(dev)) | 2264 | enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; |
2263 | enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; | 2265 | else { |
2264 | else { | 2266 | for_each_pipe(dev_priv, pipe) { |
2265 | for_each_pipe(dev_priv, pipe) { | 2267 | stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & |
2266 | stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & | 2268 | VLV_EDP_PSR_CURR_STATE_MASK; |
2267 | VLV_EDP_PSR_CURR_STATE_MASK; | 2269 | if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || |
2268 | if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || | 2270 | (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) |
2269 | (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) | 2271 | enabled = true; |
2270 | enabled = true; | ||
2271 | } | ||
2272 | } | 2272 | } |
2273 | } | 2273 | } |
2274 | seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); | 2274 | seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); |
@@ -2285,7 +2285,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) | |||
2285 | yesno((bool)dev_priv->psr.link_standby)); | 2285 | yesno((bool)dev_priv->psr.link_standby)); |
2286 | 2286 | ||
2287 | /* CHV PSR has no kind of performance counter */ | 2287 | /* CHV PSR has no kind of performance counter */ |
2288 | if (HAS_PSR(dev) && HAS_DDI(dev)) { | 2288 | if (HAS_DDI(dev)) { |
2289 | psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & | 2289 | psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & |
2290 | EDP_PSR_PERF_CNT_MASK; | 2290 | EDP_PSR_PERF_CNT_MASK; |
2291 | 2291 | ||
@@ -2308,8 +2308,7 @@ static int i915_sink_crc(struct seq_file *m, void *data) | |||
2308 | u8 crc[6]; | 2308 | u8 crc[6]; |
2309 | 2309 | ||
2310 | drm_modeset_lock_all(dev); | 2310 | drm_modeset_lock_all(dev); |
2311 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 2311 | for_each_intel_encoder(dev, connector) { |
2312 | base.head) { | ||
2313 | 2312 | ||
2314 | if (connector->base.dpms != DRM_MODE_DPMS_ON) | 2313 | if (connector->base.dpms != DRM_MODE_DPMS_ON) |
2315 | continue; | 2314 | continue; |
@@ -2677,7 +2676,8 @@ static int i915_display_info(struct seq_file *m, void *unused) | |||
2677 | active = cursor_position(dev, crtc->pipe, &x, &y); | 2676 | active = cursor_position(dev, crtc->pipe, &x, &y); |
2678 | seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n", | 2677 | seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n", |
2679 | yesno(crtc->cursor_base), | 2678 | yesno(crtc->cursor_base), |
2680 | x, y, crtc->cursor_width, crtc->cursor_height, | 2679 | x, y, crtc->base.cursor->state->crtc_w, |
2680 | crtc->base.cursor->state->crtc_h, | ||
2681 | crtc->cursor_addr, yesno(active)); | 2681 | crtc->cursor_addr, yesno(active)); |
2682 | } | 2682 | } |
2683 | 2683 | ||
@@ -2853,7 +2853,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused) | |||
2853 | for_each_pipe(dev_priv, pipe) { | 2853 | for_each_pipe(dev_priv, pipe) { |
2854 | seq_printf(m, "Pipe %c\n", pipe_name(pipe)); | 2854 | seq_printf(m, "Pipe %c\n", pipe_name(pipe)); |
2855 | 2855 | ||
2856 | for_each_plane(pipe, plane) { | 2856 | for_each_plane(dev_priv, pipe, plane) { |
2857 | entry = &ddb->plane[pipe][plane]; | 2857 | entry = &ddb->plane[pipe][plane]; |
2858 | seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, | 2858 | seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, |
2859 | entry->start, entry->end, | 2859 | entry->start, entry->end, |
@@ -2870,6 +2870,115 @@ static int i915_ddb_info(struct seq_file *m, void *unused) | |||
2870 | return 0; | 2870 | return 0; |
2871 | } | 2871 | } |
2872 | 2872 | ||
2873 | static void drrs_status_per_crtc(struct seq_file *m, | ||
2874 | struct drm_device *dev, struct intel_crtc *intel_crtc) | ||
2875 | { | ||
2876 | struct intel_encoder *intel_encoder; | ||
2877 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2878 | struct i915_drrs *drrs = &dev_priv->drrs; | ||
2879 | int vrefresh = 0; | ||
2880 | |||
2881 | for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) { | ||
2882 | /* Encoder connected on this CRTC */ | ||
2883 | switch (intel_encoder->type) { | ||
2884 | case INTEL_OUTPUT_EDP: | ||
2885 | seq_puts(m, "eDP:\n"); | ||
2886 | break; | ||
2887 | case INTEL_OUTPUT_DSI: | ||
2888 | seq_puts(m, "DSI:\n"); | ||
2889 | break; | ||
2890 | case INTEL_OUTPUT_HDMI: | ||
2891 | seq_puts(m, "HDMI:\n"); | ||
2892 | break; | ||
2893 | case INTEL_OUTPUT_DISPLAYPORT: | ||
2894 | seq_puts(m, "DP:\n"); | ||
2895 | break; | ||
2896 | default: | ||
2897 | seq_printf(m, "Other encoder (id=%d).\n", | ||
2898 | intel_encoder->type); | ||
2899 | return; | ||
2900 | } | ||
2901 | } | ||
2902 | |||
2903 | if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT) | ||
2904 | seq_puts(m, "\tVBT: DRRS_type: Static"); | ||
2905 | else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT) | ||
2906 | seq_puts(m, "\tVBT: DRRS_type: Seamless"); | ||
2907 | else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED) | ||
2908 | seq_puts(m, "\tVBT: DRRS_type: None"); | ||
2909 | else | ||
2910 | seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value"); | ||
2911 | |||
2912 | seq_puts(m, "\n\n"); | ||
2913 | |||
2914 | if (intel_crtc->config->has_drrs) { | ||
2915 | struct intel_panel *panel; | ||
2916 | |||
2917 | mutex_lock(&drrs->mutex); | ||
2918 | /* DRRS Supported */ | ||
2919 | seq_puts(m, "\tDRRS Supported: Yes\n"); | ||
2920 | |||
2921 | /* disable_drrs() will make drrs->dp NULL */ | ||
2922 | if (!drrs->dp) { | ||
2923 | seq_puts(m, "Idleness DRRS: Disabled"); | ||
2924 | mutex_unlock(&drrs->mutex); | ||
2925 | return; | ||
2926 | } | ||
2927 | |||
2928 | panel = &drrs->dp->attached_connector->panel; | ||
2929 | seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X", | ||
2930 | drrs->busy_frontbuffer_bits); | ||
2931 | |||
2932 | seq_puts(m, "\n\t\t"); | ||
2933 | if (drrs->refresh_rate_type == DRRS_HIGH_RR) { | ||
2934 | seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); | ||
2935 | vrefresh = panel->fixed_mode->vrefresh; | ||
2936 | } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { | ||
2937 | seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); | ||
2938 | vrefresh = panel->downclock_mode->vrefresh; | ||
2939 | } else { | ||
2940 | seq_printf(m, "DRRS_State: Unknown(%d)\n", | ||
2941 | drrs->refresh_rate_type); | ||
2942 | mutex_unlock(&drrs->mutex); | ||
2943 | return; | ||
2944 | } | ||
2945 | seq_printf(m, "\t\tVrefresh: %d", vrefresh); | ||
2946 | |||
2947 | seq_puts(m, "\n\t\t"); | ||
2948 | mutex_unlock(&drrs->mutex); | ||
2949 | } else { | ||
2950 | /* DRRS not supported. Print the VBT parameter*/ | ||
2951 | seq_puts(m, "\tDRRS Supported : No"); | ||
2952 | } | ||
2953 | seq_puts(m, "\n"); | ||
2954 | } | ||
2955 | |||
2956 | static int i915_drrs_status(struct seq_file *m, void *unused) | ||
2957 | { | ||
2958 | struct drm_info_node *node = m->private; | ||
2959 | struct drm_device *dev = node->minor->dev; | ||
2960 | struct intel_crtc *intel_crtc; | ||
2961 | int active_crtc_cnt = 0; | ||
2962 | |||
2963 | for_each_intel_crtc(dev, intel_crtc) { | ||
2964 | drm_modeset_lock(&intel_crtc->base.mutex, NULL); | ||
2965 | |||
2966 | if (intel_crtc->active) { | ||
2967 | active_crtc_cnt++; | ||
2968 | seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); | ||
2969 | |||
2970 | drrs_status_per_crtc(m, dev, intel_crtc); | ||
2971 | } | ||
2972 | |||
2973 | drm_modeset_unlock(&intel_crtc->base.mutex); | ||
2974 | } | ||
2975 | |||
2976 | if (!active_crtc_cnt) | ||
2977 | seq_puts(m, "No active crtc found\n"); | ||
2978 | |||
2979 | return 0; | ||
2980 | } | ||
2981 | |||
2873 | struct pipe_crc_info { | 2982 | struct pipe_crc_info { |
2874 | const char *name; | 2983 | const char *name; |
2875 | struct drm_device *dev; | 2984 | struct drm_device *dev; |
@@ -4362,7 +4471,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused) | |||
4362 | struct drm_i915_private *dev_priv = dev->dev_private; | 4471 | struct drm_i915_private *dev_priv = dev->dev_private; |
4363 | unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0; | 4472 | unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0; |
4364 | 4473 | ||
4365 | if (INTEL_INFO(dev)->gen < 9) | 4474 | if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev)) |
4366 | return -ENODEV; | 4475 | return -ENODEV; |
4367 | 4476 | ||
4368 | seq_puts(m, "SSEU Device Info\n"); | 4477 | seq_puts(m, "SSEU Device Info\n"); |
@@ -4384,7 +4493,34 @@ static int i915_sseu_status(struct seq_file *m, void *unused) | |||
4384 | yesno(INTEL_INFO(dev)->has_eu_pg)); | 4493 | yesno(INTEL_INFO(dev)->has_eu_pg)); |
4385 | 4494 | ||
4386 | seq_puts(m, "SSEU Device Status\n"); | 4495 | seq_puts(m, "SSEU Device Status\n"); |
4387 | if (IS_SKYLAKE(dev)) { | 4496 | if (IS_CHERRYVIEW(dev)) { |
4497 | const int ss_max = 2; | ||
4498 | int ss; | ||
4499 | u32 sig1[ss_max], sig2[ss_max]; | ||
4500 | |||
4501 | sig1[0] = I915_READ(CHV_POWER_SS0_SIG1); | ||
4502 | sig1[1] = I915_READ(CHV_POWER_SS1_SIG1); | ||
4503 | sig2[0] = I915_READ(CHV_POWER_SS0_SIG2); | ||
4504 | sig2[1] = I915_READ(CHV_POWER_SS1_SIG2); | ||
4505 | |||
4506 | for (ss = 0; ss < ss_max; ss++) { | ||
4507 | unsigned int eu_cnt; | ||
4508 | |||
4509 | if (sig1[ss] & CHV_SS_PG_ENABLE) | ||
4510 | /* skip disabled subslice */ | ||
4511 | continue; | ||
4512 | |||
4513 | s_tot = 1; | ||
4514 | ss_per++; | ||
4515 | eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) + | ||
4516 | ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) + | ||
4517 | ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) + | ||
4518 | ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2); | ||
4519 | eu_tot += eu_cnt; | ||
4520 | eu_per = max(eu_per, eu_cnt); | ||
4521 | } | ||
4522 | ss_tot = ss_per; | ||
4523 | } else if (IS_SKYLAKE(dev)) { | ||
4388 | const int s_max = 3, ss_max = 4; | 4524 | const int s_max = 3, ss_max = 4; |
4389 | int s, ss; | 4525 | int s, ss; |
4390 | u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; | 4526 | u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; |
@@ -4548,6 +4684,7 @@ static const struct drm_info_list i915_debugfs_list[] = { | |||
4548 | {"i915_wa_registers", i915_wa_registers, 0}, | 4684 | {"i915_wa_registers", i915_wa_registers, 0}, |
4549 | {"i915_ddb_info", i915_ddb_info, 0}, | 4685 | {"i915_ddb_info", i915_ddb_info, 0}, |
4550 | {"i915_sseu_status", i915_sseu_status, 0}, | 4686 | {"i915_sseu_status", i915_sseu_status, 0}, |
4687 | {"i915_drrs_status", i915_drrs_status, 0}, | ||
4551 | }; | 4688 | }; |
4552 | #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) | 4689 | #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) |
4553 | 4690 | ||
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 053e1788f578..d49ed68f041e 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -68,6 +68,9 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
68 | case I915_PARAM_CHIPSET_ID: | 68 | case I915_PARAM_CHIPSET_ID: |
69 | value = dev->pdev->device; | 69 | value = dev->pdev->device; |
70 | break; | 70 | break; |
71 | case I915_PARAM_REVISION: | ||
72 | value = dev->pdev->revision; | ||
73 | break; | ||
71 | case I915_PARAM_HAS_GEM: | 74 | case I915_PARAM_HAS_GEM: |
72 | value = 1; | 75 | value = 1; |
73 | break; | 76 | break; |
@@ -150,6 +153,16 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
150 | case I915_PARAM_MMAP_VERSION: | 153 | case I915_PARAM_MMAP_VERSION: |
151 | value = 1; | 154 | value = 1; |
152 | break; | 155 | break; |
156 | case I915_PARAM_SUBSLICE_TOTAL: | ||
157 | value = INTEL_INFO(dev)->subslice_total; | ||
158 | if (!value) | ||
159 | return -ENODEV; | ||
160 | break; | ||
161 | case I915_PARAM_EU_TOTAL: | ||
162 | value = INTEL_INFO(dev)->eu_total; | ||
163 | if (!value) | ||
164 | return -ENODEV; | ||
165 | break; | ||
153 | default: | 166 | default: |
154 | DRM_DEBUG("Unknown parameter %d\n", param->param); | 167 | DRM_DEBUG("Unknown parameter %d\n", param->param); |
155 | return -EINVAL; | 168 | return -EINVAL; |
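
The new I915_PARAM_SUBSLICE_TOTAL / I915_PARAM_EU_TOTAL parameters are reported through the existing GETPARAM ioctl. A hedged userspace sketch of querying the EU count; the device path is an assumption, include paths may differ, and error handling is trimmed:

#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* typically found via libdrm's include path */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_EU_TOTAL,
		.value = &value,
	};

	if (fd >= 0 && ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("EU total: %d\n", value);
	return 0;
}
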
@@ -608,14 +621,42 @@ static void intel_device_info_runtime_init(struct drm_device *dev) | |||
608 | 621 | ||
609 | /* Initialize slice/subslice/EU info */ | 622 | /* Initialize slice/subslice/EU info */ |
610 | if (IS_CHERRYVIEW(dev)) { | 623 | if (IS_CHERRYVIEW(dev)) { |
611 | u32 fuse, mask_eu; | 624 | u32 fuse, eu_dis; |
612 | 625 | ||
613 | fuse = I915_READ(CHV_FUSE_GT); | 626 | fuse = I915_READ(CHV_FUSE_GT); |
614 | mask_eu = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK | | 627 | |
615 | CHV_FGT_EU_DIS_SS0_R1_MASK | | 628 | info->slice_total = 1; |
616 | CHV_FGT_EU_DIS_SS1_R0_MASK | | 629 | |
617 | CHV_FGT_EU_DIS_SS1_R1_MASK); | 630 | if (!(fuse & CHV_FGT_DISABLE_SS0)) { |
618 | info->eu_total = 16 - hweight32(mask_eu); | 631 | info->subslice_per_slice++; |
632 | eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK | | ||
633 | CHV_FGT_EU_DIS_SS0_R1_MASK); | ||
634 | info->eu_total += 8 - hweight32(eu_dis); | ||
635 | } | ||
636 | |||
637 | if (!(fuse & CHV_FGT_DISABLE_SS1)) { | ||
638 | info->subslice_per_slice++; | ||
639 | eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK | | ||
640 | CHV_FGT_EU_DIS_SS1_R1_MASK); | ||
641 | info->eu_total += 8 - hweight32(eu_dis); | ||
642 | } | ||
643 | |||
644 | info->subslice_total = info->subslice_per_slice; | ||
645 | /* | ||
646 | * CHV expected to always have a uniform distribution of EU | ||
647 | * across subslices. | ||
648 | */ | ||
649 | info->eu_per_subslice = info->subslice_total ? | ||
650 | info->eu_total / info->subslice_total : | ||
651 | 0; | ||
652 | /* | ||
653 | * CHV supports subslice power gating on devices with more than | ||
654 | * one subslice, and supports EU power gating on devices with | ||
655 | * more than one EU pair per subslice. | ||
656 | */ | ||
657 | info->has_slice_pg = 0; | ||
658 | info->has_subslice_pg = (info->subslice_total > 1); | ||
659 | info->has_eu_pg = (info->eu_per_subslice > 2); | ||
619 | } else if (IS_SKYLAKE(dev)) { | 660 | } else if (IS_SKYLAKE(dev)) { |
620 | const int s_max = 3, ss_max = 4, eu_max = 8; | 661 | const int s_max = 3, ss_max = 4, eu_max = 8; |
621 | int s, ss; | 662 | int s, ss; |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0001642c38b4..82f8be4b6745 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -346,7 +346,6 @@ static const struct intel_device_info intel_broadwell_gt3m_info = { | |||
346 | }; | 346 | }; |
347 | 347 | ||
348 | static const struct intel_device_info intel_cherryview_info = { | 348 | static const struct intel_device_info intel_cherryview_info = { |
349 | .is_preliminary = 1, | ||
350 | .gen = 8, .num_pipes = 3, | 349 | .gen = 8, .num_pipes = 3, |
351 | .need_gfx_hws = 1, .has_hotplug = 1, | 350 | .need_gfx_hws = 1, .has_hotplug = 1, |
352 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, | 351 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, |
@@ -882,12 +881,6 @@ int i915_reset(struct drm_device *dev) | |||
882 | } | 881 | } |
883 | 882 | ||
884 | /* | 883 | /* |
885 | * FIXME: This races pretty badly against concurrent holders of | ||
886 | * ring interrupts. This is possible since we've started to drop | ||
887 | * dev->struct_mutex in select places when waiting for the gpu. | ||
888 | */ | ||
889 | |||
890 | /* | ||
891 | * rps/rc6 re-init is necessary to restore state lost after the | 884 | * rps/rc6 re-init is necessary to restore state lost after the |
892 | * reset and the re-install of gt irqs. Skip for ironlake per | 885 | * reset and the re-install of gt irqs. Skip for ironlake per |
893 | * previous concerns that it doesn't respond well to some forms | 886 | * previous concerns that it doesn't respond well to some forms |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ee5bc43dfc0b..8ba7e1b7b733 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -56,7 +56,7 @@ | |||
56 | 56 | ||
57 | #define DRIVER_NAME "i915" | 57 | #define DRIVER_NAME "i915" |
58 | #define DRIVER_DESC "Intel Graphics" | 58 | #define DRIVER_DESC "Intel Graphics" |
59 | #define DRIVER_DATE "20150227" | 59 | #define DRIVER_DATE "20150313" |
60 | 60 | ||
61 | #undef WARN_ON | 61 | #undef WARN_ON |
62 | /* Many gcc seem to no see through this and fall over :( */ | 62 | /* Many gcc seem to no see through this and fall over :( */ |
@@ -70,6 +70,9 @@ | |||
70 | #define WARN_ON(x) WARN((x), "WARN_ON(" #x ")") | 70 | #define WARN_ON(x) WARN((x), "WARN_ON(" #x ")") |
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | #undef WARN_ON_ONCE | ||
74 | #define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")") | ||
75 | |||
73 | #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \ | 76 | #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \ |
74 | (long) (x), __func__); | 77 | (long) (x), __func__); |
75 | 78 | ||
@@ -223,9 +226,14 @@ enum hpd_pin { | |||
223 | 226 | ||
224 | #define for_each_pipe(__dev_priv, __p) \ | 227 | #define for_each_pipe(__dev_priv, __p) \ |
225 | for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) | 228 | for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) |
226 | #define for_each_plane(pipe, p) \ | 229 | #define for_each_plane(__dev_priv, __pipe, __p) \ |
227 | for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++) | 230 | for ((__p) = 0; \ |
228 | #define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++) | 231 | (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \ |
232 | (__p)++) | ||
233 | #define for_each_sprite(__dev_priv, __p, __s) \ | ||
234 | for ((__s) = 0; \ | ||
235 | (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \ | ||
236 | (__s)++) | ||
229 | 237 | ||
230 | #define for_each_crtc(dev, crtc) \ | 238 | #define for_each_crtc(dev, crtc) \ |
231 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | 239 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) |
@@ -238,6 +246,12 @@ enum hpd_pin { | |||
238 | &(dev)->mode_config.encoder_list, \ | 246 | &(dev)->mode_config.encoder_list, \ |
239 | base.head) | 247 | base.head) |
240 | 248 | ||
249 | #define for_each_intel_connector(dev, intel_connector) \ | ||
250 | list_for_each_entry(intel_connector, \ | ||
251 | &dev->mode_config.connector_list, \ | ||
252 | base.head) | ||
253 | |||
254 | |||
241 | #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ | 255 | #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ |
242 | list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ | 256 | list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ |
243 | if ((intel_encoder)->base.crtc == (__crtc)) | 257 | if ((intel_encoder)->base.crtc == (__crtc)) |
@@ -783,10 +797,19 @@ struct intel_context { | |||
783 | struct list_head link; | 797 | struct list_head link; |
784 | }; | 798 | }; |
785 | 799 | ||
800 | enum fb_op_origin { | ||
801 | ORIGIN_GTT, | ||
802 | ORIGIN_CPU, | ||
803 | ORIGIN_CS, | ||
804 | ORIGIN_FLIP, | ||
805 | }; | ||
806 | |||
786 | struct i915_fbc { | 807 | struct i915_fbc { |
787 | unsigned long uncompressed_size; | 808 | unsigned long uncompressed_size; |
788 | unsigned threshold; | 809 | unsigned threshold; |
789 | unsigned int fb_id; | 810 | unsigned int fb_id; |
811 | unsigned int possible_framebuffer_bits; | ||
812 | unsigned int busy_bits; | ||
790 | struct intel_crtc *crtc; | 813 | struct intel_crtc *crtc; |
791 | int y; | 814 | int y; |
792 | 815 | ||
@@ -799,14 +822,6 @@ struct i915_fbc { | |||
799 | * possible. */ | 822 | * possible. */ |
800 | bool enabled; | 823 | bool enabled; |
801 | 824 | ||
802 | /* On gen8 some rings cannont perform fbc clean operation so for now | ||
803 | * we are doing this on SW with mmio. | ||
804 | * This variable works in the opposite information direction | ||
805 | * of ring->fbc_dirty telling software on frontbuffer tracking | ||
806 | * to perform the cache clean on sw side. | ||
807 | */ | ||
808 | bool need_sw_cache_clean; | ||
809 | |||
810 | struct intel_fbc_work { | 825 | struct intel_fbc_work { |
811 | struct delayed_work work; | 826 | struct delayed_work work; |
812 | struct drm_crtc *crtc; | 827 | struct drm_crtc *crtc; |
@@ -1053,9 +1068,6 @@ struct intel_ilk_power_mgmt { | |||
1053 | 1068 | ||
1054 | int c_m; | 1069 | int c_m; |
1055 | int r_t; | 1070 | int r_t; |
1056 | |||
1057 | struct drm_i915_gem_object *pwrctx; | ||
1058 | struct drm_i915_gem_object *renderctx; | ||
1059 | }; | 1071 | }; |
1060 | 1072 | ||
1061 | struct drm_i915_private; | 1073 | struct drm_i915_private; |
@@ -1398,6 +1410,25 @@ struct ilk_wm_values { | |||
1398 | enum intel_ddb_partitioning partitioning; | 1410 | enum intel_ddb_partitioning partitioning; |
1399 | }; | 1411 | }; |
1400 | 1412 | ||
1413 | struct vlv_wm_values { | ||
1414 | struct { | ||
1415 | uint16_t primary; | ||
1416 | uint16_t sprite[2]; | ||
1417 | uint8_t cursor; | ||
1418 | } pipe[3]; | ||
1419 | |||
1420 | struct { | ||
1421 | uint16_t plane; | ||
1422 | uint8_t cursor; | ||
1423 | } sr; | ||
1424 | |||
1425 | struct { | ||
1426 | uint8_t cursor; | ||
1427 | uint8_t sprite[2]; | ||
1428 | uint8_t primary; | ||
1429 | } ddl[3]; | ||
1430 | }; | ||
1431 | |||
1401 | struct skl_ddb_entry { | 1432 | struct skl_ddb_entry { |
1402 | uint16_t start, end; /* in number of blocks, 'end' is exclusive */ | 1433 | uint16_t start, end; /* in number of blocks, 'end' is exclusive */ |
1403 | }; | 1434 | }; |
@@ -1760,6 +1791,7 @@ struct drm_i915_private { | |||
1760 | union { | 1791 | union { |
1761 | struct ilk_wm_values hw; | 1792 | struct ilk_wm_values hw; |
1762 | struct skl_wm_values skl_hw; | 1793 | struct skl_wm_values skl_hw; |
1794 | struct vlv_wm_values vlv; | ||
1763 | }; | 1795 | }; |
1764 | } wm; | 1796 | } wm; |
1765 | 1797 | ||
@@ -2396,6 +2428,7 @@ struct drm_i915_cmd_table { | |||
2396 | #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev)) | 2428 | #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev)) |
2397 | 2429 | ||
2398 | #define GT_FREQUENCY_MULTIPLIER 50 | 2430 | #define GT_FREQUENCY_MULTIPLIER 50 |
2431 | #define GEN9_FREQ_SCALER 3 | ||
2399 | 2432 | ||
2400 | #include "i915_trace.h" | 2433 | #include "i915_trace.h" |
2401 | 2434 | ||
@@ -2433,7 +2466,7 @@ struct i915_params { | |||
2433 | bool disable_display; | 2466 | bool disable_display; |
2434 | bool disable_vtd_wa; | 2467 | bool disable_vtd_wa; |
2435 | int use_mmio_flip; | 2468 | int use_mmio_flip; |
2436 | bool mmio_debug; | 2469 | int mmio_debug; |
2437 | bool verbose_state_checks; | 2470 | bool verbose_state_checks; |
2438 | bool nuclear_pageflip; | 2471 | bool nuclear_pageflip; |
2439 | }; | 2472 | }; |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0107c2ae77d0..0fe313d0f609 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -351,7 +351,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, | |||
351 | struct drm_device *dev = obj->base.dev; | 351 | struct drm_device *dev = obj->base.dev; |
352 | void *vaddr = obj->phys_handle->vaddr + args->offset; | 352 | void *vaddr = obj->phys_handle->vaddr + args->offset; |
353 | char __user *user_data = to_user_ptr(args->data_ptr); | 353 | char __user *user_data = to_user_ptr(args->data_ptr); |
354 | int ret; | 354 | int ret = 0; |
355 | 355 | ||
356 | /* We manually control the domain here and pretend that it | 356 | /* We manually control the domain here and pretend that it |
357 | * remains coherent i.e. in the GTT domain, like shmem_pwrite. | 357 | * remains coherent i.e. in the GTT domain, like shmem_pwrite. |
@@ -360,6 +360,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, | |||
360 | if (ret) | 360 | if (ret) |
361 | return ret; | 361 | return ret; |
362 | 362 | ||
363 | intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); | ||
363 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { | 364 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { |
364 | unsigned long unwritten; | 365 | unsigned long unwritten; |
365 | 366 | ||
@@ -370,13 +371,18 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, | |||
370 | mutex_unlock(&dev->struct_mutex); | 371 | mutex_unlock(&dev->struct_mutex); |
371 | unwritten = copy_from_user(vaddr, user_data, args->size); | 372 | unwritten = copy_from_user(vaddr, user_data, args->size); |
372 | mutex_lock(&dev->struct_mutex); | 373 | mutex_lock(&dev->struct_mutex); |
373 | if (unwritten) | 374 | if (unwritten) { |
374 | return -EFAULT; | 375 | ret = -EFAULT; |
376 | goto out; | ||
377 | } | ||
375 | } | 378 | } |
376 | 379 | ||
377 | drm_clflush_virt_range(vaddr, args->size); | 380 | drm_clflush_virt_range(vaddr, args->size); |
378 | i915_gem_chipset_flush(dev); | 381 | i915_gem_chipset_flush(dev); |
379 | return 0; | 382 | |
383 | out: | ||
384 | intel_fb_obj_flush(obj, false); | ||
385 | return ret; | ||
380 | } | 386 | } |
381 | 387 | ||
382 | void *i915_gem_object_alloc(struct drm_device *dev) | 388 | void *i915_gem_object_alloc(struct drm_device *dev) |
@@ -810,6 +816,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, | |||
810 | 816 | ||
811 | offset = i915_gem_obj_ggtt_offset(obj) + args->offset; | 817 | offset = i915_gem_obj_ggtt_offset(obj) + args->offset; |
812 | 818 | ||
819 | intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT); | ||
820 | |||
813 | while (remain > 0) { | 821 | while (remain > 0) { |
814 | /* Operation in this page | 822 | /* Operation in this page |
815 | * | 823 | * |
@@ -830,7 +838,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, | |||
830 | if (fast_user_write(dev_priv->gtt.mappable, page_base, | 838 | if (fast_user_write(dev_priv->gtt.mappable, page_base, |
831 | page_offset, user_data, page_length)) { | 839 | page_offset, user_data, page_length)) { |
832 | ret = -EFAULT; | 840 | ret = -EFAULT; |
833 | goto out_unpin; | 841 | goto out_flush; |
834 | } | 842 | } |
835 | 843 | ||
836 | remain -= page_length; | 844 | remain -= page_length; |
@@ -838,6 +846,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, | |||
838 | offset += page_length; | 846 | offset += page_length; |
839 | } | 847 | } |
840 | 848 | ||
849 | out_flush: | ||
850 | intel_fb_obj_flush(obj, false); | ||
841 | out_unpin: | 851 | out_unpin: |
842 | i915_gem_object_ggtt_unpin(obj); | 852 | i915_gem_object_ggtt_unpin(obj); |
843 | out: | 853 | out: |
@@ -952,6 +962,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev, | |||
952 | if (ret) | 962 | if (ret) |
953 | return ret; | 963 | return ret; |
954 | 964 | ||
965 | intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); | ||
966 | |||
955 | i915_gem_object_pin_pages(obj); | 967 | i915_gem_object_pin_pages(obj); |
956 | 968 | ||
957 | offset = args->offset; | 969 | offset = args->offset; |
@@ -1030,6 +1042,7 @@ out: | |||
1030 | if (needs_clflush_after) | 1042 | if (needs_clflush_after) |
1031 | i915_gem_chipset_flush(dev); | 1043 | i915_gem_chipset_flush(dev); |
1032 | 1044 | ||
1045 | intel_fb_obj_flush(obj, false); | ||
1033 | return ret; | 1046 | return ret; |
1034 | } | 1047 | } |
1035 | 1048 | ||
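
Editorial note on the pwrite hunks above: CPU and GTT writes are now bracketed by frontbuffer-tracking calls, so FBC/PSR are invalidated before the data changes and flushed once the write lands, even on the error path. A rough sketch of the pattern (not a drop-in function; ORIGIN_CPU vs ORIGIN_GTT is chosen by the particular write path):

/* Sketch of the invalidate/flush bracketing added around a CPU write. */
static int example_cpu_write(struct drm_i915_gem_object *obj,
			     void *dst, const void __user *src, size_t len)
{
	int ret = 0;

	/* tell frontbuffer tracking the contents are about to change */
	intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);

	if (copy_from_user(dst, src, len))
		ret = -EFAULT;

	/* always pair the invalidate with a flush, even when the copy faults */
	intel_fb_obj_flush(obj, false);
	return ret;
}
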
@@ -2929,9 +2942,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
2929 | req = obj->last_read_req; | 2942 | req = obj->last_read_req; |
2930 | 2943 | ||
2931 | /* Do this after OLR check to make sure we make forward progress polling | 2944 | /* Do this after OLR check to make sure we make forward progress polling |
2932 | * on this IOCTL with a timeout <=0 (like busy ioctl) | 2945 | * on this IOCTL with a timeout == 0 (like busy ioctl) |
2933 | */ | 2946 | */ |
2934 | if (args->timeout_ns <= 0) { | 2947 | if (args->timeout_ns == 0) { |
2935 | ret = -ETIME; | 2948 | ret = -ETIME; |
2936 | goto out; | 2949 | goto out; |
2937 | } | 2950 | } |
@@ -2941,7 +2954,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
2941 | i915_gem_request_reference(req); | 2954 | i915_gem_request_reference(req); |
2942 | mutex_unlock(&dev->struct_mutex); | 2955 | mutex_unlock(&dev->struct_mutex); |
2943 | 2956 | ||
2944 | ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns, | 2957 | ret = __i915_wait_request(req, reset_counter, true, |
2958 | args->timeout_ns > 0 ? &args->timeout_ns : NULL, | ||
2945 | file->driver_priv); | 2959 | file->driver_priv); |
2946 | mutex_lock(&dev->struct_mutex); | 2960 | mutex_lock(&dev->struct_mutex); |
2947 | i915_gem_request_unreference(req); | 2961 | i915_gem_request_unreference(req); |
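
The wait-ioctl hunk above changes the timeout convention: 0 keeps the busy-poll behaviour and returns -ETIME if the request is still outstanding, a positive value is a bounded wait, and a negative value is now passed down as a NULL timeout, i.e. wait indefinitely. A small illustration of that mapping (not driver code; example_wait is a hypothetical wrapper):

/* Illustration of the new timeout semantics around __i915_wait_request(). */
static int example_wait(struct drm_i915_gem_request *req,
			unsigned reset_counter, s64 *timeout_ns,
			void *file_priv)
{
	if (*timeout_ns == 0)
		return -ETIME;		/* busy-poll: request not yet complete */

	return __i915_wait_request(req, reset_counter, true,
				   *timeout_ns > 0 ? timeout_ns : NULL,
				   file_priv);
}
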
@@ -3756,7 +3770,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) | |||
3756 | } | 3770 | } |
3757 | 3771 | ||
3758 | if (write) | 3772 | if (write) |
3759 | intel_fb_obj_invalidate(obj, NULL); | 3773 | intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT); |
3760 | 3774 | ||
3761 | trace_i915_gem_object_change_domain(obj, | 3775 | trace_i915_gem_object_change_domain(obj, |
3762 | old_read_domains, | 3776 | old_read_domains, |
@@ -4071,7 +4085,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) | |||
4071 | } | 4085 | } |
4072 | 4086 | ||
4073 | if (write) | 4087 | if (write) |
4074 | intel_fb_obj_invalidate(obj, NULL); | 4088 | intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); |
4075 | 4089 | ||
4076 | trace_i915_gem_object_change_domain(obj, | 4090 | trace_i915_gem_object_change_domain(obj, |
4077 | old_read_domains, | 4091 | old_read_domains, |
@@ -4781,6 +4795,9 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4781 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) | 4795 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) |
4782 | return -EIO; | 4796 | return -EIO; |
4783 | 4797 | ||
4798 | /* Double layer security blanket, see i915_gem_init() */ | ||
4799 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | ||
4800 | |||
4784 | if (dev_priv->ellc_size) | 4801 | if (dev_priv->ellc_size) |
4785 | I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); | 4802 | I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); |
4786 | 4803 | ||
@@ -4813,7 +4830,7 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4813 | for_each_ring(ring, dev_priv, i) { | 4830 | for_each_ring(ring, dev_priv, i) { |
4814 | ret = ring->init_hw(ring); | 4831 | ret = ring->init_hw(ring); |
4815 | if (ret) | 4832 | if (ret) |
4816 | return ret; | 4833 | goto out; |
4817 | } | 4834 | } |
4818 | 4835 | ||
4819 | for (i = 0; i < NUM_L3_SLICES(dev); i++) | 4836 | for (i = 0; i < NUM_L3_SLICES(dev); i++) |
@@ -4830,9 +4847,11 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4830 | DRM_ERROR("Context enable failed %d\n", ret); | 4847 | DRM_ERROR("Context enable failed %d\n", ret); |
4831 | i915_gem_cleanup_ringbuffer(dev); | 4848 | i915_gem_cleanup_ringbuffer(dev); |
4832 | 4849 | ||
4833 | return ret; | 4850 | goto out; |
4834 | } | 4851 | } |
4835 | 4852 | ||
4853 | out: | ||
4854 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | ||
4836 | return ret; | 4855 | return ret; |
4837 | } | 4856 | } |
4838 | 4857 | ||
@@ -4866,6 +4885,14 @@ int i915_gem_init(struct drm_device *dev) | |||
4866 | dev_priv->gt.stop_ring = intel_logical_ring_stop; | 4885 | dev_priv->gt.stop_ring = intel_logical_ring_stop; |
4867 | } | 4886 | } |
4868 | 4887 | ||
4888 | /* This is just a security blanket to placate dragons. | ||
4889 | * On some systems, we very sporadically observe that the first TLBs | ||
4890 | * used by the CS may be stale, despite us poking the TLB reset. If | ||
4891 | * we hold the forcewake during initialisation these problems | ||
4892 | * just magically go away. | ||
4893 | */ | ||
4894 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | ||
4895 | |||
4869 | ret = i915_gem_init_userptr(dev); | 4896 | ret = i915_gem_init_userptr(dev); |
4870 | if (ret) | 4897 | if (ret) |
4871 | goto out_unlock; | 4898 | goto out_unlock; |
@@ -4892,6 +4919,7 @@ int i915_gem_init(struct drm_device *dev) | |||
4892 | } | 4919 | } |
4893 | 4920 | ||
4894 | out_unlock: | 4921 | out_unlock: |
4922 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | ||
4895 | mutex_unlock(&dev->struct_mutex); | 4923 | mutex_unlock(&dev->struct_mutex); |
4896 | 4924 | ||
4897 | return ret; | 4925 | return ret; |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 85a6adaba258..dc10bc43864e 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -971,7 +971,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, | |||
971 | obj->dirty = 1; | 971 | obj->dirty = 1; |
972 | i915_gem_request_assign(&obj->last_write_req, req); | 972 | i915_gem_request_assign(&obj->last_write_req, req); |
973 | 973 | ||
974 | intel_fb_obj_invalidate(obj, ring); | 974 | intel_fb_obj_invalidate(obj, ring, ORIGIN_CS); |
975 | 975 | ||
976 | /* update for the implicit flush after a batch */ | 976 | /* update for the implicit flush after a batch */ |
977 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | 977 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; |
@@ -1518,7 +1518,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1518 | * - The batch is already pinned into the relevant ppgtt, so we | 1518 | * - The batch is already pinned into the relevant ppgtt, so we |
1519 | * already have the backing storage fully allocated. | 1519 | * already have the backing storage fully allocated. |
1520 | * - No other BO uses the global gtt (well contexts, but meh), | 1520 | * - No other BO uses the global gtt (well contexts, but meh), |
1521 | * so we don't really have issues with mutliple objects not | 1521 | * so we don't really have issues with multiple objects not |
1522 | * fitting due to fragmentation. | 1522 | * fitting due to fragmentation. |
1523 | * So this is actually safe. | 1523 | * So this is actually safe. |
1524 | */ | 1524 | */ |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 74df3d1581dd..2034f7cf238b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -716,15 +716,19 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) | |||
716 | if (size % (1<<30)) | 716 | if (size % (1<<30)) |
717 | DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size); | 717 | DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size); |
718 | 718 | ||
719 | /* 1. Do all our allocations for page directories and page tables. */ | 719 | /* 1. Do all our allocations for page directories and page tables. |
720 | ret = gen8_ppgtt_alloc(ppgtt, max_pdp); | 720 | * We allocate more than was asked so that we can point the unused parts |
721 | * to valid entries that point to scratch page. Dynamic page tables | ||
722 | * will fix this eventually. | ||
723 | */ | ||
724 | ret = gen8_ppgtt_alloc(ppgtt, GEN8_LEGACY_PDPES); | ||
721 | if (ret) | 725 | if (ret) |
722 | return ret; | 726 | return ret; |
723 | 727 | ||
724 | /* | 728 | /* |
725 | * 2. Create DMA mappings for the page directories and page tables. | 729 | * 2. Create DMA mappings for the page directories and page tables. |
726 | */ | 730 | */ |
727 | for (i = 0; i < max_pdp; i++) { | 731 | for (i = 0; i < GEN8_LEGACY_PDPES; i++) { |
728 | ret = gen8_ppgtt_setup_page_directories(ppgtt, i); | 732 | ret = gen8_ppgtt_setup_page_directories(ppgtt, i); |
729 | if (ret) | 733 | if (ret) |
730 | goto bail; | 734 | goto bail; |
@@ -744,7 +748,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) | |||
744 | * plugged in correctly. So we do that now/here. For aliasing PPGTT, we | 748 | * plugged in correctly. So we do that now/here. For aliasing PPGTT, we |
745 | * will never need to touch the PDEs again. | 749 | * will never need to touch the PDEs again. |
746 | */ | 750 | */ |
747 | for (i = 0; i < max_pdp; i++) { | 751 | for (i = 0; i < GEN8_LEGACY_PDPES; i++) { |
748 | struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i]; | 752 | struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i]; |
749 | gen8_ppgtt_pde_t *pd_vaddr; | 753 | gen8_ppgtt_pde_t *pd_vaddr; |
750 | pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page); | 754 | pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page); |
@@ -764,9 +768,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) | |||
764 | ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; | 768 | ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; |
765 | ppgtt->base.cleanup = gen8_ppgtt_cleanup; | 769 | ppgtt->base.cleanup = gen8_ppgtt_cleanup; |
766 | ppgtt->base.start = 0; | 770 | ppgtt->base.start = 0; |
767 | ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE; | ||
768 | 771 | ||
769 | ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); | 772 | /* This is the area that we advertise as usable for the caller */ |
773 | ppgtt->base.total = max_pdp * GEN8_PDES_PER_PAGE * GEN8_PTES_PER_PAGE * PAGE_SIZE; | ||
774 | |||
775 | /* Set all ptes to a valid scratch page. Also above requested space */ | ||
776 | ppgtt->base.clear_range(&ppgtt->base, 0, | ||
777 | ppgtt->num_pd_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE, | ||
778 | true); | ||
770 | 779 | ||
771 | DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n", | 780 | DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n", |
772 | ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp); | 781 | ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp); |
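
For context on the gen8_ppgtt_init() change above: the allocation now always covers every GEN8_LEGACY_PDPES page-directory pointer, the advertised ppgtt->base.total stays at what the caller asked for, and everything above that is cleared to the scratch page. Back-of-the-envelope numbers, assuming the usual legacy-32b values (4 PDPEs, 512 PDEs and 512 PTEs per page, 4 KiB pages):

/* Assumed example values; the real macros live in i915_gem_gtt.h. */
#define EXAMPLE_PDPES		4	/* GEN8_LEGACY_PDPES */
#define EXAMPLE_PDES_PER_PAGE	512
#define EXAMPLE_PTES_PER_PAGE	512
#define EXAMPLE_PAGE_SIZE	4096ULL

/* 512 * 512 * 4096 bytes = 1 GiB mapped per page-directory pointer. */
#define EXAMPLE_PER_PDP		(EXAMPLE_PDES_PER_PAGE * \
				 EXAMPLE_PTES_PER_PAGE * EXAMPLE_PAGE_SIZE)

/* 4 PDPEs * 1 GiB = 4 GiB of tables backed and pointed at scratch,
 * even when the advertised ppgtt->base.total is smaller. */
#define EXAMPLE_FULL_RANGE	(EXAMPLE_PDPES * EXAMPLE_PER_PDP)
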
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9baecb79de8c..49ad5fb82ace 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -1696,11 +1696,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) | |||
1696 | * the work queue. */ | 1696 | * the work queue. */ |
1697 | static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) | 1697 | static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) |
1698 | { | 1698 | { |
1699 | /* TODO: RPS on GEN9+ is not supported yet. */ | ||
1700 | if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9, | ||
1701 | "GEN9+: unexpected RPS IRQ\n")) | ||
1702 | return; | ||
1703 | |||
1704 | if (pm_iir & dev_priv->pm_rps_events) { | 1699 | if (pm_iir & dev_priv->pm_rps_events) { |
1705 | spin_lock(&dev_priv->irq_lock); | 1700 | spin_lock(&dev_priv->irq_lock); |
1706 | gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); | 1701 | gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); |
@@ -3169,15 +3164,24 @@ static void gen8_irq_reset(struct drm_device *dev) | |||
3169 | ibx_irq_reset(dev); | 3164 | ibx_irq_reset(dev); |
3170 | } | 3165 | } |
3171 | 3166 | ||
3172 | void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) | 3167 | void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, |
3168 | unsigned int pipe_mask) | ||
3173 | { | 3169 | { |
3174 | uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; | 3170 | uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; |
3175 | 3171 | ||
3176 | spin_lock_irq(&dev_priv->irq_lock); | 3172 | spin_lock_irq(&dev_priv->irq_lock); |
3177 | GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], | 3173 | if (pipe_mask & 1 << PIPE_A) |
3178 | ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); | 3174 | GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A, |
3179 | GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], | 3175 | dev_priv->de_irq_mask[PIPE_A], |
3180 | ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); | 3176 | ~dev_priv->de_irq_mask[PIPE_A] | extra_ier); |
3177 | if (pipe_mask & 1 << PIPE_B) | ||
3178 | GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, | ||
3179 | dev_priv->de_irq_mask[PIPE_B], | ||
3180 | ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); | ||
3181 | if (pipe_mask & 1 << PIPE_C) | ||
3182 | GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, | ||
3183 | dev_priv->de_irq_mask[PIPE_C], | ||
3184 | ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); | ||
3181 | spin_unlock_irq(&dev_priv->irq_lock); | 3185 | spin_unlock_irq(&dev_priv->irq_lock); |
3182 | } | 3186 | } |
3183 | 3187 | ||
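
The reworked gen8_irq_power_well_post_enable() above takes a pipe mask, so a caller can re-initialise only the pipes behind the power well that just came up. A hypothetical call reproducing the previously hard-coded behaviour (pipes B and C) would be:

	/* restore display IRQ state for pipes B and C only */
	gen8_irq_power_well_post_enable(dev_priv,
					(1 << PIPE_B) | (1 << PIPE_C));
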
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 44f2262a5553..e2d20ffe6586 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c | |||
@@ -171,10 +171,10 @@ module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600); | |||
171 | MODULE_PARM_DESC(use_mmio_flip, | 171 | MODULE_PARM_DESC(use_mmio_flip, |
172 | "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); | 172 | "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); |
173 | 173 | ||
174 | module_param_named(mmio_debug, i915.mmio_debug, bool, 0600); | 174 | module_param_named(mmio_debug, i915.mmio_debug, int, 0600); |
175 | MODULE_PARM_DESC(mmio_debug, | 175 | MODULE_PARM_DESC(mmio_debug, |
176 | "Enable the MMIO debug code (default: false). This may negatively " | 176 | "Enable the MMIO debug code for the first N failures (default: off). " |
177 | "affect performance."); | 177 | "This may negatively affect performance."); |
178 | 178 | ||
179 | module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600); | 179 | module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600); |
180 | MODULE_PARM_DESC(verbose_state_checks, | 180 | MODULE_PARM_DESC(verbose_state_checks, |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 55143cb36e74..cc8ebabc488d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -566,6 +566,9 @@ | |||
566 | #define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) | 566 | #define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) |
567 | #define DSPFREQGUAR_SHIFT 14 | 567 | #define DSPFREQGUAR_SHIFT 14 |
568 | #define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT) | 568 | #define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT) |
569 | #define DSP_MAXFIFO_PM5_STATUS (1 << 22) /* chv */ | ||
570 | #define DSP_AUTO_CDCLK_GATE_DISABLE (1 << 7) /* chv */ | ||
571 | #define DSP_MAXFIFO_PM5_ENABLE (1 << 6) /* chv */ | ||
569 | #define _DP_SSC(val, pipe) ((val) << (2 * (pipe))) | 572 | #define _DP_SSC(val, pipe) ((val) << (2 * (pipe))) |
570 | #define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe)) | 573 | #define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe)) |
571 | #define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe)) | 574 | #define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe)) |
@@ -641,6 +644,11 @@ enum skl_disp_power_wells { | |||
641 | #define FB_GFX_FMIN_AT_VMIN_FUSE 0x137 | 644 | #define FB_GFX_FMIN_AT_VMIN_FUSE 0x137 |
642 | #define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8 | 645 | #define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8 |
643 | 646 | ||
647 | #define PUNIT_REG_DDR_SETUP2 0x139 | ||
648 | #define FORCE_DDR_FREQ_REQ_ACK (1 << 8) | ||
649 | #define FORCE_DDR_LOW_FREQ (1 << 1) | ||
650 | #define FORCE_DDR_HIGH_FREQ (1 << 0) | ||
651 | |||
644 | #define PUNIT_GPU_STATUS_REG 0xdb | 652 | #define PUNIT_GPU_STATUS_REG 0xdb |
645 | #define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16 | 653 | #define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16 |
646 | #define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff | 654 | #define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff |
@@ -1029,6 +1037,7 @@ enum skl_disp_power_wells { | |||
1029 | #define DPIO_CHV_FIRST_MOD (0 << 8) | 1037 | #define DPIO_CHV_FIRST_MOD (0 << 8) |
1030 | #define DPIO_CHV_SECOND_MOD (1 << 8) | 1038 | #define DPIO_CHV_SECOND_MOD (1 << 8) |
1031 | #define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0 | 1039 | #define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0 |
1040 | #define DPIO_CHV_FEEDFWD_GAIN_MASK (0xF << 0) | ||
1032 | #define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1) | 1041 | #define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1) |
1033 | 1042 | ||
1034 | #define _CHV_PLL_DW6_CH0 0x8018 | 1043 | #define _CHV_PLL_DW6_CH0 0x8018 |
@@ -1040,11 +1049,14 @@ enum skl_disp_power_wells { | |||
1040 | 1049 | ||
1041 | #define _CHV_PLL_DW8_CH0 0x8020 | 1050 | #define _CHV_PLL_DW8_CH0 0x8020 |
1042 | #define _CHV_PLL_DW8_CH1 0x81A0 | 1051 | #define _CHV_PLL_DW8_CH1 0x81A0 |
1052 | #define DPIO_CHV_TDC_TARGET_CNT_SHIFT 0 | ||
1053 | #define DPIO_CHV_TDC_TARGET_CNT_MASK (0x3FF << 0) | ||
1043 | #define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1) | 1054 | #define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1) |
1044 | 1055 | ||
1045 | #define _CHV_PLL_DW9_CH0 0x8024 | 1056 | #define _CHV_PLL_DW9_CH0 0x8024 |
1046 | #define _CHV_PLL_DW9_CH1 0x81A4 | 1057 | #define _CHV_PLL_DW9_CH1 0x81A4 |
1047 | #define DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT 1 /* 3 bits */ | 1058 | #define DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT 1 /* 3 bits */ |
1059 | #define DPIO_CHV_INT_LOCK_THRESHOLD_MASK (7 << 1) | ||
1048 | #define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */ | 1060 | #define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */ |
1049 | #define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1) | 1061 | #define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1) |
1050 | 1062 | ||
@@ -1522,6 +1534,8 @@ enum skl_disp_power_wells { | |||
1522 | 1534 | ||
1523 | /* Fuse readout registers for GT */ | 1535 | /* Fuse readout registers for GT */ |
1524 | #define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168) | 1536 | #define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168) |
1537 | #define CHV_FGT_DISABLE_SS0 (1 << 10) | ||
1538 | #define CHV_FGT_DISABLE_SS1 (1 << 11) | ||
1525 | #define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16 | 1539 | #define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16 |
1526 | #define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT) | 1540 | #define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
1527 | #define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20 | 1541 | #define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20 |
@@ -2099,6 +2113,14 @@ enum skl_disp_power_wells { | |||
2099 | #define CDCLK_FREQ_SHIFT 4 | 2113 | #define CDCLK_FREQ_SHIFT 4 |
2100 | #define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT) | 2114 | #define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT) |
2101 | #define CZCLK_FREQ_MASK 0xf | 2115 | #define CZCLK_FREQ_MASK 0xf |
2116 | |||
2117 | #define GCI_CONTROL (VLV_DISPLAY_BASE + 0x650C) | ||
2118 | #define PFI_CREDIT_63 (9 << 28) /* chv only */ | ||
2119 | #define PFI_CREDIT_31 (8 << 28) /* chv only */ | ||
2120 | #define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */ | ||
2121 | #define PFI_CREDIT_RESEND (1 << 27) | ||
2122 | #define VGA_FAST_MODE_DISABLE (1 << 14) | ||
2123 | |||
2102 | #define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510) | 2124 | #define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510) |
2103 | 2125 | ||
2104 | /* | 2126 | /* |
@@ -2427,6 +2449,12 @@ enum skl_disp_power_wells { | |||
2427 | #define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994) | 2449 | #define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994) |
2428 | #define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998) | 2450 | #define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998) |
2429 | 2451 | ||
2452 | #define INTERVAL_1_28_US(us) (((us) * 100) >> 7) | ||
2453 | #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) | ||
2454 | #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \ | ||
2455 | INTERVAL_1_33_US(us) : \ | ||
2456 | INTERVAL_1_28_US(us)) | ||
2457 | |||
2430 | /* | 2458 | /* |
2431 | * Logical Context regs | 2459 | * Logical Context regs |
2432 | */ | 2460 | */ |
@@ -3019,7 +3047,7 @@ enum skl_disp_power_wells { | |||
3019 | 3047 | ||
3020 | /* Video Data Island Packet control */ | 3048 | /* Video Data Island Packet control */ |
3021 | #define VIDEO_DIP_DATA 0x61178 | 3049 | #define VIDEO_DIP_DATA 0x61178 |
3022 | /* Read the description of VIDEO_DIP_DATA (before Haswel) or VIDEO_DIP_ECC | 3050 | /* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC |
3023 | * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte | 3051 | * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte |
3024 | * of the infoframe structure specified by CEA-861. */ | 3052 | * of the infoframe structure specified by CEA-861. */ |
3025 | #define VIDEO_DIP_DATA_SIZE 32 | 3053 | #define VIDEO_DIP_DATA_SIZE 32 |
@@ -4065,7 +4093,7 @@ enum skl_disp_power_wells { | |||
4065 | #define DPINVGTT_STATUS_MASK 0xff | 4093 | #define DPINVGTT_STATUS_MASK 0xff |
4066 | #define DPINVGTT_STATUS_MASK_CHV 0xfff | 4094 | #define DPINVGTT_STATUS_MASK_CHV 0xfff |
4067 | 4095 | ||
4068 | #define DSPARB 0x70030 | 4096 | #define DSPARB (dev_priv->info.display_mmio_offset + 0x70030) |
4069 | #define DSPARB_CSTART_MASK (0x7f << 7) | 4097 | #define DSPARB_CSTART_MASK (0x7f << 7) |
4070 | #define DSPARB_CSTART_SHIFT 7 | 4098 | #define DSPARB_CSTART_SHIFT 7 |
4071 | #define DSPARB_BSTART_MASK (0x7f) | 4099 | #define DSPARB_BSTART_MASK (0x7f) |
@@ -4073,6 +4101,9 @@ enum skl_disp_power_wells { | |||
4073 | #define DSPARB_BEND_SHIFT 9 /* on 855 */ | 4101 | #define DSPARB_BEND_SHIFT 9 /* on 855 */ |
4074 | #define DSPARB_AEND_SHIFT 0 | 4102 | #define DSPARB_AEND_SHIFT 0 |
4075 | 4103 | ||
4104 | #define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */ | ||
4105 | #define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */ | ||
4106 | |||
4076 | /* pnv/gen4/g4x/vlv/chv */ | 4107 | /* pnv/gen4/g4x/vlv/chv */ |
4077 | #define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034) | 4108 | #define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034) |
4078 | #define DSPFW_SR_SHIFT 23 | 4109 | #define DSPFW_SR_SHIFT 23 |
@@ -4096,8 +4127,8 @@ enum skl_disp_power_wells { | |||
4096 | #define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */ | 4127 | #define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */ |
4097 | #define DSPFW_CURSORA_SHIFT 8 | 4128 | #define DSPFW_CURSORA_SHIFT 8 |
4098 | #define DSPFW_CURSORA_MASK (0x3f<<8) | 4129 | #define DSPFW_CURSORA_MASK (0x3f<<8) |
4099 | #define DSPFW_PLANEC_SHIFT_OLD 0 | 4130 | #define DSPFW_PLANEC_OLD_SHIFT 0 |
4100 | #define DSPFW_PLANEC_MASK_OLD (0x7f<<0) /* pre-gen4 sprite C */ | 4131 | #define DSPFW_PLANEC_OLD_MASK (0x7f<<0) /* pre-gen4 sprite C */ |
4101 | #define DSPFW_SPRITEA_SHIFT 0 | 4132 | #define DSPFW_SPRITEA_SHIFT 0 |
4102 | #define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */ | 4133 | #define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */ |
4103 | #define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */ | 4134 | #define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */ |
@@ -4136,25 +4167,25 @@ enum skl_disp_power_wells { | |||
4136 | #define DSPFW_SPRITED_WM1_SHIFT 24 | 4167 | #define DSPFW_SPRITED_WM1_SHIFT 24 |
4137 | #define DSPFW_SPRITED_WM1_MASK (0xff<<24) | 4168 | #define DSPFW_SPRITED_WM1_MASK (0xff<<24) |
4138 | #define DSPFW_SPRITED_SHIFT 16 | 4169 | #define DSPFW_SPRITED_SHIFT 16 |
4139 | #define DSPFW_SPRITED_MASK (0xff<<16) | 4170 | #define DSPFW_SPRITED_MASK_VLV (0xff<<16) |
4140 | #define DSPFW_SPRITEC_WM1_SHIFT 8 | 4171 | #define DSPFW_SPRITEC_WM1_SHIFT 8 |
4141 | #define DSPFW_SPRITEC_WM1_MASK (0xff<<8) | 4172 | #define DSPFW_SPRITEC_WM1_MASK (0xff<<8) |
4142 | #define DSPFW_SPRITEC_SHIFT 0 | 4173 | #define DSPFW_SPRITEC_SHIFT 0 |
4143 | #define DSPFW_SPRITEC_MASK (0xff<<0) | 4174 | #define DSPFW_SPRITEC_MASK_VLV (0xff<<0) |
4144 | #define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8) | 4175 | #define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8) |
4145 | #define DSPFW_SPRITEF_WM1_SHIFT 24 | 4176 | #define DSPFW_SPRITEF_WM1_SHIFT 24 |
4146 | #define DSPFW_SPRITEF_WM1_MASK (0xff<<24) | 4177 | #define DSPFW_SPRITEF_WM1_MASK (0xff<<24) |
4147 | #define DSPFW_SPRITEF_SHIFT 16 | 4178 | #define DSPFW_SPRITEF_SHIFT 16 |
4148 | #define DSPFW_SPRITEF_MASK (0xff<<16) | 4179 | #define DSPFW_SPRITEF_MASK_VLV (0xff<<16) |
4149 | #define DSPFW_SPRITEE_WM1_SHIFT 8 | 4180 | #define DSPFW_SPRITEE_WM1_SHIFT 8 |
4150 | #define DSPFW_SPRITEE_WM1_MASK (0xff<<8) | 4181 | #define DSPFW_SPRITEE_WM1_MASK (0xff<<8) |
4151 | #define DSPFW_SPRITEE_SHIFT 0 | 4182 | #define DSPFW_SPRITEE_SHIFT 0 |
4152 | #define DSPFW_SPRITEE_MASK (0xff<<0) | 4183 | #define DSPFW_SPRITEE_MASK_VLV (0xff<<0) |
4153 | #define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */ | 4184 | #define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */ |
4154 | #define DSPFW_PLANEC_WM1_SHIFT 24 | 4185 | #define DSPFW_PLANEC_WM1_SHIFT 24 |
4155 | #define DSPFW_PLANEC_WM1_MASK (0xff<<24) | 4186 | #define DSPFW_PLANEC_WM1_MASK (0xff<<24) |
4156 | #define DSPFW_PLANEC_SHIFT 16 | 4187 | #define DSPFW_PLANEC_SHIFT 16 |
4157 | #define DSPFW_PLANEC_MASK (0xff<<16) | 4188 | #define DSPFW_PLANEC_MASK_VLV (0xff<<16) |
4158 | #define DSPFW_CURSORC_WM1_SHIFT 8 | 4189 | #define DSPFW_CURSORC_WM1_SHIFT 8 |
4159 | #define DSPFW_CURSORC_WM1_MASK (0x3f<<16) | 4190 | #define DSPFW_CURSORC_WM1_MASK (0x3f<<16) |
4160 | #define DSPFW_CURSORC_SHIFT 0 | 4191 | #define DSPFW_CURSORC_SHIFT 0 |
@@ -4163,7 +4194,7 @@ enum skl_disp_power_wells { | |||
4163 | /* vlv/chv high order bits */ | 4194 | /* vlv/chv high order bits */ |
4164 | #define DSPHOWM (VLV_DISPLAY_BASE + 0x70064) | 4195 | #define DSPHOWM (VLV_DISPLAY_BASE + 0x70064) |
4165 | #define DSPFW_SR_HI_SHIFT 24 | 4196 | #define DSPFW_SR_HI_SHIFT 24 |
4166 | #define DSPFW_SR_HI_MASK (1<<24) | 4197 | #define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */ |
4167 | #define DSPFW_SPRITEF_HI_SHIFT 23 | 4198 | #define DSPFW_SPRITEF_HI_SHIFT 23 |
4168 | #define DSPFW_SPRITEF_HI_MASK (1<<23) | 4199 | #define DSPFW_SPRITEF_HI_MASK (1<<23) |
4169 | #define DSPFW_SPRITEE_HI_SHIFT 22 | 4200 | #define DSPFW_SPRITEE_HI_SHIFT 22 |
@@ -4184,7 +4215,7 @@ enum skl_disp_power_wells { | |||
4184 | #define DSPFW_PLANEA_HI_MASK (1<<0) | 4215 | #define DSPFW_PLANEA_HI_MASK (1<<0) |
4185 | #define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068) | 4216 | #define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068) |
4186 | #define DSPFW_SR_WM1_HI_SHIFT 24 | 4217 | #define DSPFW_SR_WM1_HI_SHIFT 24 |
4187 | #define DSPFW_SR_WM1_HI_MASK (1<<24) | 4218 | #define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */ |
4188 | #define DSPFW_SPRITEF_WM1_HI_SHIFT 23 | 4219 | #define DSPFW_SPRITEF_WM1_HI_SHIFT 23 |
4189 | #define DSPFW_SPRITEF_WM1_HI_MASK (1<<23) | 4220 | #define DSPFW_SPRITEF_WM1_HI_MASK (1<<23) |
4190 | #define DSPFW_SPRITEE_WM1_HI_SHIFT 22 | 4221 | #define DSPFW_SPRITEE_WM1_HI_SHIFT 22 |
@@ -4205,21 +4236,17 @@ enum skl_disp_power_wells { | |||
4205 | #define DSPFW_PLANEA_WM1_HI_MASK (1<<0) | 4236 | #define DSPFW_PLANEA_WM1_HI_MASK (1<<0) |
4206 | 4237 | ||
4207 | /* drain latency register values*/ | 4238 | /* drain latency register values*/ |
4208 | #define DRAIN_LATENCY_PRECISION_16 16 | ||
4209 | #define DRAIN_LATENCY_PRECISION_32 32 | ||
4210 | #define DRAIN_LATENCY_PRECISION_64 64 | ||
4211 | #define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe)) | 4239 | #define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe)) |
4212 | #define DDL_CURSOR_PRECISION_HIGH (1<<31) | ||
4213 | #define DDL_CURSOR_PRECISION_LOW (0<<31) | ||
4214 | #define DDL_CURSOR_SHIFT 24 | 4240 | #define DDL_CURSOR_SHIFT 24 |
4215 | #define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite))) | ||
4216 | #define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite))) | ||
4217 | #define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite)) | 4241 | #define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite)) |
4218 | #define DDL_PLANE_PRECISION_HIGH (1<<7) | ||
4219 | #define DDL_PLANE_PRECISION_LOW (0<<7) | ||
4220 | #define DDL_PLANE_SHIFT 0 | 4242 | #define DDL_PLANE_SHIFT 0 |
4243 | #define DDL_PRECISION_HIGH (1<<7) | ||
4244 | #define DDL_PRECISION_LOW (0<<7) | ||
4221 | #define DRAIN_LATENCY_MASK 0x7f | 4245 | #define DRAIN_LATENCY_MASK 0x7f |
4222 | 4246 | ||
4247 | #define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400) | ||
4248 | #define CBR_PND_DEADLINE_DISABLE (1<<31) | ||
4249 | |||
4223 | /* FIFO watermark sizes etc */ | 4250 | /* FIFO watermark sizes etc */ |
4224 | #define G4X_FIFO_LINE_SIZE 64 | 4251 | #define G4X_FIFO_LINE_SIZE 64 |
4225 | #define I915_FIFO_LINE_SIZE 64 | 4252 | #define I915_FIFO_LINE_SIZE 64 |
@@ -6080,6 +6107,7 @@ enum skl_disp_power_wells { | |||
6080 | #define GEN6_TURBO_DISABLE (1<<31) | 6107 | #define GEN6_TURBO_DISABLE (1<<31) |
6081 | #define GEN6_FREQUENCY(x) ((x)<<25) | 6108 | #define GEN6_FREQUENCY(x) ((x)<<25) |
6082 | #define HSW_FREQUENCY(x) ((x)<<24) | 6109 | #define HSW_FREQUENCY(x) ((x)<<24) |
6110 | #define GEN9_FREQUENCY(x) ((x)<<23) | ||
6083 | #define GEN6_OFFSET(x) ((x)<<19) | 6111 | #define GEN6_OFFSET(x) ((x)<<19) |
6084 | #define GEN6_AGGRESSIVE_TURBO (0<<15) | 6112 | #define GEN6_AGGRESSIVE_TURBO (0<<15) |
6085 | #define GEN6_RC_VIDEO_FREQ 0xA00C | 6113 | #define GEN6_RC_VIDEO_FREQ 0xA00C |
@@ -6098,8 +6126,10 @@ enum skl_disp_power_wells { | |||
6098 | #define GEN6_RPSTAT1 0xA01C | 6126 | #define GEN6_RPSTAT1 0xA01C |
6099 | #define GEN6_CAGF_SHIFT 8 | 6127 | #define GEN6_CAGF_SHIFT 8 |
6100 | #define HSW_CAGF_SHIFT 7 | 6128 | #define HSW_CAGF_SHIFT 7 |
6129 | #define GEN9_CAGF_SHIFT 23 | ||
6101 | #define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) | 6130 | #define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) |
6102 | #define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT) | 6131 | #define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT) |
6132 | #define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT) | ||
6103 | #define GEN6_RP_CONTROL 0xA024 | 6133 | #define GEN6_RP_CONTROL 0xA024 |
6104 | #define GEN6_RP_MEDIA_TURBO (1<<11) | 6134 | #define GEN6_RP_MEDIA_TURBO (1<<11) |
6105 | #define GEN6_RP_MEDIA_MODE_MASK (3<<9) | 6135 | #define GEN6_RP_MEDIA_MODE_MASK (3<<9) |
@@ -6225,6 +6255,17 @@ enum skl_disp_power_wells { | |||
6225 | #define GEN6_RC6 3 | 6255 | #define GEN6_RC6 3 |
6226 | #define GEN6_RC7 4 | 6256 | #define GEN6_RC7 4 |
6227 | 6257 | ||
6258 | #define CHV_POWER_SS0_SIG1 0xa720 | ||
6259 | #define CHV_POWER_SS1_SIG1 0xa728 | ||
6260 | #define CHV_SS_PG_ENABLE (1<<1) | ||
6261 | #define CHV_EU08_PG_ENABLE (1<<9) | ||
6262 | #define CHV_EU19_PG_ENABLE (1<<17) | ||
6263 | #define CHV_EU210_PG_ENABLE (1<<25) | ||
6264 | |||
6265 | #define CHV_POWER_SS0_SIG2 0xa724 | ||
6266 | #define CHV_POWER_SS1_SIG2 0xa72c | ||
6267 | #define CHV_EU311_PG_ENABLE (1<<1) | ||
6268 | |||
6228 | #define GEN9_SLICE0_PGCTL_ACK 0x804c | 6269 | #define GEN9_SLICE0_PGCTL_ACK 0x804c |
6229 | #define GEN9_SLICE1_PGCTL_ACK 0x8050 | 6270 | #define GEN9_SLICE1_PGCTL_ACK 0x8050 |
6230 | #define GEN9_SLICE2_PGCTL_ACK 0x8054 | 6271 | #define GEN9_SLICE2_PGCTL_ACK 0x8054 |
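
A quick worked example for the GT_INTERVAL_FROM_US() helpers added in the RP state block above: pre-gen9 parts count RPS evaluation intervals in 1.28 us units, gen9 in 1.33 us units, and the macros approximate the division with cheap shifts. For a 50 us interval, (50 * 100) >> 7 = 39 units on the 1.28 us parts and (50 * 3) >> 2 = 37 units on gen9. Sketch only, with a hypothetical helper name:

/* Worked example: 50 us converted into hardware interval units. */
static u32 example_ei_50us(struct drm_i915_private *dev_priv)
{
	return GT_INTERVAL_FROM_US(dev_priv, 50);	/* 39 pre-gen9, 37 on gen9 */
}
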
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 67bd07edcbb0..247626885f49 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c | |||
@@ -319,7 +319,9 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, | |||
319 | ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff); | 319 | ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff); |
320 | } else { | 320 | } else { |
321 | u32 rpstat = I915_READ(GEN6_RPSTAT1); | 321 | u32 rpstat = I915_READ(GEN6_RPSTAT1); |
322 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | 322 | if (IS_GEN9(dev_priv)) |
323 | ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; | ||
324 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | ||
323 | ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; | 325 | ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; |
324 | else | 326 | else |
325 | ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; | 327 | ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; |
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index 0db9ccf32605..97a88b5f6a26 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h | |||
@@ -32,7 +32,7 @@ | |||
32 | * The following structure pages are defined in GEN MMIO space | 32 | * The following structure pages are defined in GEN MMIO space |
33 | * for virtualization. (One page for now) | 33 | * for virtualization. (One page for now) |
34 | */ | 34 | */ |
35 | #define VGT_MAGIC 0x4776544776544776 /* 'vGTvGTvG' */ | 35 | #define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */ |
36 | #define VGT_VERSION_MAJOR 1 | 36 | #define VGT_VERSION_MAJOR 1 |
37 | #define VGT_VERSION_MINOR 0 | 37 | #define VGT_VERSION_MINOR 0 |
38 | 38 | ||
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 011b8960fd75..3903b90fb64e 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c | |||
@@ -214,12 +214,18 @@ struct drm_crtc_state * | |||
214 | intel_crtc_duplicate_state(struct drm_crtc *crtc) | 214 | intel_crtc_duplicate_state(struct drm_crtc *crtc) |
215 | { | 215 | { |
216 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 216 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
217 | struct intel_crtc_state *crtc_state; | ||
217 | 218 | ||
218 | if (WARN_ON(!intel_crtc->config)) | 219 | if (WARN_ON(!intel_crtc->config)) |
219 | return kzalloc(sizeof(*intel_crtc->config), GFP_KERNEL); | 220 | crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); |
221 | else | ||
222 | crtc_state = kmemdup(intel_crtc->config, | ||
223 | sizeof(*intel_crtc->config), GFP_KERNEL); | ||
220 | 224 | ||
221 | return kmemdup(intel_crtc->config, sizeof(*intel_crtc->config), | 225 | if (crtc_state) |
222 | GFP_KERNEL); | 226 | crtc_state->base.crtc = crtc; |
227 | |||
228 | return &crtc_state->base; | ||
223 | } | 229 | } |
224 | 230 | ||
225 | /** | 231 | /** |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 985d531aaf9e..8aee7d77ce9d 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -156,16 +156,7 @@ static const struct ddi_buf_trans skl_ddi_translations_edp[] = { | |||
156 | 156 | ||
157 | static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { | 157 | static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { |
158 | /* Idx NT mV T mV db */ | 158 | /* Idx NT mV T mV db */ |
159 | { 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */ | 159 | { 0x00004014, 0x00000087 }, /* 0: 800 1000 2 */ |
160 | { 0x00004014, 0x00000098 }, /* 1: 400 600 3.5 */ | ||
161 | { 0x00006012, 0x00000088 }, /* 2: 400 800 6 */ | ||
162 | { 0x00000018, 0x0000003c }, /* 3: 450 450 0 */ | ||
163 | { 0x00000018, 0x00000098 }, /* 4: 600 600 0 */ | ||
164 | { 0x00003015, 0x00000088 }, /* 5: 600 800 2.5 */ | ||
165 | { 0x00005013, 0x00000080 }, /* 6: 600 1000 4.5 */ | ||
166 | { 0x00000018, 0x00000088 }, /* 7: 800 800 0 */ | ||
167 | { 0x00000096, 0x00000080 }, /* 8: 800 1000 2 */ | ||
168 | { 0x00000018, 0x00000080 }, /* 9: 1200 1200 0 */ | ||
169 | }; | 160 | }; |
170 | 161 | ||
171 | enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) | 162 | enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) |
@@ -202,7 +193,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) | |||
202 | { | 193 | { |
203 | struct drm_i915_private *dev_priv = dev->dev_private; | 194 | struct drm_i915_private *dev_priv = dev->dev_private; |
204 | u32 reg; | 195 | u32 reg; |
205 | int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_800mV_0dB, | 196 | int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry, |
206 | size; | 197 | size; |
207 | int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; | 198 | int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; |
208 | const struct ddi_buf_trans *ddi_translations_fdi; | 199 | const struct ddi_buf_trans *ddi_translations_fdi; |
@@ -223,9 +214,16 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) | |||
223 | n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp); | 214 | n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp); |
224 | } | 215 | } |
225 | 216 | ||
217 | /* | ||
218 | * On SKL, the recommendation from the hw team is to always use | ||
219 | * a certain type of level shifter (and thus the corresponding | ||
220 | * 800mV+2dB entry). Given that's the only validated entry, we | ||
221 | * override what is in the VBT, at least until further notice. | ||
222 | */ | ||
223 | hdmi_level = 0; | ||
226 | ddi_translations_hdmi = skl_ddi_translations_hdmi; | 224 | ddi_translations_hdmi = skl_ddi_translations_hdmi; |
227 | n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi); | 225 | n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi); |
228 | hdmi_800mV_0dB = 7; | 226 | hdmi_default_entry = 0; |
229 | } else if (IS_BROADWELL(dev)) { | 227 | } else if (IS_BROADWELL(dev)) { |
230 | ddi_translations_fdi = bdw_ddi_translations_fdi; | 228 | ddi_translations_fdi = bdw_ddi_translations_fdi; |
231 | ddi_translations_dp = bdw_ddi_translations_dp; | 229 | ddi_translations_dp = bdw_ddi_translations_dp; |
@@ -234,7 +232,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) | |||
234 | n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); | 232 | n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); |
235 | n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); | 233 | n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); |
236 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); | 234 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); |
237 | hdmi_800mV_0dB = 7; | 235 | hdmi_default_entry = 7; |
238 | } else if (IS_HASWELL(dev)) { | 236 | } else if (IS_HASWELL(dev)) { |
239 | ddi_translations_fdi = hsw_ddi_translations_fdi; | 237 | ddi_translations_fdi = hsw_ddi_translations_fdi; |
240 | ddi_translations_dp = hsw_ddi_translations_dp; | 238 | ddi_translations_dp = hsw_ddi_translations_dp; |
@@ -242,7 +240,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) | |||
242 | ddi_translations_hdmi = hsw_ddi_translations_hdmi; | 240 | ddi_translations_hdmi = hsw_ddi_translations_hdmi; |
243 | n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp); | 241 | n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp); |
244 | n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); | 242 | n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); |
245 | hdmi_800mV_0dB = 6; | 243 | hdmi_default_entry = 6; |
246 | } else { | 244 | } else { |
247 | WARN(1, "ddi translation table missing\n"); | 245 | WARN(1, "ddi translation table missing\n"); |
248 | ddi_translations_edp = bdw_ddi_translations_dp; | 246 | ddi_translations_edp = bdw_ddi_translations_dp; |
@@ -252,7 +250,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) | |||
252 | n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); | 250 | n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); |
253 | n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); | 251 | n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); |
254 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); | 252 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); |
255 | hdmi_800mV_0dB = 7; | 253 | hdmi_default_entry = 7; |
256 | } | 254 | } |
257 | 255 | ||
258 | switch (port) { | 256 | switch (port) { |
@@ -295,7 +293,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) | |||
295 | /* Choose a good default if VBT is badly populated */ | 293 | /* Choose a good default if VBT is badly populated */ |
296 | if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN || | 294 | if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN || |
297 | hdmi_level >= n_hdmi_entries) | 295 | hdmi_level >= n_hdmi_entries) |
298 | hdmi_level = hdmi_800mV_0dB; | 296 | hdmi_level = hdmi_default_entry; |
299 | 297 | ||
300 | /* Entry 9 is for HDMI: */ | 298 | /* Entry 9 is for HDMI: */ |
301 | I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1); | 299 | I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1); |
@@ -786,9 +784,18 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder, | |||
786 | case DPLL_CRTL1_LINK_RATE_810: | 784 | case DPLL_CRTL1_LINK_RATE_810: |
787 | link_clock = 81000; | 785 | link_clock = 81000; |
788 | break; | 786 | break; |
787 | case DPLL_CRTL1_LINK_RATE_1080: | ||
788 | link_clock = 108000; | ||
789 | break; | ||
789 | case DPLL_CRTL1_LINK_RATE_1350: | 790 | case DPLL_CRTL1_LINK_RATE_1350: |
790 | link_clock = 135000; | 791 | link_clock = 135000; |
791 | break; | 792 | break; |
793 | case DPLL_CRTL1_LINK_RATE_1620: | ||
794 | link_clock = 162000; | ||
795 | break; | ||
796 | case DPLL_CRTL1_LINK_RATE_2160: | ||
797 | link_clock = 216000; | ||
798 | break; | ||
792 | case DPLL_CRTL1_LINK_RATE_2700: | 799 | case DPLL_CRTL1_LINK_RATE_2700: |
793 | link_clock = 270000; | 800 | link_clock = 270000; |
794 | break; | 801 | break; |
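
The skl_ddi_clock_get() hunk above adds the eDP 1.4 intermediate link rates (108000, 162000 and 216000 kHz link clocks, i.e. 1.08, 1.62 and 2.16 Gb/s per lane on the wire). As a rough bandwidth sanity check, using the usual DisplayPort 8b/10b budgeting rather than any specific driver helper: payload capacity is about link_clock * lanes * 8/10, so 216000 kHz over 4 lanes leaves roughly 6.9 Gb/s for pixels.

/* Illustrative only: 8b/10b payload budget for a given link clock. */
static int example_edp_data_rate(int link_clock_khz, int lanes)
{
	/* 8b/10b encoding leaves 80% of the raw bit rate for payload;
	 * example_edp_data_rate(216000, 4) == 691200 */
	return (link_clock_khz * lanes * 8) / 10;
}
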
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1aa1cbd16c19..90b460cf2b57 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <drm/i915_drm.h> | 37 | #include <drm/i915_drm.h> |
38 | #include "i915_drv.h" | 38 | #include "i915_drv.h" |
39 | #include "i915_trace.h" | 39 | #include "i915_trace.h" |
40 | #include <drm/drm_atomic.h> | ||
40 | #include <drm/drm_atomic_helper.h> | 41 | #include <drm/drm_atomic_helper.h> |
41 | #include <drm/drm_dp_helper.h> | 42 | #include <drm/drm_dp_helper.h> |
42 | #include <drm/drm_crtc_helper.h> | 43 | #include <drm/drm_crtc_helper.h> |
@@ -390,7 +391,7 @@ static const intel_limit_t intel_limits_chv = { | |||
390 | * them would make no difference. | 391 | * them would make no difference. |
391 | */ | 392 | */ |
392 | .dot = { .min = 25000 * 5, .max = 540000 * 5}, | 393 | .dot = { .min = 25000 * 5, .max = 540000 * 5}, |
393 | .vco = { .min = 4860000, .max = 6480000 }, | 394 | .vco = { .min = 4800000, .max = 6480000 }, |
394 | .n = { .min = 1, .max = 1 }, | 395 | .n = { .min = 1, .max = 1 }, |
395 | .m1 = { .min = 2, .max = 2 }, | 396 | .m1 = { .min = 2, .max = 2 }, |
396 | .m2 = { .min = 24 << 22, .max = 175 << 22 }, | 397 | .m2 = { .min = 24 << 22, .max = 175 << 22 }, |
@@ -896,8 +897,12 @@ bool intel_crtc_active(struct drm_crtc *crtc) | |||
896 | * | 897 | * |
897 | * We can ditch the crtc->primary->fb check as soon as we can | 898 | * We can ditch the crtc->primary->fb check as soon as we can |
898 | * properly reconstruct framebuffers. | 899 | * properly reconstruct framebuffers. |
900 | * | ||
901 | * FIXME: The intel_crtc->active here should be switched to | ||
902 | * crtc->state->active once we have proper CRTC states wired up | ||
903 | * for atomic. | ||
899 | */ | 904 | */ |
900 | return intel_crtc->active && crtc->primary->fb && | 905 | return intel_crtc->active && crtc->primary->state->fb && |
901 | intel_crtc->config->base.adjusted_mode.crtc_clock; | 906 | intel_crtc->config->base.adjusted_mode.crtc_clock; |
902 | } | 907 | } |
903 | 908 | ||
@@ -1300,14 +1305,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv, | |||
1300 | u32 val; | 1305 | u32 val; |
1301 | 1306 | ||
1302 | if (INTEL_INFO(dev)->gen >= 9) { | 1307 | if (INTEL_INFO(dev)->gen >= 9) { |
1303 | for_each_sprite(pipe, sprite) { | 1308 | for_each_sprite(dev_priv, pipe, sprite) { |
1304 | val = I915_READ(PLANE_CTL(pipe, sprite)); | 1309 | val = I915_READ(PLANE_CTL(pipe, sprite)); |
1305 | I915_STATE_WARN(val & PLANE_CTL_ENABLE, | 1310 | I915_STATE_WARN(val & PLANE_CTL_ENABLE, |
1306 | "plane %d assertion failure, should be off on pipe %c but is still active\n", | 1311 | "plane %d assertion failure, should be off on pipe %c but is still active\n", |
1307 | sprite, pipe_name(pipe)); | 1312 | sprite, pipe_name(pipe)); |
1308 | } | 1313 | } |
1309 | } else if (IS_VALLEYVIEW(dev)) { | 1314 | } else if (IS_VALLEYVIEW(dev)) { |
1310 | for_each_sprite(pipe, sprite) { | 1315 | for_each_sprite(dev_priv, pipe, sprite) { |
1311 | reg = SPCNTR(pipe, sprite); | 1316 | reg = SPCNTR(pipe, sprite); |
1312 | val = I915_READ(reg); | 1317 | val = I915_READ(reg); |
1313 | I915_STATE_WARN(val & SP_ENABLE, | 1318 | I915_STATE_WARN(val & SP_ENABLE, |
@@ -2533,7 +2538,6 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc, | |||
2533 | break; | 2538 | break; |
2534 | } | 2539 | } |
2535 | } | 2540 | } |
2536 | |||
2537 | } | 2541 | } |
2538 | 2542 | ||
2539 | static void i9xx_update_primary_plane(struct drm_crtc *crtc, | 2543 | static void i9xx_update_primary_plane(struct drm_crtc *crtc, |
@@ -2654,9 +2658,6 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc, | |||
2654 | 2658 | ||
2655 | I915_WRITE(reg, dspcntr); | 2659 | I915_WRITE(reg, dspcntr); |
2656 | 2660 | ||
2657 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", | ||
2658 | i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, | ||
2659 | fb->pitches[0]); | ||
2660 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); | 2661 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); |
2661 | if (INTEL_INFO(dev)->gen >= 4) { | 2662 | if (INTEL_INFO(dev)->gen >= 4) { |
2662 | I915_WRITE(DSPSURF(plane), | 2663 | I915_WRITE(DSPSURF(plane), |
@@ -2758,9 +2759,6 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc, | |||
2758 | 2759 | ||
2759 | I915_WRITE(reg, dspcntr); | 2760 | I915_WRITE(reg, dspcntr); |
2760 | 2761 | ||
2761 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", | ||
2762 | i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, | ||
2763 | fb->pitches[0]); | ||
2764 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); | 2762 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); |
2765 | I915_WRITE(DSPSURF(plane), | 2763 | I915_WRITE(DSPSURF(plane), |
2766 | i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); | 2764 | i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); |
@@ -2886,11 +2884,6 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc, | |||
2886 | 2884 | ||
2887 | I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); | 2885 | I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); |
2888 | 2886 | ||
2889 | DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n", | ||
2890 | i915_gem_obj_ggtt_offset(obj), | ||
2891 | x, y, fb->width, fb->height, | ||
2892 | fb->pitches[0]); | ||
2893 | |||
2894 | I915_WRITE(PLANE_POS(pipe, 0), 0); | 2887 | I915_WRITE(PLANE_POS(pipe, 0), 0); |
2895 | I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x); | 2888 | I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x); |
2896 | I915_WRITE(PLANE_SIZE(pipe, 0), | 2889 | I915_WRITE(PLANE_SIZE(pipe, 0), |
@@ -3148,38 +3141,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc) | |||
3148 | FDI_FE_ERRC_ENABLE); | 3141 | FDI_FE_ERRC_ENABLE); |
3149 | } | 3142 | } |
3150 | 3143 | ||
3151 | static bool pipe_has_enabled_pch(struct intel_crtc *crtc) | ||
3152 | { | ||
3153 | return crtc->base.state->enable && crtc->active && | ||
3154 | crtc->config->has_pch_encoder; | ||
3155 | } | ||
3156 | |||
3157 | static void ivb_modeset_global_resources(struct drm_device *dev) | ||
3158 | { | ||
3159 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3160 | struct intel_crtc *pipe_B_crtc = | ||
3161 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); | ||
3162 | struct intel_crtc *pipe_C_crtc = | ||
3163 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]); | ||
3164 | uint32_t temp; | ||
3165 | |||
3166 | /* | ||
3167 | * When everything is off disable fdi C so that we could enable fdi B | ||
3168 | * with all lanes. Note that we don't care about enabled pipes without | ||
3169 | * an enabled pch encoder. | ||
3170 | */ | ||
3171 | if (!pipe_has_enabled_pch(pipe_B_crtc) && | ||
3172 | !pipe_has_enabled_pch(pipe_C_crtc)) { | ||
3173 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); | ||
3174 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); | ||
3175 | |||
3176 | temp = I915_READ(SOUTH_CHICKEN1); | ||
3177 | temp &= ~FDI_BC_BIFURCATION_SELECT; | ||
3178 | DRM_DEBUG_KMS("disabling fdi C rx\n"); | ||
3179 | I915_WRITE(SOUTH_CHICKEN1, temp); | ||
3180 | } | ||
3181 | } | ||
3182 | |||
3183 | /* The FDI link training functions for ILK/Ibexpeak. */ | 3144 | /* The FDI link training functions for ILK/Ibexpeak. */ |
3184 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) | 3145 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) |
3185 | { | 3146 | { |
@@ -3835,20 +3796,23 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, | |||
3835 | I915_READ(VSYNCSHIFT(cpu_transcoder))); | 3796 | I915_READ(VSYNCSHIFT(cpu_transcoder))); |
3836 | } | 3797 | } |
3837 | 3798 | ||
3838 | static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) | 3799 | static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) |
3839 | { | 3800 | { |
3840 | struct drm_i915_private *dev_priv = dev->dev_private; | 3801 | struct drm_i915_private *dev_priv = dev->dev_private; |
3841 | uint32_t temp; | 3802 | uint32_t temp; |
3842 | 3803 | ||
3843 | temp = I915_READ(SOUTH_CHICKEN1); | 3804 | temp = I915_READ(SOUTH_CHICKEN1); |
3844 | if (temp & FDI_BC_BIFURCATION_SELECT) | 3805 | if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) |
3845 | return; | 3806 | return; |
3846 | 3807 | ||
3847 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); | 3808 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); |
3848 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); | 3809 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); |
3849 | 3810 | ||
3850 | temp |= FDI_BC_BIFURCATION_SELECT; | 3811 | temp &= ~FDI_BC_BIFURCATION_SELECT; |
3851 | DRM_DEBUG_KMS("enabling fdi C rx\n"); | 3812 | if (enable) |
3813 | temp |= FDI_BC_BIFURCATION_SELECT; | ||
3814 | |||
3815 | DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis"); | ||
3852 | I915_WRITE(SOUTH_CHICKEN1, temp); | 3816 | I915_WRITE(SOUTH_CHICKEN1, temp); |
3853 | POSTING_READ(SOUTH_CHICKEN1); | 3817 | POSTING_READ(SOUTH_CHICKEN1); |
3854 | } | 3818 | } |
@@ -3856,20 +3820,19 @@ static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) | |||
3856 | static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) | 3820 | static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) |
3857 | { | 3821 | { |
3858 | struct drm_device *dev = intel_crtc->base.dev; | 3822 | struct drm_device *dev = intel_crtc->base.dev; |
3859 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3860 | 3823 | ||
3861 | switch (intel_crtc->pipe) { | 3824 | switch (intel_crtc->pipe) { |
3862 | case PIPE_A: | 3825 | case PIPE_A: |
3863 | break; | 3826 | break; |
3864 | case PIPE_B: | 3827 | case PIPE_B: |
3865 | if (intel_crtc->config->fdi_lanes > 2) | 3828 | if (intel_crtc->config->fdi_lanes > 2) |
3866 | WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); | 3829 | cpt_set_fdi_bc_bifurcation(dev, false); |
3867 | else | 3830 | else |
3868 | cpt_enable_fdi_bc_bifurcation(dev); | 3831 | cpt_set_fdi_bc_bifurcation(dev, true); |
3869 | 3832 | ||
3870 | break; | 3833 | break; |
3871 | case PIPE_C: | 3834 | case PIPE_C: |
3872 | cpt_enable_fdi_bc_bifurcation(dev); | 3835 | cpt_set_fdi_bc_bifurcation(dev, true); |
3873 | 3836 | ||
3874 | break; | 3837 | break; |
3875 | default: | 3838 | default: |
@@ -4204,6 +4167,24 @@ static void intel_enable_sprite_planes(struct drm_crtc *crtc) | |||
4204 | } | 4167 | } |
4205 | } | 4168 | } |
4206 | 4169 | ||
4170 | /* | ||
4171 | * Disable a plane internally without actually modifying the plane's state. | ||
4172 | * This will allow us to easily restore the plane later by just reprogramming | ||
4173 | * its state. | ||
4174 | */ | ||
4175 | static void disable_plane_internal(struct drm_plane *plane) | ||
4176 | { | ||
4177 | struct intel_plane *intel_plane = to_intel_plane(plane); | ||
4178 | struct drm_plane_state *state = | ||
4179 | plane->funcs->atomic_duplicate_state(plane); | ||
4180 | struct intel_plane_state *intel_state = to_intel_plane_state(state); | ||
4181 | |||
4182 | intel_state->visible = false; | ||
4183 | intel_plane->commit_plane(plane, intel_state); | ||
4184 | |||
4185 | intel_plane_destroy_state(plane, state); | ||
4186 | } | ||
4187 | |||
4207 | static void intel_disable_sprite_planes(struct drm_crtc *crtc) | 4188 | static void intel_disable_sprite_planes(struct drm_crtc *crtc) |
4208 | { | 4189 | { |
4209 | struct drm_device *dev = crtc->dev; | 4190 | struct drm_device *dev = crtc->dev; |
@@ -4213,8 +4194,8 @@ static void intel_disable_sprite_planes(struct drm_crtc *crtc) | |||
4213 | 4194 | ||
4214 | drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { | 4195 | drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { |
4215 | intel_plane = to_intel_plane(plane); | 4196 | intel_plane = to_intel_plane(plane); |
4216 | if (intel_plane->pipe == pipe) | 4197 | if (plane->fb && intel_plane->pipe == pipe) |
4217 | plane->funcs->disable_plane(plane); | 4198 | disable_plane_internal(plane); |
4218 | } | 4199 | } |
4219 | } | 4200 | } |
4220 | 4201 | ||
@@ -4983,24 +4964,23 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) | |||
4983 | WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq); | 4964 | WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq); |
4984 | 4965 | ||
4985 | switch (cdclk) { | 4966 | switch (cdclk) { |
4986 | case 400000: | ||
4987 | cmd = 3; | ||
4988 | break; | ||
4989 | case 333333: | 4967 | case 333333: |
4990 | case 320000: | 4968 | case 320000: |
4991 | cmd = 2; | ||
4992 | break; | ||
4993 | case 266667: | 4969 | case 266667: |
4994 | cmd = 1; | ||
4995 | break; | ||
4996 | case 200000: | 4970 | case 200000: |
4997 | cmd = 0; | ||
4998 | break; | 4971 | break; |
4999 | default: | 4972 | default: |
5000 | MISSING_CASE(cdclk); | 4973 | MISSING_CASE(cdclk); |
5001 | return; | 4974 | return; |
5002 | } | 4975 | } |
5003 | 4976 | ||
4977 | /* | ||
4978 | * Specs are full of misinformation, but testing on actual | ||
4979 | * hardware has shown that we just need to write the desired | ||
4980 | * CCK divider into the Punit register. | ||
4981 | */ | ||
4982 | cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; | ||
4983 | |||
5004 | mutex_lock(&dev_priv->rps.hw_lock); | 4984 | mutex_lock(&dev_priv->rps.hw_lock); |
5005 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); | 4985 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); |
5006 | val &= ~DSPFREQGUAR_MASK_CHV; | 4986 | val &= ~DSPFREQGUAR_MASK_CHV; |
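Rather than mapping each supported CDclk to a hard-coded Punit command, the CHV path above now derives the CCK divider directly: cmd = DIV_ROUND_CLOSEST(2 * hpll_freq, cdclk) - 1, with the old switch kept only to reject unsupported frequencies. A standalone arithmetic check of that formula; the HPLL frequency below is an example value in kHz, not necessarily what any given board reports:

#include <stdio.h>

/* same rounding divide the kernel macro performs for positive operands */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int hpll_freq = 800000;	/* kHz, example value only */
	unsigned int cdclks[] = { 200000, 266667, 320000, 333333 };
	unsigned int i;

	for (i = 0; i < sizeof(cdclks) / sizeof(cdclks[0]); i++) {
		unsigned int cmd = DIV_ROUND_CLOSEST(hpll_freq << 1, cdclks[i]) - 1;
		printf("cdclk %6u kHz -> CCK divider cmd %u\n", cdclks[i], cmd);
	}
	return 0;
}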
@@ -5020,27 +5000,25 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, | |||
5020 | int max_pixclk) | 5000 | int max_pixclk) |
5021 | { | 5001 | { |
5022 | int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000; | 5002 | int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000; |
5023 | 5003 | int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90; | |
5024 | /* FIXME: Punit isn't quite ready yet */ | ||
5025 | if (IS_CHERRYVIEW(dev_priv->dev)) | ||
5026 | return 400000; | ||
5027 | 5004 | ||
5028 | /* | 5005 | /* |
5029 | * Really only a few cases to deal with, as only 4 CDclks are supported: | 5006 | * Really only a few cases to deal with, as only 4 CDclks are supported: |
5030 | * 200MHz | 5007 | * 200MHz |
5031 | * 267MHz | 5008 | * 267MHz |
5032 | * 320/333MHz (depends on HPLL freq) | 5009 | * 320/333MHz (depends on HPLL freq) |
5033 | * 400MHz | 5010 | * 400MHz (VLV only) |
5034 | * So we check to see whether we're above 90% of the lower bin and | 5011 | * So we check to see whether we're above 90% (VLV) or 95% (CHV) |
5035 | * adjust if needed. | 5012 | * of the lower bin and adjust if needed. |
5036 | * | 5013 | * |
5037 | * We seem to get an unstable or solid color picture at 200MHz. | 5014 | * We seem to get an unstable or solid color picture at 200MHz. |
5038 | * Not sure what's wrong. For now use 200MHz only when all pipes | 5015 | * Not sure what's wrong. For now use 200MHz only when all pipes |
5039 | * are off. | 5016 | * are off. |
5040 | */ | 5017 | */ |
5041 | if (max_pixclk > freq_320*9/10) | 5018 | if (!IS_CHERRYVIEW(dev_priv) && |
5019 | max_pixclk > freq_320*limit/100) | ||
5042 | return 400000; | 5020 | return 400000; |
5043 | else if (max_pixclk > 266667*9/10) | 5021 | else if (max_pixclk > 266667*limit/100) |
5044 | return freq_320; | 5022 | return freq_320; |
5045 | else if (max_pixclk > 0) | 5023 | else if (max_pixclk > 0) |
5046 | return 266667; | 5024 | return 266667; |
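With the Punit path working, the unconditional 400 MHz override for CHV is gone and the guard band becomes platform dependent: step up to the next CDclk bin once the pixel clock exceeds 90% (VLV) or 95% (CHV) of the lower bin, with 400 MHz reachable on VLV only. A small userspace restatement of the selection; the trailing 200 MHz fall-through is implied by the comment about all pipes being off rather than shown in the hunk:

#include <stdbool.h>
#include <stdio.h>

static int calc_cdclk(bool is_chv, int freq_320, int max_pixclk)
{
	int limit = is_chv ? 95 : 90;	/* percent of the lower bin allowed before stepping up */

	if (!is_chv && max_pixclk > freq_320 * limit / 100)
		return 400000;		/* the 400 MHz bin exists on VLV only */
	else if (max_pixclk > 266667 * limit / 100)
		return freq_320;	/* 320 or 333 MHz depending on the HPLL frequency */
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;		/* all pipes off */
}

int main(void)
{
	printf("VLV, 300 MHz pixel clock -> %d kHz\n", calc_cdclk(false, 320000, 300000));
	printf("CHV, 300 MHz pixel clock -> %d kHz\n", calc_cdclk(true, 320000, 300000));
	return 0;
}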
@@ -5081,6 +5059,42 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev, | |||
5081 | *prepare_pipes |= (1 << intel_crtc->pipe); | 5059 | *prepare_pipes |= (1 << intel_crtc->pipe); |
5082 | } | 5060 | } |
5083 | 5061 | ||
5062 | static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) | ||
5063 | { | ||
5064 | unsigned int credits, default_credits; | ||
5065 | |||
5066 | if (IS_CHERRYVIEW(dev_priv)) | ||
5067 | default_credits = PFI_CREDIT(12); | ||
5068 | else | ||
5069 | default_credits = PFI_CREDIT(8); | ||
5070 | |||
5071 | if (DIV_ROUND_CLOSEST(dev_priv->vlv_cdclk_freq, 1000) >= dev_priv->rps.cz_freq) { | ||
5072 | /* CHV suggested value is 31 or 63 */ | ||
5073 | if (IS_CHERRYVIEW(dev_priv)) | ||
5074 | credits = PFI_CREDIT_31; | ||
5075 | else | ||
5076 | credits = PFI_CREDIT(15); | ||
5077 | } else { | ||
5078 | credits = default_credits; | ||
5079 | } | ||
5080 | |||
5081 | /* | ||
5082 | * WA - write default credits before re-programming | ||
5083 | * FIXME: should we also set the resend bit here? | ||
5084 | */ | ||
5085 | I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE | | ||
5086 | default_credits); | ||
5087 | |||
5088 | I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE | | ||
5089 | credits | PFI_CREDIT_RESEND); | ||
5090 | |||
5091 | /* | ||
5092 | * FIXME is this guaranteed to clear | ||
5093 | * immediately or should we poll for it? | ||
5094 | */ | ||
5095 | WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND); | ||
5096 | } | ||
5097 | |||
5084 | static void valleyview_modeset_global_resources(struct drm_device *dev) | 5098 | static void valleyview_modeset_global_resources(struct drm_device *dev) |
5085 | { | 5099 | { |
5086 | struct drm_i915_private *dev_priv = dev->dev_private; | 5100 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -5104,6 +5118,8 @@ static void valleyview_modeset_global_resources(struct drm_device *dev) | |||
5104 | else | 5118 | else |
5105 | valleyview_set_cdclk(dev, req_cdclk); | 5119 | valleyview_set_cdclk(dev, req_cdclk); |
5106 | 5120 | ||
5121 | vlv_program_pfi_credits(dev_priv); | ||
5122 | |||
5107 | intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); | 5123 | intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); |
5108 | } | 5124 | } |
5109 | } | 5125 | } |
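vlv_program_pfi_credits() is new and is called after every CDclk change from valleyview_modeset_global_resources(). It picks the larger credit value (31 on CHV, 15 on VLV) when CDclk, rounded from kHz to MHz, is at least the CZ clock, and otherwise the defaults (12 on CHV, 8 on VLV), writing the defaults first as the workaround requires. A sketch of just the selection logic; treating rps.cz_freq as MHz is an inference from the kHz-to-MHz conversion in the comparison, not something the hunk states:

#include <stdbool.h>
#include <stdio.h>

struct pfi_choice {
	int credits;
	int defaults;
};

static struct pfi_choice pick_pfi_credits(bool is_chv, int cdclk_khz, int cz_freq_mhz)
{
	struct pfi_choice c;

	c.defaults = is_chv ? 12 : 8;

	if ((cdclk_khz + 500) / 1000 >= cz_freq_mhz)	/* DIV_ROUND_CLOSEST(cdclk, 1000) */
		c.credits = is_chv ? 31 : 15;		/* CHV suggested value is 31 or 63 */
	else
		c.credits = c.defaults;

	return c;
}

int main(void)
{
	struct pfi_choice c = pick_pfi_credits(true, 320000, 320);

	printf("CHV, 320 MHz cdclk vs 320 MHz CZ clock: credits %d (default %d)\n",
	       c.credits, c.defaults);
	return 0;
}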
@@ -5517,13 +5533,21 @@ bool intel_connector_get_hw_state(struct intel_connector *connector) | |||
5517 | return encoder->get_hw_state(encoder, &pipe); | 5533 | return encoder->get_hw_state(encoder, &pipe); |
5518 | } | 5534 | } |
5519 | 5535 | ||
5536 | static int pipe_required_fdi_lanes(struct drm_device *dev, enum pipe pipe) | ||
5537 | { | ||
5538 | struct intel_crtc *crtc = | ||
5539 | to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); | ||
5540 | |||
5541 | if (crtc->base.state->enable && | ||
5542 | crtc->config->has_pch_encoder) | ||
5543 | return crtc->config->fdi_lanes; | ||
5544 | |||
5545 | return 0; | ||
5546 | } | ||
5547 | |||
5520 | static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, | 5548 | static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, |
5521 | struct intel_crtc_state *pipe_config) | 5549 | struct intel_crtc_state *pipe_config) |
5522 | { | 5550 | { |
5523 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5524 | struct intel_crtc *pipe_B_crtc = | ||
5525 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); | ||
5526 | |||
5527 | DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n", | 5551 | DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n", |
5528 | pipe_name(pipe), pipe_config->fdi_lanes); | 5552 | pipe_name(pipe), pipe_config->fdi_lanes); |
5529 | if (pipe_config->fdi_lanes > 4) { | 5553 | if (pipe_config->fdi_lanes > 4) { |
@@ -5550,22 +5574,20 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, | |||
5550 | case PIPE_A: | 5574 | case PIPE_A: |
5551 | return true; | 5575 | return true; |
5552 | case PIPE_B: | 5576 | case PIPE_B: |
5553 | if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled && | 5577 | if (pipe_config->fdi_lanes > 2 && |
5554 | pipe_config->fdi_lanes > 2) { | 5578 | pipe_required_fdi_lanes(dev, PIPE_C) > 0) { |
5555 | DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", | 5579 | DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", |
5556 | pipe_name(pipe), pipe_config->fdi_lanes); | 5580 | pipe_name(pipe), pipe_config->fdi_lanes); |
5557 | return false; | 5581 | return false; |
5558 | } | 5582 | } |
5559 | return true; | 5583 | return true; |
5560 | case PIPE_C: | 5584 | case PIPE_C: |
5561 | if (!pipe_has_enabled_pch(pipe_B_crtc) || | 5585 | if (pipe_config->fdi_lanes > 2) { |
5562 | pipe_B_crtc->config->fdi_lanes <= 2) { | 5586 | DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n", |
5563 | if (pipe_config->fdi_lanes > 2) { | 5587 | pipe_name(pipe), pipe_config->fdi_lanes); |
5564 | DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", | 5588 | return false; |
5565 | pipe_name(pipe), pipe_config->fdi_lanes); | 5589 | } |
5566 | return false; | 5590 | if (pipe_required_fdi_lanes(dev, PIPE_B) > 2) { |
5567 | } | ||
5568 | } else { | ||
5569 | DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); | 5591 | DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); |
5570 | return false; | 5592 | return false; |
5571 | } | 5593 | } |
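The rewritten check moves the question of how many FDI lanes a pipe actually needs into pipe_required_fdi_lanes(), which returns 0 for a disabled pipe or one without a PCH encoder, and then states the IVB sharing rule directly: pipe B may use more than 2 lanes only while pipe C needs none, and pipe C is capped at 2 lanes and additionally requires pipe B to stay at 2 or fewer. A compact restatement of that constraint as a standalone predicate:

#include <stdbool.h>
#include <stdio.h>

/* lanes_b / lanes_c: FDI lanes required by pipes B and C (0 = pipe unused) */
static bool fdi_bc_config_ok(int lanes_b, int lanes_c)
{
	if (lanes_b > 4 || lanes_c > 4)
		return false;		/* never more than 4 lanes per pipe */
	if (lanes_c > 2)
		return false;		/* pipe C is limited to 2 lanes */
	if (lanes_b > 2 && lanes_c > 0)
		return false;		/* B using more than 2 lanes takes C's share */
	return true;
}

int main(void)
{
	printf("B=4, C=0 -> %s\n", fdi_bc_config_ok(4, 0) ? "ok" : "invalid");
	printf("B=4, C=2 -> %s\n", fdi_bc_config_ok(4, 2) ? "ok" : "invalid");
	printf("B=2, C=2 -> %s\n", fdi_bc_config_ok(2, 2) ? "ok" : "invalid");
	return 0;
}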
@@ -5699,10 +5721,6 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev) | |||
5699 | u32 val; | 5721 | u32 val; |
5700 | int divider; | 5722 | int divider; |
5701 | 5723 | ||
5702 | /* FIXME: Punit isn't quite ready yet */ | ||
5703 | if (IS_CHERRYVIEW(dev)) | ||
5704 | return 400000; | ||
5705 | |||
5706 | if (dev_priv->hpll_freq == 0) | 5724 | if (dev_priv->hpll_freq == 0) |
5707 | dev_priv->hpll_freq = valleyview_get_vco(dev_priv); | 5725 | dev_priv->hpll_freq = valleyview_get_vco(dev_priv); |
5708 | 5726 | ||
@@ -6144,9 +6162,10 @@ static void chv_prepare_pll(struct intel_crtc *crtc, | |||
6144 | int pipe = crtc->pipe; | 6162 | int pipe = crtc->pipe; |
6145 | int dpll_reg = DPLL(crtc->pipe); | 6163 | int dpll_reg = DPLL(crtc->pipe); |
6146 | enum dpio_channel port = vlv_pipe_to_channel(pipe); | 6164 | enum dpio_channel port = vlv_pipe_to_channel(pipe); |
6147 | u32 loopfilter, intcoeff; | 6165 | u32 loopfilter, tribuf_calcntr; |
6148 | u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; | 6166 | u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; |
6149 | int refclk; | 6167 | u32 dpio_val; |
6168 | int vco; | ||
6150 | 6169 | ||
6151 | bestn = pipe_config->dpll.n; | 6170 | bestn = pipe_config->dpll.n; |
6152 | bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; | 6171 | bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; |
@@ -6154,6 +6173,9 @@ static void chv_prepare_pll(struct intel_crtc *crtc, | |||
6154 | bestm2 = pipe_config->dpll.m2 >> 22; | 6173 | bestm2 = pipe_config->dpll.m2 >> 22; |
6155 | bestp1 = pipe_config->dpll.p1; | 6174 | bestp1 = pipe_config->dpll.p1; |
6156 | bestp2 = pipe_config->dpll.p2; | 6175 | bestp2 = pipe_config->dpll.p2; |
6176 | vco = pipe_config->dpll.vco; | ||
6177 | dpio_val = 0; | ||
6178 | loopfilter = 0; | ||
6157 | 6179 | ||
6158 | /* | 6180 | /* |
6159 | * Enable Refclk and SSC | 6181 | * Enable Refclk and SSC |
@@ -6179,26 +6201,56 @@ static void chv_prepare_pll(struct intel_crtc *crtc, | |||
6179 | 1 << DPIO_CHV_N_DIV_SHIFT); | 6201 | 1 << DPIO_CHV_N_DIV_SHIFT); |
6180 | 6202 | ||
6181 | /* M2 fraction division */ | 6203 | /* M2 fraction division */ |
6182 | vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); | 6204 | if (bestm2_frac) |
6205 | vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); | ||
6183 | 6206 | ||
6184 | /* M2 fraction division enable */ | 6207 | /* M2 fraction division enable */ |
6185 | vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), | 6208 | dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); |
6186 | DPIO_CHV_FRAC_DIV_EN | | 6209 | dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); |
6187 | (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT)); | 6210 | dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); |
6211 | if (bestm2_frac) | ||
6212 | dpio_val |= DPIO_CHV_FRAC_DIV_EN; | ||
6213 | vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); | ||
6214 | |||
6215 | /* Program digital lock detect threshold */ | ||
6216 | dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); | ||
6217 | dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | | ||
6218 | DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); | ||
6219 | dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); | ||
6220 | if (!bestm2_frac) | ||
6221 | dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; | ||
6222 | vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); | ||
6188 | 6223 | ||
6189 | /* Loop filter */ | 6224 | /* Loop filter */ |
6190 | refclk = i9xx_get_refclk(crtc, 0); | 6225 | if (vco == 5400000) { |
6191 | loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT | | 6226 | loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); |
6192 | 2 << DPIO_CHV_GAIN_CTRL_SHIFT; | 6227 | loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); |
6193 | if (refclk == 100000) | 6228 | loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); |
6194 | intcoeff = 11; | 6229 | tribuf_calcntr = 0x9; |
6195 | else if (refclk == 38400) | 6230 | } else if (vco <= 6200000) { |
6196 | intcoeff = 10; | 6231 | loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); |
6197 | else | 6232 | loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); |
6198 | intcoeff = 9; | 6233 | loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); |
6199 | loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT; | 6234 | tribuf_calcntr = 0x9; |
6235 | } else if (vco <= 6480000) { | ||
6236 | loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); | ||
6237 | loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); | ||
6238 | loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); | ||
6239 | tribuf_calcntr = 0x8; | ||
6240 | } else { | ||
6241 | /* Not supported. Apply the same limits as in the max case */ | ||
6242 | loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); | ||
6243 | loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); | ||
6244 | loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); | ||
6245 | tribuf_calcntr = 0; | ||
6246 | } | ||
6200 | vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); | 6247 | vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); |
6201 | 6248 | ||
6249 | dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); | ||
6250 | dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; | ||
6251 | dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); | ||
6252 | vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); | ||
6253 | |||
6202 | /* AFC Recal */ | 6254 | /* AFC Recal */ |
6203 | vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), | 6255 | vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), |
6204 | vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | | 6256 | vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | |
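chv_prepare_pll() now derives the loop filter coefficients and the TDC target count from the computed VCO frequency instead of from the reference clock, programs the digital lock detect threshold (coarse when no fractional M2 divider is in use), and skips the M2 fraction write entirely when the fraction is zero. The VCO-to-coefficient mapping from the hunk, re-expressed as a small helper with the same numbers:

#include <stdio.h>

struct chv_loopfilter {
	int prop, integ, gain;	/* proportional / integral / gain-control coefficients */
	int tribuf_calcntr;	/* TDC target count */
};

/* Same ranges as the kernel switch: 5.4 GHz exactly, <= 6.2 GHz, <= 6.48 GHz, above. */
static struct chv_loopfilter chv_pick_loopfilter(int vco /* kHz */)
{
	if (vco == 5400000)
		return (struct chv_loopfilter){ 0x3, 0x8, 0x1, 0x9 };
	if (vco <= 6200000)
		return (struct chv_loopfilter){ 0x5, 0xB, 0x3, 0x9 };
	if (vco <= 6480000)
		return (struct chv_loopfilter){ 0x4, 0x9, 0x3, 0x8 };
	/* not supported: apply the same limits as in the max case, no TDC target */
	return (struct chv_loopfilter){ 0x4, 0x9, 0x3, 0x0 };
}

int main(void)
{
	struct chv_loopfilter lf = chv_pick_loopfilter(6048000);

	printf("vco 6048000 kHz -> prop %#x int %#x gain %#x tribuf %#x\n",
	       lf.prop, lf.integ, lf.gain, lf.tribuf_calcntr);
	return 0;
}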
@@ -8409,8 +8461,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) | |||
8409 | uint32_t cntl = 0, size = 0; | 8461 | uint32_t cntl = 0, size = 0; |
8410 | 8462 | ||
8411 | if (base) { | 8463 | if (base) { |
8412 | unsigned int width = intel_crtc->cursor_width; | 8464 | unsigned int width = intel_crtc->base.cursor->state->crtc_w; |
8413 | unsigned int height = intel_crtc->cursor_height; | 8465 | unsigned int height = intel_crtc->base.cursor->state->crtc_h; |
8414 | unsigned int stride = roundup_pow_of_two(width) * 4; | 8466 | unsigned int stride = roundup_pow_of_two(width) * 4; |
8415 | 8467 | ||
8416 | switch (stride) { | 8468 | switch (stride) { |
@@ -8474,7 +8526,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | |||
8474 | cntl = 0; | 8526 | cntl = 0; |
8475 | if (base) { | 8527 | if (base) { |
8476 | cntl = MCURSOR_GAMMA_ENABLE; | 8528 | cntl = MCURSOR_GAMMA_ENABLE; |
8477 | switch (intel_crtc->cursor_width) { | 8529 | switch (intel_crtc->base.cursor->state->crtc_w) { |
8478 | case 64: | 8530 | case 64: |
8479 | cntl |= CURSOR_MODE_64_ARGB_AX; | 8531 | cntl |= CURSOR_MODE_64_ARGB_AX; |
8480 | break; | 8532 | break; |
@@ -8485,7 +8537,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | |||
8485 | cntl |= CURSOR_MODE_256_ARGB_AX; | 8537 | cntl |= CURSOR_MODE_256_ARGB_AX; |
8486 | break; | 8538 | break; |
8487 | default: | 8539 | default: |
8488 | MISSING_CASE(intel_crtc->cursor_width); | 8540 | MISSING_CASE(intel_crtc->base.cursor->state->crtc_w); |
8489 | return; | 8541 | return; |
8490 | } | 8542 | } |
8491 | cntl |= pipe << 28; /* Connect to correct pipe */ | 8543 | cntl |= pipe << 28; /* Connect to correct pipe */ |
@@ -8532,7 +8584,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
8532 | base = 0; | 8584 | base = 0; |
8533 | 8585 | ||
8534 | if (x < 0) { | 8586 | if (x < 0) { |
8535 | if (x + intel_crtc->cursor_width <= 0) | 8587 | if (x + intel_crtc->base.cursor->state->crtc_w <= 0) |
8536 | base = 0; | 8588 | base = 0; |
8537 | 8589 | ||
8538 | pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; | 8590 | pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; |
@@ -8541,7 +8593,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
8541 | pos |= x << CURSOR_X_SHIFT; | 8593 | pos |= x << CURSOR_X_SHIFT; |
8542 | 8594 | ||
8543 | if (y < 0) { | 8595 | if (y < 0) { |
8544 | if (y + intel_crtc->cursor_height <= 0) | 8596 | if (y + intel_crtc->base.cursor->state->crtc_h <= 0) |
8545 | base = 0; | 8597 | base = 0; |
8546 | 8598 | ||
8547 | pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; | 8599 | pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; |
@@ -8557,8 +8609,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
8557 | /* ILK+ do this automagically */ | 8609 | /* ILK+ do this automagically */ |
8558 | if (HAS_GMCH_DISPLAY(dev) && | 8610 | if (HAS_GMCH_DISPLAY(dev) && |
8559 | crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) { | 8611 | crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) { |
8560 | base += (intel_crtc->cursor_height * | 8612 | base += (intel_crtc->base.cursor->state->crtc_h * |
8561 | intel_crtc->cursor_width - 1) * 4; | 8613 | intel_crtc->base.cursor->state->crtc_w - 1) * 4; |
8562 | } | 8614 | } |
8563 | 8615 | ||
8564 | if (IS_845G(dev) || IS_I865G(dev)) | 8616 | if (IS_845G(dev) || IS_I865G(dev)) |
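With cursor_width/cursor_height removed from intel_crtc, the cursor code takes the size from the cursor plane's state (crtc_w/crtc_h). The position register keeps its sign-magnitude encoding, and for 180 degree rotated cursors on GMCH platforms the base address is advanced to the last pixel, i.e. (w*h - 1) * 4 bytes for a 32bpp cursor image. A sketch of both calculations; the shift and sign-bit positions below are illustrative stand-ins, not the documented register layout:

#include <stdint.h>
#include <stdio.h>

#define CUR_X_SHIFT	0	/* illustrative field layout */
#define CUR_Y_SHIFT	16
#define CUR_SIGN_X	(1u << 15)
#define CUR_SIGN_Y	(1u << 31)

static uint32_t cursor_pos(int x, int y)
{
	uint32_t pos = 0;

	if (x < 0) {			/* sign-magnitude: store |x| plus a sign flag */
		pos |= CUR_SIGN_X;
		x = -x;
	}
	pos |= (uint32_t)x << CUR_X_SHIFT;

	if (y < 0) {
		pos |= CUR_SIGN_Y;
		y = -y;
	}
	pos |= (uint32_t)y << CUR_Y_SHIFT;

	return pos;
}

/* 180 degree rotation: scan-out starts from the last 32bpp pixel of the cursor image */
static uint32_t rotated_cursor_base(uint32_t base, unsigned int w, unsigned int h)
{
	return base + (h * w - 1) * 4;
}

int main(void)
{
	printf("pos(-10, 20)    = %#x\n", cursor_pos(-10, 20));
	printf("rot base, 64x64 = %#x\n", rotated_cursor_base(0x100000, 64, 64));
	return 0;
}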
@@ -9219,7 +9271,6 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
9219 | mutex_lock(&dev->struct_mutex); | 9271 | mutex_lock(&dev->struct_mutex); |
9220 | intel_unpin_fb_obj(intel_fb_obj(work->old_fb)); | 9272 | intel_unpin_fb_obj(intel_fb_obj(work->old_fb)); |
9221 | drm_gem_object_unreference(&work->pending_flip_obj->base); | 9273 | drm_gem_object_unreference(&work->pending_flip_obj->base); |
9222 | drm_framebuffer_unreference(work->old_fb); | ||
9223 | 9274 | ||
9224 | intel_fbc_update(dev); | 9275 | intel_fbc_update(dev); |
9225 | 9276 | ||
@@ -9228,6 +9279,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
9228 | mutex_unlock(&dev->struct_mutex); | 9279 | mutex_unlock(&dev->struct_mutex); |
9229 | 9280 | ||
9230 | intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); | 9281 | intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); |
9282 | drm_framebuffer_unreference(work->old_fb); | ||
9231 | 9283 | ||
9232 | BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0); | 9284 | BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0); |
9233 | atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count); | 9285 | atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count); |
@@ -9799,7 +9851,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe) | |||
9799 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 9851 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
9800 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 9852 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
9801 | 9853 | ||
9802 | WARN_ON(!in_irq()); | 9854 | WARN_ON(!in_interrupt()); |
9803 | 9855 | ||
9804 | if (crtc == NULL) | 9856 | if (crtc == NULL) |
9805 | return; | 9857 | return; |
@@ -9891,10 +9943,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
9891 | if (atomic_read(&intel_crtc->unpin_work_count) >= 2) | 9943 | if (atomic_read(&intel_crtc->unpin_work_count) >= 2) |
9892 | flush_workqueue(dev_priv->wq); | 9944 | flush_workqueue(dev_priv->wq); |
9893 | 9945 | ||
9894 | ret = i915_mutex_lock_interruptible(dev); | ||
9895 | if (ret) | ||
9896 | goto cleanup; | ||
9897 | |||
9898 | /* Reference the objects for the scheduled work. */ | 9946 | /* Reference the objects for the scheduled work. */ |
9899 | drm_framebuffer_reference(work->old_fb); | 9947 | drm_framebuffer_reference(work->old_fb); |
9900 | drm_gem_object_reference(&obj->base); | 9948 | drm_gem_object_reference(&obj->base); |
@@ -9904,6 +9952,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
9904 | 9952 | ||
9905 | work->pending_flip_obj = obj; | 9953 | work->pending_flip_obj = obj; |
9906 | 9954 | ||
9955 | ret = i915_mutex_lock_interruptible(dev); | ||
9956 | if (ret) | ||
9957 | goto cleanup; | ||
9958 | |||
9907 | atomic_inc(&intel_crtc->unpin_work_count); | 9959 | atomic_inc(&intel_crtc->unpin_work_count); |
9908 | intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | 9960 | intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
9909 | 9961 | ||
@@ -9968,13 +10020,14 @@ cleanup_unpin: | |||
9968 | intel_unpin_fb_obj(obj); | 10020 | intel_unpin_fb_obj(obj); |
9969 | cleanup_pending: | 10021 | cleanup_pending: |
9970 | atomic_dec(&intel_crtc->unpin_work_count); | 10022 | atomic_dec(&intel_crtc->unpin_work_count); |
10023 | mutex_unlock(&dev->struct_mutex); | ||
10024 | cleanup: | ||
9971 | crtc->primary->fb = old_fb; | 10025 | crtc->primary->fb = old_fb; |
9972 | update_state_fb(crtc->primary); | 10026 | update_state_fb(crtc->primary); |
10027 | |||
10028 | drm_gem_object_unreference_unlocked(&obj->base); | ||
9973 | drm_framebuffer_unreference(work->old_fb); | 10029 | drm_framebuffer_unreference(work->old_fb); |
9974 | drm_gem_object_unreference(&obj->base); | ||
9975 | mutex_unlock(&dev->struct_mutex); | ||
9976 | 10030 | ||
9977 | cleanup: | ||
9978 | spin_lock_irq(&dev->event_lock); | 10031 | spin_lock_irq(&dev->event_lock); |
9979 | intel_crtc->unpin_work = NULL; | 10032 | intel_crtc->unpin_work = NULL; |
9980 | spin_unlock_irq(&dev->event_lock); | 10033 | spin_unlock_irq(&dev->event_lock); |
@@ -10014,8 +10067,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev) | |||
10014 | struct intel_encoder *encoder; | 10067 | struct intel_encoder *encoder; |
10015 | struct intel_connector *connector; | 10068 | struct intel_connector *connector; |
10016 | 10069 | ||
10017 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 10070 | for_each_intel_connector(dev, connector) { |
10018 | base.head) { | ||
10019 | connector->new_encoder = | 10071 | connector->new_encoder = |
10020 | to_intel_encoder(connector->base.encoder); | 10072 | to_intel_encoder(connector->base.encoder); |
10021 | } | 10073 | } |
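This and the many similar hunks below replace the open-coded list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) walks with for_each_intel_connector(dev, connector). Judging from the call sites it replaces, the macro is presumably a thin wrapper along these lines (reconstructed for reference, not quoted from intel_drv.h):

/* Reconstructed from the converted call sites; the real definition lives in intel_drv.h. */
#define for_each_intel_connector(dev, intel_connector)			\
	list_for_each_entry(intel_connector,				\
			    &(dev)->mode_config.connector_list,		\
			    base.head)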
@@ -10046,8 +10098,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev) | |||
10046 | struct intel_encoder *encoder; | 10098 | struct intel_encoder *encoder; |
10047 | struct intel_connector *connector; | 10099 | struct intel_connector *connector; |
10048 | 10100 | ||
10049 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 10101 | for_each_intel_connector(dev, connector) { |
10050 | base.head) { | ||
10051 | connector->base.encoder = &connector->new_encoder->base; | 10102 | connector->base.encoder = &connector->new_encoder->base; |
10052 | } | 10103 | } |
10053 | 10104 | ||
@@ -10135,8 +10186,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc, | |||
10135 | pipe_config->pipe_bpp = bpp; | 10186 | pipe_config->pipe_bpp = bpp; |
10136 | 10187 | ||
10137 | /* Clamp display bpp to EDID value */ | 10188 | /* Clamp display bpp to EDID value */ |
10138 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 10189 | for_each_intel_connector(dev, connector) { |
10139 | base.head) { | ||
10140 | if (!connector->new_encoder || | 10190 | if (!connector->new_encoder || |
10141 | connector->new_encoder->new_crtc != crtc) | 10191 | connector->new_encoder->new_crtc != crtc) |
10142 | continue; | 10192 | continue; |
@@ -10263,8 +10313,7 @@ static bool check_digital_port_conflicts(struct drm_device *dev) | |||
10263 | * list to detect the problem on ddi platforms | 10313 | * list to detect the problem on ddi platforms |
10264 | * where there's just one encoder per digital port. | 10314 | * where there's just one encoder per digital port. |
10265 | */ | 10315 | */ |
10266 | list_for_each_entry(connector, | 10316 | for_each_intel_connector(dev, connector) { |
10267 | &dev->mode_config.connector_list, base.head) { | ||
10268 | struct intel_encoder *encoder = connector->new_encoder; | 10317 | struct intel_encoder *encoder = connector->new_encoder; |
10269 | 10318 | ||
10270 | if (!encoder) | 10319 | if (!encoder) |
@@ -10437,8 +10486,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes, | |||
10437 | * to be part of the prepare_pipes mask. We don't (yet) support global | 10486 | * to be part of the prepare_pipes mask. We don't (yet) support global |
10438 | * modeset across multiple crtcs, so modeset_pipes will only have one | 10487 | * modeset across multiple crtcs, so modeset_pipes will only have one |
10439 | * bit set at most. */ | 10488 | * bit set at most. */ |
10440 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 10489 | for_each_intel_connector(dev, connector) { |
10441 | base.head) { | ||
10442 | if (connector->base.encoder == &connector->new_encoder->base) | 10490 | if (connector->base.encoder == &connector->new_encoder->base) |
10443 | continue; | 10491 | continue; |
10444 | 10492 | ||
@@ -10807,7 +10855,7 @@ static void check_wm_state(struct drm_device *dev) | |||
10807 | continue; | 10855 | continue; |
10808 | 10856 | ||
10809 | /* planes */ | 10857 | /* planes */ |
10810 | for_each_plane(pipe, plane) { | 10858 | for_each_plane(dev_priv, pipe, plane) { |
10811 | hw_entry = &hw_ddb.plane[pipe][plane]; | 10859 | hw_entry = &hw_ddb.plane[pipe][plane]; |
10812 | sw_entry = &sw_ddb->plane[pipe][plane]; | 10860 | sw_entry = &sw_ddb->plane[pipe][plane]; |
10813 | 10861 | ||
@@ -10841,8 +10889,7 @@ check_connector_state(struct drm_device *dev) | |||
10841 | { | 10889 | { |
10842 | struct intel_connector *connector; | 10890 | struct intel_connector *connector; |
10843 | 10891 | ||
10844 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 10892 | for_each_intel_connector(dev, connector) { |
10845 | base.head) { | ||
10846 | /* This also checks the encoder/connector hw state with the | 10893 | /* This also checks the encoder/connector hw state with the |
10847 | * ->get_hw_state callbacks. */ | 10894 | * ->get_hw_state callbacks. */ |
10848 | intel_connector_check_state(connector); | 10895 | intel_connector_check_state(connector); |
@@ -10872,8 +10919,7 @@ check_encoder_state(struct drm_device *dev) | |||
10872 | I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc, | 10919 | I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc, |
10873 | "encoder's active_connectors set, but no crtc\n"); | 10920 | "encoder's active_connectors set, but no crtc\n"); |
10874 | 10921 | ||
10875 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 10922 | for_each_intel_connector(dev, connector) { |
10876 | base.head) { | ||
10877 | if (connector->base.encoder != &encoder->base) | 10923 | if (connector->base.encoder != &encoder->base) |
10878 | continue; | 10924 | continue; |
10879 | enabled = true; | 10925 | enabled = true; |
@@ -11394,7 +11440,7 @@ static void intel_set_config_restore_state(struct drm_device *dev, | |||
11394 | } | 11440 | } |
11395 | 11441 | ||
11396 | count = 0; | 11442 | count = 0; |
11397 | list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { | 11443 | for_each_intel_connector(dev, connector) { |
11398 | connector->new_encoder = | 11444 | connector->new_encoder = |
11399 | to_intel_encoder(config->save_connector_encoders[count++]); | 11445 | to_intel_encoder(config->save_connector_encoders[count++]); |
11400 | } | 11446 | } |
@@ -11486,8 +11532,7 @@ intel_modeset_stage_output_state(struct drm_device *dev, | |||
11486 | WARN_ON(!set->fb && (set->num_connectors != 0)); | 11532 | WARN_ON(!set->fb && (set->num_connectors != 0)); |
11487 | WARN_ON(set->fb && (set->num_connectors == 0)); | 11533 | WARN_ON(set->fb && (set->num_connectors == 0)); |
11488 | 11534 | ||
11489 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 11535 | for_each_intel_connector(dev, connector) { |
11490 | base.head) { | ||
11491 | /* Otherwise traverse passed in connector list and get encoders | 11536 | /* Otherwise traverse passed in connector list and get encoders |
11492 | * for them. */ | 11537 | * for them. */ |
11493 | for (ro = 0; ro < set->num_connectors; ro++) { | 11538 | for (ro = 0; ro < set->num_connectors; ro++) { |
@@ -11512,15 +11557,16 @@ intel_modeset_stage_output_state(struct drm_device *dev, | |||
11512 | 11557 | ||
11513 | 11558 | ||
11514 | if (&connector->new_encoder->base != connector->base.encoder) { | 11559 | if (&connector->new_encoder->base != connector->base.encoder) { |
11515 | DRM_DEBUG_KMS("encoder changed, full mode switch\n"); | 11560 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] encoder changed, full mode switch\n", |
11561 | connector->base.base.id, | ||
11562 | connector->base.name); | ||
11516 | config->mode_changed = true; | 11563 | config->mode_changed = true; |
11517 | } | 11564 | } |
11518 | } | 11565 | } |
11519 | /* connector->new_encoder is now updated for all connectors. */ | 11566 | /* connector->new_encoder is now updated for all connectors. */ |
11520 | 11567 | ||
11521 | /* Update crtc of enabled connectors. */ | 11568 | /* Update crtc of enabled connectors. */ |
11522 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 11569 | for_each_intel_connector(dev, connector) { |
11523 | base.head) { | ||
11524 | struct drm_crtc *new_crtc; | 11570 | struct drm_crtc *new_crtc; |
11525 | 11571 | ||
11526 | if (!connector->new_encoder) | 11572 | if (!connector->new_encoder) |
@@ -11549,9 +11595,7 @@ intel_modeset_stage_output_state(struct drm_device *dev, | |||
11549 | /* Check for any encoders that needs to be disabled. */ | 11595 | /* Check for any encoders that needs to be disabled. */ |
11550 | for_each_intel_encoder(dev, encoder) { | 11596 | for_each_intel_encoder(dev, encoder) { |
11551 | int num_connectors = 0; | 11597 | int num_connectors = 0; |
11552 | list_for_each_entry(connector, | 11598 | for_each_intel_connector(dev, connector) { |
11553 | &dev->mode_config.connector_list, | ||
11554 | base.head) { | ||
11555 | if (connector->new_encoder == encoder) { | 11599 | if (connector->new_encoder == encoder) { |
11556 | WARN_ON(!connector->new_encoder->new_crtc); | 11600 | WARN_ON(!connector->new_encoder->new_crtc); |
11557 | num_connectors++; | 11601 | num_connectors++; |
@@ -11566,13 +11610,14 @@ intel_modeset_stage_output_state(struct drm_device *dev, | |||
11566 | /* Only now check for crtc changes so we don't miss encoders | 11610 | /* Only now check for crtc changes so we don't miss encoders |
11567 | * that will be disabled. */ | 11611 | * that will be disabled. */ |
11568 | if (&encoder->new_crtc->base != encoder->base.crtc) { | 11612 | if (&encoder->new_crtc->base != encoder->base.crtc) { |
11569 | DRM_DEBUG_KMS("crtc changed, full mode switch\n"); | 11613 | DRM_DEBUG_KMS("[ENCODER:%d:%s] crtc changed, full mode switch\n", |
11614 | encoder->base.base.id, | ||
11615 | encoder->base.name); | ||
11570 | config->mode_changed = true; | 11616 | config->mode_changed = true; |
11571 | } | 11617 | } |
11572 | } | 11618 | } |
11573 | /* Now we've also updated encoder->new_crtc for all encoders. */ | 11619 | /* Now we've also updated encoder->new_crtc for all encoders. */ |
11574 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 11620 | for_each_intel_connector(dev, connector) { |
11575 | base.head) { | ||
11576 | if (connector->new_encoder) | 11621 | if (connector->new_encoder) |
11577 | if (connector->new_encoder != connector->encoder) | 11622 | if (connector->new_encoder != connector->encoder) |
11578 | connector->encoder = connector->new_encoder; | 11623 | connector->encoder = connector->new_encoder; |
@@ -11588,7 +11633,8 @@ intel_modeset_stage_output_state(struct drm_device *dev, | |||
11588 | } | 11633 | } |
11589 | 11634 | ||
11590 | if (crtc->new_enabled != crtc->base.state->enable) { | 11635 | if (crtc->new_enabled != crtc->base.state->enable) { |
11591 | DRM_DEBUG_KMS("crtc %sabled, full mode switch\n", | 11636 | DRM_DEBUG_KMS("[CRTC:%d] %sabled, full mode switch\n", |
11637 | crtc->base.base.id, | ||
11592 | crtc->new_enabled ? "en" : "dis"); | 11638 | crtc->new_enabled ? "en" : "dis"); |
11593 | config->mode_changed = true; | 11639 | config->mode_changed = true; |
11594 | } | 11640 | } |
@@ -11611,7 +11657,7 @@ static void disable_crtc_nofb(struct intel_crtc *crtc) | |||
11611 | DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n", | 11657 | DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n", |
11612 | pipe_name(crtc->pipe)); | 11658 | pipe_name(crtc->pipe)); |
11613 | 11659 | ||
11614 | list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { | 11660 | for_each_intel_connector(dev, connector) { |
11615 | if (connector->new_encoder && | 11661 | if (connector->new_encoder && |
11616 | connector->new_encoder->new_crtc == crtc) | 11662 | connector->new_encoder->new_crtc == crtc) |
11617 | connector->new_encoder = NULL; | 11663 | connector->new_encoder = NULL; |
@@ -12182,8 +12228,8 @@ void intel_plane_destroy(struct drm_plane *plane) | |||
12182 | } | 12228 | } |
12183 | 12229 | ||
12184 | const struct drm_plane_funcs intel_plane_funcs = { | 12230 | const struct drm_plane_funcs intel_plane_funcs = { |
12185 | .update_plane = drm_atomic_helper_update_plane, | 12231 | .update_plane = drm_plane_helper_update, |
12186 | .disable_plane = drm_atomic_helper_disable_plane, | 12232 | .disable_plane = drm_plane_helper_disable, |
12187 | .destroy = intel_plane_destroy, | 12233 | .destroy = intel_plane_destroy, |
12188 | .set_property = drm_atomic_helper_plane_set_property, | 12234 | .set_property = drm_atomic_helper_plane_set_property, |
12189 | .atomic_get_property = intel_plane_atomic_get_property, | 12235 | .atomic_get_property = intel_plane_atomic_get_property, |
@@ -12302,7 +12348,7 @@ intel_check_cursor_plane(struct drm_plane *plane, | |||
12302 | 12348 | ||
12303 | finish: | 12349 | finish: |
12304 | if (intel_crtc->active) { | 12350 | if (intel_crtc->active) { |
12305 | if (intel_crtc->cursor_width != state->base.crtc_w) | 12351 | if (plane->state->crtc_w != state->base.crtc_w) |
12306 | intel_crtc->atomic.update_wm = true; | 12352 | intel_crtc->atomic.update_wm = true; |
12307 | 12353 | ||
12308 | intel_crtc->atomic.fb_bits |= | 12354 | intel_crtc->atomic.fb_bits |= |
@@ -12345,8 +12391,6 @@ intel_commit_cursor_plane(struct drm_plane *plane, | |||
12345 | intel_crtc->cursor_addr = addr; | 12391 | intel_crtc->cursor_addr = addr; |
12346 | intel_crtc->cursor_bo = obj; | 12392 | intel_crtc->cursor_bo = obj; |
12347 | update: | 12393 | update: |
12348 | intel_crtc->cursor_width = state->base.crtc_w; | ||
12349 | intel_crtc->cursor_height = state->base.crtc_h; | ||
12350 | 12394 | ||
12351 | if (intel_crtc->active) | 12395 | if (intel_crtc->active) |
12352 | intel_crtc_update_cursor(crtc, state->visible); | 12396 | intel_crtc_update_cursor(crtc, state->visible); |
@@ -12574,10 +12618,15 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
12574 | if (HAS_DDI(dev)) { | 12618 | if (HAS_DDI(dev)) { |
12575 | int found; | 12619 | int found; |
12576 | 12620 | ||
12577 | /* Haswell uses DDI functions to detect digital outputs */ | 12621 | /* |
12622 | * Haswell uses DDI functions to detect digital outputs. | ||
12623 | * On SKL pre-D0 the strap isn't connected, so we assume | ||
12624 | * it's there. | ||
12625 | */ | ||
12578 | found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED; | 12626 | found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED; |
12579 | /* DDI A only supports eDP */ | 12627 | /* WaIgnoreDDIAStrap: skl */ |
12580 | if (found) | 12628 | if (found || |
12629 | (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0)) | ||
12581 | intel_ddi_init(dev, PORT_A); | 12630 | intel_ddi_init(dev, PORT_A); |
12582 | 12631 | ||
12583 | /* DDI B, C and D detection is indicated by the SFUSE_STRAP | 12632 | /* DDI B, C and D detection is indicated by the SFUSE_STRAP |
@@ -13068,8 +13117,6 @@ static void intel_init_display(struct drm_device *dev) | |||
13068 | } else if (IS_IVYBRIDGE(dev)) { | 13117 | } else if (IS_IVYBRIDGE(dev)) { |
13069 | /* FIXME: detect B0+ stepping and use auto training */ | 13118 | /* FIXME: detect B0+ stepping and use auto training */ |
13070 | dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; | 13119 | dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; |
13071 | dev_priv->display.modeset_global_resources = | ||
13072 | ivb_modeset_global_resources; | ||
13073 | } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { | 13120 | } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { |
13074 | dev_priv->display.fdi_link_train = hsw_fdi_link_train; | 13121 | dev_priv->display.fdi_link_train = hsw_fdi_link_train; |
13075 | } else if (IS_VALLEYVIEW(dev)) { | 13122 | } else if (IS_VALLEYVIEW(dev)) { |
@@ -13365,7 +13412,7 @@ void intel_modeset_init(struct drm_device *dev) | |||
13365 | 13412 | ||
13366 | for_each_pipe(dev_priv, pipe) { | 13413 | for_each_pipe(dev_priv, pipe) { |
13367 | intel_crtc_init(dev, pipe); | 13414 | intel_crtc_init(dev, pipe); |
13368 | for_each_sprite(pipe, sprite) { | 13415 | for_each_sprite(dev_priv, pipe, sprite) { |
13369 | ret = intel_plane_init(dev, pipe, sprite); | 13416 | ret = intel_plane_init(dev, pipe, sprite); |
13370 | if (ret) | 13417 | if (ret) |
13371 | DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n", | 13418 | DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n", |
@@ -13421,9 +13468,7 @@ static void intel_enable_pipe_a(struct drm_device *dev) | |||
13421 | /* We can't just switch on the pipe A, we need to set things up with a | 13468 | /* We can't just switch on the pipe A, we need to set things up with a |
13422 | * proper mode and output configuration. As a gross hack, enable pipe A | 13469 | * proper mode and output configuration. As a gross hack, enable pipe A |
13423 | * by enabling the load detect pipe once. */ | 13470 | * by enabling the load detect pipe once. */ |
13424 | list_for_each_entry(connector, | 13471 | for_each_intel_connector(dev, connector) { |
13425 | &dev->mode_config.connector_list, | ||
13426 | base.head) { | ||
13427 | if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { | 13472 | if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { |
13428 | crt = &connector->base; | 13473 | crt = &connector->base; |
13429 | break; | 13474 | break; |
@@ -13494,8 +13539,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) | |||
13494 | crtc->plane = plane; | 13539 | crtc->plane = plane; |
13495 | 13540 | ||
13496 | /* ... and break all links. */ | 13541 | /* ... and break all links. */ |
13497 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 13542 | for_each_intel_connector(dev, connector) { |
13498 | base.head) { | ||
13499 | if (connector->encoder->base.crtc != &crtc->base) | 13543 | if (connector->encoder->base.crtc != &crtc->base) |
13500 | continue; | 13544 | continue; |
13501 | 13545 | ||
@@ -13504,8 +13548,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) | |||
13504 | } | 13548 | } |
13505 | /* multiple connectors may have the same encoder: | 13549 | /* multiple connectors may have the same encoder: |
13506 | * handle them and break crtc link separately */ | 13550 | * handle them and break crtc link separately */ |
13507 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 13551 | for_each_intel_connector(dev, connector) |
13508 | base.head) | ||
13509 | if (connector->encoder->base.crtc == &crtc->base) { | 13552 | if (connector->encoder->base.crtc == &crtc->base) { |
13510 | connector->encoder->base.crtc = NULL; | 13553 | connector->encoder->base.crtc = NULL; |
13511 | connector->encoder->connectors_active = false; | 13554 | connector->encoder->connectors_active = false; |
@@ -13609,9 +13652,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) | |||
13609 | * a bug in one of the get_hw_state functions. Or someplace else | 13652 | * a bug in one of the get_hw_state functions. Or someplace else |
13610 | * in our code, like the register restore mess on resume. Clamp | 13653 | * in our code, like the register restore mess on resume. Clamp |
13611 | * things to off as a safer default. */ | 13654 | * things to off as a safer default. */ |
13612 | list_for_each_entry(connector, | 13655 | for_each_intel_connector(dev, connector) { |
13613 | &dev->mode_config.connector_list, | ||
13614 | base.head) { | ||
13615 | if (connector->encoder != encoder) | 13656 | if (connector->encoder != encoder) |
13616 | continue; | 13657 | continue; |
13617 | connector->base.dpms = DRM_MODE_DPMS_OFF; | 13658 | connector->base.dpms = DRM_MODE_DPMS_OFF; |
@@ -13726,8 +13767,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) | |||
13726 | pipe_name(pipe)); | 13767 | pipe_name(pipe)); |
13727 | } | 13768 | } |
13728 | 13769 | ||
13729 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 13770 | for_each_intel_connector(dev, connector) { |
13730 | base.head) { | ||
13731 | if (connector->get_hw_state(connector)) { | 13771 | if (connector->get_hw_state(connector)) { |
13732 | connector->base.dpms = DRM_MODE_DPMS_ON; | 13772 | connector->base.dpms = DRM_MODE_DPMS_ON; |
13733 | connector->encoder->connectors_active = true; | 13773 | connector->encoder->connectors_active = true; |
@@ -13907,8 +13947,6 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
13907 | 13947 | ||
13908 | intel_fbc_disable(dev); | 13948 | intel_fbc_disable(dev); |
13909 | 13949 | ||
13910 | ironlake_teardown_rc6(dev); | ||
13911 | |||
13912 | mutex_unlock(&dev->struct_mutex); | 13950 | mutex_unlock(&dev->struct_mutex); |
13913 | 13951 | ||
13914 | /* flush any delayed tasks or pending work */ | 13952 | /* flush any delayed tasks or pending work */ |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d1141d37e205..ca60060710d2 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -84,6 +84,11 @@ static const struct dp_link_dpll chv_dpll[] = { | |||
84 | { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */ | 84 | { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */ |
85 | { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } } | 85 | { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } } |
86 | }; | 86 | }; |
87 | /* Skylake supports following rates */ | ||
88 | static const uint32_t gen9_rates[] = { 162000, 216000, 270000, 324000, | ||
89 | 432000, 540000 }; | ||
90 | |||
91 | static const uint32_t default_rates[] = { 162000, 270000, 540000 }; | ||
87 | 92 | ||
88 | /** | 93 | /** |
89 | * is_edp - is the given port attached to an eDP panel (either CPU or PCH) | 94 | * is_edp - is the given port attached to an eDP panel (either CPU or PCH) |
@@ -129,7 +134,10 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp) | |||
129 | case DP_LINK_BW_2_7: | 134 | case DP_LINK_BW_2_7: |
130 | break; | 135 | break; |
131 | case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ | 136 | case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ |
132 | if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || | 137 | if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) |
138 | /* WaDisableHBR2:skl */ | ||
139 | max_link_bw = DP_LINK_BW_2_7; | ||
140 | else if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || | ||
133 | INTEL_INFO(dev)->gen >= 8) && | 141 | INTEL_INFO(dev)->gen >= 8) && |
134 | intel_dp->dpcd[DP_DPCD_REV] >= 0x12) | 142 | intel_dp->dpcd[DP_DPCD_REV] >= 0x12) |
135 | max_link_bw = DP_LINK_BW_5_4; | 143 | max_link_bw = DP_LINK_BW_5_4; |
@@ -1075,7 +1083,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector) | |||
1075 | } | 1083 | } |
1076 | 1084 | ||
1077 | static void | 1085 | static void |
1078 | skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw) | 1086 | skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock) |
1079 | { | 1087 | { |
1080 | u32 ctrl1; | 1088 | u32 ctrl1; |
1081 | 1089 | ||
@@ -1084,19 +1092,35 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw) | |||
1084 | pipe_config->dpll_hw_state.cfgcr2 = 0; | 1092 | pipe_config->dpll_hw_state.cfgcr2 = 0; |
1085 | 1093 | ||
1086 | ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0); | 1094 | ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0); |
1087 | switch (link_bw) { | 1095 | switch (link_clock / 2) { |
1088 | case DP_LINK_BW_1_62: | 1096 | case 81000: |
1089 | ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, | 1097 | ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, |
1090 | SKL_DPLL0); | 1098 | SKL_DPLL0); |
1091 | break; | 1099 | break; |
1092 | case DP_LINK_BW_2_7: | 1100 | case 135000: |
1093 | ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, | 1101 | ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, |
1094 | SKL_DPLL0); | 1102 | SKL_DPLL0); |
1095 | break; | 1103 | break; |
1096 | case DP_LINK_BW_5_4: | 1104 | case 270000: |
1097 | ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, | 1105 | ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, |
1098 | SKL_DPLL0); | 1106 | SKL_DPLL0); |
1099 | break; | 1107 | break; |
1108 | case 162000: | ||
1109 | ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620, | ||
1110 | SKL_DPLL0); | ||
1111 | break; | ||
1112 | /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which | ||
1113 | results in CDCLK change. Need to handle the change of CDCLK by | ||
1114 | disabling pipes and re-enabling them */ | ||
1115 | case 108000: | ||
1116 | ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080, | ||
1117 | SKL_DPLL0); | ||
1118 | break; | ||
1119 | case 216000: | ||
1120 | ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160, | ||
1121 | SKL_DPLL0); | ||
1122 | break; | ||
1123 | |||
1100 | } | 1124 | } |
1101 | pipe_config->dpll_hw_state.ctrl1 = ctrl1; | 1125 | pipe_config->dpll_hw_state.ctrl1 = ctrl1; |
1102 | } | 1126 | } |
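skl_edp_set_pll_config() now receives the link clock in kHz instead of a DP_LINK_BW_* code and keys the DPLL_CTRL1 link-rate field off link_clock / 2, presumably because the field encodes the DPLL frequency, which is half the bit rate: 162000 kHz selects the 810 setting, 270000 selects 1350, 540000 selects 2700, and the intermediate rates 216000, 324000 and 432000 select 1080, 1620 and 2160. A quick check of that halving with plain integers in place of the register macros:

#include <stdio.h>

int main(void)
{
	/* link rates in kHz, including the eDP 1.4 intermediate ones */
	int rates[] = { 162000, 216000, 270000, 324000, 432000, 540000 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("link %6d kHz -> DPLL_CTRL1 rate field \"%d\"\n",
		       rates[i], rates[i] / 2 / 100);
	return 0;
}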
@@ -1117,6 +1141,52 @@ hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw) | |||
1117 | } | 1141 | } |
1118 | } | 1142 | } |
1119 | 1143 | ||
1144 | static int | ||
1145 | intel_read_sink_rates(struct intel_dp *intel_dp, uint32_t *sink_rates) | ||
1146 | { | ||
1147 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | ||
1148 | int i = 0; | ||
1149 | uint16_t val; | ||
1150 | |||
1151 | if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) { | ||
1152 | /* | ||
1153 | * Receiver supports only main-link rate selection by | ||
1154 | * link rate table method, so read link rates from | ||
1155 | * supported_link_rates | ||
1156 | */ | ||
1157 | for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i) { | ||
1158 | val = le16_to_cpu(intel_dp->supported_rates[i]); | ||
1159 | if (val == 0) | ||
1160 | break; | ||
1161 | |||
1162 | sink_rates[i] = val * 200; | ||
1163 | } | ||
1164 | |||
1165 | if (i <= 0) | ||
1166 | DRM_ERROR("No rates in SUPPORTED_LINK_RATES"); | ||
1167 | } | ||
1168 | return i; | ||
1169 | } | ||
1170 | |||
1171 | static int | ||
1172 | intel_read_source_rates(struct intel_dp *intel_dp, uint32_t *source_rates) | ||
1173 | { | ||
1174 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | ||
1175 | int i; | ||
1176 | int max_default_rate; | ||
1177 | |||
1178 | if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) { | ||
1179 | for (i = 0; i < ARRAY_SIZE(gen9_rates); ++i) | ||
1180 | source_rates[i] = gen9_rates[i]; | ||
1181 | } else { | ||
1182 | /* Index of the max_link_bw supported + 1 */ | ||
1183 | max_default_rate = (intel_dp_max_link_bw(intel_dp) >> 3) + 1; | ||
1184 | for (i = 0; i < max_default_rate; ++i) | ||
1185 | source_rates[i] = default_rates[i]; | ||
1186 | } | ||
1187 | return i; | ||
1188 | } | ||
1189 | |||
1120 | static void | 1190 | static void |
1121 | intel_dp_set_clock(struct intel_encoder *encoder, | 1191 | intel_dp_set_clock(struct intel_encoder *encoder, |
1122 | struct intel_crtc_state *pipe_config, int link_bw) | 1192 | struct intel_crtc_state *pipe_config, int link_bw) |
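On gen9 with an eDP 1.4 sink, intel_read_sink_rates() walks the DPCD SUPPORTED_LINK_RATES table: each entry is a little-endian 16-bit value in units of 200 kHz and a zero entry terminates the list, so multiplying by 200 yields the rate in kHz. A worked example of that decoding with a hand-rolled le16 read; the raw bytes are made up to show the conversion, not captured from a panel:

#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* example table contents: 162000, 270000, 540000 kHz, then a 0 terminator */
	uint8_t raw[16] = { 0x2a, 0x03, 0x46, 0x05, 0x8c, 0x0a, 0x00, 0x00 };
	uint32_t sink_rates[8];
	int n = 0, i;

	for (i = 0; i < 8; i++) {
		uint16_t val = get_le16(&raw[i * 2]);

		if (!val)
			break;				/* zero entry terminates the table */
		sink_rates[n++] = (uint32_t)val * 200;	/* entries are in 200 kHz units */
	}

	for (i = 0; i < n; i++)
		printf("sink rate %d: %u kHz\n", i, sink_rates[i]);
	return 0;
}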
@@ -1150,6 +1220,45 @@ intel_dp_set_clock(struct intel_encoder *encoder, | |||
1150 | } | 1220 | } |
1151 | } | 1221 | } |
1152 | 1222 | ||
1223 | static int intel_supported_rates(const uint32_t *source_rates, int source_len, | ||
1224 | const uint32_t *sink_rates, int sink_len, uint32_t *supported_rates) | ||
1225 | { | ||
1226 | int i = 0, j = 0, k = 0; | ||
1227 | |||
1228 | /* For panels with edp version less than 1.4 */ | ||
1229 | if (sink_len == 0) { | ||
1230 | for (i = 0; i < source_len; ++i) | ||
1231 | supported_rates[i] = source_rates[i]; | ||
1232 | return source_len; | ||
1233 | } | ||
1234 | |||
1235 | /* For edp1.4 panels, find the common rates between source and sink */ | ||
1236 | while (i < source_len && j < sink_len) { | ||
1237 | if (source_rates[i] == sink_rates[j]) { | ||
1238 | supported_rates[k] = source_rates[i]; | ||
1239 | ++k; | ||
1240 | ++i; | ||
1241 | ++j; | ||
1242 | } else if (source_rates[i] < sink_rates[j]) { | ||
1243 | ++i; | ||
1244 | } else { | ||
1245 | ++j; | ||
1246 | } | ||
1247 | } | ||
1248 | return k; | ||
1249 | } | ||
1250 | |||
1251 | static int rate_to_index(uint32_t find, const uint32_t *rates) | ||
1252 | { | ||
1253 | int i = 0; | ||
1254 | |||
1255 | for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i) | ||
1256 | if (find == rates[i]) | ||
1257 | break; | ||
1258 | |||
1259 | return i; | ||
1260 | } | ||
1261 | |||
1153 | bool | 1262 | bool |
1154 | intel_dp_compute_config(struct intel_encoder *encoder, | 1263 | intel_dp_compute_config(struct intel_encoder *encoder, |
1155 | struct intel_crtc_state *pipe_config) | 1264 | struct intel_crtc_state *pipe_config) |
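intel_supported_rates() intersects the two ascending rate lists with a classic two-pointer walk, falling back to the source list when the sink reports nothing (pre-eDP-1.4 panels), and rate_to_index() later turns the chosen rate back into the table index written to DP_LINK_RATE_SET. A self-contained usage sketch with illustrative inputs:

#include <stdint.h>
#include <stdio.h>

/* Two-pointer intersection of two ascending rate lists, as in intel_supported_rates(). */
static int common_rates(const uint32_t *src, int src_len,
			const uint32_t *snk, int snk_len, uint32_t *out)
{
	int i = 0, j = 0, k = 0;

	if (snk_len == 0) {		/* pre-eDP-1.4 sink: use the source rates as-is */
		for (i = 0; i < src_len; i++)
			out[i] = src[i];
		return src_len;
	}

	while (i < src_len && j < snk_len) {
		if (src[i] == snk[j]) {
			out[k++] = src[i];
			i++;
			j++;
		} else if (src[i] < snk[j]) {
			i++;
		} else {
			j++;
		}
	}
	return k;
}

int main(void)
{
	/* illustrative inputs: gen9 source rates vs. a sink that skips the intermediate ones */
	uint32_t source[] = { 162000, 216000, 270000, 324000, 432000, 540000 };
	uint32_t sink[]   = { 162000, 270000, 540000 };
	uint32_t common[8];
	int n = common_rates(source, 6, sink, 3, common);
	int i;

	for (i = 0; i < n; i++)
		printf("common rate %d: %u kHz\n", i, common[i]);	/* 162000, 270000, 540000 */
	return 0;
}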
@@ -1166,10 +1275,25 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
1166 | int max_lane_count = intel_dp_max_lane_count(intel_dp); | 1275 | int max_lane_count = intel_dp_max_lane_count(intel_dp); |
1167 | /* Conveniently, the link BW constants become indices with a shift...*/ | 1276 | /* Conveniently, the link BW constants become indices with a shift...*/ |
1168 | int min_clock = 0; | 1277 | int min_clock = 0; |
1169 | int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; | 1278 | int max_clock; |
1170 | int bpp, mode_rate; | 1279 | int bpp, mode_rate; |
1171 | static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; | ||
1172 | int link_avail, link_clock; | 1280 | int link_avail, link_clock; |
1281 | uint32_t sink_rates[8]; | ||
1282 | uint32_t supported_rates[8] = {0}; | ||
1283 | uint32_t source_rates[8]; | ||
1284 | int source_len, sink_len, supported_len; | ||
1285 | |||
1286 | sink_len = intel_read_sink_rates(intel_dp, sink_rates); | ||
1287 | |||
1288 | source_len = intel_read_source_rates(intel_dp, source_rates); | ||
1289 | |||
1290 | supported_len = intel_supported_rates(source_rates, source_len, | ||
1291 | sink_rates, sink_len, supported_rates); | ||
1292 | |||
1293 | /* No common link rates between source and sink */ | ||
1294 | WARN_ON(supported_len <= 0); | ||
1295 | |||
1296 | max_clock = supported_len - 1; | ||
1173 | 1297 | ||
1174 | if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) | 1298 | if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) |
1175 | pipe_config->has_pch_encoder = true; | 1299 | pipe_config->has_pch_encoder = true; |
@@ -1193,8 +1317,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
1193 | return false; | 1317 | return false; |
1194 | 1318 | ||
1195 | DRM_DEBUG_KMS("DP link computation with max lane count %i " | 1319 | DRM_DEBUG_KMS("DP link computation with max lane count %i " |
1196 | "max bw %02x pixel clock %iKHz\n", | 1320 | "max bw %d pixel clock %iKHz\n", |
1197 | max_lane_count, bws[max_clock], | 1321 | max_lane_count, supported_rates[max_clock], |
1198 | adjusted_mode->crtc_clock); | 1322 | adjusted_mode->crtc_clock); |
1199 | 1323 | ||
1200 | /* Walk through all bpp values. Luckily they're all nicely spaced with 2 | 1324 | /* Walk through all bpp values. Luckily they're all nicely spaced with 2 |
@@ -1223,8 +1347,11 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
1223 | bpp); | 1347 | bpp); |
1224 | 1348 | ||
1225 | for (clock = min_clock; clock <= max_clock; clock++) { | 1349 | for (clock = min_clock; clock <= max_clock; clock++) { |
1226 | for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) { | 1350 | for (lane_count = min_lane_count; |
1227 | link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); | 1351 | lane_count <= max_lane_count; |
1352 | lane_count <<= 1) { | ||
1353 | |||
1354 | link_clock = supported_rates[clock]; | ||
1228 | link_avail = intel_dp_max_data_rate(link_clock, | 1355 | link_avail = intel_dp_max_data_rate(link_clock, |
1229 | lane_count); | 1356 | lane_count); |
1230 | 1357 | ||
@@ -1253,10 +1380,19 @@ found: | |||
1253 | if (intel_dp->color_range) | 1380 | if (intel_dp->color_range) |
1254 | pipe_config->limited_color_range = true; | 1381 | pipe_config->limited_color_range = true; |
1255 | 1382 | ||
1256 | intel_dp->link_bw = bws[clock]; | ||
1257 | intel_dp->lane_count = lane_count; | 1383 | intel_dp->lane_count = lane_count; |
1384 | |||
1385 | intel_dp->link_bw = | ||
1386 | drm_dp_link_rate_to_bw_code(supported_rates[clock]); | ||
1387 | |||
1388 | if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) { | ||
1389 | intel_dp->rate_select = | ||
1390 | rate_to_index(supported_rates[clock], sink_rates); | ||
1391 | intel_dp->link_bw = 0; | ||
1392 | } | ||
1393 | |||
1258 | pipe_config->pipe_bpp = bpp; | 1394 | pipe_config->pipe_bpp = bpp; |
1259 | pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); | 1395 | pipe_config->port_clock = supported_rates[clock]; |
1260 | 1396 | ||
1261 | DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", | 1397 | DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", |
1262 | intel_dp->link_bw, intel_dp->lane_count, | 1398 | intel_dp->link_bw, intel_dp->lane_count, |
@@ -1279,7 +1415,7 @@ found: | |||
1279 | } | 1415 | } |
1280 | 1416 | ||
1281 | if (IS_SKYLAKE(dev) && is_edp(intel_dp)) | 1417 | if (IS_SKYLAKE(dev) && is_edp(intel_dp)) |
1282 | skl_edp_set_pll_config(pipe_config, intel_dp->link_bw); | 1418 | skl_edp_set_pll_config(pipe_config, supported_rates[clock]); |
1283 | else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | 1419 | else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
1284 | hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw); | 1420 | hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw); |
1285 | else | 1421 | else |
@@ -3366,6 +3502,9 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
3366 | if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) | 3502 | if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) |
3367 | link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | 3503 | link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; |
3368 | drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); | 3504 | drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); |
3505 | if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) | ||
3506 | drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET, | ||
3507 | &intel_dp->rate_select, 1); | ||
3369 | 3508 | ||
3370 | link_config[0] = 0; | 3509 | link_config[0] = 0; |
3371 | link_config[1] = DP_SET_ANSI_8B10B; | 3510 | link_config[1] = DP_SET_ANSI_8B10B; |
@@ -3578,6 +3717,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
3578 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | 3717 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
3579 | struct drm_device *dev = dig_port->base.base.dev; | 3718 | struct drm_device *dev = dig_port->base.base.dev; |
3580 | struct drm_i915_private *dev_priv = dev->dev_private; | 3719 | struct drm_i915_private *dev_priv = dev->dev_private; |
3720 | uint8_t rev; | ||
3581 | 3721 | ||
3582 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd, | 3722 | if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd, |
3583 | sizeof(intel_dp->dpcd)) < 0) | 3723 | sizeof(intel_dp->dpcd)) < 0) |
@@ -3609,6 +3749,16 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
3609 | } else | 3749 | } else |
3610 | intel_dp->use_tps3 = false; | 3750 | intel_dp->use_tps3 = false; |
3611 | 3751 | ||
3752 | /* Intermediate frequency support */ | ||
3753 | if (is_edp(intel_dp) && | ||
3754 | (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && | ||
3755 | (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) && | ||
3756 | (rev >= 0x03)) { /* eDP v1.4 or higher */ ||
3757 | intel_dp_dpcd_read_wake(&intel_dp->aux, | ||
3758 | DP_SUPPORTED_LINK_RATES, | ||
3759 | intel_dp->supported_rates, | ||
3760 | sizeof(intel_dp->supported_rates)); | ||
3761 | } | ||
3612 | if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & | 3762 | if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & |
3613 | DP_DWN_STRM_PORT_PRESENT)) | 3763 | DP_DWN_STRM_PORT_PRESENT)) |
3614 | return true; /* native DP sink */ | 3764 | return true; /* native DP sink */ |
@@ -4966,12 +5116,13 @@ void intel_edp_drrs_invalidate(struct drm_device *dev, | |||
4966 | if (!dev_priv->drrs.dp) | 5116 | if (!dev_priv->drrs.dp) |
4967 | return; | 5117 | return; |
4968 | 5118 | ||
5119 | cancel_delayed_work_sync(&dev_priv->drrs.work); | ||
5120 | |||
4969 | mutex_lock(&dev_priv->drrs.mutex); | 5121 | mutex_lock(&dev_priv->drrs.mutex); |
4970 | crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; | 5122 | crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; |
4971 | pipe = to_intel_crtc(crtc)->pipe; | 5123 | pipe = to_intel_crtc(crtc)->pipe; |
4972 | 5124 | ||
4973 | if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) { | 5125 | if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) { |
4974 | cancel_delayed_work_sync(&dev_priv->drrs.work); | ||
4975 | intel_dp_set_drrs_state(dev_priv->dev, | 5126 | intel_dp_set_drrs_state(dev_priv->dev, |
4976 | dev_priv->drrs.dp->attached_connector->panel. | 5127 | dev_priv->drrs.dp->attached_connector->panel. |
4977 | fixed_mode->vrefresh); | 5128 | fixed_mode->vrefresh); |
@@ -5004,13 +5155,13 @@ void intel_edp_drrs_flush(struct drm_device *dev, | |||
5004 | if (!dev_priv->drrs.dp) | 5155 | if (!dev_priv->drrs.dp) |
5005 | return; | 5156 | return; |
5006 | 5157 | ||
5158 | cancel_delayed_work_sync(&dev_priv->drrs.work); | ||
5159 | |||
5007 | mutex_lock(&dev_priv->drrs.mutex); | 5160 | mutex_lock(&dev_priv->drrs.mutex); |
5008 | crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; | 5161 | crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; |
5009 | pipe = to_intel_crtc(crtc)->pipe; | 5162 | pipe = to_intel_crtc(crtc)->pipe; |
5010 | dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; | 5163 | dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; |
5011 | 5164 | ||
5012 | cancel_delayed_work_sync(&dev_priv->drrs.work); | ||
5013 | |||
5014 | if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR && | 5165 | if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR && |
5015 | !dev_priv->drrs.busy_frontbuffer_bits) | 5166 | !dev_priv->drrs.busy_frontbuffer_bits) |
5016 | schedule_delayed_work(&dev_priv->drrs.work, | 5167 | schedule_delayed_work(&dev_priv->drrs.work, |
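Editor's note on the eDP 1.4 changes above: link-rate selection moves from the legacy DP_LINK_BW_SET bandwidth codes to the sink's DP_SUPPORTED_LINK_RATES table. The source reads up to 8 little-endian 16-bit entries from the sink, picks one, and programs that entry's index into DP_LINK_RATE_SET while leaving the bandwidth code at 0. The standalone C sketch below is a hedged illustration (not the driver code) of how such a table is commonly interpreted, assuming each entry is the per-lane link rate in 200 kHz units; the sink_rates values are hypothetical.

/*
 * Hedged sketch: interpreting an eDP 1.4 DP_SUPPORTED_LINK_RATES table.
 * The table values and the rate_to_index() helper here are hypothetical;
 * a little-endian host is assumed for this standalone example.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_SUPPORTED_RATES 8

/* Each table entry is assumed to be the per-lane link rate in 200 kHz units. */
static uint32_t rate_khz(uint16_t raw)
{
	return (uint32_t)raw * 200;
}

/* Return the table index for the wanted rate (this index is what would be
 * written to DP_LINK_RATE_SET), or -1 if the sink does not advertise it. */
static int rate_to_index(uint32_t wanted_khz,
			 const uint16_t table[MAX_SUPPORTED_RATES])
{
	int i;

	for (i = 0; i < MAX_SUPPORTED_RATES && table[i]; i++)
		if (rate_khz(table[i]) == wanted_khz)
			return i;
	return -1;
}

int main(void)
{
	/* Hypothetical sink table: 1.62, 2.16, 2.43 and 2.7 GHz per lane. */
	const uint16_t sink_rates[MAX_SUPPORTED_RATES] = {
		8100, 10800, 12150, 13500, 0, 0, 0, 0
	};
	int idx = rate_to_index(2160000, sink_rates);

	printf("2.16 GHz per lane -> rate_select index %d\n", idx);
	return 0;
}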
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 9f67a379a9a5..be124928ca14 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c | |||
@@ -58,7 +58,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, | |||
58 | pipe_config->pipe_bpp = 24; | 58 | pipe_config->pipe_bpp = 24; |
59 | pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); | 59 | pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); |
60 | 60 | ||
61 | list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) { | 61 | for_each_intel_connector(dev, intel_connector) { |
62 | if (intel_connector->new_encoder == encoder) { | 62 | if (intel_connector->new_encoder == encoder) { |
63 | found = intel_connector; | 63 | found = intel_connector; |
64 | break; | 64 | break; |
@@ -140,7 +140,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder) | |||
140 | struct drm_crtc *crtc = encoder->base.crtc; | 140 | struct drm_crtc *crtc = encoder->base.crtc; |
141 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 141 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
142 | 142 | ||
143 | list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) { | 143 | for_each_intel_connector(dev, intel_connector) { |
144 | if (intel_connector->new_encoder == encoder) { | 144 | if (intel_connector->new_encoder == encoder) { |
145 | found = intel_connector; | 145 | found = intel_connector; |
146 | break; | 146 | break; |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f4aa849b243e..c77128c67cf8 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -464,7 +464,6 @@ struct intel_crtc { | |||
464 | 464 | ||
465 | struct drm_i915_gem_object *cursor_bo; | 465 | struct drm_i915_gem_object *cursor_bo; |
466 | uint32_t cursor_addr; | 466 | uint32_t cursor_addr; |
467 | int16_t cursor_width, cursor_height; | ||
468 | uint32_t cursor_cntl; | 467 | uint32_t cursor_cntl; |
469 | uint32_t cursor_size; | 468 | uint32_t cursor_size; |
470 | uint32_t cursor_base; | 469 | uint32_t cursor_base; |
@@ -623,10 +622,12 @@ struct intel_dp { | |||
623 | uint32_t color_range; | 622 | uint32_t color_range; |
624 | bool color_range_auto; | 623 | bool color_range_auto; |
625 | uint8_t link_bw; | 624 | uint8_t link_bw; |
625 | uint8_t rate_select; | ||
626 | uint8_t lane_count; | 626 | uint8_t lane_count; |
627 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; | 627 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; |
628 | uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; | 628 | uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; |
629 | uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; | 629 | uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; |
630 | __le16 supported_rates[DP_MAX_SUPPORTED_RATES]; | ||
630 | struct drm_dp_aux aux; | 631 | struct drm_dp_aux aux; |
631 | uint8_t train_set[4]; | 632 | uint8_t train_set[4]; |
632 | int panel_power_up_delay; | 633 | int panel_power_up_delay; |
@@ -839,7 +840,8 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) | |||
839 | } | 840 | } |
840 | 841 | ||
841 | int intel_get_crtc_scanline(struct intel_crtc *crtc); | 842 | int intel_get_crtc_scanline(struct intel_crtc *crtc); |
842 | void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv); | 843 | void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, |
844 | unsigned int pipe_mask); | ||
843 | 845 | ||
844 | /* intel_crt.c */ | 846 | /* intel_crt.c */ |
845 | void intel_crt_init(struct drm_device *dev); | 847 | void intel_crt_init(struct drm_device *dev); |
@@ -874,7 +876,8 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state); | |||
874 | 876 | ||
875 | /* intel_frontbuffer.c */ | 877 | /* intel_frontbuffer.c */ |
876 | void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, | 878 | void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, |
877 | struct intel_engine_cs *ring); | 879 | struct intel_engine_cs *ring, |
880 | enum fb_op_origin origin); | ||
878 | void intel_frontbuffer_flip_prepare(struct drm_device *dev, | 881 | void intel_frontbuffer_flip_prepare(struct drm_device *dev, |
879 | unsigned frontbuffer_bits); | 882 | unsigned frontbuffer_bits); |
880 | void intel_frontbuffer_flip_complete(struct drm_device *dev, | 883 | void intel_frontbuffer_flip_complete(struct drm_device *dev, |
@@ -1115,7 +1118,11 @@ bool intel_fbc_enabled(struct drm_device *dev); | |||
1115 | void intel_fbc_update(struct drm_device *dev); | 1118 | void intel_fbc_update(struct drm_device *dev); |
1116 | void intel_fbc_init(struct drm_i915_private *dev_priv); | 1119 | void intel_fbc_init(struct drm_i915_private *dev_priv); |
1117 | void intel_fbc_disable(struct drm_device *dev); | 1120 | void intel_fbc_disable(struct drm_device *dev); |
1118 | void bdw_fbc_sw_flush(struct drm_device *dev, u32 value); | 1121 | void intel_fbc_invalidate(struct drm_i915_private *dev_priv, |
1122 | unsigned int frontbuffer_bits, | ||
1123 | enum fb_op_origin origin); | ||
1124 | void intel_fbc_flush(struct drm_i915_private *dev_priv, | ||
1125 | unsigned int frontbuffer_bits); | ||
1119 | 1126 | ||
1120 | /* intel_hdmi.c */ | 1127 | /* intel_hdmi.c */ |
1121 | void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port); | 1128 | void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port); |
@@ -1231,7 +1238,6 @@ void intel_enable_gt_powersave(struct drm_device *dev); | |||
1231 | void intel_disable_gt_powersave(struct drm_device *dev); | 1238 | void intel_disable_gt_powersave(struct drm_device *dev); |
1232 | void intel_suspend_gt_powersave(struct drm_device *dev); | 1239 | void intel_suspend_gt_powersave(struct drm_device *dev); |
1233 | void intel_reset_gt_powersave(struct drm_device *dev); | 1240 | void intel_reset_gt_powersave(struct drm_device *dev); |
1234 | void ironlake_teardown_rc6(struct drm_device *dev); | ||
1235 | void gen6_update_ring_freq(struct drm_device *dev); | 1241 | void gen6_update_ring_freq(struct drm_device *dev); |
1236 | void gen6_rps_idle(struct drm_i915_private *dev_priv); | 1242 | void gen6_rps_idle(struct drm_i915_private *dev_priv); |
1237 | void gen6_rps_boost(struct drm_i915_private *dev_priv); | 1243 | void gen6_rps_boost(struct drm_i915_private *dev_priv); |
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 618f7bdab0ba..9fcf446e95f5 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c | |||
@@ -174,29 +174,10 @@ static bool g4x_fbc_enabled(struct drm_device *dev) | |||
174 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; | 174 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; |
175 | } | 175 | } |
176 | 176 | ||
177 | static void snb_fbc_blit_update(struct drm_device *dev) | 177 | static void intel_fbc_nuke(struct drm_i915_private *dev_priv) |
178 | { | 178 | { |
179 | struct drm_i915_private *dev_priv = dev->dev_private; | 179 | I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE); |
180 | u32 blt_ecoskpd; | 180 | POSTING_READ(MSG_FBC_REND_STATE); |
181 | |||
182 | /* Make sure blitter notifies FBC of writes */ | ||
183 | |||
184 | /* Blitter is part of Media powerwell on VLV. No impact of | ||
185 | * this param in other platforms for now */ ||
186 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); | ||
187 | |||
188 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); | ||
189 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << | ||
190 | GEN6_BLITTER_LOCK_SHIFT; | ||
191 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
192 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; | ||
193 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
194 | blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << | ||
195 | GEN6_BLITTER_LOCK_SHIFT); | ||
196 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
197 | POSTING_READ(GEN6_BLITTER_ECOSKPD); | ||
198 | |||
199 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); | ||
200 | } | 181 | } |
201 | 182 | ||
202 | static void ilk_fbc_enable(struct drm_crtc *crtc) | 183 | static void ilk_fbc_enable(struct drm_crtc *crtc) |
@@ -239,9 +220,10 @@ static void ilk_fbc_enable(struct drm_crtc *crtc) | |||
239 | I915_WRITE(SNB_DPFC_CTL_SA, | 220 | I915_WRITE(SNB_DPFC_CTL_SA, |
240 | SNB_CPU_FENCE_ENABLE | obj->fence_reg); | 221 | SNB_CPU_FENCE_ENABLE | obj->fence_reg); |
241 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); | 222 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); |
242 | snb_fbc_blit_update(dev); | ||
243 | } | 223 | } |
244 | 224 | ||
225 | intel_fbc_nuke(dev_priv); | ||
226 | |||
245 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); | 227 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); |
246 | } | 228 | } |
247 | 229 | ||
@@ -320,7 +302,7 @@ static void gen7_fbc_enable(struct drm_crtc *crtc) | |||
320 | SNB_CPU_FENCE_ENABLE | obj->fence_reg); | 302 | SNB_CPU_FENCE_ENABLE | obj->fence_reg); |
321 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); | 303 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); |
322 | 304 | ||
323 | snb_fbc_blit_update(dev); | 305 | intel_fbc_nuke(dev_priv); |
324 | 306 | ||
325 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); | 307 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); |
326 | } | 308 | } |
@@ -340,19 +322,6 @@ bool intel_fbc_enabled(struct drm_device *dev) | |||
340 | return dev_priv->fbc.enabled; | 322 | return dev_priv->fbc.enabled; |
341 | } | 323 | } |
342 | 324 | ||
343 | void bdw_fbc_sw_flush(struct drm_device *dev, u32 value) | ||
344 | { | ||
345 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
346 | |||
347 | if (!IS_GEN8(dev)) | ||
348 | return; | ||
349 | |||
350 | if (!intel_fbc_enabled(dev)) | ||
351 | return; | ||
352 | |||
353 | I915_WRITE(MSG_FBC_REND_STATE, value); | ||
354 | } | ||
355 | |||
356 | static void intel_fbc_work_fn(struct work_struct *__work) | 325 | static void intel_fbc_work_fn(struct work_struct *__work) |
357 | { | 326 | { |
358 | struct intel_fbc_work *work = | 327 | struct intel_fbc_work *work = |
@@ -685,6 +654,44 @@ out_disable: | |||
685 | i915_gem_stolen_cleanup_compression(dev); | 654 | i915_gem_stolen_cleanup_compression(dev); |
686 | } | 655 | } |
687 | 656 | ||
657 | void intel_fbc_invalidate(struct drm_i915_private *dev_priv, | ||
658 | unsigned int frontbuffer_bits, | ||
659 | enum fb_op_origin origin) | ||
660 | { | ||
661 | struct drm_device *dev = dev_priv->dev; | ||
662 | unsigned int fbc_bits; | ||
663 | |||
664 | if (origin == ORIGIN_GTT) | ||
665 | return; | ||
666 | |||
667 | if (dev_priv->fbc.enabled) | ||
668 | fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe); | ||
669 | else if (dev_priv->fbc.fbc_work) | ||
670 | fbc_bits = INTEL_FRONTBUFFER_PRIMARY( | ||
671 | to_intel_crtc(dev_priv->fbc.fbc_work->crtc)->pipe); | ||
672 | else | ||
673 | fbc_bits = dev_priv->fbc.possible_framebuffer_bits; | ||
674 | |||
675 | dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits); | ||
676 | |||
677 | if (dev_priv->fbc.busy_bits) | ||
678 | intel_fbc_disable(dev); | ||
679 | } | ||
680 | |||
681 | void intel_fbc_flush(struct drm_i915_private *dev_priv, | ||
682 | unsigned int frontbuffer_bits) | ||
683 | { | ||
684 | struct drm_device *dev = dev_priv->dev; | ||
685 | |||
686 | if (!dev_priv->fbc.busy_bits) | ||
687 | return; | ||
688 | |||
689 | dev_priv->fbc.busy_bits &= ~frontbuffer_bits; | ||
690 | |||
691 | if (!dev_priv->fbc.busy_bits) | ||
692 | intel_fbc_update(dev); | ||
693 | } | ||
694 | |||
688 | /** | 695 | /** |
689 | * intel_fbc_init - Initialize FBC | 696 | * intel_fbc_init - Initialize FBC |
690 | * @dev_priv: the i915 device | 697 | * @dev_priv: the i915 device |
@@ -693,12 +700,22 @@ out_disable: | |||
693 | */ | 700 | */ |
694 | void intel_fbc_init(struct drm_i915_private *dev_priv) | 701 | void intel_fbc_init(struct drm_i915_private *dev_priv) |
695 | { | 702 | { |
703 | enum pipe pipe; | ||
704 | |||
696 | if (!HAS_FBC(dev_priv)) { | 705 | if (!HAS_FBC(dev_priv)) { |
697 | dev_priv->fbc.enabled = false; | 706 | dev_priv->fbc.enabled = false; |
698 | dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED; | 707 | dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED; |
699 | return; | 708 | return; |
700 | } | 709 | } |
701 | 710 | ||
711 | for_each_pipe(dev_priv, pipe) { | ||
712 | dev_priv->fbc.possible_framebuffer_bits |= | ||
713 | INTEL_FRONTBUFFER_PRIMARY(pipe); | ||
714 | |||
715 | if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) | ||
716 | break; | ||
717 | } | ||
718 | |||
702 | if (INTEL_INFO(dev_priv)->gen >= 7) { | 719 | if (INTEL_INFO(dev_priv)->gen >= 7) { |
703 | dev_priv->display.fbc_enabled = ilk_fbc_enabled; | 720 | dev_priv->display.fbc_enabled = ilk_fbc_enabled; |
704 | dev_priv->display.enable_fbc = gen7_fbc_enable; | 721 | dev_priv->display.enable_fbc = gen7_fbc_enable; |
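Editor's note on the FBC changes above: the new intel_fbc_invalidate()/intel_fbc_flush() pair replaces the gen8 software-flush hack with frontbuffer tracking. Writes to a frontbuffer that FBC could be compressing set bits in busy_bits and disable FBC (GTT-origin invalidations are skipped by the new code), and FBC is updated again once a flush clears the last busy bit. The self-contained sketch below shows only that bookkeeping pattern; the struct and helpers are hypothetical, not the i915 ones.

/*
 * Standalone sketch of the busy-bits bookkeeping used for
 * frontbuffer-tracked FBC above.  All names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct fbc_state {
	unsigned int possible_bits;	/* frontbuffers FBC could compress */
	unsigned int busy_bits;		/* frontbuffers currently being written */
	bool enabled;
};

static void fbc_hw_enable(struct fbc_state *fbc)  { fbc->enabled = true; }
static void fbc_hw_disable(struct fbc_state *fbc) { fbc->enabled = false; }

/* Called when rendering to the given frontbuffer bits starts. */
static void fbc_invalidate(struct fbc_state *fbc, unsigned int frontbuffer_bits)
{
	fbc->busy_bits |= fbc->possible_bits & frontbuffer_bits;
	if (fbc->busy_bits)
		fbc_hw_disable(fbc);
}

/* Called when the rendering has landed and the display is coherent again. */
static void fbc_flush(struct fbc_state *fbc, unsigned int frontbuffer_bits)
{
	if (!fbc->busy_bits)
		return;
	fbc->busy_bits &= ~frontbuffer_bits;
	if (!fbc->busy_bits)
		fbc_hw_enable(fbc);
}

int main(void)
{
	struct fbc_state fbc = { .possible_bits = 0x1, .enabled = true };

	fbc_invalidate(&fbc, 0x1);	/* CPU write begins: FBC disabled */
	printf("enabled after invalidate: %d\n", fbc.enabled);
	fbc_flush(&fbc, 0x1);		/* write finished: FBC re-enabled */
	printf("enabled after flush: %d\n", fbc.enabled);
	return 0;
}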
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 234a699b8219..757c0d216f80 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -71,6 +71,31 @@ static int intel_fbdev_set_par(struct fb_info *info) | |||
71 | return ret; | 71 | return ret; |
72 | } | 72 | } |
73 | 73 | ||
74 | static int intel_fbdev_blank(int blank, struct fb_info *info) | ||
75 | { | ||
76 | struct drm_fb_helper *fb_helper = info->par; | ||
77 | struct intel_fbdev *ifbdev = | ||
78 | container_of(fb_helper, struct intel_fbdev, helper); | ||
79 | int ret; | ||
80 | |||
81 | ret = drm_fb_helper_blank(blank, info); | ||
82 | |||
83 | if (ret == 0) { | ||
84 | /* | ||
85 | * FIXME: fbdev presumes that all callbacks also work from | ||
86 | * atomic contexts and relies on that for emergency oops | ||
87 | * printing. KMS totally doesn't do that and the locking here is | ||
88 | * by far not the only place this goes wrong. Ignore this for | ||
89 | * now until we solve this for real. | ||
90 | */ | ||
91 | mutex_lock(&fb_helper->dev->struct_mutex); | ||
92 | intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT); | ||
93 | mutex_unlock(&fb_helper->dev->struct_mutex); | ||
94 | } | ||
95 | |||
96 | return ret; | ||
97 | } | ||
98 | |||
74 | static struct fb_ops intelfb_ops = { | 99 | static struct fb_ops intelfb_ops = { |
75 | .owner = THIS_MODULE, | 100 | .owner = THIS_MODULE, |
76 | .fb_check_var = drm_fb_helper_check_var, | 101 | .fb_check_var = drm_fb_helper_check_var, |
@@ -79,7 +104,7 @@ static struct fb_ops intelfb_ops = { | |||
79 | .fb_copyarea = cfb_copyarea, | 104 | .fb_copyarea = cfb_copyarea, |
80 | .fb_imageblit = cfb_imageblit, | 105 | .fb_imageblit = cfb_imageblit, |
81 | .fb_pan_display = drm_fb_helper_pan_display, | 106 | .fb_pan_display = drm_fb_helper_pan_display, |
82 | .fb_blank = drm_fb_helper_blank, | 107 | .fb_blank = intel_fbdev_blank, |
83 | .fb_setcmap = drm_fb_helper_setcmap, | 108 | .fb_setcmap = drm_fb_helper_setcmap, |
84 | .fb_debug_enter = drm_fb_helper_debug_enter, | 109 | .fb_debug_enter = drm_fb_helper_debug_enter, |
85 | .fb_debug_leave = drm_fb_helper_debug_leave, | 110 | .fb_debug_leave = drm_fb_helper_debug_leave, |
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c index 73cb6e036445..0a1bac8ac72b 100644 --- a/drivers/gpu/drm/i915/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/intel_frontbuffer.c | |||
@@ -118,8 +118,6 @@ static void intel_mark_fb_busy(struct drm_device *dev, | |||
118 | continue; | 118 | continue; |
119 | 119 | ||
120 | intel_increase_pllclock(dev, pipe); | 120 | intel_increase_pllclock(dev, pipe); |
121 | if (ring && intel_fbc_enabled(dev)) | ||
122 | ring->fbc_dirty = true; | ||
123 | } | 121 | } |
124 | } | 122 | } |
125 | 123 | ||
@@ -127,6 +125,7 @@ static void intel_mark_fb_busy(struct drm_device *dev, | |||
127 | * intel_fb_obj_invalidate - invalidate frontbuffer object | 125 | * intel_fb_obj_invalidate - invalidate frontbuffer object |
128 | * @obj: GEM object to invalidate | 126 | * @obj: GEM object to invalidate |
129 | * @ring: set for asynchronous rendering | 127 | * @ring: set for asynchronous rendering |
128 | * @origin: which operation caused the invalidation | ||
130 | * | 129 | * |
131 | * This function gets called every time rendering on the given object starts and | 130 | * This function gets called every time rendering on the given object starts and |
132 | * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must | 131 | * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must |
@@ -135,7 +134,8 @@ static void intel_mark_fb_busy(struct drm_device *dev, | |||
135 | * scheduled. | 134 | * scheduled. |
136 | */ | 135 | */ |
137 | void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, | 136 | void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, |
138 | struct intel_engine_cs *ring) | 137 | struct intel_engine_cs *ring, |
138 | enum fb_op_origin origin) | ||
139 | { | 139 | { |
140 | struct drm_device *dev = obj->base.dev; | 140 | struct drm_device *dev = obj->base.dev; |
141 | struct drm_i915_private *dev_priv = dev->dev_private; | 141 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -158,6 +158,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, | |||
158 | 158 | ||
159 | intel_psr_invalidate(dev, obj->frontbuffer_bits); | 159 | intel_psr_invalidate(dev, obj->frontbuffer_bits); |
160 | intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits); | 160 | intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits); |
161 | intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin); | ||
161 | } | 162 | } |
162 | 163 | ||
163 | /** | 164 | /** |
@@ -185,16 +186,7 @@ void intel_frontbuffer_flush(struct drm_device *dev, | |||
185 | 186 | ||
186 | intel_edp_drrs_flush(dev, frontbuffer_bits); | 187 | intel_edp_drrs_flush(dev, frontbuffer_bits); |
187 | intel_psr_flush(dev, frontbuffer_bits); | 188 | intel_psr_flush(dev, frontbuffer_bits); |
188 | 189 | intel_fbc_flush(dev_priv, frontbuffer_bits); | |
189 | /* | ||
190 | * FIXME: Unconditional fbc flushing here is a rather gross hack and | ||
191 | * needs to be reworked into a proper frontbuffer tracking scheme like | ||
192 | * psr employs. | ||
193 | */ | ||
194 | if (dev_priv->fbc.need_sw_cache_clean) { | ||
195 | dev_priv->fbc.need_sw_cache_clean = false; | ||
196 | bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN); | ||
197 | } | ||
198 | } | 190 | } |
199 | 191 | ||
200 | /** | 192 | /** |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 542cf6844dc3..288c9d24098e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -263,6 +263,47 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, | |||
263 | return NULL; | 263 | return NULL; |
264 | } | 264 | } |
265 | 265 | ||
266 | static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) | ||
267 | { | ||
268 | u32 val; | ||
269 | |||
270 | mutex_lock(&dev_priv->rps.hw_lock); | ||
271 | |||
272 | val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); | ||
273 | if (enable) | ||
274 | val &= ~FORCE_DDR_HIGH_FREQ; | ||
275 | else | ||
276 | val |= FORCE_DDR_HIGH_FREQ; | ||
277 | val &= ~FORCE_DDR_LOW_FREQ; | ||
278 | val |= FORCE_DDR_FREQ_REQ_ACK; | ||
279 | vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); | ||
280 | |||
281 | if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & | ||
282 | FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) | ||
283 | DRM_ERROR("timed out waiting for Punit DDR DVFS request\n"); | ||
284 | |||
285 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
286 | } | ||
287 | |||
288 | static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) | ||
289 | { | ||
290 | u32 val; | ||
291 | |||
292 | mutex_lock(&dev_priv->rps.hw_lock); | ||
293 | |||
294 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); | ||
295 | if (enable) | ||
296 | val |= DSP_MAXFIFO_PM5_ENABLE; | ||
297 | else | ||
298 | val &= ~DSP_MAXFIFO_PM5_ENABLE; | ||
299 | vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); | ||
300 | |||
301 | mutex_unlock(&dev_priv->rps.hw_lock); | ||
302 | } | ||
303 | |||
304 | #define FW_WM(value, plane) \ | ||
305 | (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK) | ||
306 | |||
266 | void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) | 307 | void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) |
267 | { | 308 | { |
268 | struct drm_device *dev = dev_priv->dev; | 309 | struct drm_device *dev = dev_priv->dev; |
@@ -270,6 +311,8 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) | |||
270 | 311 | ||
271 | if (IS_VALLEYVIEW(dev)) { | 312 | if (IS_VALLEYVIEW(dev)) { |
272 | I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); | 313 | I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); |
314 | if (IS_CHERRYVIEW(dev)) | ||
315 | chv_set_memory_pm5(dev_priv, enable); | ||
273 | } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) { | 316 | } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) { |
274 | I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); | 317 | I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); |
275 | } else if (IS_PINEVIEW(dev)) { | 318 | } else if (IS_PINEVIEW(dev)) { |
@@ -292,6 +335,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) | |||
292 | enable ? "enabled" : "disabled"); | 335 | enable ? "enabled" : "disabled"); |
293 | } | 336 | } |
294 | 337 | ||
338 | |||
295 | /* | 339 | /* |
296 | * Latency for FIFO fetches is dependent on several factors: | 340 | * Latency for FIFO fetches is dependent on several factors: |
297 | * - memory configuration (speed, channels) | 341 | * - memory configuration (speed, channels) |
@@ -308,6 +352,61 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) | |||
308 | */ | 352 | */ |
309 | static const int pessimal_latency_ns = 5000; | 353 | static const int pessimal_latency_ns = 5000; |
310 | 354 | ||
355 | #define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \ | ||
356 | ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8)) | ||
357 | |||
358 | static int vlv_get_fifo_size(struct drm_device *dev, | ||
359 | enum pipe pipe, int plane) | ||
360 | { | ||
361 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
362 | int sprite0_start, sprite1_start, size; | ||
363 | |||
364 | switch (pipe) { | ||
365 | uint32_t dsparb, dsparb2, dsparb3; | ||
366 | case PIPE_A: | ||
367 | dsparb = I915_READ(DSPARB); | ||
368 | dsparb2 = I915_READ(DSPARB2); | ||
369 | sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0); | ||
370 | sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4); | ||
371 | break; | ||
372 | case PIPE_B: | ||
373 | dsparb = I915_READ(DSPARB); | ||
374 | dsparb2 = I915_READ(DSPARB2); | ||
375 | sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8); | ||
376 | sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12); | ||
377 | break; | ||
378 | case PIPE_C: | ||
379 | dsparb2 = I915_READ(DSPARB2); | ||
380 | dsparb3 = I915_READ(DSPARB3); | ||
381 | sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16); | ||
382 | sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20); | ||
383 | break; | ||
384 | default: | ||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | switch (plane) { | ||
389 | case 0: | ||
390 | size = sprite0_start; | ||
391 | break; | ||
392 | case 1: | ||
393 | size = sprite1_start - sprite0_start; | ||
394 | break; | ||
395 | case 2: | ||
396 | size = 512 - 1 - sprite1_start; | ||
397 | break; | ||
398 | default: | ||
399 | return 0; | ||
400 | } | ||
401 | |||
402 | DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n", | ||
403 | pipe_name(pipe), plane == 0 ? "primary" : "sprite", | ||
404 | plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1), | ||
405 | size); | ||
406 | |||
407 | return size; | ||
408 | } | ||
409 | |||
311 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) | 410 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) |
312 | { | 411 | { |
313 | struct drm_i915_private *dev_priv = dev->dev_private; | 412 | struct drm_i915_private *dev_priv = dev->dev_private; |
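Editor's note on vlv_get_fifo_size() above: it recovers the per-plane FIFO split from the DSPARB/DSPARB2/DSPARB3 registers. Each split point is 9 bits wide, with the low 8 bits in one register and the 9th bit in DSPARB2, and a plane's share is the distance to the next split point out of the 511 usable entries (512 - 1). A hedged worked example, with made-up register values:

/*
 * Standalone decode of a pipe A FIFO split, mirroring the
 * VLV_FIFO_START() macro above.  The register values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

int main(void)
{
	uint32_t dsparb  = 0x00008000;	/* hypothetical: split points 256 and 384 */
	uint32_t dsparb2 = 0x00000011;	/* 9th bit of each pipe A split point */
	int sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
	int sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);

	printf("primary: %d entries\n", sprite0_start);			/* 256 */
	printf("sprite0: %d entries\n", sprite1_start - sprite0_start);	/* 128 */
	printf("sprite1: %d entries\n", 512 - 1 - sprite1_start);	/* 127 */
	return 0;
}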
@@ -553,7 +652,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) | |||
553 | crtc = single_enabled_crtc(dev); | 652 | crtc = single_enabled_crtc(dev); |
554 | if (crtc) { | 653 | if (crtc) { |
555 | const struct drm_display_mode *adjusted_mode; | 654 | const struct drm_display_mode *adjusted_mode; |
556 | int pixel_size = crtc->primary->fb->bits_per_pixel / 8; | 655 | int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; |
557 | int clock; | 656 | int clock; |
558 | 657 | ||
559 | adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; | 658 | adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; |
@@ -565,7 +664,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) | |||
565 | pixel_size, latency->display_sr); | 664 | pixel_size, latency->display_sr); |
566 | reg = I915_READ(DSPFW1); | 665 | reg = I915_READ(DSPFW1); |
567 | reg &= ~DSPFW_SR_MASK; | 666 | reg &= ~DSPFW_SR_MASK; |
568 | reg |= wm << DSPFW_SR_SHIFT; | 667 | reg |= FW_WM(wm, SR); |
569 | I915_WRITE(DSPFW1, reg); | 668 | I915_WRITE(DSPFW1, reg); |
570 | DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); | 669 | DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); |
571 | 670 | ||
@@ -575,7 +674,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) | |||
575 | pixel_size, latency->cursor_sr); | 674 | pixel_size, latency->cursor_sr); |
576 | reg = I915_READ(DSPFW3); | 675 | reg = I915_READ(DSPFW3); |
577 | reg &= ~DSPFW_CURSOR_SR_MASK; | 676 | reg &= ~DSPFW_CURSOR_SR_MASK; |
578 | reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT; | 677 | reg |= FW_WM(wm, CURSOR_SR); |
579 | I915_WRITE(DSPFW3, reg); | 678 | I915_WRITE(DSPFW3, reg); |
580 | 679 | ||
581 | /* Display HPLL off SR */ | 680 | /* Display HPLL off SR */ |
@@ -584,7 +683,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) | |||
584 | pixel_size, latency->display_hpll_disable); | 683 | pixel_size, latency->display_hpll_disable); |
585 | reg = I915_READ(DSPFW3); | 684 | reg = I915_READ(DSPFW3); |
586 | reg &= ~DSPFW_HPLL_SR_MASK; | 685 | reg &= ~DSPFW_HPLL_SR_MASK; |
587 | reg |= wm & DSPFW_HPLL_SR_MASK; | 686 | reg |= FW_WM(wm, HPLL_SR); |
588 | I915_WRITE(DSPFW3, reg); | 687 | I915_WRITE(DSPFW3, reg); |
589 | 688 | ||
590 | /* cursor HPLL off SR */ | 689 | /* cursor HPLL off SR */ |
@@ -593,7 +692,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) | |||
593 | pixel_size, latency->cursor_hpll_disable); | 692 | pixel_size, latency->cursor_hpll_disable); |
594 | reg = I915_READ(DSPFW3); | 693 | reg = I915_READ(DSPFW3); |
595 | reg &= ~DSPFW_HPLL_CURSOR_MASK; | 694 | reg &= ~DSPFW_HPLL_CURSOR_MASK; |
596 | reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT; | 695 | reg |= FW_WM(wm, HPLL_CURSOR); |
597 | I915_WRITE(DSPFW3, reg); | 696 | I915_WRITE(DSPFW3, reg); |
598 | DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); | 697 | DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); |
599 | 698 | ||
@@ -629,7 +728,7 @@ static bool g4x_compute_wm0(struct drm_device *dev, | |||
629 | clock = adjusted_mode->crtc_clock; | 728 | clock = adjusted_mode->crtc_clock; |
630 | htotal = adjusted_mode->crtc_htotal; | 729 | htotal = adjusted_mode->crtc_htotal; |
631 | hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; | 730 | hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; |
632 | pixel_size = crtc->primary->fb->bits_per_pixel / 8; | 731 | pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; |
633 | 732 | ||
634 | /* Use the small buffer method to calculate plane watermark */ | 733 | /* Use the small buffer method to calculate plane watermark */ |
635 | entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; | 734 | entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; |
@@ -644,7 +743,7 @@ static bool g4x_compute_wm0(struct drm_device *dev, | |||
644 | /* Use the large buffer method to calculate cursor watermark */ | 743 | /* Use the large buffer method to calculate cursor watermark */ |
645 | line_time_us = max(htotal * 1000 / clock, 1); | 744 | line_time_us = max(htotal * 1000 / clock, 1); |
646 | line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; | 745 | line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; |
647 | entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size; | 746 | entries = line_count * crtc->cursor->state->crtc_w * pixel_size; |
648 | tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; | 747 | tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; |
649 | if (tlb_miss > 0) | 748 | if (tlb_miss > 0) |
650 | entries += tlb_miss; | 749 | entries += tlb_miss; |
@@ -716,7 +815,7 @@ static bool g4x_compute_srwm(struct drm_device *dev, | |||
716 | clock = adjusted_mode->crtc_clock; | 815 | clock = adjusted_mode->crtc_clock; |
717 | htotal = adjusted_mode->crtc_htotal; | 816 | htotal = adjusted_mode->crtc_htotal; |
718 | hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; | 817 | hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; |
719 | pixel_size = crtc->primary->fb->bits_per_pixel / 8; | 818 | pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; |
720 | 819 | ||
721 | line_time_us = max(htotal * 1000 / clock, 1); | 820 | line_time_us = max(htotal * 1000 / clock, 1); |
722 | line_count = (latency_ns / line_time_us + 1000) / 1000; | 821 | line_count = (latency_ns / line_time_us + 1000) / 1000; |
@@ -730,7 +829,7 @@ static bool g4x_compute_srwm(struct drm_device *dev, | |||
730 | *display_wm = entries + display->guard_size; | 829 | *display_wm = entries + display->guard_size; |
731 | 830 | ||
732 | /* calculate the self-refresh watermark for display cursor */ | 831 | /* calculate the self-refresh watermark for display cursor */ |
733 | entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width; | 832 | entries = line_count * pixel_size * crtc->cursor->state->crtc_w; |
734 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | 833 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); |
735 | *cursor_wm = entries + cursor->guard_size; | 834 | *cursor_wm = entries + cursor->guard_size; |
736 | 835 | ||
@@ -739,232 +838,234 @@ static bool g4x_compute_srwm(struct drm_device *dev, | |||
739 | display, cursor); | 838 | display, cursor); |
740 | } | 839 | } |
741 | 840 | ||
742 | static bool vlv_compute_drain_latency(struct drm_crtc *crtc, | 841 | #define FW_WM_VLV(value, plane) \ |
743 | int pixel_size, | 842 | (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV) |
744 | int *prec_mult, | ||
745 | int *drain_latency) | ||
746 | { | ||
747 | struct drm_device *dev = crtc->dev; | ||
748 | int entries; | ||
749 | int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock; | ||
750 | 843 | ||
751 | if (WARN(clock == 0, "Pixel clock is zero!\n")) | 844 | static void vlv_write_wm_values(struct intel_crtc *crtc, |
752 | return false; | 845 | const struct vlv_wm_values *wm) |
846 | { | ||
847 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
848 | enum pipe pipe = crtc->pipe; | ||
753 | 849 | ||
754 | if (WARN(pixel_size == 0, "Pixel size is zero!\n")) | 850 | I915_WRITE(VLV_DDL(pipe), |
755 | return false; | 851 | (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) | |
852 | (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) | | ||
853 | (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) | | ||
854 | (wm->ddl[pipe].primary << DDL_PLANE_SHIFT)); | ||
756 | 855 | ||
757 | entries = DIV_ROUND_UP(clock, 1000) * pixel_size; | 856 | I915_WRITE(DSPFW1, |
758 | if (IS_CHERRYVIEW(dev)) | 857 | FW_WM(wm->sr.plane, SR) | |
759 | *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 : | 858 | FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) | |
760 | DRAIN_LATENCY_PRECISION_16; | 859 | FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) | |
761 | else | 860 | FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA)); |
762 | *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 : | 861 | I915_WRITE(DSPFW2, |
763 | DRAIN_LATENCY_PRECISION_32; | 862 | FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) | |
764 | *drain_latency = (64 * (*prec_mult) * 4) / entries; | 863 | FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) | |
864 | FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA)); | ||
865 | I915_WRITE(DSPFW3, | ||
866 | FW_WM(wm->sr.cursor, CURSOR_SR)); | ||
867 | |||
868 | if (IS_CHERRYVIEW(dev_priv)) { | ||
869 | I915_WRITE(DSPFW7_CHV, | ||
870 | FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) | | ||
871 | FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC)); | ||
872 | I915_WRITE(DSPFW8_CHV, | ||
873 | FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) | | ||
874 | FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE)); | ||
875 | I915_WRITE(DSPFW9_CHV, | ||
876 | FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) | | ||
877 | FW_WM(wm->pipe[PIPE_C].cursor, CURSORC)); | ||
878 | I915_WRITE(DSPHOWM, | ||
879 | FW_WM(wm->sr.plane >> 9, SR_HI) | | ||
880 | FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) | | ||
881 | FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) | | ||
882 | FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) | | ||
883 | FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) | | ||
884 | FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) | | ||
885 | FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) | | ||
886 | FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) | | ||
887 | FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) | | ||
888 | FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI)); | ||
889 | } else { | ||
890 | I915_WRITE(DSPFW7, | ||
891 | FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) | | ||
892 | FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC)); | ||
893 | I915_WRITE(DSPHOWM, | ||
894 | FW_WM(wm->sr.plane >> 9, SR_HI) | | ||
895 | FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) | | ||
896 | FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) | | ||
897 | FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) | | ||
898 | FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) | | ||
899 | FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) | | ||
900 | FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI)); | ||
901 | } | ||
765 | 902 | ||
766 | if (*drain_latency > DRAIN_LATENCY_MASK) | 903 | POSTING_READ(DSPFW1); |
767 | *drain_latency = DRAIN_LATENCY_MASK; | ||
768 | 904 | ||
769 | return true; | 905 | dev_priv->wm.vlv = *wm; |
770 | } | 906 | } |
771 | 907 | ||
772 | /* | 908 | #undef FW_WM_VLV |
773 | * Update drain latency registers of memory arbiter | ||
774 | * | ||
775 | * Valleyview SoC has a new memory arbiter and needs drain latency registers | ||
776 | * to be programmed. Each plane has a drain latency multiplier and a drain | ||
777 | * latency value. | ||
778 | */ | ||
779 | 909 | ||
780 | static void vlv_update_drain_latency(struct drm_crtc *crtc) | 910 | static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc, |
911 | struct drm_plane *plane) | ||
781 | { | 912 | { |
782 | struct drm_device *dev = crtc->dev; | 913 | struct drm_device *dev = crtc->dev; |
783 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
784 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 914 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
785 | int pixel_size; | 915 | int entries, prec_mult, drain_latency, pixel_size; |
786 | int drain_latency; | 916 | int clock = intel_crtc->config->base.adjusted_mode.crtc_clock; |
787 | enum pipe pipe = intel_crtc->pipe; | 917 | const int high_precision = IS_CHERRYVIEW(dev) ? 16 : 64; |
788 | int plane_prec, prec_mult, plane_dl; | ||
789 | const int high_precision = IS_CHERRYVIEW(dev) ? | ||
790 | DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64; | ||
791 | 918 | ||
792 | plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH | | 919 | /* |
793 | DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH | | 920 | * FIXME the plane might have an fb |
794 | (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT)); | 921 | * but be invisible (eg. due to clipping) |
922 | */ | ||
923 | if (!intel_crtc->active || !plane->state->fb) | ||
924 | return 0; | ||
795 | 925 | ||
796 | if (!intel_crtc_active(crtc)) { | 926 | if (WARN(clock == 0, "Pixel clock is zero!\n")) |
797 | I915_WRITE(VLV_DDL(pipe), plane_dl); | 927 | return 0; |
798 | return; | ||
799 | } | ||
800 | 928 | ||
801 | /* Primary plane Drain Latency */ | 929 | pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0); |
802 | pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */ | ||
803 | if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) { | ||
804 | plane_prec = (prec_mult == high_precision) ? | ||
805 | DDL_PLANE_PRECISION_HIGH : | ||
806 | DDL_PLANE_PRECISION_LOW; | ||
807 | plane_dl |= plane_prec | drain_latency; | ||
808 | } | ||
809 | 930 | ||
810 | /* Cursor Drain Latency | 931 | if (WARN(pixel_size == 0, "Pixel size is zero!\n")) |
811 | * BPP is always 4 for cursor | 932 | return 0; |
812 | */ | 933 | |
813 | pixel_size = 4; | 934 | entries = DIV_ROUND_UP(clock, 1000) * pixel_size; |
935 | |||
936 | prec_mult = high_precision; | ||
937 | drain_latency = 64 * prec_mult * 4 / entries; | ||
814 | 938 | ||
815 | /* Program cursor DL only if it is enabled */ | 939 | if (drain_latency > DRAIN_LATENCY_MASK) { |
816 | if (intel_crtc->cursor_base && | 940 | prec_mult /= 2; |
817 | vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) { | 941 | drain_latency = 64 * prec_mult * 4 / entries; |
818 | plane_prec = (prec_mult == high_precision) ? | ||
819 | DDL_CURSOR_PRECISION_HIGH : | ||
820 | DDL_CURSOR_PRECISION_LOW; | ||
821 | plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT); | ||
822 | } | 942 | } |
823 | 943 | ||
824 | I915_WRITE(VLV_DDL(pipe), plane_dl); | 944 | if (drain_latency > DRAIN_LATENCY_MASK) |
825 | } | 945 | drain_latency = DRAIN_LATENCY_MASK; |
826 | 946 | ||
827 | #define single_plane_enabled(mask) is_power_of_2(mask) | 947 | return drain_latency | (prec_mult == high_precision ? |
948 | DDL_PRECISION_HIGH : DDL_PRECISION_LOW); | ||
949 | } | ||
828 | 950 | ||
829 | static void valleyview_update_wm(struct drm_crtc *crtc) | 951 | static int vlv_compute_wm(struct intel_crtc *crtc, |
952 | struct intel_plane *plane, | ||
953 | int fifo_size) | ||
830 | { | 954 | { |
831 | struct drm_device *dev = crtc->dev; | 955 | int clock, entries, pixel_size; |
832 | static const int sr_latency_ns = 12000; | ||
833 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
834 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm; | ||
835 | int plane_sr, cursor_sr; | ||
836 | int ignore_plane_sr, ignore_cursor_sr; | ||
837 | unsigned int enabled = 0; | ||
838 | bool cxsr_enabled; | ||
839 | 956 | ||
840 | vlv_update_drain_latency(crtc); | 957 | /* |
958 | * FIXME the plane might have an fb | ||
959 | * but be invisible (eg. due to clipping) | ||
960 | */ | ||
961 | if (!crtc->active || !plane->base.state->fb) | ||
962 | return 0; | ||
841 | 963 | ||
842 | if (g4x_compute_wm0(dev, PIPE_A, | 964 | pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0); |
843 | &valleyview_wm_info, pessimal_latency_ns, | 965 | clock = crtc->config->base.adjusted_mode.crtc_clock; |
844 | &valleyview_cursor_wm_info, pessimal_latency_ns, | ||
845 | &planea_wm, &cursora_wm)) | ||
846 | enabled |= 1 << PIPE_A; | ||
847 | 966 | ||
848 | if (g4x_compute_wm0(dev, PIPE_B, | 967 | entries = DIV_ROUND_UP(clock, 1000) * pixel_size; |
849 | &valleyview_wm_info, pessimal_latency_ns, | ||
850 | &valleyview_cursor_wm_info, pessimal_latency_ns, | ||
851 | &planeb_wm, &cursorb_wm)) | ||
852 | enabled |= 1 << PIPE_B; | ||
853 | 968 | ||
854 | if (single_plane_enabled(enabled) && | 969 | /* |
855 | g4x_compute_srwm(dev, ffs(enabled) - 1, | 970 | * Set up the watermark such that we don't start issuing memory |
856 | sr_latency_ns, | 971 | * requests until we are within PND's max deadline value (256us). |
857 | &valleyview_wm_info, | 972 | * Idea being to be idle as long as possible while still taking |
858 | &valleyview_cursor_wm_info, | 973 | * advantage of PND's deadline scheduling. The limit of 8 |
859 | &plane_sr, &ignore_cursor_sr) && | 974 | * cachelines (used when the FIFO will anyway drain in less time |
860 | g4x_compute_srwm(dev, ffs(enabled) - 1, | 975 | * than 256us) should match what we would be doing if trickle |
861 | 2*sr_latency_ns, | 976 | * feed were enabled. |
862 | &valleyview_wm_info, | 977 | */ |
863 | &valleyview_cursor_wm_info, | 978 | return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8); |
864 | &ignore_plane_sr, &cursor_sr)) { | 979 | } |
865 | cxsr_enabled = true; | 980 | |
866 | } else { | 981 | static bool vlv_compute_sr_wm(struct drm_device *dev, |
867 | cxsr_enabled = false; | 982 | struct vlv_wm_values *wm) |
868 | intel_set_memory_cxsr(dev_priv, false); | 983 | { |
869 | plane_sr = cursor_sr = 0; | 984 | struct drm_i915_private *dev_priv = to_i915(dev); |
985 | struct drm_crtc *crtc; | ||
986 | enum pipe pipe = INVALID_PIPE; | ||
987 | int num_planes = 0; | ||
988 | int fifo_size = 0; | ||
989 | struct intel_plane *plane; | ||
990 | |||
991 | wm->sr.cursor = wm->sr.plane = 0; | ||
992 | |||
993 | crtc = single_enabled_crtc(dev); | ||
994 | /* maxfifo not supported on pipe C */ | ||
995 | if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) { | ||
996 | pipe = to_intel_crtc(crtc)->pipe; | ||
997 | num_planes = !!wm->pipe[pipe].primary + | ||
998 | !!wm->pipe[pipe].sprite[0] + | ||
999 | !!wm->pipe[pipe].sprite[1]; | ||
1000 | fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1; | ||
870 | } | 1001 | } |
871 | 1002 | ||
872 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, " | 1003 | if (fifo_size == 0 || num_planes > 1) |
873 | "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", | 1004 | return false; |
874 | planea_wm, cursora_wm, | ||
875 | planeb_wm, cursorb_wm, | ||
876 | plane_sr, cursor_sr); | ||
877 | 1005 | ||
878 | I915_WRITE(DSPFW1, | 1006 | wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc), |
879 | (plane_sr << DSPFW_SR_SHIFT) | | 1007 | to_intel_plane(crtc->cursor), 0x3f); |
880 | (cursorb_wm << DSPFW_CURSORB_SHIFT) | | ||
881 | (planeb_wm << DSPFW_PLANEB_SHIFT) | | ||
882 | (planea_wm << DSPFW_PLANEA_SHIFT)); | ||
883 | I915_WRITE(DSPFW2, | ||
884 | (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | | ||
885 | (cursora_wm << DSPFW_CURSORA_SHIFT)); | ||
886 | I915_WRITE(DSPFW3, | ||
887 | (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) | | ||
888 | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); | ||
889 | 1008 | ||
890 | if (cxsr_enabled) | 1009 | list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) { |
891 | intel_set_memory_cxsr(dev_priv, true); | 1010 | if (plane->base.type == DRM_PLANE_TYPE_CURSOR) |
1011 | continue; | ||
1012 | |||
1013 | if (plane->pipe != pipe) | ||
1014 | continue; | ||
1015 | |||
1016 | wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc), | ||
1017 | plane, fifo_size); | ||
1018 | if (wm->sr.plane != 0) | ||
1019 | break; | ||
1020 | } | ||
1021 | |||
1022 | return true; | ||
892 | } | 1023 | } |
893 | 1024 | ||
894 | static void cherryview_update_wm(struct drm_crtc *crtc) | 1025 | static void valleyview_update_wm(struct drm_crtc *crtc) |
895 | { | 1026 | { |
896 | struct drm_device *dev = crtc->dev; | 1027 | struct drm_device *dev = crtc->dev; |
897 | static const int sr_latency_ns = 12000; | ||
898 | struct drm_i915_private *dev_priv = dev->dev_private; | 1028 | struct drm_i915_private *dev_priv = dev->dev_private; |
899 | int planea_wm, planeb_wm, planec_wm; | 1029 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
900 | int cursora_wm, cursorb_wm, cursorc_wm; | 1030 | enum pipe pipe = intel_crtc->pipe; |
901 | int plane_sr, cursor_sr; | ||
902 | int ignore_plane_sr, ignore_cursor_sr; | ||
903 | unsigned int enabled = 0; | ||
904 | bool cxsr_enabled; | 1031 | bool cxsr_enabled; |
1032 | struct vlv_wm_values wm = dev_priv->wm.vlv; | ||
905 | 1033 | ||
906 | vlv_update_drain_latency(crtc); | 1034 | wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary); |
1035 | wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc, | ||
1036 | to_intel_plane(crtc->primary), | ||
1037 | vlv_get_fifo_size(dev, pipe, 0)); | ||
907 | 1038 | ||
908 | if (g4x_compute_wm0(dev, PIPE_A, | 1039 | wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor); |
909 | &valleyview_wm_info, pessimal_latency_ns, | 1040 | wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc, |
910 | &valleyview_cursor_wm_info, pessimal_latency_ns, | 1041 | to_intel_plane(crtc->cursor), |
911 | &planea_wm, &cursora_wm)) | 1042 | 0x3f); |
912 | enabled |= 1 << PIPE_A; | ||
913 | 1043 | ||
914 | if (g4x_compute_wm0(dev, PIPE_B, | 1044 | cxsr_enabled = vlv_compute_sr_wm(dev, &wm); |
915 | &valleyview_wm_info, pessimal_latency_ns, | ||
916 | &valleyview_cursor_wm_info, pessimal_latency_ns, | ||
917 | &planeb_wm, &cursorb_wm)) | ||
918 | enabled |= 1 << PIPE_B; | ||
919 | 1045 | ||
920 | if (g4x_compute_wm0(dev, PIPE_C, | 1046 | if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0) |
921 | &valleyview_wm_info, pessimal_latency_ns, | 1047 | return; |
922 | &valleyview_cursor_wm_info, pessimal_latency_ns, | ||
923 | &planec_wm, &cursorc_wm)) | ||
924 | enabled |= 1 << PIPE_C; | ||
925 | 1048 | ||
926 | if (single_plane_enabled(enabled) && | 1049 | DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, " |
927 | g4x_compute_srwm(dev, ffs(enabled) - 1, | 1050 | "SR: plane=%d, cursor=%d\n", pipe_name(pipe), |
928 | sr_latency_ns, | 1051 | wm.pipe[pipe].primary, wm.pipe[pipe].cursor, |
929 | &valleyview_wm_info, | 1052 | wm.sr.plane, wm.sr.cursor); |
930 | &valleyview_cursor_wm_info, | ||
931 | &plane_sr, &ignore_cursor_sr) && | ||
932 | g4x_compute_srwm(dev, ffs(enabled) - 1, | ||
933 | 2*sr_latency_ns, | ||
934 | &valleyview_wm_info, | ||
935 | &valleyview_cursor_wm_info, | ||
936 | &ignore_plane_sr, &cursor_sr)) { | ||
937 | cxsr_enabled = true; | ||
938 | } else { | ||
939 | cxsr_enabled = false; | ||
940 | intel_set_memory_cxsr(dev_priv, false); | ||
941 | plane_sr = cursor_sr = 0; | ||
942 | } | ||
943 | 1053 | ||
944 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, " | 1054 | /* |
945 | "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, " | 1055 | * FIXME DDR DVFS introduces massive memory latencies which |
946 | "SR: plane=%d, cursor=%d\n", | 1056 | * are not known to system agent so any deadline specified |
947 | planea_wm, cursora_wm, | 1057 | * by the display may not be respected. To support DDR DVFS |
948 | planeb_wm, cursorb_wm, | 1058 | * the watermark code needs to be rewritten to essentially |
949 | planec_wm, cursorc_wm, | 1059 | * bypass deadline mechanism and rely solely on the |
950 | plane_sr, cursor_sr); | 1060 | * watermarks. For now disable DDR DVFS. |
1061 | */ | ||
1062 | if (IS_CHERRYVIEW(dev_priv)) | ||
1063 | chv_set_memory_dvfs(dev_priv, false); | ||
951 | 1064 | ||
952 | I915_WRITE(DSPFW1, | 1065 | if (!cxsr_enabled) |
953 | (plane_sr << DSPFW_SR_SHIFT) | | 1066 | intel_set_memory_cxsr(dev_priv, false); |
954 | (cursorb_wm << DSPFW_CURSORB_SHIFT) | | 1067 | |
955 | (planeb_wm << DSPFW_PLANEB_SHIFT) | | 1068 | vlv_write_wm_values(intel_crtc, &wm); |
956 | (planea_wm << DSPFW_PLANEA_SHIFT)); | ||
957 | I915_WRITE(DSPFW2, | ||
958 | (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | | ||
959 | (cursora_wm << DSPFW_CURSORA_SHIFT)); | ||
960 | I915_WRITE(DSPFW3, | ||
961 | (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) | | ||
962 | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); | ||
963 | I915_WRITE(DSPFW9_CHV, | ||
964 | (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK | | ||
965 | DSPFW_CURSORC_MASK)) | | ||
966 | (planec_wm << DSPFW_PLANEC_SHIFT) | | ||
967 | (cursorc_wm << DSPFW_CURSORC_SHIFT)); | ||
968 | 1069 | ||
969 | if (cxsr_enabled) | 1070 | if (cxsr_enabled) |
970 | intel_set_memory_cxsr(dev_priv, true); | 1071 | intel_set_memory_cxsr(dev_priv, true); |
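Editor's note on the rewritten VLV/CHV watermark path above: it boils down to two per-plane numbers derived from the pixel rate. The DDL drain latency is 64 * precision * 4 / entries, retried with the lower precision multiplier and finally clamped when it would overflow the register field, and the FIFO watermark keeps the plane idle until it is within PND's 256 us deadline while always leaving at least 8 cachelines. The hedged sketch below recomputes both for two made-up modes; the helper names and the DRAIN_LATENCY_MASK width are assumptions.

/*
 * Hedged re-derivation of the two per-plane numbers computed above.
 * The clock/cpp values are made up and the 7-bit mask is assumed.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DRAIN_LATENCY_MASK	0x7f	/* assumed DDL field width */

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

static void one_plane(int clock_khz, int cpp, int fifo_size)
{
	/* "entries" is roughly the bytes fetched per microsecond. */
	int entries = DIV_ROUND_UP(clock_khz, 1000) * cpp;
	int prec = 64;			/* vlv high precision per the diff; chv uses 16 */
	int dl = 64 * prec * 4 / entries;
	int wm;

	if (dl > DRAIN_LATENCY_MASK) {	/* fall back to low precision */
		prec /= 2;
		dl = 64 * prec * 4 / entries;
	}
	if (dl > DRAIN_LATENCY_MASK)
		dl = DRAIN_LATENCY_MASK;

	/*
	 * Start issuing memory requests only once the FIFO is within the
	 * 256us PND deadline, but never hand out fewer than 8 cachelines.
	 */
	wm = fifo_size - clamp_int(DIV_ROUND_UP(256 * entries, 64),
				   0, fifo_size - 8);

	printf("clock=%d kHz cpp=%d -> ddl=%d (prec %d), wm=%d\n",
	       clock_khz, cpp, dl, prec, wm);
}

int main(void)
{
	one_plane(148500, 4, 511);	/* 1080p60 XRGB: wm hits the 8-cacheline floor */
	one_plane(40000, 2, 511);	/* slower mode: low-precision ddl, larger wm */
	return 0;
}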
@@ -979,30 +1080,47 @@ static void valleyview_update_sprite_wm(struct drm_plane *plane, | |||
979 | { | 1080 | { |
980 | struct drm_device *dev = crtc->dev; | 1081 | struct drm_device *dev = crtc->dev; |
981 | struct drm_i915_private *dev_priv = dev->dev_private; | 1082 | struct drm_i915_private *dev_priv = dev->dev_private; |
982 | int pipe = to_intel_plane(plane)->pipe; | 1083 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1084 | enum pipe pipe = intel_crtc->pipe; | ||
983 | int sprite = to_intel_plane(plane)->plane; | 1085 | int sprite = to_intel_plane(plane)->plane; |
984 | int drain_latency; | 1086 | bool cxsr_enabled; |
985 | int plane_prec; | 1087 | struct vlv_wm_values wm = dev_priv->wm.vlv; |
986 | int sprite_dl; | ||
987 | int prec_mult; | ||
988 | const int high_precision = IS_CHERRYVIEW(dev) ? | ||
989 | DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64; | ||
990 | 1088 | ||
991 | sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) | | 1089 | if (enabled) { |
992 | (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite))); | 1090 | wm.ddl[pipe].sprite[sprite] = |
1091 | vlv_compute_drain_latency(crtc, plane); | ||
993 | 1092 | ||
994 | if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, | 1093 | wm.pipe[pipe].sprite[sprite] = |
995 | &drain_latency)) { | 1094 | vlv_compute_wm(intel_crtc, |
996 | plane_prec = (prec_mult == high_precision) ? | 1095 | to_intel_plane(plane), |
997 | DDL_SPRITE_PRECISION_HIGH(sprite) : | 1096 | vlv_get_fifo_size(dev, pipe, sprite+1)); |
998 | DDL_SPRITE_PRECISION_LOW(sprite); | 1097 | } else { |
999 | sprite_dl |= plane_prec | | 1098 | wm.ddl[pipe].sprite[sprite] = 0; |
1000 | (drain_latency << DDL_SPRITE_SHIFT(sprite)); | 1099 | wm.pipe[pipe].sprite[sprite] = 0; |
1001 | } | 1100 | } |
1002 | 1101 | ||
1003 | I915_WRITE(VLV_DDL(pipe), sprite_dl); | 1102 | cxsr_enabled = vlv_compute_sr_wm(dev, &wm); |
1103 | |||
1104 | if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0) | ||
1105 | return; | ||
1106 | |||
1107 | DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, " | ||
1108 | "SR: plane=%d, cursor=%d\n", pipe_name(pipe), | ||
1109 | sprite_name(pipe, sprite), | ||
1110 | wm.pipe[pipe].sprite[sprite], | ||
1111 | wm.sr.plane, wm.sr.cursor); | ||
1112 | |||
1113 | if (!cxsr_enabled) | ||
1114 | intel_set_memory_cxsr(dev_priv, false); | ||
1115 | |||
1116 | vlv_write_wm_values(intel_crtc, &wm); | ||
1117 | |||
1118 | if (cxsr_enabled) | ||
1119 | intel_set_memory_cxsr(dev_priv, true); | ||
1004 | } | 1120 | } |
1005 | 1121 | ||
1122 | #define single_plane_enabled(mask) is_power_of_2(mask) | ||
1123 | |||
1006 | static void g4x_update_wm(struct drm_crtc *crtc) | 1124 | static void g4x_update_wm(struct drm_crtc *crtc) |
1007 | { | 1125 | { |
1008 | struct drm_device *dev = crtc->dev; | 1126 | struct drm_device *dev = crtc->dev; |
@@ -1045,17 +1163,17 @@ static void g4x_update_wm(struct drm_crtc *crtc) | |||
1045 | plane_sr, cursor_sr); | 1163 | plane_sr, cursor_sr); |
1046 | 1164 | ||
1047 | I915_WRITE(DSPFW1, | 1165 | I915_WRITE(DSPFW1, |
1048 | (plane_sr << DSPFW_SR_SHIFT) | | 1166 | FW_WM(plane_sr, SR) | |
1049 | (cursorb_wm << DSPFW_CURSORB_SHIFT) | | 1167 | FW_WM(cursorb_wm, CURSORB) | |
1050 | (planeb_wm << DSPFW_PLANEB_SHIFT) | | 1168 | FW_WM(planeb_wm, PLANEB) | |
1051 | (planea_wm << DSPFW_PLANEA_SHIFT)); | 1169 | FW_WM(planea_wm, PLANEA)); |
1052 | I915_WRITE(DSPFW2, | 1170 | I915_WRITE(DSPFW2, |
1053 | (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | | 1171 | (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | |
1054 | (cursora_wm << DSPFW_CURSORA_SHIFT)); | 1172 | FW_WM(cursora_wm, CURSORA)); |
1055 | /* HPLL off in SR has some issues on G4x... disable it */ | 1173 | /* HPLL off in SR has some issues on G4x... disable it */ |
1056 | I915_WRITE(DSPFW3, | 1174 | I915_WRITE(DSPFW3, |
1057 | (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) | | 1175 | (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) | |
1058 | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); | 1176 | FW_WM(cursor_sr, CURSOR_SR)); |
1059 | 1177 | ||
1060 | if (cxsr_enabled) | 1178 | if (cxsr_enabled) |
1061 | intel_set_memory_cxsr(dev_priv, true); | 1179 | intel_set_memory_cxsr(dev_priv, true); |
@@ -1080,7 +1198,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) | |||
1080 | int clock = adjusted_mode->crtc_clock; | 1198 | int clock = adjusted_mode->crtc_clock; |
1081 | int htotal = adjusted_mode->crtc_htotal; | 1199 | int htotal = adjusted_mode->crtc_htotal; |
1082 | int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; | 1200 | int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; |
1083 | int pixel_size = crtc->primary->fb->bits_per_pixel / 8; | 1201 | int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; |
1084 | unsigned long line_time_us; | 1202 | unsigned long line_time_us; |
1085 | int entries; | 1203 | int entries; |
1086 | 1204 | ||
@@ -1098,7 +1216,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) | |||
1098 | entries, srwm); | 1216 | entries, srwm); |
1099 | 1217 | ||
1100 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * | 1218 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
1101 | pixel_size * to_intel_crtc(crtc)->cursor_width; | 1219 | pixel_size * crtc->cursor->state->crtc_w; |
1102 | entries = DIV_ROUND_UP(entries, | 1220 | entries = DIV_ROUND_UP(entries, |
1103 | i965_cursor_wm_info.cacheline_size); | 1221 | i965_cursor_wm_info.cacheline_size); |
1104 | cursor_sr = i965_cursor_wm_info.fifo_size - | 1222 | cursor_sr = i965_cursor_wm_info.fifo_size - |
@@ -1121,19 +1239,21 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) | |||
1121 | srwm); | 1239 | srwm); |
1122 | 1240 | ||
1123 | /* 965 has limitations... */ | 1241 | /* 965 has limitations... */ |
1124 | I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | | 1242 | I915_WRITE(DSPFW1, FW_WM(srwm, SR) | |
1125 | (8 << DSPFW_CURSORB_SHIFT) | | 1243 | FW_WM(8, CURSORB) | |
1126 | (8 << DSPFW_PLANEB_SHIFT) | | 1244 | FW_WM(8, PLANEB) | |
1127 | (8 << DSPFW_PLANEA_SHIFT)); | 1245 | FW_WM(8, PLANEA)); |
1128 | I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) | | 1246 | I915_WRITE(DSPFW2, FW_WM(8, CURSORA) | |
1129 | (8 << DSPFW_PLANEC_SHIFT_OLD)); | 1247 | FW_WM(8, PLANEC_OLD)); |
1130 | /* update cursor SR watermark */ | 1248 | /* update cursor SR watermark */ |
1131 | I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); | 1249 | I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); |
1132 | 1250 | ||
1133 | if (cxsr_enabled) | 1251 | if (cxsr_enabled) |
1134 | intel_set_memory_cxsr(dev_priv, true); | 1252 | intel_set_memory_cxsr(dev_priv, true); |
1135 | } | 1253 | } |
1136 | 1254 | ||
1255 | #undef FW_WM | ||
1256 | |||
1137 | static void i9xx_update_wm(struct drm_crtc *unused_crtc) | 1257 | static void i9xx_update_wm(struct drm_crtc *unused_crtc) |
1138 | { | 1258 | { |
1139 | struct drm_device *dev = unused_crtc->dev; | 1259 | struct drm_device *dev = unused_crtc->dev; |
@@ -1157,7 +1277,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) | |||
1157 | crtc = intel_get_crtc_for_plane(dev, 0); | 1277 | crtc = intel_get_crtc_for_plane(dev, 0); |
1158 | if (intel_crtc_active(crtc)) { | 1278 | if (intel_crtc_active(crtc)) { |
1159 | const struct drm_display_mode *adjusted_mode; | 1279 | const struct drm_display_mode *adjusted_mode; |
1160 | int cpp = crtc->primary->fb->bits_per_pixel / 8; | 1280 | int cpp = crtc->primary->state->fb->bits_per_pixel / 8; |
1161 | if (IS_GEN2(dev)) | 1281 | if (IS_GEN2(dev)) |
1162 | cpp = 4; | 1282 | cpp = 4; |
1163 | 1283 | ||
@@ -1179,7 +1299,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) | |||
1179 | crtc = intel_get_crtc_for_plane(dev, 1); | 1299 | crtc = intel_get_crtc_for_plane(dev, 1); |
1180 | if (intel_crtc_active(crtc)) { | 1300 | if (intel_crtc_active(crtc)) { |
1181 | const struct drm_display_mode *adjusted_mode; | 1301 | const struct drm_display_mode *adjusted_mode; |
1182 | int cpp = crtc->primary->fb->bits_per_pixel / 8; | 1302 | int cpp = crtc->primary->state->fb->bits_per_pixel / 8; |
1183 | if (IS_GEN2(dev)) | 1303 | if (IS_GEN2(dev)) |
1184 | cpp = 4; | 1304 | cpp = 4; |
1185 | 1305 | ||
@@ -1202,7 +1322,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) | |||
1202 | if (IS_I915GM(dev) && enabled) { | 1322 | if (IS_I915GM(dev) && enabled) { |
1203 | struct drm_i915_gem_object *obj; | 1323 | struct drm_i915_gem_object *obj; |
1204 | 1324 | ||
1205 | obj = intel_fb_obj(enabled->primary->fb); | 1325 | obj = intel_fb_obj(enabled->primary->state->fb); |
1206 | 1326 | ||
1207 | /* self-refresh seems busted with untiled */ | 1327 | /* self-refresh seems busted with untiled */ |
1208 | if (obj->tiling_mode == I915_TILING_NONE) | 1328 | if (obj->tiling_mode == I915_TILING_NONE) |
@@ -1226,7 +1346,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) | |||
1226 | int clock = adjusted_mode->crtc_clock; | 1346 | int clock = adjusted_mode->crtc_clock; |
1227 | int htotal = adjusted_mode->crtc_htotal; | 1347 | int htotal = adjusted_mode->crtc_htotal; |
1228 | int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w; | 1348 | int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w; |
1229 | int pixel_size = enabled->primary->fb->bits_per_pixel / 8; | 1349 | int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8; |
1230 | unsigned long line_time_us; | 1350 | unsigned long line_time_us; |
1231 | int entries; | 1351 | int entries; |
1232 | 1352 | ||
@@ -1663,7 +1783,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc) | |||
1663 | struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode; | 1783 | struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode; |
1664 | u32 linetime, ips_linetime; | 1784 | u32 linetime, ips_linetime; |
1665 | 1785 | ||
1666 | if (!intel_crtc_active(crtc)) | 1786 | if (!intel_crtc->active) |
1667 | return 0; | 1787 | return 0; |
1668 | 1788 | ||
1669 | /* The WM are computed with base on how long it takes to fill a single | 1789 | /* The WM are computed with base on how long it takes to fill a single |
@@ -1918,19 +2038,31 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc, | |||
1918 | enum pipe pipe = intel_crtc->pipe; | 2038 | enum pipe pipe = intel_crtc->pipe; |
1919 | struct drm_plane *plane; | 2039 | struct drm_plane *plane; |
1920 | 2040 | ||
1921 | if (!intel_crtc_active(crtc)) | 2041 | if (!intel_crtc->active) |
1922 | return; | 2042 | return; |
1923 | 2043 | ||
1924 | p->active = true; | 2044 | p->active = true; |
1925 | p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; | 2045 | p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; |
1926 | p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); | 2046 | p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); |
1927 | p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8; | 2047 | |
1928 | p->cur.bytes_per_pixel = 4; | 2048 | if (crtc->primary->state->fb) { |
2049 | p->pri.enabled = true; | ||
2050 | p->pri.bytes_per_pixel = | ||
2051 | crtc->primary->state->fb->bits_per_pixel / 8; | ||
2052 | } else { | ||
2053 | p->pri.enabled = false; | ||
2054 | p->pri.bytes_per_pixel = 0; | ||
2055 | } | ||
2056 | |||
2057 | if (crtc->cursor->state->fb) { | ||
2058 | p->cur.enabled = true; | ||
2059 | p->cur.bytes_per_pixel = 4; | ||
2060 | } else { | ||
2061 | p->cur.enabled = false; | ||
2062 | p->cur.bytes_per_pixel = 0; | ||
2063 | } | ||
1929 | p->pri.horiz_pixels = intel_crtc->config->pipe_src_w; | 2064 | p->pri.horiz_pixels = intel_crtc->config->pipe_src_w; |
1930 | p->cur.horiz_pixels = intel_crtc->cursor_width; | 2065 | p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w; |
1931 | /* TODO: for now, assume primary and cursor planes are always enabled. */ | ||
1932 | p->pri.enabled = true; | ||
1933 | p->cur.enabled = true; | ||
1934 | 2066 | ||
1935 | drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { | 2067 | drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { |
1936 | struct intel_plane *intel_plane = to_intel_plane(plane); | 2068 | struct intel_plane *intel_plane = to_intel_plane(plane); |
@@ -2430,7 +2562,7 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, | |||
2430 | 2562 | ||
2431 | nth_active_pipe = 0; | 2563 | nth_active_pipe = 0; |
2432 | for_each_crtc(dev, crtc) { | 2564 | for_each_crtc(dev, crtc) { |
2433 | if (!intel_crtc_active(crtc)) | 2565 | if (!to_intel_crtc(crtc)->active) |
2434 | continue; | 2566 | continue; |
2435 | 2567 | ||
2436 | if (crtc == for_crtc) | 2568 | if (crtc == for_crtc) |
@@ -2463,13 +2595,12 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg) | |||
2463 | void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, | 2595 | void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, |
2464 | struct skl_ddb_allocation *ddb /* out */) | 2596 | struct skl_ddb_allocation *ddb /* out */) |
2465 | { | 2597 | { |
2466 | struct drm_device *dev = dev_priv->dev; | ||
2467 | enum pipe pipe; | 2598 | enum pipe pipe; |
2468 | int plane; | 2599 | int plane; |
2469 | u32 val; | 2600 | u32 val; |
2470 | 2601 | ||
2471 | for_each_pipe(dev_priv, pipe) { | 2602 | for_each_pipe(dev_priv, pipe) { |
2472 | for_each_plane(pipe, plane) { | 2603 | for_each_plane(dev_priv, pipe, plane) { |
2473 | val = I915_READ(PLANE_BUF_CFG(pipe, plane)); | 2604 | val = I915_READ(PLANE_BUF_CFG(pipe, plane)); |
2474 | skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane], | 2605 | skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane], |
2475 | val); | 2606 | val); |
@@ -2518,6 +2649,7 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc, | |||
2518 | struct skl_ddb_allocation *ddb /* out */) | 2649 | struct skl_ddb_allocation *ddb /* out */) |
2519 | { | 2650 | { |
2520 | struct drm_device *dev = crtc->dev; | 2651 | struct drm_device *dev = crtc->dev; |
2652 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2521 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2653 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2522 | enum pipe pipe = intel_crtc->pipe; | 2654 | enum pipe pipe = intel_crtc->pipe; |
2523 | struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; | 2655 | struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; |
@@ -2542,7 +2674,7 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc, | |||
2542 | alloc->end -= cursor_blocks; | 2674 | alloc->end -= cursor_blocks; |
2543 | 2675 | ||
2544 | /* 1. Allocate the minimum required blocks for each active plane */ | 2676 | /* 1. Allocate the minimum required blocks for each active plane */
2545 | for_each_plane(pipe, plane) { | 2677 | for_each_plane(dev_priv, pipe, plane) { |
2546 | const struct intel_plane_wm_parameters *p; | 2678 | const struct intel_plane_wm_parameters *p; |
2547 | 2679 | ||
2548 | p = ¶ms->plane[plane]; | 2680 | p = ¶ms->plane[plane]; |
@@ -2670,7 +2802,7 @@ static void skl_compute_wm_global_parameters(struct drm_device *dev, | |||
2670 | struct drm_plane *plane; | 2802 | struct drm_plane *plane; |
2671 | 2803 | ||
2672 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | 2804 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) |
2673 | config->num_pipes_active += intel_crtc_active(crtc); | 2805 | config->num_pipes_active += to_intel_crtc(crtc)->active; |
2674 | 2806 | ||
2675 | /* FIXME: I don't think we need those two global parameters on SKL */ | 2807 | /* FIXME: I don't think we need those two global parameters on SKL */ |
2676 | list_for_each_entry(plane, &dev->mode_config.plane_list, head) { | 2808 | list_for_each_entry(plane, &dev->mode_config.plane_list, head) { |
@@ -2691,32 +2823,36 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc, | |||
2691 | struct drm_framebuffer *fb; | 2823 | struct drm_framebuffer *fb; |
2692 | int i = 1; /* Index for sprite planes start */ | 2824 | int i = 1; /* Index for sprite planes start */ |
2693 | 2825 | ||
2694 | p->active = intel_crtc_active(crtc); | 2826 | p->active = intel_crtc->active; |
2695 | if (p->active) { | 2827 | if (p->active) { |
2696 | p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; | 2828 | p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; |
2697 | p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config); | 2829 | p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config); |
2698 | 2830 | ||
2699 | /* | ||
2700 | * For now, assume primary and cursor planes are always enabled. | ||
2701 | */ | ||
2702 | p->plane[0].enabled = true; | ||
2703 | p->plane[0].bytes_per_pixel = | ||
2704 | crtc->primary->fb->bits_per_pixel / 8; | ||
2705 | p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w; | ||
2706 | p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h; | ||
2707 | p->plane[0].tiling = DRM_FORMAT_MOD_NONE; | ||
2708 | fb = crtc->primary->state->fb; | 2831 | fb = crtc->primary->state->fb; |
2709 | /* | 2832 | if (fb) { |
2710 | * Framebuffer can be NULL on plane disable, but it does not | 2833 | p->plane[0].enabled = true; |
2711 | * matter for watermarks if we assume no tiling in that case. | 2834 | p->plane[0].bytes_per_pixel = fb->bits_per_pixel / 8; |
2712 | */ | ||
2713 | if (fb) | ||
2714 | p->plane[0].tiling = fb->modifier[0]; | 2835 | p->plane[0].tiling = fb->modifier[0]; |
2836 | } else { | ||
2837 | p->plane[0].enabled = false; | ||
2838 | p->plane[0].bytes_per_pixel = 0; | ||
2839 | p->plane[0].tiling = DRM_FORMAT_MOD_NONE; | ||
2840 | } | ||
2841 | p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w; | ||
2842 | p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h; | ||
2715 | 2843 | ||
2716 | p->cursor.enabled = true; | 2844 | fb = crtc->cursor->state->fb; |
2717 | p->cursor.bytes_per_pixel = 4; | 2845 | if (fb) { |
2718 | p->cursor.horiz_pixels = intel_crtc->cursor_width ? | 2846 | p->cursor.enabled = true; |
2719 | intel_crtc->cursor_width : 64; | 2847 | p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8; |
2848 | p->cursor.horiz_pixels = crtc->cursor->state->crtc_w; | ||
2849 | p->cursor.vert_pixels = crtc->cursor->state->crtc_h; | ||
2850 | } else { | ||
2851 | p->cursor.enabled = false; | ||
2852 | p->cursor.bytes_per_pixel = 0; | ||
2853 | p->cursor.horiz_pixels = 64; | ||
2854 | p->cursor.vert_pixels = 64; | ||
2855 | } | ||
2720 | } | 2856 | } |
2721 | 2857 | ||
2722 | list_for_each_entry(plane, &dev->mode_config.plane_list, head) { | 2858 | list_for_each_entry(plane, &dev->mode_config.plane_list, head) { |
@@ -2822,7 +2958,7 @@ static void skl_compute_wm_level(const struct drm_i915_private *dev_priv, | |||
2822 | static uint32_t | 2958 | static uint32_t |
2823 | skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p) | 2959 | skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p) |
2824 | { | 2960 | { |
2825 | if (!intel_crtc_active(crtc)) | 2961 | if (!to_intel_crtc(crtc)->active) |
2826 | return 0; | 2962 | return 0; |
2827 | 2963 | ||
2828 | return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate); | 2964 | return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate); |
@@ -2996,12 +3132,11 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv, | |||
2996 | static void | 3132 | static void |
2997 | skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass) | 3133 | skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass) |
2998 | { | 3134 | { |
2999 | struct drm_device *dev = dev_priv->dev; | ||
3000 | int plane; | 3135 | int plane; |
3001 | 3136 | ||
3002 | DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass); | 3137 | DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass); |
3003 | 3138 | ||
3004 | for_each_plane(pipe, plane) { | 3139 | for_each_plane(dev_priv, pipe, plane) { |
3005 | I915_WRITE(PLANE_SURF(pipe, plane), | 3140 | I915_WRITE(PLANE_SURF(pipe, plane), |
3006 | I915_READ(PLANE_SURF(pipe, plane))); | 3141 | I915_READ(PLANE_SURF(pipe, plane))); |
3007 | } | 3142 | } |
@@ -3370,7 +3505,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) | |||
3370 | hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i)); | 3505 | hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i)); |
3371 | hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe)); | 3506 | hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe)); |
3372 | 3507 | ||
3373 | if (!intel_crtc_active(crtc)) | 3508 | if (!intel_crtc->active) |
3374 | return; | 3509 | return; |
3375 | 3510 | ||
3376 | hw->dirty[pipe] = true; | 3511 | hw->dirty[pipe] = true; |
@@ -3425,7 +3560,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) | |||
3425 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | 3560 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
3426 | hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); | 3561 | hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); |
3427 | 3562 | ||
3428 | active->pipe_enabled = intel_crtc_active(crtc); | 3563 | active->pipe_enabled = intel_crtc->active; |
3429 | 3564 | ||
3430 | if (active->pipe_enabled) { | 3565 | if (active->pipe_enabled) { |
3431 | u32 tmp = hw->wm_pipe[pipe]; | 3566 | u32 tmp = hw->wm_pipe[pipe]; |
@@ -3539,41 +3674,6 @@ void intel_update_sprite_watermarks(struct drm_plane *plane, | |||
3539 | pixel_size, enabled, scaled); | 3674 | pixel_size, enabled, scaled); |
3540 | } | 3675 | } |
3541 | 3676 | ||
3542 | static struct drm_i915_gem_object * | ||
3543 | intel_alloc_context_page(struct drm_device *dev) | ||
3544 | { | ||
3545 | struct drm_i915_gem_object *ctx; | ||
3546 | int ret; | ||
3547 | |||
3548 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
3549 | |||
3550 | ctx = i915_gem_alloc_object(dev, 4096); | ||
3551 | if (!ctx) { | ||
3552 | DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); | ||
3553 | return NULL; | ||
3554 | } | ||
3555 | |||
3556 | ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0); | ||
3557 | if (ret) { | ||
3558 | DRM_ERROR("failed to pin power context: %d\n", ret); | ||
3559 | goto err_unref; | ||
3560 | } | ||
3561 | |||
3562 | ret = i915_gem_object_set_to_gtt_domain(ctx, 1); | ||
3563 | if (ret) { | ||
3564 | DRM_ERROR("failed to set-domain on power context: %d\n", ret); | ||
3565 | goto err_unpin; | ||
3566 | } | ||
3567 | |||
3568 | return ctx; | ||
3569 | |||
3570 | err_unpin: | ||
3571 | i915_gem_object_ggtt_unpin(ctx); | ||
3572 | err_unref: | ||
3573 | drm_gem_object_unreference(&ctx->base); | ||
3574 | return NULL; | ||
3575 | } | ||
3576 | |||
3577 | /** | 3677 | /** |
3578 | * Lock protecting IPS related data structures | 3678 | * Lock protecting IPS related data structures |
3579 | */ | 3679 | */ |
@@ -3706,7 +3806,7 @@ static void ironlake_disable_drps(struct drm_device *dev) | |||
3706 | * ourselves, instead of doing a rmw cycle (which might result in us clearing | 3806 | * ourselves, instead of doing a rmw cycle (which might result in us clearing |
3707 | * all limits and the gpu stuck at whatever frequency it is at atm). | 3807 | * all limits and the gpu stuck at whatever frequency it is at atm). |
3708 | */ | 3808 | */ |
3709 | static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val) | 3809 | static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val) |
3710 | { | 3810 | { |
3711 | u32 limits; | 3811 | u32 limits; |
3712 | 3812 | ||
@@ -3716,9 +3816,15 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val) | |||
3716 | * the hw runs at the minimal clock before selecting the desired | 3816 | * the hw runs at the minimal clock before selecting the desired |
3717 | * frequency, if the down threshold expires in that window we will not | 3817 | * frequency, if the down threshold expires in that window we will not |
3718 | * receive a down interrupt. */ | 3818 | * receive a down interrupt. */ |
3719 | limits = dev_priv->rps.max_freq_softlimit << 24; | 3819 | if (IS_GEN9(dev_priv->dev)) { |
3720 | if (val <= dev_priv->rps.min_freq_softlimit) | 3820 | limits = (dev_priv->rps.max_freq_softlimit) << 23; |
3721 | limits |= dev_priv->rps.min_freq_softlimit << 16; | 3821 | if (val <= dev_priv->rps.min_freq_softlimit) |
3822 | limits |= (dev_priv->rps.min_freq_softlimit) << 14; | ||
3823 | } else { | ||
3824 | limits = dev_priv->rps.max_freq_softlimit << 24; | ||
3825 | if (val <= dev_priv->rps.min_freq_softlimit) | ||
3826 | limits |= dev_priv->rps.min_freq_softlimit << 16; | ||
3827 | } | ||
3722 | 3828 | ||
3723 | return limits; | 3829 | return limits; |
3724 | } | 3830 | } |
@@ -3726,6 +3832,8 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val) | |||
3726 | static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) | 3832 | static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) |
3727 | { | 3833 | { |
3728 | int new_power; | 3834 | int new_power; |
3835 | u32 threshold_up = 0, threshold_down = 0; /* in % */ | ||
3836 | u32 ei_up = 0, ei_down = 0; | ||
3729 | 3837 | ||
3730 | new_power = dev_priv->rps.power; | 3838 | new_power = dev_priv->rps.power; |
3731 | switch (dev_priv->rps.power) { | 3839 | switch (dev_priv->rps.power) { |
@@ -3758,59 +3866,53 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) | |||
3758 | switch (new_power) { | 3866 | switch (new_power) { |
3759 | case LOW_POWER: | 3867 | case LOW_POWER: |
3760 | /* Upclock if more than 95% busy over 16ms */ | 3868 | /* Upclock if more than 95% busy over 16ms */ |
3761 | I915_WRITE(GEN6_RP_UP_EI, 12500); | 3869 | ei_up = 16000; |
3762 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800); | 3870 | threshold_up = 95; |
3763 | 3871 | ||
3764 | /* Downclock if less than 85% busy over 32ms */ | 3872 | /* Downclock if less than 85% busy over 32ms */ |
3765 | I915_WRITE(GEN6_RP_DOWN_EI, 25000); | 3873 | ei_down = 32000; |
3766 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250); | 3874 | threshold_down = 85; |
3767 | |||
3768 | I915_WRITE(GEN6_RP_CONTROL, | ||
3769 | GEN6_RP_MEDIA_TURBO | | ||
3770 | GEN6_RP_MEDIA_HW_NORMAL_MODE | | ||
3771 | GEN6_RP_MEDIA_IS_GFX | | ||
3772 | GEN6_RP_ENABLE | | ||
3773 | GEN6_RP_UP_BUSY_AVG | | ||
3774 | GEN6_RP_DOWN_IDLE_AVG); | ||
3775 | break; | 3875 | break; |
3776 | 3876 | ||
3777 | case BETWEEN: | 3877 | case BETWEEN: |
3778 | /* Upclock if more than 90% busy over 13ms */ | 3878 | /* Upclock if more than 90% busy over 13ms */ |
3779 | I915_WRITE(GEN6_RP_UP_EI, 10250); | 3879 | ei_up = 13000; |
3780 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225); | 3880 | threshold_up = 90; |
3781 | 3881 | ||
3782 | /* Downclock if less than 75% busy over 32ms */ | 3882 | /* Downclock if less than 75% busy over 32ms */ |
3783 | I915_WRITE(GEN6_RP_DOWN_EI, 25000); | 3883 | ei_down = 32000; |
3784 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750); | 3884 | threshold_down = 75; |
3785 | |||
3786 | I915_WRITE(GEN6_RP_CONTROL, | ||
3787 | GEN6_RP_MEDIA_TURBO | | ||
3788 | GEN6_RP_MEDIA_HW_NORMAL_MODE | | ||
3789 | GEN6_RP_MEDIA_IS_GFX | | ||
3790 | GEN6_RP_ENABLE | | ||
3791 | GEN6_RP_UP_BUSY_AVG | | ||
3792 | GEN6_RP_DOWN_IDLE_AVG); | ||
3793 | break; | 3885 | break; |
3794 | 3886 | ||
3795 | case HIGH_POWER: | 3887 | case HIGH_POWER: |
3796 | /* Upclock if more than 85% busy over 10ms */ | 3888 | /* Upclock if more than 85% busy over 10ms */ |
3797 | I915_WRITE(GEN6_RP_UP_EI, 8000); | 3889 | ei_up = 10000; |
3798 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800); | 3890 | threshold_up = 85; |
3799 | 3891 | ||
3800 | /* Downclock if less than 60% busy over 32ms */ | 3892 | /* Downclock if less than 60% busy over 32ms */ |
3801 | I915_WRITE(GEN6_RP_DOWN_EI, 25000); | 3893 | ei_down = 32000; |
3802 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000); | 3894 | threshold_down = 60; |
3803 | |||
3804 | I915_WRITE(GEN6_RP_CONTROL, | ||
3805 | GEN6_RP_MEDIA_TURBO | | ||
3806 | GEN6_RP_MEDIA_HW_NORMAL_MODE | | ||
3807 | GEN6_RP_MEDIA_IS_GFX | | ||
3808 | GEN6_RP_ENABLE | | ||
3809 | GEN6_RP_UP_BUSY_AVG | | ||
3810 | GEN6_RP_DOWN_IDLE_AVG); | ||
3811 | break; | 3895 | break; |
3812 | } | 3896 | } |
3813 | 3897 | ||
3898 | I915_WRITE(GEN6_RP_UP_EI, | ||
3899 | GT_INTERVAL_FROM_US(dev_priv, ei_up)); | ||
3900 | I915_WRITE(GEN6_RP_UP_THRESHOLD, | ||
3901 | GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100))); | ||
3902 | |||
3903 | I915_WRITE(GEN6_RP_DOWN_EI, | ||
3904 | GT_INTERVAL_FROM_US(dev_priv, ei_down)); | ||
3905 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, | ||
3906 | GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100))); | ||
3907 | |||
3908 | I915_WRITE(GEN6_RP_CONTROL, | ||
3909 | GEN6_RP_MEDIA_TURBO | | ||
3910 | GEN6_RP_MEDIA_HW_NORMAL_MODE | | ||
3911 | GEN6_RP_MEDIA_IS_GFX | | ||
3912 | GEN6_RP_ENABLE | | ||
3913 | GEN6_RP_UP_BUSY_AVG | | ||
3914 | GEN6_RP_DOWN_IDLE_AVG); | ||
3915 | |||
3814 | dev_priv->rps.power = new_power; | 3916 | dev_priv->rps.power = new_power; |
3815 | dev_priv->rps.last_adj = 0; | 3917 | dev_priv->rps.last_adj = 0; |
3816 | } | 3918 | } |
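A quick arithmetic check on the rewritten thresholds, assuming GT_INTERVAL_FROM_US() converts microseconds into GT timestamp counts at the 1.28 us tick used on pre-gen9 parts (the helper itself is defined outside this hunk):

	LOW_POWER up evaluation interval:  16000 us / 1.28 us             = 12500  (old hardcoded GEN6_RP_UP_EI)
	LOW_POWER up threshold:            (16000 us * 95 / 100) / 1.28 us = 11875  (old GEN6_RP_UP_THRESHOLD was 11800)

So the microsecond/percent form reproduces the old register values to within rounding, while letting gen9 plug in its own tick rate.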
@@ -3847,7 +3949,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) | |||
3847 | if (val != dev_priv->rps.cur_freq) { | 3949 | if (val != dev_priv->rps.cur_freq) { |
3848 | gen6_set_rps_thresholds(dev_priv, val); | 3950 | gen6_set_rps_thresholds(dev_priv, val); |
3849 | 3951 | ||
3850 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | 3952 | if (IS_GEN9(dev)) |
3953 | I915_WRITE(GEN6_RPNSWREQ, | ||
3954 | GEN9_FREQUENCY(val)); | ||
3955 | else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | ||
3851 | I915_WRITE(GEN6_RPNSWREQ, | 3956 | I915_WRITE(GEN6_RPNSWREQ, |
3852 | HSW_FREQUENCY(val)); | 3957 | HSW_FREQUENCY(val)); |
3853 | else | 3958 | else |
@@ -3860,7 +3965,7 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) | |||
3860 | /* Make sure we continue to get interrupts | 3965 | /* Make sure we continue to get interrupts |
3861 | * until we hit the minimum or maximum frequencies. | 3966 | * until we hit the minimum or maximum frequencies. |
3862 | */ | 3967 | */ |
3863 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val)); | 3968 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val)); |
3864 | I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); | 3969 | I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); |
3865 | 3970 | ||
3866 | POSTING_READ(GEN6_RPNSWREQ); | 3971 | POSTING_READ(GEN6_RPNSWREQ); |
@@ -4081,6 +4186,13 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) | |||
4081 | dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; | 4186 | dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; |
4082 | dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; | 4187 | dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; |
4083 | dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; | 4188 | dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; |
4189 | if (IS_SKYLAKE(dev)) { | ||
4190 | /* Store the frequency values in 16.66 MHz units, which is | ||
4191 | the natural hardware unit for SKL */ | ||
4192 | dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; | ||
4193 | dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER; | ||
4194 | dev_priv->rps.min_freq *= GEN9_FREQ_SCALER; | ||
4195 | } | ||
4084 | /* hw_max = RP0 until we check for overclocking */ | 4196 | /* hw_max = RP0 until we check for overclocking */ |
4085 | dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; | 4197 | dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; |
4086 | 4198 | ||
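Assuming GEN9_FREQ_SCALER is 3 and GT_FREQUENCY_MULTIPLIER is 50 (both defined outside this hunk), the multiplication above keeps the cached values in 50/3, roughly 16.66 MHz, units rather than the 50 MHz units used by earlier gens. With a purely illustrative RP0 cap of 12:

	rp0_freq = 12;                        /* 12 * 50 MHz    = 600 MHz */
	rp0_freq *= GEN9_FREQ_SCALER;         /* 36 * 16.66 MHz = 600 MHz */

The physical frequency is unchanged; only the unit of the stored opcode differs.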
@@ -4121,23 +4233,21 @@ static void gen9_enable_rps(struct drm_device *dev) | |||
4121 | 4233 | ||
4122 | gen6_init_rps_frequencies(dev); | 4234 | gen6_init_rps_frequencies(dev); |
4123 | 4235 | ||
4124 | I915_WRITE(GEN6_RPNSWREQ, 0xc800000); | 4236 | /* Program defaults and thresholds for RPS */
4125 | I915_WRITE(GEN6_RC_VIDEO_FREQ, 0xc800000); | 4237 | I915_WRITE(GEN6_RC_VIDEO_FREQ, |
4238 | GEN9_FREQUENCY(dev_priv->rps.rp1_freq)); | ||
4239 | |||
4240 | /* 1 second timeout */ | ||
4241 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, | ||
4242 | GT_INTERVAL_FROM_US(dev_priv, 1000000)); | ||
4126 | 4243 | ||
4127 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240); | ||
4128 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 0x12060000); | ||
4129 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 0xe808); | ||
4130 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 0x3bd08); | ||
4131 | I915_WRITE(GEN6_RP_UP_EI, 0x101d0); | ||
4132 | I915_WRITE(GEN6_RP_DOWN_EI, 0x55730); | ||
4133 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa); | 4244 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa); |
4134 | I915_WRITE(GEN6_PMINTRMSK, 0x6); | ||
4135 | I915_WRITE(GEN6_RP_CONTROL, GEN6_RP_MEDIA_TURBO | | ||
4136 | GEN6_RP_MEDIA_HW_MODE | GEN6_RP_MEDIA_IS_GFX | | ||
4137 | GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG | | ||
4138 | GEN6_RP_DOWN_IDLE_AVG); | ||
4139 | 4245 | ||
4140 | gen6_enable_rps_interrupts(dev); | 4246 | /* Leaning on the below call to gen6_set_rps to program/setup the |
4247 | * Up/Down EI & threshold registers, as well as the RP_CONTROL, | ||
4248 | * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ | ||
4249 | dev_priv->rps.power = HIGH_POWER; /* force a reset */ | ||
4250 | gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); | ||
4141 | 4251 | ||
4142 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 4252 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
4143 | } | 4253 | } |
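Worth noting: the old hardcoded GEN6_RP_DOWN_TIMEOUT value of 0xf4240 is exactly 1,000,000 in decimal, so the replacement

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, GT_INTERVAL_FROM_US(dev_priv, 1000000));

expresses the same one-second timeout in microseconds (assuming GT_INTERVAL_FROM_US() converts microseconds into GT timestamp counts) instead of baking in a 1 us register tick.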
@@ -4990,124 +5100,6 @@ static void valleyview_enable_rps(struct drm_device *dev) | |||
4990 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 5100 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
4991 | } | 5101 | } |
4992 | 5102 | ||
4993 | void ironlake_teardown_rc6(struct drm_device *dev) | ||
4994 | { | ||
4995 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4996 | |||
4997 | if (dev_priv->ips.renderctx) { | ||
4998 | i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx); | ||
4999 | drm_gem_object_unreference(&dev_priv->ips.renderctx->base); | ||
5000 | dev_priv->ips.renderctx = NULL; | ||
5001 | } | ||
5002 | |||
5003 | if (dev_priv->ips.pwrctx) { | ||
5004 | i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx); | ||
5005 | drm_gem_object_unreference(&dev_priv->ips.pwrctx->base); | ||
5006 | dev_priv->ips.pwrctx = NULL; | ||
5007 | } | ||
5008 | } | ||
5009 | |||
5010 | static void ironlake_disable_rc6(struct drm_device *dev) | ||
5011 | { | ||
5012 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5013 | |||
5014 | if (I915_READ(PWRCTXA)) { | ||
5015 | /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ | ||
5016 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); | ||
5017 | wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), | ||
5018 | 50); | ||
5019 | |||
5020 | I915_WRITE(PWRCTXA, 0); | ||
5021 | POSTING_READ(PWRCTXA); | ||
5022 | |||
5023 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | ||
5024 | POSTING_READ(RSTDBYCTL); | ||
5025 | } | ||
5026 | } | ||
5027 | |||
5028 | static int ironlake_setup_rc6(struct drm_device *dev) | ||
5029 | { | ||
5030 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5031 | |||
5032 | if (dev_priv->ips.renderctx == NULL) | ||
5033 | dev_priv->ips.renderctx = intel_alloc_context_page(dev); | ||
5034 | if (!dev_priv->ips.renderctx) | ||
5035 | return -ENOMEM; | ||
5036 | |||
5037 | if (dev_priv->ips.pwrctx == NULL) | ||
5038 | dev_priv->ips.pwrctx = intel_alloc_context_page(dev); | ||
5039 | if (!dev_priv->ips.pwrctx) { | ||
5040 | ironlake_teardown_rc6(dev); | ||
5041 | return -ENOMEM; | ||
5042 | } | ||
5043 | |||
5044 | return 0; | ||
5045 | } | ||
5046 | |||
5047 | static void ironlake_enable_rc6(struct drm_device *dev) | ||
5048 | { | ||
5049 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5050 | struct intel_engine_cs *ring = &dev_priv->ring[RCS]; | ||
5051 | bool was_interruptible; | ||
5052 | int ret; | ||
5053 | |||
5054 | /* rc6 disabled by default due to repeated reports of hanging during | ||
5055 | * boot and resume. | ||
5056 | */ | ||
5057 | if (!intel_enable_rc6(dev)) | ||
5058 | return; | ||
5059 | |||
5060 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
5061 | |||
5062 | ret = ironlake_setup_rc6(dev); | ||
5063 | if (ret) | ||
5064 | return; | ||
5065 | |||
5066 | was_interruptible = dev_priv->mm.interruptible; | ||
5067 | dev_priv->mm.interruptible = false; | ||
5068 | |||
5069 | /* | ||
5070 | * GPU can automatically power down the render unit if given a page | ||
5071 | * to save state. | ||
5072 | */ | ||
5073 | ret = intel_ring_begin(ring, 6); | ||
5074 | if (ret) { | ||
5075 | ironlake_teardown_rc6(dev); | ||
5076 | dev_priv->mm.interruptible = was_interruptible; | ||
5077 | return; | ||
5078 | } | ||
5079 | |||
5080 | intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); | ||
5081 | intel_ring_emit(ring, MI_SET_CONTEXT); | ||
5082 | intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) | | ||
5083 | MI_MM_SPACE_GTT | | ||
5084 | MI_SAVE_EXT_STATE_EN | | ||
5085 | MI_RESTORE_EXT_STATE_EN | | ||
5086 | MI_RESTORE_INHIBIT); | ||
5087 | intel_ring_emit(ring, MI_SUSPEND_FLUSH); | ||
5088 | intel_ring_emit(ring, MI_NOOP); | ||
5089 | intel_ring_emit(ring, MI_FLUSH); | ||
5090 | intel_ring_advance(ring); | ||
5091 | |||
5092 | /* | ||
5093 | * Wait for the command parser to advance past MI_SET_CONTEXT. The HW | ||
5094 | * does an implicit flush, combined with MI_FLUSH above, it should be | ||
5095 | * safe to assume that renderctx is valid | ||
5096 | */ | ||
5097 | ret = intel_ring_idle(ring); | ||
5098 | dev_priv->mm.interruptible = was_interruptible; | ||
5099 | if (ret) { | ||
5100 | DRM_ERROR("failed to enable ironlake power savings\n"); | ||
5101 | ironlake_teardown_rc6(dev); | ||
5102 | return; | ||
5103 | } | ||
5104 | |||
5105 | I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN); | ||
5106 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | ||
5107 | |||
5108 | intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE); | ||
5109 | } | ||
5110 | |||
5111 | static unsigned long intel_pxfreq(u32 vidfreq) | 5103 | static unsigned long intel_pxfreq(u32 vidfreq) |
5112 | { | 5104 | { |
5113 | unsigned long freq; | 5105 | unsigned long freq; |
@@ -5620,12 +5612,7 @@ static void gen6_suspend_rps(struct drm_device *dev) | |||
5620 | 5612 | ||
5621 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); | 5613 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); |
5622 | 5614 | ||
5623 | /* | 5615 | gen6_disable_rps_interrupts(dev); |
5624 | * TODO: disable RPS interrupts on GEN9+ too once RPS support | ||
5625 | * is added for it. | ||
5626 | */ | ||
5627 | if (INTEL_INFO(dev)->gen < 9) | ||
5628 | gen6_disable_rps_interrupts(dev); | ||
5629 | } | 5616 | } |
5630 | 5617 | ||
5631 | /** | 5618 | /** |
@@ -5655,7 +5642,6 @@ void intel_disable_gt_powersave(struct drm_device *dev) | |||
5655 | 5642 | ||
5656 | if (IS_IRONLAKE_M(dev)) { | 5643 | if (IS_IRONLAKE_M(dev)) { |
5657 | ironlake_disable_drps(dev); | 5644 | ironlake_disable_drps(dev); |
5658 | ironlake_disable_rc6(dev); | ||
5659 | } else if (INTEL_INFO(dev)->gen >= 6) { | 5645 | } else if (INTEL_INFO(dev)->gen >= 6) { |
5660 | intel_suspend_gt_powersave(dev); | 5646 | intel_suspend_gt_powersave(dev); |
5661 | 5647 | ||
@@ -5683,12 +5669,7 @@ static void intel_gen6_powersave_work(struct work_struct *work) | |||
5683 | 5669 | ||
5684 | mutex_lock(&dev_priv->rps.hw_lock); | 5670 | mutex_lock(&dev_priv->rps.hw_lock); |
5685 | 5671 | ||
5686 | /* | 5672 | gen6_reset_rps_interrupts(dev); |
5687 | * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is | ||
5688 | * added for it. | ||
5689 | */ | ||
5690 | if (INTEL_INFO(dev)->gen < 9) | ||
5691 | gen6_reset_rps_interrupts(dev); | ||
5692 | 5673 | ||
5693 | if (IS_CHERRYVIEW(dev)) { | 5674 | if (IS_CHERRYVIEW(dev)) { |
5694 | cherryview_enable_rps(dev); | 5675 | cherryview_enable_rps(dev); |
@@ -5707,8 +5688,7 @@ static void intel_gen6_powersave_work(struct work_struct *work) | |||
5707 | } | 5688 | } |
5708 | dev_priv->rps.enabled = true; | 5689 | dev_priv->rps.enabled = true; |
5709 | 5690 | ||
5710 | if (INTEL_INFO(dev)->gen < 9) | 5691 | gen6_enable_rps_interrupts(dev); |
5711 | gen6_enable_rps_interrupts(dev); | ||
5712 | 5692 | ||
5713 | mutex_unlock(&dev_priv->rps.hw_lock); | 5693 | mutex_unlock(&dev_priv->rps.hw_lock); |
5714 | 5694 | ||
@@ -5726,7 +5706,6 @@ void intel_enable_gt_powersave(struct drm_device *dev) | |||
5726 | if (IS_IRONLAKE_M(dev)) { | 5706 | if (IS_IRONLAKE_M(dev)) { |
5727 | mutex_lock(&dev->struct_mutex); | 5707 | mutex_lock(&dev->struct_mutex); |
5728 | ironlake_enable_drps(dev); | 5708 | ironlake_enable_drps(dev); |
5729 | ironlake_enable_rc6(dev); | ||
5730 | intel_init_emon(dev); | 5709 | intel_init_emon(dev); |
5731 | mutex_unlock(&dev->struct_mutex); | 5710 | mutex_unlock(&dev->struct_mutex); |
5732 | } else if (INTEL_INFO(dev)->gen >= 6) { | 5711 | } else if (INTEL_INFO(dev)->gen >= 6) { |
@@ -6259,11 +6238,22 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) | |||
6259 | gen6_check_mch_setup(dev); | 6238 | gen6_check_mch_setup(dev); |
6260 | } | 6239 | } |
6261 | 6240 | ||
6241 | static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) | ||
6242 | { | ||
6243 | I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); | ||
6244 | |||
6245 | /* | ||
6246 | * Disable trickle feed and enable pnd deadline calculation | ||
6247 | */ | ||
6248 | I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); | ||
6249 | I915_WRITE(CBR1_VLV, 0); | ||
6250 | } | ||
6251 | |||
6262 | static void valleyview_init_clock_gating(struct drm_device *dev) | 6252 | static void valleyview_init_clock_gating(struct drm_device *dev) |
6263 | { | 6253 | { |
6264 | struct drm_i915_private *dev_priv = dev->dev_private; | 6254 | struct drm_i915_private *dev_priv = dev->dev_private; |
6265 | 6255 | ||
6266 | I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); | 6256 | vlv_init_display_clock_gating(dev_priv); |
6267 | 6257 | ||
6268 | /* WaDisableEarlyCull:vlv */ | 6258 | /* WaDisableEarlyCull:vlv */ |
6269 | I915_WRITE(_3D_CHICKEN3, | 6259 | I915_WRITE(_3D_CHICKEN3, |
@@ -6311,8 +6301,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev) | |||
6311 | I915_WRITE(GEN7_UCGCTL4, | 6301 | I915_WRITE(GEN7_UCGCTL4, |
6312 | I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE); | 6302 | I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE); |
6313 | 6303 | ||
6314 | I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); | ||
6315 | |||
6316 | /* | 6304 | /* |
6317 | * BSpec says this must be set, even though | 6305 | * BSpec says this must be set, even though |
6318 | * WaDisable4x2SubspanOptimization isn't listed for VLV. | 6306 | * WaDisable4x2SubspanOptimization isn't listed for VLV. |
@@ -6349,9 +6337,7 @@ static void cherryview_init_clock_gating(struct drm_device *dev) | |||
6349 | { | 6337 | { |
6350 | struct drm_i915_private *dev_priv = dev->dev_private; | 6338 | struct drm_i915_private *dev_priv = dev->dev_private; |
6351 | 6339 | ||
6352 | I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); | 6340 | vlv_init_display_clock_gating(dev_priv); |
6353 | |||
6354 | I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); | ||
6355 | 6341 | ||
6356 | /* WaVSRefCountFullforceMissDisable:chv */ | 6342 | /* WaVSRefCountFullforceMissDisable:chv */ |
6357 | /* WaDSRefCountFullforceMissDisable:chv */ | 6343 | /* WaDSRefCountFullforceMissDisable:chv */ |
@@ -6541,7 +6527,7 @@ void intel_init_pm(struct drm_device *dev) | |||
6541 | else if (INTEL_INFO(dev)->gen == 8) | 6527 | else if (INTEL_INFO(dev)->gen == 8) |
6542 | dev_priv->display.init_clock_gating = broadwell_init_clock_gating; | 6528 | dev_priv->display.init_clock_gating = broadwell_init_clock_gating; |
6543 | } else if (IS_CHERRYVIEW(dev)) { | 6529 | } else if (IS_CHERRYVIEW(dev)) { |
6544 | dev_priv->display.update_wm = cherryview_update_wm; | 6530 | dev_priv->display.update_wm = valleyview_update_wm; |
6545 | dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm; | 6531 | dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm; |
6546 | dev_priv->display.init_clock_gating = | 6532 | dev_priv->display.init_clock_gating = |
6547 | cherryview_init_clock_gating; | 6533 | cherryview_init_clock_gating; |
@@ -6709,7 +6695,9 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) | |||
6709 | 6695 | ||
6710 | int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) | 6696 | int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) |
6711 | { | 6697 | { |
6712 | if (IS_CHERRYVIEW(dev_priv->dev)) | 6698 | if (IS_GEN9(dev_priv->dev)) |
6699 | return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER; | ||
6700 | else if (IS_CHERRYVIEW(dev_priv->dev)) | ||
6713 | return chv_gpu_freq(dev_priv, val); | 6701 | return chv_gpu_freq(dev_priv, val); |
6714 | else if (IS_VALLEYVIEW(dev_priv->dev)) | 6702 | else if (IS_VALLEYVIEW(dev_priv->dev)) |
6715 | return byt_gpu_freq(dev_priv, val); | 6703 | return byt_gpu_freq(dev_priv, val); |
@@ -6719,7 +6707,9 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) | |||
6719 | 6707 | ||
6720 | int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) | 6708 | int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) |
6721 | { | 6709 | { |
6722 | if (IS_CHERRYVIEW(dev_priv->dev)) | 6710 | if (IS_GEN9(dev_priv->dev)) |
6711 | return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER; | ||
6712 | else if (IS_CHERRYVIEW(dev_priv->dev)) | ||
6723 | return chv_freq_opcode(dev_priv, val); | 6713 | return chv_freq_opcode(dev_priv, val); |
6724 | else if (IS_VALLEYVIEW(dev_priv->dev)) | 6714 | else if (IS_VALLEYVIEW(dev_priv->dev)) |
6725 | return byt_freq_opcode(dev_priv, val); | 6715 | return byt_freq_opcode(dev_priv, val); |
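A worked round trip through the two new gen9 branches, assuming GT_FREQUENCY_MULTIPLIER is 50 and GEN9_FREQ_SCALER is 3, with 66 used as a purely illustrative opcode:

	intel_gpu_freq(dev_priv, 66)      ->   66 * 50 / 3  = 1100 MHz
	intel_freq_opcode(dev_priv, 1100) -> 1100 * 3 / 50  = 66

Integer division truncates, so the round trip is only exact for MHz values that were themselves produced by intel_gpu_freq().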
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index cd79c3843452..441e2502b889 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -317,29 +317,6 @@ gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring) | |||
317 | return 0; | 317 | return 0; |
318 | } | 318 | } |
319 | 319 | ||
320 | static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value) | ||
321 | { | ||
322 | int ret; | ||
323 | |||
324 | if (!ring->fbc_dirty) | ||
325 | return 0; | ||
326 | |||
327 | ret = intel_ring_begin(ring, 6); | ||
328 | if (ret) | ||
329 | return ret; | ||
330 | /* WaFbcNukeOn3DBlt:ivb/hsw */ | ||
331 | intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); | ||
332 | intel_ring_emit(ring, MSG_FBC_REND_STATE); | ||
333 | intel_ring_emit(ring, value); | ||
334 | intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT); | ||
335 | intel_ring_emit(ring, MSG_FBC_REND_STATE); | ||
336 | intel_ring_emit(ring, ring->scratch.gtt_offset + 256); | ||
337 | intel_ring_advance(ring); | ||
338 | |||
339 | ring->fbc_dirty = false; | ||
340 | return 0; | ||
341 | } | ||
342 | |||
343 | static int | 320 | static int |
344 | gen7_render_ring_flush(struct intel_engine_cs *ring, | 321 | gen7_render_ring_flush(struct intel_engine_cs *ring, |
345 | u32 invalidate_domains, u32 flush_domains) | 322 | u32 invalidate_domains, u32 flush_domains) |
@@ -398,9 +375,6 @@ gen7_render_ring_flush(struct intel_engine_cs *ring, | |||
398 | intel_ring_emit(ring, 0); | 375 | intel_ring_emit(ring, 0); |
399 | intel_ring_advance(ring); | 376 | intel_ring_advance(ring); |
400 | 377 | ||
401 | if (!invalidate_domains && flush_domains) | ||
402 | return gen7_ring_fbc_flush(ring, FBC_REND_NUKE); | ||
403 | |||
404 | return 0; | 378 | return 0; |
405 | } | 379 | } |
406 | 380 | ||
@@ -458,14 +432,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring, | |||
458 | return ret; | 432 | return ret; |
459 | } | 433 | } |
460 | 434 | ||
461 | ret = gen8_emit_pipe_control(ring, flags, scratch_addr); | 435 | return gen8_emit_pipe_control(ring, flags, scratch_addr); |
462 | if (ret) | ||
463 | return ret; | ||
464 | |||
465 | if (!invalidate_domains && flush_domains) | ||
466 | return gen7_ring_fbc_flush(ring, FBC_REND_NUKE); | ||
467 | |||
468 | return 0; | ||
469 | } | 436 | } |
470 | 437 | ||
471 | static void ring_write_tail(struct intel_engine_cs *ring, | 438 | static void ring_write_tail(struct intel_engine_cs *ring, |
@@ -2477,7 +2444,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring, | |||
2477 | u32 invalidate, u32 flush) | 2444 | u32 invalidate, u32 flush) |
2478 | { | 2445 | { |
2479 | struct drm_device *dev = ring->dev; | 2446 | struct drm_device *dev = ring->dev; |
2480 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2481 | uint32_t cmd; | 2447 | uint32_t cmd; |
2482 | int ret; | 2448 | int ret; |
2483 | 2449 | ||
@@ -2486,7 +2452,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring, | |||
2486 | return ret; | 2452 | return ret; |
2487 | 2453 | ||
2488 | cmd = MI_FLUSH_DW; | 2454 | cmd = MI_FLUSH_DW; |
2489 | if (INTEL_INFO(ring->dev)->gen >= 8) | 2455 | if (INTEL_INFO(dev)->gen >= 8) |
2490 | cmd += 1; | 2456 | cmd += 1; |
2491 | 2457 | ||
2492 | /* We always require a command barrier so that subsequent | 2458 | /* We always require a command barrier so that subsequent |
@@ -2506,7 +2472,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring, | |||
2506 | cmd |= MI_INVALIDATE_TLB; | 2472 | cmd |= MI_INVALIDATE_TLB; |
2507 | intel_ring_emit(ring, cmd); | 2473 | intel_ring_emit(ring, cmd); |
2508 | intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); | 2474 | intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); |
2509 | if (INTEL_INFO(ring->dev)->gen >= 8) { | 2475 | if (INTEL_INFO(dev)->gen >= 8) { |
2510 | intel_ring_emit(ring, 0); /* upper addr */ | 2476 | intel_ring_emit(ring, 0); /* upper addr */ |
2511 | intel_ring_emit(ring, 0); /* value */ | 2477 | intel_ring_emit(ring, 0); /* value */ |
2512 | } else { | 2478 | } else { |
@@ -2515,13 +2481,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring, | |||
2515 | } | 2481 | } |
2516 | intel_ring_advance(ring); | 2482 | intel_ring_advance(ring); |
2517 | 2483 | ||
2518 | if (!invalidate && flush) { | ||
2519 | if (IS_GEN7(dev)) | ||
2520 | return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN); | ||
2521 | else if (IS_BROADWELL(dev)) | ||
2522 | dev_priv->fbc.need_sw_cache_clean = true; | ||
2523 | } | ||
2524 | |||
2525 | return 0; | 2484 | return 0; |
2526 | } | 2485 | } |
2527 | 2486 | ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 8f3b49a23ccf..c761fe05ad6f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -267,7 +267,6 @@ struct intel_engine_cs { | |||
267 | */ | 267 | */ |
268 | struct drm_i915_gem_request *outstanding_lazy_request; | 268 | struct drm_i915_gem_request *outstanding_lazy_request; |
269 | bool gpu_caches_dirty; | 269 | bool gpu_caches_dirty; |
270 | bool fbc_dirty; | ||
271 | 270 | ||
272 | wait_queue_head_t irq_queue; | 271 | wait_queue_head_t irq_queue; |
273 | 272 | ||
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 6d8e29abbc33..ce00e6994eeb 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -194,8 +194,39 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) | |||
194 | outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); | 194 | outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); |
195 | vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); | 195 | vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); |
196 | 196 | ||
197 | if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) | 197 | if (IS_BROADWELL(dev)) |
198 | gen8_irq_power_well_post_enable(dev_priv); | 198 | gen8_irq_power_well_post_enable(dev_priv, |
199 | 1 << PIPE_C | 1 << PIPE_B); | ||
200 | } | ||
201 | |||
202 | static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, | ||
203 | struct i915_power_well *power_well) | ||
204 | { | ||
205 | struct drm_device *dev = dev_priv->dev; | ||
206 | |||
207 | /* | ||
208 | * After we re-enable the power well, if we touch VGA register 0x3d5 | ||
209 | * we'll get unclaimed register interrupts. This stops after we write | ||
210 | * anything to the VGA MSR register. The vgacon module uses this | ||
211 | * register all the time, so if we unbind our driver and, as a | ||
212 | * consequence, bind vgacon, we'll get stuck in an infinite loop at | ||
213 | * console_unlock(). So here we touch the VGA MSR register, making | ||
214 | * sure vgacon can keep working normally without triggering interrupts | ||
215 | * and error messages. | ||
216 | */ | ||
217 | if (power_well->data == SKL_DISP_PW_2) { | ||
218 | vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); | ||
219 | outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); | ||
220 | vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); | ||
221 | |||
222 | gen8_irq_power_well_post_enable(dev_priv, | ||
223 | 1 << PIPE_C | 1 << PIPE_B); | ||
224 | } | ||
225 | |||
226 | if (power_well->data == SKL_DISP_PW_1) { | ||
227 | intel_prepare_ddi(dev); | ||
228 | gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A); | ||
229 | } | ||
199 | } | 230 | } |
200 | 231 | ||
201 | static void hsw_set_power_well(struct drm_i915_private *dev_priv, | 232 | static void hsw_set_power_well(struct drm_i915_private *dev_priv, |
@@ -293,7 +324,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, | |||
293 | { | 324 | { |
294 | uint32_t tmp, fuse_status; | 325 | uint32_t tmp, fuse_status; |
295 | uint32_t req_mask, state_mask; | 326 | uint32_t req_mask, state_mask; |
296 | bool check_fuse_status = false; | 327 | bool is_enabled, enable_requested, check_fuse_status = false; |
297 | 328 | ||
298 | tmp = I915_READ(HSW_PWR_WELL_DRIVER); | 329 | tmp = I915_READ(HSW_PWR_WELL_DRIVER); |
299 | fuse_status = I915_READ(SKL_FUSE_STATUS); | 330 | fuse_status = I915_READ(SKL_FUSE_STATUS); |
@@ -324,15 +355,17 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, | |||
324 | } | 355 | } |
325 | 356 | ||
326 | req_mask = SKL_POWER_WELL_REQ(power_well->data); | 357 | req_mask = SKL_POWER_WELL_REQ(power_well->data); |
358 | enable_requested = tmp & req_mask; | ||
327 | state_mask = SKL_POWER_WELL_STATE(power_well->data); | 359 | state_mask = SKL_POWER_WELL_STATE(power_well->data); |
360 | is_enabled = tmp & state_mask; | ||
328 | 361 | ||
329 | if (enable) { | 362 | if (enable) { |
330 | if (!(tmp & req_mask)) { | 363 | if (!enable_requested) { |
331 | I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask); | 364 | I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask); |
332 | DRM_DEBUG_KMS("Enabling %s\n", power_well->name); | ||
333 | } | 365 | } |
334 | 366 | ||
335 | if (!(tmp & state_mask)) { | 367 | if (!is_enabled) { |
368 | DRM_DEBUG_KMS("Enabling %s\n", power_well->name); | ||
336 | if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & | 369 | if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & |
337 | state_mask), 1)) | 370 | state_mask), 1)) |
338 | DRM_ERROR("%s enable timeout\n", | 371 | DRM_ERROR("%s enable timeout\n", |
@@ -340,7 +373,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, | |||
340 | check_fuse_status = true; | 373 | check_fuse_status = true; |
341 | } | 374 | } |
342 | } else { | 375 | } else { |
343 | if (tmp & req_mask) { | 376 | if (enable_requested) { |
344 | I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask); | 377 | I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask); |
345 | POSTING_READ(HSW_PWR_WELL_DRIVER); | 378 | POSTING_READ(HSW_PWR_WELL_DRIVER); |
346 | DRM_DEBUG_KMS("Disabling %s\n", power_well->name); | 379 | DRM_DEBUG_KMS("Disabling %s\n", power_well->name); |
@@ -358,6 +391,9 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, | |||
358 | DRM_ERROR("PG2 distributing status timeout\n"); | 391 | DRM_ERROR("PG2 distributing status timeout\n"); |
359 | } | 392 | } |
360 | } | 393 | } |
394 | |||
395 | if (enable && !is_enabled) | ||
396 | skl_power_well_post_enable(dev_priv, power_well); | ||
361 | } | 397 | } |
362 | 398 | ||
363 | static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, | 399 | static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, |
@@ -1420,7 +1456,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) | |||
1420 | } | 1456 | } |
1421 | 1457 | ||
1422 | /** | 1458 | /** |
1423 | * intel_aux_display_runtime_get - grab an auxilliary power domain reference | 1459 | * intel_aux_display_runtime_get - grab an auxiliary power domain reference |
1424 | * @dev_priv: i915 device instance | 1460 | * @dev_priv: i915 device instance |
1425 | * | 1461 | * |
1426 | * This function grabs a power domain reference for the auxiliary power domain | 1462 | * This function grabs a power domain reference for the auxiliary power domain |
@@ -1437,10 +1473,10 @@ void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv) | |||
1437 | } | 1473 | } |
1438 | 1474 | ||
1439 | /** | 1475 | /** |
1440 | * intel_aux_display_runtime_put - release an auxilliary power domain reference | 1476 | * intel_aux_display_runtime_put - release an auxiliary power domain reference |
1441 | * @dev_priv: i915 device instance | 1477 | * @dev_priv: i915 device instance |
1442 | * | 1478 | * |
1443 | * This function drops the auxilliary power domain reference obtained by | 1479 | * This function drops the auxiliary power domain reference obtained by |
1444 | * intel_aux_display_runtime_get() and might power down the corresponding | 1480 | * intel_aux_display_runtime_get() and might power down the corresponding |
1445 | * hardware block right away if this is the last reference. | 1481 | * hardware block right away if this is the last reference. |
1446 | */ | 1482 | */ |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 64ad2b40179f..9e554c2cfbb4 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -1247,7 +1247,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder) | |||
1247 | 1247 | ||
1248 | switch (crtc->config->pixel_multiplier) { | 1248 | switch (crtc->config->pixel_multiplier) { |
1249 | default: | 1249 | default: |
1250 | WARN(1, "unknown pixel mutlipler specified\n"); | 1250 | WARN(1, "unknown pixel multiplier specified\n"); |
1251 | case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; | 1251 | case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; |
1252 | case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; | 1252 | case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; |
1253 | case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; | 1253 | case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 7051da7015d3..a82873631851 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -1361,10 +1361,10 @@ out_unlock: | |||
1361 | 1361 | ||
1362 | int intel_plane_restore(struct drm_plane *plane) | 1362 | int intel_plane_restore(struct drm_plane *plane) |
1363 | { | 1363 | { |
1364 | if (!plane->crtc || !plane->fb) | 1364 | if (!plane->crtc || !plane->state->fb) |
1365 | return 0; | 1365 | return 0; |
1366 | 1366 | ||
1367 | return plane->funcs->update_plane(plane, plane->crtc, plane->fb, | 1367 | return plane->funcs->update_plane(plane, plane->crtc, plane->state->fb, |
1368 | plane->state->crtc_x, plane->state->crtc_y, | 1368 | plane->state->crtc_x, plane->state->crtc_y, |
1369 | plane->state->crtc_w, plane->state->crtc_h, | 1369 | plane->state->crtc_w, plane->state->crtc_h, |
1370 | plane->state->src_x, plane->state->src_y, | 1370 | plane->state->src_x, plane->state->src_y, |
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 8879f17770aa..ab5cc94588e1 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -557,18 +557,24 @@ hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read, | |||
557 | WARN(1, "Unclaimed register detected %s %s register 0x%x\n", | 557 | WARN(1, "Unclaimed register detected %s %s register 0x%x\n", |
558 | when, op, reg); | 558 | when, op, reg); |
559 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | 559 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); |
560 | i915.mmio_debug--; /* Only report the first N failures */ | ||
560 | } | 561 | } |
561 | } | 562 | } |
562 | 563 | ||
563 | static void | 564 | static void |
564 | hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv) | 565 | hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv) |
565 | { | 566 | { |
566 | if (i915.mmio_debug) | 567 | static bool mmio_debug_once = true; |
568 | |||
569 | if (i915.mmio_debug || !mmio_debug_once) | ||
567 | return; | 570 | return; |
568 | 571 | ||
569 | if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { | 572 | if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { |
570 | DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem."); | 573 | DRM_DEBUG("Unclaimed register detected, " |
574 | "enabling oneshot unclaimed register reporting. " | ||
575 | "Please use i915.mmio_debug=N for more information.\n"); | ||
571 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | 576 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); |
577 | i915.mmio_debug = mmio_debug_once--; | ||
572 | } | 578 | } |
573 | } | 579 | } |
574 | 580 | ||
@@ -1082,8 +1088,14 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) | |||
1082 | 1088 | ||
1083 | /* We need to init first for ECOBUS access and then | 1089 | /* We need to init first for ECOBUS access and then |
1084 | * determine later if we want to reinit, in case of MT access is | 1090 | * determine later if we want to reinit, in case of MT access is |
1085 | * not working | 1091 | * not working. In this stage we don't know which flavour this |
1092 | * ivb is, so it is better to reset also the gen6 fw registers | ||
1093 | * before the ecobus check. | ||
1086 | */ | 1094 | */ |
1095 | |||
1096 | __raw_i915_write32(dev_priv, FORCEWAKE, 0); | ||
1097 | __raw_posting_read(dev_priv, ECOBUS); | ||
1098 | |||
1087 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, | 1099 | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, |
1088 | FORCEWAKE_MT, FORCEWAKE_MT_ACK); | 1100 | FORCEWAKE_MT, FORCEWAKE_MT_ACK); |
1089 | 1101 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index d13d1b5a859f..df09ca7c4889 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -1030,37 +1030,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence) | |||
1030 | return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); | 1030 | return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | struct radeon_wait_cb { | ||
1034 | struct fence_cb base; | ||
1035 | struct task_struct *task; | ||
1036 | }; | ||
1037 | |||
1038 | static void | ||
1039 | radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) | ||
1040 | { | ||
1041 | struct radeon_wait_cb *wait = | ||
1042 | container_of(cb, struct radeon_wait_cb, base); | ||
1043 | |||
1044 | wake_up_process(wait->task); | ||
1045 | } | ||
1046 | |||
1033 | static signed long radeon_fence_default_wait(struct fence *f, bool intr, | 1047 | static signed long radeon_fence_default_wait(struct fence *f, bool intr, |
1034 | signed long t) | 1048 | signed long t) |
1035 | { | 1049 | { |
1036 | struct radeon_fence *fence = to_radeon_fence(f); | 1050 | struct radeon_fence *fence = to_radeon_fence(f); |
1037 | struct radeon_device *rdev = fence->rdev; | 1051 | struct radeon_device *rdev = fence->rdev; |
1038 | bool signaled; | 1052 | struct radeon_wait_cb cb; |
1039 | 1053 | ||
1040 | fence_enable_sw_signaling(&fence->base); | 1054 | cb.task = current; |
1041 | 1055 | ||
1042 | /* | 1056 | if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) |
1043 | * This function has to return -EDEADLK, but cannot hold | 1057 | return t; |
1044 | * exclusive_lock during the wait because some callers | 1058 | |
1045 | * may already hold it. This means checking needs_reset without | 1059 | while (t > 0) { |
1046 | * lock, and not fiddling with any gpu internals. | 1060 | if (intr) |
1047 | * | 1061 | set_current_state(TASK_INTERRUPTIBLE); |
1048 | * The callback installed with fence_enable_sw_signaling will | 1062 | else |
1049 | * run before our wait_event_*timeout call, so we will see | 1063 | set_current_state(TASK_UNINTERRUPTIBLE); |
1050 | * both the signaled fence and the changes to needs_reset. | 1064 | |
1051 | */ | 1065 | /* |
1066 | * radeon_test_signaled must be called after | ||
1067 | * set_current_state to prevent a race with wake_up_process | ||
1068 | */ | ||
1069 | if (radeon_test_signaled(fence)) | ||
1070 | break; | ||
1071 | |||
1072 | if (rdev->needs_reset) { | ||
1073 | t = -EDEADLK; | ||
1074 | break; | ||
1075 | } | ||
1076 | |||
1077 | t = schedule_timeout(t); | ||
1078 | |||
1079 | if (t > 0 && intr && signal_pending(current)) | ||
1080 | t = -ERESTARTSYS; | ||
1081 | } | ||
1082 | |||
1083 | __set_current_state(TASK_RUNNING); | ||
1084 | fence_remove_callback(f, &cb.base); | ||
1052 | 1085 | ||
1053 | if (intr) | ||
1054 | t = wait_event_interruptible_timeout(rdev->fence_queue, | ||
1055 | ((signaled = radeon_test_signaled(fence)) || | ||
1056 | rdev->needs_reset), t); | ||
1057 | else | ||
1058 | t = wait_event_timeout(rdev->fence_queue, | ||
1059 | ((signaled = radeon_test_signaled(fence)) || | ||
1060 | rdev->needs_reset), t); | ||
1061 | |||
1062 | if (t > 0 && !signaled) | ||
1063 | return -EDEADLK; | ||
1064 | return t; | 1086 | return t; |
1065 | } | 1087 | } |
1066 | 1088 | ||
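The rewrite above replaces wait_event_*timeout() on a global fence queue with an explicit sleeper: a fence callback wakes the waiting task directly, and the loop sets the task state before re-checking the condition so a wakeup racing with the check cannot be lost. A condensed kernel-style sketch with assumed names; the driver-specific needs_reset / -EDEADLK handling is omitted:

    struct wait_cb {
            struct fence_cb base;
            struct task_struct *task;
    };

    static void wait_cb_func(struct fence *f, struct fence_cb *cb)
    {
            wake_up_process(container_of(cb, struct wait_cb, base)->task);
    }

    static signed long wait_fence(struct fence *f, bool intr, signed long t)
    {
            struct wait_cb cb = { .task = current };

            if (fence_add_callback(f, &cb.base, wait_cb_func))
                    return t;               /* already signaled, nothing to wait for */

            while (t > 0) {
                    set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
                    if (fence_is_signaled(f))
                            break;          /* test only after set_current_state()   */
                    t = schedule_timeout(t);
                    if (t > 0 && intr && signal_pending(current))
                            t = -ERESTARTSYS;
            }

            __set_current_state(TASK_RUNNING);
            fence_remove_callback(f, &cb.base);
            return t;
    }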
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 86e75798320f..b1d74bc375d8 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -7236,8 +7236,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) | |||
7236 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); | 7236 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); |
7237 | 7237 | ||
7238 | if (!vclk || !dclk) { | 7238 | if (!vclk || !dclk) { |
7239 | /* keep the Bypass mode, put PLL to sleep */ | 7239 | /* keep the Bypass mode */ |
7240 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); | ||
7241 | return 0; | 7240 | return 0; |
7242 | } | 7241 | } |
7243 | 7242 | ||
@@ -7253,8 +7252,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) | |||
7253 | /* set VCO_MODE to 1 */ | 7252 | /* set VCO_MODE to 1 */ |
7254 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); | 7253 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); |
7255 | 7254 | ||
7256 | /* toggle UPLL_SLEEP to 1 then back to 0 */ | 7255 | /* disable sleep mode */ |
7257 | WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); | ||
7258 | WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); | 7256 | WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); |
7259 | 7257 | ||
7260 | /* deassert UPLL_RESET */ | 7258 | /* deassert UPLL_RESET */ |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 6c6b655defcf..e13b9cbc304e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
725 | goto out_err1; | 725 | goto out_err1; |
726 | } | 726 | } |
727 | 727 | ||
728 | ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, | ||
729 | (dev_priv->vram_size >> PAGE_SHIFT)); | ||
730 | if (unlikely(ret != 0)) { | ||
731 | DRM_ERROR("Failed initializing memory manager for VRAM.\n"); | ||
732 | goto out_err2; | ||
733 | } | ||
734 | |||
735 | dev_priv->has_gmr = true; | ||
736 | if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || | ||
737 | refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, | ||
738 | VMW_PL_GMR) != 0) { | ||
739 | DRM_INFO("No GMR memory available. " | ||
740 | "Graphics memory resources are very limited.\n"); | ||
741 | dev_priv->has_gmr = false; | ||
742 | } | ||
743 | |||
744 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
745 | dev_priv->has_mob = true; | ||
746 | if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, | ||
747 | VMW_PL_MOB) != 0) { | ||
748 | DRM_INFO("No MOB memory available. " | ||
749 | "3D will be disabled.\n"); | ||
750 | dev_priv->has_mob = false; | ||
751 | } | ||
752 | } | ||
753 | |||
754 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, | 728 | dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, |
755 | dev_priv->mmio_size); | 729 | dev_priv->mmio_size); |
756 | 730 | ||
@@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
813 | goto out_no_fman; | 787 | goto out_no_fman; |
814 | } | 788 | } |
815 | 789 | ||
790 | |||
791 | ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, | ||
792 | (dev_priv->vram_size >> PAGE_SHIFT)); | ||
793 | if (unlikely(ret != 0)) { | ||
794 | DRM_ERROR("Failed initializing memory manager for VRAM.\n"); | ||
795 | goto out_no_vram; | ||
796 | } | ||
797 | |||
798 | dev_priv->has_gmr = true; | ||
799 | if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || | ||
800 | refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, | ||
801 | VMW_PL_GMR) != 0) { | ||
802 | DRM_INFO("No GMR memory available. " | ||
803 | "Graphics memory resources are very limited.\n"); | ||
804 | dev_priv->has_gmr = false; | ||
805 | } | ||
806 | |||
807 | if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { | ||
808 | dev_priv->has_mob = true; | ||
809 | if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, | ||
810 | VMW_PL_MOB) != 0) { | ||
811 | DRM_INFO("No MOB memory available. " | ||
812 | "3D will be disabled.\n"); | ||
813 | dev_priv->has_mob = false; | ||
814 | } | ||
815 | } | ||
816 | |||
816 | vmw_kms_save_vga(dev_priv); | 817 | vmw_kms_save_vga(dev_priv); |
817 | 818 | ||
818 | /* Start kms and overlay systems, needs fifo. */ | 819 | /* Start kms and overlay systems, needs fifo. */ |
@@ -838,6 +839,12 @@ out_no_fifo: | |||
838 | vmw_kms_close(dev_priv); | 839 | vmw_kms_close(dev_priv); |
839 | out_no_kms: | 840 | out_no_kms: |
840 | vmw_kms_restore_vga(dev_priv); | 841 | vmw_kms_restore_vga(dev_priv); |
842 | if (dev_priv->has_mob) | ||
843 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
844 | if (dev_priv->has_gmr) | ||
845 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
846 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
847 | out_no_vram: | ||
841 | vmw_fence_manager_takedown(dev_priv->fman); | 848 | vmw_fence_manager_takedown(dev_priv->fman); |
842 | out_no_fman: | 849 | out_no_fman: |
843 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | 850 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
@@ -853,12 +860,6 @@ out_err4: | |||
853 | iounmap(dev_priv->mmio_virt); | 860 | iounmap(dev_priv->mmio_virt); |
854 | out_err3: | 861 | out_err3: |
855 | arch_phys_wc_del(dev_priv->mmio_mtrr); | 862 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
856 | if (dev_priv->has_mob) | ||
857 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
858 | if (dev_priv->has_gmr) | ||
859 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
860 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
861 | out_err2: | ||
862 | (void)ttm_bo_device_release(&dev_priv->bdev); | 863 | (void)ttm_bo_device_release(&dev_priv->bdev); |
863 | out_err1: | 864 | out_err1: |
864 | vmw_ttm_global_release(dev_priv); | 865 | vmw_ttm_global_release(dev_priv); |
@@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
887 | } | 888 | } |
888 | vmw_kms_close(dev_priv); | 889 | vmw_kms_close(dev_priv); |
889 | vmw_overlay_close(dev_priv); | 890 | vmw_overlay_close(dev_priv); |
891 | |||
892 | if (dev_priv->has_mob) | ||
893 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
894 | if (dev_priv->has_gmr) | ||
895 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
896 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
897 | |||
890 | vmw_fence_manager_takedown(dev_priv->fman); | 898 | vmw_fence_manager_takedown(dev_priv->fman); |
891 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) | 899 | if (dev_priv->capabilities & SVGA_CAP_IRQMASK) |
892 | drm_irq_uninstall(dev_priv->dev); | 900 | drm_irq_uninstall(dev_priv->dev); |
@@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_device *dev) | |||
898 | ttm_object_device_release(&dev_priv->tdev); | 906 | ttm_object_device_release(&dev_priv->tdev); |
899 | iounmap(dev_priv->mmio_virt); | 907 | iounmap(dev_priv->mmio_virt); |
900 | arch_phys_wc_del(dev_priv->mmio_mtrr); | 908 | arch_phys_wc_del(dev_priv->mmio_mtrr); |
901 | if (dev_priv->has_mob) | ||
902 | (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); | ||
903 | if (dev_priv->has_gmr) | ||
904 | (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); | ||
905 | (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); | ||
906 | (void)ttm_bo_device_release(&dev_priv->bdev); | 909 | (void)ttm_bo_device_release(&dev_priv->bdev); |
907 | vmw_ttm_global_release(dev_priv); | 910 | vmw_ttm_global_release(dev_priv); |
908 | 911 | ||
@@ -1235,6 +1238,7 @@ static void vmw_remove(struct pci_dev *pdev) | |||
1235 | { | 1238 | { |
1236 | struct drm_device *dev = pci_get_drvdata(pdev); | 1239 | struct drm_device *dev = pci_get_drvdata(pdev); |
1237 | 1240 | ||
1241 | pci_disable_device(pdev); | ||
1238 | drm_put_dev(dev); | 1242 | drm_put_dev(dev); |
1239 | } | 1243 | } |
1240 | 1244 | ||
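The vmwgfx change above moves the TTM memory-manager setup after the fence manager and reshuffles the error labels to match. The constraint it preserves is the usual goto-unwind rule: tear down in exact reverse order of setup, and when an init step moves, its cleanup moves to the corresponding label. A generic sketch with hypothetical names, not the driver's real call sequence:

    struct dev;
    int init_fence_manager(struct dev *d);
    int init_vram_mm(struct dev *d);
    int start_kms(struct dev *d);
    void release_fence_manager(struct dev *d);
    void release_vram_mm(struct dev *d);

    static int driver_load(struct dev *d)
    {
            int ret;

            ret = init_fence_manager(d);
            if (ret)
                    goto out_no_fman;

            ret = init_vram_mm(d);          /* now set up after the fence manager... */
            if (ret)
                    goto out_no_vram;

            ret = start_kms(d);
            if (ret)
                    goto out_no_kms;

            return 0;

    out_no_kms:
            release_vram_mm(d);             /* ...so it is torn down before it       */
    out_no_vram:
            release_fence_manager(d);
    out_no_fman:
            return ret;
    }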
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 33176d05db35..654c8daeb5ab 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -890,7 +890,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, | |||
890 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); | 890 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
891 | if (unlikely(ret != 0)) { | 891 | if (unlikely(ret != 0)) { |
892 | DRM_ERROR("Could not find or use MOB buffer.\n"); | 892 | DRM_ERROR("Could not find or use MOB buffer.\n"); |
893 | return -EINVAL; | 893 | ret = -EINVAL; |
894 | goto out_no_reloc; | ||
894 | } | 895 | } |
895 | bo = &vmw_bo->base; | 896 | bo = &vmw_bo->base; |
896 | 897 | ||
@@ -914,7 +915,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, | |||
914 | 915 | ||
915 | out_no_reloc: | 916 | out_no_reloc: |
916 | vmw_dmabuf_unreference(&vmw_bo); | 917 | vmw_dmabuf_unreference(&vmw_bo); |
917 | vmw_bo_p = NULL; | 918 | *vmw_bo_p = NULL; |
918 | return ret; | 919 | return ret; |
919 | } | 920 | } |
920 | 921 | ||
@@ -951,7 +952,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
951 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); | 952 | ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); |
952 | if (unlikely(ret != 0)) { | 953 | if (unlikely(ret != 0)) { |
953 | DRM_ERROR("Could not find or use GMR region.\n"); | 954 | DRM_ERROR("Could not find or use GMR region.\n"); |
954 | return -EINVAL; | 955 | ret = -EINVAL; |
956 | goto out_no_reloc; | ||
955 | } | 957 | } |
956 | bo = &vmw_bo->base; | 958 | bo = &vmw_bo->base; |
957 | 959 | ||
@@ -974,7 +976,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, | |||
974 | 976 | ||
975 | out_no_reloc: | 977 | out_no_reloc: |
976 | vmw_dmabuf_unreference(&vmw_bo); | 978 | vmw_dmabuf_unreference(&vmw_bo); |
977 | vmw_bo_p = NULL; | 979 | *vmw_bo_p = NULL; |
978 | return ret; | 980 | return ret; |
979 | } | 981 | } |
980 | 982 | ||
@@ -2780,13 +2782,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
2780 | NULL, arg->command_size, arg->throttle_us, | 2782 | NULL, arg->command_size, arg->throttle_us, |
2781 | (void __user *)(unsigned long)arg->fence_rep, | 2783 | (void __user *)(unsigned long)arg->fence_rep, |
2782 | NULL); | 2784 | NULL); |
2783 | 2785 | ttm_read_unlock(&dev_priv->reservation_sem); | |
2784 | if (unlikely(ret != 0)) | 2786 | if (unlikely(ret != 0)) |
2785 | goto out_unlock; | 2787 | return ret; |
2786 | 2788 | ||
2787 | vmw_kms_cursor_post_execbuf(dev_priv); | 2789 | vmw_kms_cursor_post_execbuf(dev_priv); |
2788 | 2790 | ||
2789 | out_unlock: | 2791 | return 0; |
2790 | ttm_read_unlock(&dev_priv->reservation_sem); | ||
2791 | return ret; | ||
2792 | } | 2792 | } |
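Two fixes are folded into the execbuf hunks above: error paths now go through out_no_reloc so the looked-up buffer reference is dropped, and NULL is written through the out parameter (*vmw_bo_p) rather than onto the local copy of the pointer. The second point in isolation, with hypothetical names:

    #include <errno.h>
    #include <stddef.h>

    struct buf;

    static int lookup_buf(unsigned int handle, struct buf **out)
    {
            struct buf *b = NULL;          /* pretend the lookup failed              */

            (void)handle;
            if (!b) {
                    *out = NULL;           /* correct: the caller's pointer is cleared */
                    /* out = NULL;            wrong: only the local parameter changes  */
                    return -EINVAL;
            }
            *out = b;
            return 0;
    }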
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 8725b79e7847..07cda8cbbddb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
2033 | int i; | 2033 | int i; |
2034 | struct drm_mode_config *mode_config = &dev->mode_config; | 2034 | struct drm_mode_config *mode_config = &dev->mode_config; |
2035 | 2035 | ||
2036 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); | ||
2037 | if (unlikely(ret != 0)) | ||
2038 | return ret; | ||
2039 | |||
2040 | if (!arg->num_outputs) { | 2036 | if (!arg->num_outputs) { |
2041 | struct drm_vmw_rect def_rect = {0, 0, 800, 600}; | 2037 | struct drm_vmw_rect def_rect = {0, 0, 800, 600}; |
2042 | vmw_du_update_layout(dev_priv, 1, &def_rect); | 2038 | vmw_du_update_layout(dev_priv, 1, &def_rect); |
2043 | goto out_unlock; | 2039 | return 0; |
2044 | } | 2040 | } |
2045 | 2041 | ||
2046 | rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); | 2042 | rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); |
2047 | rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), | 2043 | rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), |
2048 | GFP_KERNEL); | 2044 | GFP_KERNEL); |
2049 | if (unlikely(!rects)) { | 2045 | if (unlikely(!rects)) |
2050 | ret = -ENOMEM; | 2046 | return -ENOMEM; |
2051 | goto out_unlock; | ||
2052 | } | ||
2053 | 2047 | ||
2054 | user_rects = (void __user *)(unsigned long)arg->rects; | 2048 | user_rects = (void __user *)(unsigned long)arg->rects; |
2055 | ret = copy_from_user(rects, user_rects, rects_size); | 2049 | ret = copy_from_user(rects, user_rects, rects_size); |
@@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
2074 | 2068 | ||
2075 | out_free: | 2069 | out_free: |
2076 | kfree(rects); | 2070 | kfree(rects); |
2077 | out_unlock: | ||
2078 | ttm_read_unlock(&dev_priv->reservation_sem); | ||
2079 | return ret; | 2071 | return ret; |
2080 | } | 2072 | } |
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 210cf4874cb7..edf274cabe81 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -679,9 +679,6 @@ static int i2c_device_remove(struct device *dev) | |||
679 | status = driver->remove(client); | 679 | status = driver->remove(client); |
680 | } | 680 | } |
681 | 681 | ||
682 | if (dev->of_node) | ||
683 | irq_dispose_mapping(client->irq); | ||
684 | |||
685 | dev_pm_domain_detach(&client->dev, true); | 682 | dev_pm_domain_detach(&client->dev, true); |
686 | return status; | 683 | return status; |
687 | } | 684 | } |
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c index 8ff612d160b0..563932500ff1 100644 --- a/drivers/input/keyboard/tc3589x-keypad.c +++ b/drivers/input/keyboard/tc3589x-keypad.c | |||
@@ -411,9 +411,9 @@ static int tc3589x_keypad_probe(struct platform_device *pdev) | |||
411 | 411 | ||
412 | input_set_drvdata(input, keypad); | 412 | input_set_drvdata(input, keypad); |
413 | 413 | ||
414 | error = request_threaded_irq(irq, NULL, | 414 | error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq, |
415 | tc3589x_keypad_irq, plat->irqtype, | 415 | plat->irqtype | IRQF_ONESHOT, |
416 | "tc3589x-keypad", keypad); | 416 | "tc3589x-keypad", keypad); |
417 | if (error < 0) { | 417 | if (error < 0) { |
418 | dev_err(&pdev->dev, | 418 | dev_err(&pdev->dev, |
419 | "Could not allocate irq %d,error %d\n", | 419 | "Could not allocate irq %d,error %d\n", |
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c index 59d4dcddf6de..98228773a111 100644 --- a/drivers/input/misc/mma8450.c +++ b/drivers/input/misc/mma8450.c | |||
@@ -187,6 +187,7 @@ static int mma8450_probe(struct i2c_client *c, | |||
187 | idev->private = m; | 187 | idev->private = m; |
188 | idev->input->name = MMA8450_DRV_NAME; | 188 | idev->input->name = MMA8450_DRV_NAME; |
189 | idev->input->id.bustype = BUS_I2C; | 189 | idev->input->id.bustype = BUS_I2C; |
190 | idev->input->dev.parent = &c->dev; | ||
190 | idev->poll = mma8450_poll; | 191 | idev->poll = mma8450_poll; |
191 | idev->poll_interval = POLL_INTERVAL; | 192 | idev->poll_interval = POLL_INTERVAL; |
192 | idev->poll_interval_max = POLL_INTERVAL_MAX; | 193 | idev->poll_interval_max = POLL_INTERVAL_MAX; |
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index d28726a0ef85..1bd15ebc01f2 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c | |||
@@ -2605,8 +2605,10 @@ int alps_detect(struct psmouse *psmouse, bool set_properties) | |||
2605 | return -ENOMEM; | 2605 | return -ENOMEM; |
2606 | 2606 | ||
2607 | error = alps_identify(psmouse, priv); | 2607 | error = alps_identify(psmouse, priv); |
2608 | if (error) | 2608 | if (error) { |
2609 | kfree(priv); | ||
2609 | return error; | 2610 | return error; |
2611 | } | ||
2610 | 2612 | ||
2611 | if (set_properties) { | 2613 | if (set_properties) { |
2612 | psmouse->vendor = "ALPS"; | 2614 | psmouse->vendor = "ALPS"; |
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c index 77e9d70a986b..1e2291c378fe 100644 --- a/drivers/input/mouse/cyapa_gen3.c +++ b/drivers/input/mouse/cyapa_gen3.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <linux/input/mt.h> | 20 | #include <linux/input/mt.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/unaligned/access_ok.h> | 23 | #include <asm/unaligned.h> |
24 | #include "cyapa.h" | 24 | #include "cyapa.h" |
25 | 25 | ||
26 | 26 | ||
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c index ddf5393a1180..5b611dd71e79 100644 --- a/drivers/input/mouse/cyapa_gen5.c +++ b/drivers/input/mouse/cyapa_gen5.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
18 | #include <linux/completion.h> | 18 | #include <linux/completion.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/unaligned/access_ok.h> | 20 | #include <asm/unaligned.h> |
21 | #include <linux/crc-itu-t.h> | 21 | #include <linux/crc-itu-t.h> |
22 | #include "cyapa.h" | 22 | #include "cyapa.h" |
23 | 23 | ||
@@ -1926,7 +1926,7 @@ static int cyapa_gen5_read_idac_data(struct cyapa *cyapa, | |||
1926 | electrodes_tx = cyapa->electrodes_x; | 1926 | electrodes_tx = cyapa->electrodes_x; |
1927 | max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) & | 1927 | max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) & |
1928 | ~7u) * electrodes_tx; | 1928 | ~7u) * electrodes_tx; |
1929 | } else if (idac_data_type == GEN5_RETRIEVE_SELF_CAP_PWC_DATA) { | 1929 | } else { |
1930 | offset = 2; | 1930 | offset = 2; |
1931 | max_element_cnt = cyapa->electrodes_x + | 1931 | max_element_cnt = cyapa->electrodes_x + |
1932 | cyapa->electrodes_y; | 1932 | cyapa->electrodes_y; |
diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c index 757f78a94aec..23d259416f2f 100644 --- a/drivers/input/mouse/focaltech.c +++ b/drivers/input/mouse/focaltech.c | |||
@@ -67,9 +67,6 @@ static void focaltech_reset(struct psmouse *psmouse) | |||
67 | 67 | ||
68 | #define FOC_MAX_FINGERS 5 | 68 | #define FOC_MAX_FINGERS 5 |
69 | 69 | ||
70 | #define FOC_MAX_X 2431 | ||
71 | #define FOC_MAX_Y 1663 | ||
72 | |||
73 | /* | 70 | /* |
74 | * Current state of a single finger on the touchpad. | 71 | * Current state of a single finger on the touchpad. |
75 | */ | 72 | */ |
@@ -129,9 +126,17 @@ static void focaltech_report_state(struct psmouse *psmouse) | |||
129 | input_mt_slot(dev, i); | 126 | input_mt_slot(dev, i); |
130 | input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); | 127 | input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); |
131 | if (active) { | 128 | if (active) { |
132 | input_report_abs(dev, ABS_MT_POSITION_X, finger->x); | 129 | unsigned int clamped_x, clamped_y; |
130 | /* | ||
131 | * The touchpad might report invalid data, so we clamp | ||
132 | * the resulting values so that we do not confuse | ||
133 | * userspace. | ||
134 | */ | ||
135 | clamped_x = clamp(finger->x, 0U, priv->x_max); | ||
136 | clamped_y = clamp(finger->y, 0U, priv->y_max); | ||
137 | input_report_abs(dev, ABS_MT_POSITION_X, clamped_x); | ||
133 | input_report_abs(dev, ABS_MT_POSITION_Y, | 138 | input_report_abs(dev, ABS_MT_POSITION_Y, |
134 | FOC_MAX_Y - finger->y); | 139 | priv->y_max - clamped_y); |
135 | } | 140 | } |
136 | } | 141 | } |
137 | input_mt_report_pointer_emulation(dev, true); | 142 | input_mt_report_pointer_emulation(dev, true); |
@@ -180,16 +185,6 @@ static void focaltech_process_abs_packet(struct psmouse *psmouse, | |||
180 | 185 | ||
181 | state->pressed = (packet[0] >> 4) & 1; | 186 | state->pressed = (packet[0] >> 4) & 1; |
182 | 187 | ||
183 | /* | ||
184 | * packet[5] contains some kind of tool size in the most | ||
185 | * significant nibble. 0xff is a special value (latching) that | ||
186 | * signals a large contact area. | ||
187 | */ | ||
188 | if (packet[5] == 0xff) { | ||
189 | state->fingers[finger].valid = false; | ||
190 | return; | ||
191 | } | ||
192 | |||
193 | state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2]; | 188 | state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2]; |
194 | state->fingers[finger].y = (packet[3] << 8) | packet[4]; | 189 | state->fingers[finger].y = (packet[3] << 8) | packet[4]; |
195 | state->fingers[finger].valid = true; | 190 | state->fingers[finger].valid = true; |
@@ -381,6 +376,23 @@ static int focaltech_read_size(struct psmouse *psmouse) | |||
381 | 376 | ||
382 | return 0; | 377 | return 0; |
383 | } | 378 | } |
379 | |||
380 | void focaltech_set_resolution(struct psmouse *psmouse, unsigned int resolution) | ||
381 | { | ||
382 | /* not supported yet */ | ||
383 | } | ||
384 | |||
385 | static void focaltech_set_rate(struct psmouse *psmouse, unsigned int rate) | ||
386 | { | ||
387 | /* not supported yet */ | ||
388 | } | ||
389 | |||
390 | static void focaltech_set_scale(struct psmouse *psmouse, | ||
391 | enum psmouse_scale scale) | ||
392 | { | ||
393 | /* not supported yet */ | ||
394 | } | ||
395 | |||
384 | int focaltech_init(struct psmouse *psmouse) | 396 | int focaltech_init(struct psmouse *psmouse) |
385 | { | 397 | { |
386 | struct focaltech_data *priv; | 398 | struct focaltech_data *priv; |
@@ -415,6 +427,14 @@ int focaltech_init(struct psmouse *psmouse) | |||
415 | psmouse->cleanup = focaltech_reset; | 427 | psmouse->cleanup = focaltech_reset; |
416 | /* resync is not supported yet */ | 428 | /* resync is not supported yet */ |
417 | psmouse->resync_time = 0; | 429 | psmouse->resync_time = 0; |
430 | /* | ||
431 | * rate/resolution/scale changes are not supported yet, and | ||
432 | * the generic implementations of these functions seem to | ||
433 | * confuse some touchpads | ||
434 | */ | ||
435 | psmouse->set_resolution = focaltech_set_resolution; | ||
436 | psmouse->set_rate = focaltech_set_rate; | ||
437 | psmouse->set_scale = focaltech_set_scale; | ||
418 | 438 | ||
419 | return 0; | 439 | return 0; |
420 | 440 | ||
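The focaltech hunks above stop trusting raw hardware coordinates: the fixed FOC_MAX_X/FOC_MAX_Y constants give way to per-device maxima, values are clamped before being reported, and the latching 0xff tool-size special case is dropped. A minimal sketch of the reporting step, assuming the kernel's clamp() helper and the same inverted Y axis:

    #include <linux/input.h>
    #include <linux/kernel.h>                        /* clamp()                    */

    static void report_point(struct input_dev *dev, unsigned int x, unsigned int y,
                             unsigned int x_max, unsigned int y_max)
    {
            unsigned int cx = clamp(x, 0U, x_max);   /* ignore out-of-range data   */
            unsigned int cy = clamp(y, 0U, y_max);

            input_report_abs(dev, ABS_MT_POSITION_X, cx);
            input_report_abs(dev, ABS_MT_POSITION_Y, y_max - cy);
    }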
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 4ccd01d7a48d..8bc61237bc1b 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c | |||
@@ -454,6 +454,17 @@ static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate) | |||
454 | } | 454 | } |
455 | 455 | ||
456 | /* | 456 | /* |
457 | * Here we set the mouse scaling. | ||
458 | */ | ||
459 | |||
460 | static void psmouse_set_scale(struct psmouse *psmouse, enum psmouse_scale scale) | ||
461 | { | ||
462 | ps2_command(&psmouse->ps2dev, NULL, | ||
463 | scale == PSMOUSE_SCALE21 ? PSMOUSE_CMD_SETSCALE21 : | ||
464 | PSMOUSE_CMD_SETSCALE11); | ||
465 | } | ||
466 | |||
467 | /* | ||
457 | * psmouse_poll() - default poll handler. Everyone except for ALPS uses it. | 468 | * psmouse_poll() - default poll handler. Everyone except for ALPS uses it. |
458 | */ | 469 | */ |
459 | 470 | ||
@@ -689,6 +700,7 @@ static void psmouse_apply_defaults(struct psmouse *psmouse) | |||
689 | 700 | ||
690 | psmouse->set_rate = psmouse_set_rate; | 701 | psmouse->set_rate = psmouse_set_rate; |
691 | psmouse->set_resolution = psmouse_set_resolution; | 702 | psmouse->set_resolution = psmouse_set_resolution; |
703 | psmouse->set_scale = psmouse_set_scale; | ||
692 | psmouse->poll = psmouse_poll; | 704 | psmouse->poll = psmouse_poll; |
693 | psmouse->protocol_handler = psmouse_process_byte; | 705 | psmouse->protocol_handler = psmouse_process_byte; |
694 | psmouse->pktsize = 3; | 706 | psmouse->pktsize = 3; |
@@ -1160,7 +1172,7 @@ static void psmouse_initialize(struct psmouse *psmouse) | |||
1160 | if (psmouse_max_proto != PSMOUSE_PS2) { | 1172 | if (psmouse_max_proto != PSMOUSE_PS2) { |
1161 | psmouse->set_rate(psmouse, psmouse->rate); | 1173 | psmouse->set_rate(psmouse, psmouse->rate); |
1162 | psmouse->set_resolution(psmouse, psmouse->resolution); | 1174 | psmouse->set_resolution(psmouse, psmouse->resolution); |
1163 | ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); | 1175 | psmouse->set_scale(psmouse, PSMOUSE_SCALE11); |
1164 | } | 1176 | } |
1165 | } | 1177 | } |
1166 | 1178 | ||
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h index c2ff137ecbdb..d02e1bdc9ae4 100644 --- a/drivers/input/mouse/psmouse.h +++ b/drivers/input/mouse/psmouse.h | |||
@@ -36,6 +36,11 @@ typedef enum { | |||
36 | PSMOUSE_FULL_PACKET | 36 | PSMOUSE_FULL_PACKET |
37 | } psmouse_ret_t; | 37 | } psmouse_ret_t; |
38 | 38 | ||
39 | enum psmouse_scale { | ||
40 | PSMOUSE_SCALE11, | ||
41 | PSMOUSE_SCALE21 | ||
42 | }; | ||
43 | |||
39 | struct psmouse { | 44 | struct psmouse { |
40 | void *private; | 45 | void *private; |
41 | struct input_dev *dev; | 46 | struct input_dev *dev; |
@@ -67,6 +72,7 @@ struct psmouse { | |||
67 | psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse); | 72 | psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse); |
68 | void (*set_rate)(struct psmouse *psmouse, unsigned int rate); | 73 | void (*set_rate)(struct psmouse *psmouse, unsigned int rate); |
69 | void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution); | 74 | void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution); |
75 | void (*set_scale)(struct psmouse *psmouse, enum psmouse_scale scale); | ||
70 | 76 | ||
71 | int (*reconnect)(struct psmouse *psmouse); | 77 | int (*reconnect)(struct psmouse *psmouse); |
72 | void (*disconnect)(struct psmouse *psmouse); | 78 | void (*disconnect)(struct psmouse *psmouse); |
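The psmouse changes above turn the hard-coded PSMOUSE_CMD_SETSCALE11 write into an overridable per-protocol callback, which is what lets focaltech install do-nothing stubs for scale, rate and resolution. A reduced sketch of the pattern with hypothetical types, not the driver's actual structures:

    enum scale { SCALE11, SCALE21 };

    struct mouse {
            void (*set_scale)(struct mouse *m, enum scale s);
    };

    static void generic_set_scale(struct mouse *m, enum scale s)
    {
            /* would issue the PS/2 SETSCALE11/SETSCALE21 command here            */
    }

    static void focaltech_stub_set_scale(struct mouse *m, enum scale s)
    {
            /* intentionally empty: the generic command confuses this touchpad    */
    }

    static void apply_defaults(struct mouse *m)
    {
            m->set_scale = generic_set_scale;        /* every protocol starts here */
    }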
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 58917525126e..6261fd6d7c3c 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig | |||
@@ -943,6 +943,7 @@ config TOUCHSCREEN_SUN4I | |||
943 | tristate "Allwinner sun4i resistive touchscreen controller support" | 943 | tristate "Allwinner sun4i resistive touchscreen controller support" |
944 | depends on ARCH_SUNXI || COMPILE_TEST | 944 | depends on ARCH_SUNXI || COMPILE_TEST |
945 | depends on HWMON | 945 | depends on HWMON |
946 | depends on THERMAL || !THERMAL_OF | ||
946 | help | 947 | help |
947 | This selects support for the resistive touchscreen controller | 948 | This selects support for the resistive touchscreen controller |
948 | found on Allwinner sunxi SoCs. | 949 | found on Allwinner sunxi SoCs. |
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index baa0d9786f50..1ae4e547b419 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -23,6 +23,7 @@ config IOMMU_IO_PGTABLE | |||
23 | config IOMMU_IO_PGTABLE_LPAE | 23 | config IOMMU_IO_PGTABLE_LPAE |
24 | bool "ARMv7/v8 Long Descriptor Format" | 24 | bool "ARMv7/v8 Long Descriptor Format" |
25 | select IOMMU_IO_PGTABLE | 25 | select IOMMU_IO_PGTABLE |
26 | depends on ARM || ARM64 || COMPILE_TEST | ||
26 | help | 27 | help |
27 | Enable support for the ARM long descriptor pagetable format. | 28 | Enable support for the ARM long descriptor pagetable format. |
28 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page | 29 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page |
@@ -63,6 +64,7 @@ config MSM_IOMMU | |||
63 | bool "MSM IOMMU Support" | 64 | bool "MSM IOMMU Support" |
64 | depends on ARM | 65 | depends on ARM |
65 | depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST | 66 | depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST |
67 | depends on BROKEN | ||
66 | select IOMMU_API | 68 | select IOMMU_API |
67 | help | 69 | help |
68 | Support for the IOMMUs found on certain Qualcomm SOCs. | 70 | Support for the IOMMUs found on certain Qualcomm SOCs. |
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 7ce52737c7a1..dc14fec4ede1 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
@@ -1186,8 +1186,15 @@ static const struct iommu_ops exynos_iommu_ops = { | |||
1186 | 1186 | ||
1187 | static int __init exynos_iommu_init(void) | 1187 | static int __init exynos_iommu_init(void) |
1188 | { | 1188 | { |
1189 | struct device_node *np; | ||
1189 | int ret; | 1190 | int ret; |
1190 | 1191 | ||
1192 | np = of_find_matching_node(NULL, sysmmu_of_match); | ||
1193 | if (!np) | ||
1194 | return 0; | ||
1195 | |||
1196 | of_node_put(np); | ||
1197 | |||
1191 | lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", | 1198 | lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", |
1192 | LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); | 1199 | LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); |
1193 | if (!lv2table_kmem_cache) { | 1200 | if (!lv2table_kmem_cache) { |
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 5a500edf00cc..b610a8dee238 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c | |||
@@ -56,7 +56,8 @@ | |||
56 | ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ | 56 | ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ |
57 | * (d)->bits_per_level) + (d)->pg_shift) | 57 | * (d)->bits_per_level) + (d)->pg_shift) |
58 | 58 | ||
59 | #define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) | 59 | #define ARM_LPAE_PAGES_PER_PGD(d) \ |
60 | DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift) | ||
60 | 61 | ||
61 | /* | 62 | /* |
62 | * Calculate the index at level l used to map virtual address a using the | 63 | * Calculate the index at level l used to map virtual address a using the |
@@ -66,7 +67,7 @@ | |||
66 | ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) | 67 | ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) |
67 | 68 | ||
68 | #define ARM_LPAE_LVL_IDX(a,l,d) \ | 69 | #define ARM_LPAE_LVL_IDX(a,l,d) \ |
69 | (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ | 70 | (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ |
70 | ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) | 71 | ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) |
71 | 72 | ||
72 | /* Calculate the block/page mapping size at level l for pagetable in d. */ | 73 | /* Calculate the block/page mapping size at level l for pagetable in d. */ |
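Both io-pgtable-arm.c fixes above are about 32-bit arithmetic: the iova is widened to u64 before the level shift (shifting a 32-bit value by 32 or more is undefined), and the pages-per-PGD count is rounded up so a PGD smaller than one page no longer computes to zero. The rounding half in isolation, with assumed example values:

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            uint64_t pgd_size = 64;                  /* smaller than one 4K page   */
            unsigned int pg_shift = 12;

            /* plain shift: 64 >> 12 == 0 pages, which ilog2() cannot handle       */
            printf("%llu\n", (unsigned long long)(pgd_size >> pg_shift));
            /* rounded up: at least one page is always accounted for               */
            printf("%llu\n", (unsigned long long)DIV_ROUND_UP(pgd_size, 1ULL << pg_shift));
            return 0;
    }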
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index f59f857b702e..a4ba851825c2 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c | |||
@@ -1376,6 +1376,13 @@ static int __init omap_iommu_init(void) | |||
1376 | struct kmem_cache *p; | 1376 | struct kmem_cache *p; |
1377 | const unsigned long flags = SLAB_HWCACHE_ALIGN; | 1377 | const unsigned long flags = SLAB_HWCACHE_ALIGN; |
1378 | size_t align = 1 << 10; /* L2 pagetable alignement */ | 1378 | size_t align = 1 << 10; /* L2 pagetable alignement */ |
1379 | struct device_node *np; | ||
1380 | |||
1381 | np = of_find_matching_node(NULL, omap_iommu_of_match); | ||
1382 | if (!np) | ||
1383 | return 0; | ||
1384 | |||
1385 | of_node_put(np); | ||
1379 | 1386 | ||
1380 | p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, | 1387 | p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, |
1381 | iopte_cachep_ctor); | 1388 | iopte_cachep_ctor); |
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 6a8b1ec4a48a..9f74fddcd304 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c | |||
@@ -1015,8 +1015,15 @@ static struct platform_driver rk_iommu_driver = { | |||
1015 | 1015 | ||
1016 | static int __init rk_iommu_init(void) | 1016 | static int __init rk_iommu_init(void) |
1017 | { | 1017 | { |
1018 | struct device_node *np; | ||
1018 | int ret; | 1019 | int ret; |
1019 | 1020 | ||
1021 | np = of_find_matching_node(NULL, rk_iommu_dt_ids); | ||
1022 | if (!np) | ||
1023 | return 0; | ||
1024 | |||
1025 | of_node_put(np); | ||
1026 | |||
1020 | ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); | 1027 | ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); |
1021 | if (ret) | 1028 | if (ret) |
1022 | return ret; | 1029 | return ret; |
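The exynos, omap and rockchip hunks above apply the same fix: the IOMMU init code used to run unconditionally on multi-platform kernels, so it now probes the device tree first and bails out when no matching node exists. In kernel-style form, with the driver specifics assumed:

    static int __init some_iommu_init(void)          /* names assumed throughout   */
    {
            struct device_node *np;

            np = of_find_matching_node(NULL, some_iommu_of_match);
            if (!np)
                    return 0;        /* hardware not present on this board: do nothing  */
            of_node_put(np);         /* the node was only needed for the existence check */

            return some_iommu_setup();    /* caches, bus_set_iommu(), registration      */
    }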
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 463c235acbdc..4387dae14e45 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c | |||
@@ -69,6 +69,7 @@ static void __iomem *per_cpu_int_base; | |||
69 | static void __iomem *main_int_base; | 69 | static void __iomem *main_int_base; |
70 | static struct irq_domain *armada_370_xp_mpic_domain; | 70 | static struct irq_domain *armada_370_xp_mpic_domain; |
71 | static u32 doorbell_mask_reg; | 71 | static u32 doorbell_mask_reg; |
72 | static int parent_irq; | ||
72 | #ifdef CONFIG_PCI_MSI | 73 | #ifdef CONFIG_PCI_MSI |
73 | static struct irq_domain *armada_370_xp_msi_domain; | 74 | static struct irq_domain *armada_370_xp_msi_domain; |
74 | static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); | 75 | static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); |
@@ -356,6 +357,7 @@ static int armada_xp_mpic_secondary_init(struct notifier_block *nfb, | |||
356 | { | 357 | { |
357 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | 358 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) |
358 | armada_xp_mpic_smp_cpu_init(); | 359 | armada_xp_mpic_smp_cpu_init(); |
360 | |||
359 | return NOTIFY_OK; | 361 | return NOTIFY_OK; |
360 | } | 362 | } |
361 | 363 | ||
@@ -364,6 +366,20 @@ static struct notifier_block armada_370_xp_mpic_cpu_notifier = { | |||
364 | .priority = 100, | 366 | .priority = 100, |
365 | }; | 367 | }; |
366 | 368 | ||
369 | static int mpic_cascaded_secondary_init(struct notifier_block *nfb, | ||
370 | unsigned long action, void *hcpu) | ||
371 | { | ||
372 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | ||
373 | enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); | ||
374 | |||
375 | return NOTIFY_OK; | ||
376 | } | ||
377 | |||
378 | static struct notifier_block mpic_cascaded_cpu_notifier = { | ||
379 | .notifier_call = mpic_cascaded_secondary_init, | ||
380 | .priority = 100, | ||
381 | }; | ||
382 | |||
367 | #endif /* CONFIG_SMP */ | 383 | #endif /* CONFIG_SMP */ |
368 | 384 | ||
369 | static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { | 385 | static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { |
@@ -539,7 +555,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, | |||
539 | struct device_node *parent) | 555 | struct device_node *parent) |
540 | { | 556 | { |
541 | struct resource main_int_res, per_cpu_int_res; | 557 | struct resource main_int_res, per_cpu_int_res; |
542 | int parent_irq, nr_irqs, i; | 558 | int nr_irqs, i; |
543 | u32 control; | 559 | u32 control; |
544 | 560 | ||
545 | BUG_ON(of_address_to_resource(node, 0, &main_int_res)); | 561 | BUG_ON(of_address_to_resource(node, 0, &main_int_res)); |
@@ -587,6 +603,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, | |||
587 | register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); | 603 | register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); |
588 | #endif | 604 | #endif |
589 | } else { | 605 | } else { |
606 | #ifdef CONFIG_SMP | ||
607 | register_cpu_notifier(&mpic_cascaded_cpu_notifier); | ||
608 | #endif | ||
590 | irq_set_chained_handler(parent_irq, | 609 | irq_set_chained_handler(parent_irq, |
591 | armada_370_xp_mpic_handle_cascade_irq); | 610 | armada_370_xp_mpic_handle_cascade_irq); |
592 | } | 611 | } |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index d8996bdf0f61..596b0a9eee99 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -416,13 +416,14 @@ static void its_send_single_command(struct its_node *its, | |||
416 | { | 416 | { |
417 | struct its_cmd_block *cmd, *sync_cmd, *next_cmd; | 417 | struct its_cmd_block *cmd, *sync_cmd, *next_cmd; |
418 | struct its_collection *sync_col; | 418 | struct its_collection *sync_col; |
419 | unsigned long flags; | ||
419 | 420 | ||
420 | raw_spin_lock(&its->lock); | 421 | raw_spin_lock_irqsave(&its->lock, flags); |
421 | 422 | ||
422 | cmd = its_allocate_entry(its); | 423 | cmd = its_allocate_entry(its); |
423 | if (!cmd) { /* We're soooooo screewed... */ | 424 | if (!cmd) { /* We're soooooo screewed... */ |
424 | pr_err_ratelimited("ITS can't allocate, dropping command\n"); | 425 | pr_err_ratelimited("ITS can't allocate, dropping command\n"); |
425 | raw_spin_unlock(&its->lock); | 426 | raw_spin_unlock_irqrestore(&its->lock, flags); |
426 | return; | 427 | return; |
427 | } | 428 | } |
428 | sync_col = builder(cmd, desc); | 429 | sync_col = builder(cmd, desc); |
@@ -442,7 +443,7 @@ static void its_send_single_command(struct its_node *its, | |||
442 | 443 | ||
443 | post: | 444 | post: |
444 | next_cmd = its_post_commands(its); | 445 | next_cmd = its_post_commands(its); |
445 | raw_spin_unlock(&its->lock); | 446 | raw_spin_unlock_irqrestore(&its->lock, flags); |
446 | 447 | ||
447 | its_wait_for_range_completion(its, cmd, next_cmd); | 448 | its_wait_for_range_completion(its, cmd, next_cmd); |
448 | } | 449 | } |
@@ -799,21 +800,43 @@ static int its_alloc_tables(struct its_node *its) | |||
799 | { | 800 | { |
800 | int err; | 801 | int err; |
801 | int i; | 802 | int i; |
802 | int psz = PAGE_SIZE; | 803 | int psz = SZ_64K; |
803 | u64 shr = GITS_BASER_InnerShareable; | 804 | u64 shr = GITS_BASER_InnerShareable; |
804 | 805 | ||
805 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | 806 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { |
806 | u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); | 807 | u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); |
807 | u64 type = GITS_BASER_TYPE(val); | 808 | u64 type = GITS_BASER_TYPE(val); |
808 | u64 entry_size = GITS_BASER_ENTRY_SIZE(val); | 809 | u64 entry_size = GITS_BASER_ENTRY_SIZE(val); |
810 | int order = get_order(psz); | ||
811 | int alloc_size; | ||
809 | u64 tmp; | 812 | u64 tmp; |
810 | void *base; | 813 | void *base; |
811 | 814 | ||
812 | if (type == GITS_BASER_TYPE_NONE) | 815 | if (type == GITS_BASER_TYPE_NONE) |
813 | continue; | 816 | continue; |
814 | 817 | ||
815 | /* We're lazy and only allocate a single page for now */ | 818 | /* |
816 | base = (void *)get_zeroed_page(GFP_KERNEL); | 819 | * Allocate as many entries as required to fit the |
820 | * range of device IDs that the ITS can grok... The ID | ||
821 | * space being incredibly sparse, this results in a | ||
822 | * massive waste of memory. | ||
823 | * | ||
824 | * For other tables, only allocate a single page. | ||
825 | */ | ||
826 | if (type == GITS_BASER_TYPE_DEVICE) { | ||
827 | u64 typer = readq_relaxed(its->base + GITS_TYPER); | ||
828 | u32 ids = GITS_TYPER_DEVBITS(typer); | ||
829 | |||
830 | order = get_order((1UL << ids) * entry_size); | ||
831 | if (order >= MAX_ORDER) { | ||
832 | order = MAX_ORDER - 1; | ||
833 | pr_warn("%s: Device Table too large, reduce its page order to %u\n", | ||
834 | its->msi_chip.of_node->full_name, order); | ||
835 | } | ||
836 | } | ||
837 | |||
838 | alloc_size = (1 << order) * PAGE_SIZE; | ||
839 | base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | ||
817 | if (!base) { | 840 | if (!base) { |
818 | err = -ENOMEM; | 841 | err = -ENOMEM; |
819 | goto out_free; | 842 | goto out_free; |
@@ -841,7 +864,7 @@ retry_baser: | |||
841 | break; | 864 | break; |
842 | } | 865 | } |
843 | 866 | ||
844 | val |= (PAGE_SIZE / psz) - 1; | 867 | val |= (alloc_size / psz) - 1; |
845 | 868 | ||
846 | writeq_relaxed(val, its->base + GITS_BASER + i * 8); | 869 | writeq_relaxed(val, its->base + GITS_BASER + i * 8); |
847 | tmp = readq_relaxed(its->base + GITS_BASER + i * 8); | 870 | tmp = readq_relaxed(its->base + GITS_BASER + i * 8); |
@@ -882,7 +905,7 @@ retry_baser: | |||
882 | } | 905 | } |
883 | 906 | ||
884 | pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", | 907 | pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", |
885 | (int)(PAGE_SIZE / entry_size), | 908 | (int)(alloc_size / entry_size), |
886 | its_base_type_string[type], | 909 | its_base_type_string[type], |
887 | (unsigned long)virt_to_phys(base), | 910 | (unsigned long)virt_to_phys(base), |
888 | psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); | 911 | psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); |
@@ -1020,8 +1043,9 @@ static void its_cpu_init_collection(void) | |||
1020 | static struct its_device *its_find_device(struct its_node *its, u32 dev_id) | 1043 | static struct its_device *its_find_device(struct its_node *its, u32 dev_id) |
1021 | { | 1044 | { |
1022 | struct its_device *its_dev = NULL, *tmp; | 1045 | struct its_device *its_dev = NULL, *tmp; |
1046 | unsigned long flags; | ||
1023 | 1047 | ||
1024 | raw_spin_lock(&its->lock); | 1048 | raw_spin_lock_irqsave(&its->lock, flags); |
1025 | 1049 | ||
1026 | list_for_each_entry(tmp, &its->its_device_list, entry) { | 1050 | list_for_each_entry(tmp, &its->its_device_list, entry) { |
1027 | if (tmp->device_id == dev_id) { | 1051 | if (tmp->device_id == dev_id) { |
@@ -1030,7 +1054,7 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id) | |||
1030 | } | 1054 | } |
1031 | } | 1055 | } |
1032 | 1056 | ||
1033 | raw_spin_unlock(&its->lock); | 1057 | raw_spin_unlock_irqrestore(&its->lock, flags); |
1034 | 1058 | ||
1035 | return its_dev; | 1059 | return its_dev; |
1036 | } | 1060 | } |
@@ -1040,6 +1064,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
1040 | { | 1064 | { |
1041 | struct its_device *dev; | 1065 | struct its_device *dev; |
1042 | unsigned long *lpi_map; | 1066 | unsigned long *lpi_map; |
1067 | unsigned long flags; | ||
1043 | void *itt; | 1068 | void *itt; |
1044 | int lpi_base; | 1069 | int lpi_base; |
1045 | int nr_lpis; | 1070 | int nr_lpis; |
@@ -1056,7 +1081,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
1056 | nr_ites = max(2UL, roundup_pow_of_two(nvecs)); | 1081 | nr_ites = max(2UL, roundup_pow_of_two(nvecs)); |
1057 | sz = nr_ites * its->ite_size; | 1082 | sz = nr_ites * its->ite_size; |
1058 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; | 1083 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; |
1059 | itt = kmalloc(sz, GFP_KERNEL); | 1084 | itt = kzalloc(sz, GFP_KERNEL); |
1060 | lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); | 1085 | lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); |
1061 | 1086 | ||
1062 | if (!dev || !itt || !lpi_map) { | 1087 | if (!dev || !itt || !lpi_map) { |
@@ -1075,9 +1100,9 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
1075 | dev->device_id = dev_id; | 1100 | dev->device_id = dev_id; |
1076 | INIT_LIST_HEAD(&dev->entry); | 1101 | INIT_LIST_HEAD(&dev->entry); |
1077 | 1102 | ||
1078 | raw_spin_lock(&its->lock); | 1103 | raw_spin_lock_irqsave(&its->lock, flags); |
1079 | list_add(&dev->entry, &its->its_device_list); | 1104 | list_add(&dev->entry, &its->its_device_list); |
1080 | raw_spin_unlock(&its->lock); | 1105 | raw_spin_unlock_irqrestore(&its->lock, flags); |
1081 | 1106 | ||
1082 | /* Bind the device to the first possible CPU */ | 1107 | /* Bind the device to the first possible CPU */ |
1083 | cpu = cpumask_first(cpu_online_mask); | 1108 | cpu = cpumask_first(cpu_online_mask); |
@@ -1091,9 +1116,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
1091 | 1116 | ||
1092 | static void its_free_device(struct its_device *its_dev) | 1117 | static void its_free_device(struct its_device *its_dev) |
1093 | { | 1118 | { |
1094 | raw_spin_lock(&its_dev->its->lock); | 1119 | unsigned long flags; |
1120 | |||
1121 | raw_spin_lock_irqsave(&its_dev->its->lock, flags); | ||
1095 | list_del(&its_dev->entry); | 1122 | list_del(&its_dev->entry); |
1096 | raw_spin_unlock(&its_dev->its->lock); | 1123 | raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); |
1097 | kfree(its_dev->itt); | 1124 | kfree(its_dev->itt); |
1098 | kfree(its_dev); | 1125 | kfree(its_dev); |
1099 | } | 1126 | } |
@@ -1112,31 +1139,69 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) | |||
1112 | return 0; | 1139 | return 0; |
1113 | } | 1140 | } |
1114 | 1141 | ||
1142 | struct its_pci_alias { | ||
1143 | struct pci_dev *pdev; | ||
1144 | u32 dev_id; | ||
1145 | u32 count; | ||
1146 | }; | ||
1147 | |||
1148 | static int its_pci_msi_vec_count(struct pci_dev *pdev) | ||
1149 | { | ||
1150 | int msi, msix; | ||
1151 | |||
1152 | msi = max(pci_msi_vec_count(pdev), 0); | ||
1153 | msix = max(pci_msix_vec_count(pdev), 0); | ||
1154 | |||
1155 | return max(msi, msix); | ||
1156 | } | ||
1157 | |||
1158 | static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) | ||
1159 | { | ||
1160 | struct its_pci_alias *dev_alias = data; | ||
1161 | |||
1162 | dev_alias->dev_id = alias; | ||
1163 | if (pdev != dev_alias->pdev) | ||
1164 | dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); | ||
1165 | |||
1166 | return 0; | ||
1167 | } | ||
1168 | |||
1115 | static int its_msi_prepare(struct irq_domain *domain, struct device *dev, | 1169 | static int its_msi_prepare(struct irq_domain *domain, struct device *dev, |
1116 | int nvec, msi_alloc_info_t *info) | 1170 | int nvec, msi_alloc_info_t *info) |
1117 | { | 1171 | { |
1118 | struct pci_dev *pdev; | 1172 | struct pci_dev *pdev; |
1119 | struct its_node *its; | 1173 | struct its_node *its; |
1120 | u32 dev_id; | ||
1121 | struct its_device *its_dev; | 1174 | struct its_device *its_dev; |
1175 | struct its_pci_alias dev_alias; | ||
1122 | 1176 | ||
1123 | if (!dev_is_pci(dev)) | 1177 | if (!dev_is_pci(dev)) |
1124 | return -EINVAL; | 1178 | return -EINVAL; |
1125 | 1179 | ||
1126 | pdev = to_pci_dev(dev); | 1180 | pdev = to_pci_dev(dev); |
1127 | dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); | 1181 | dev_alias.pdev = pdev; |
1182 | dev_alias.count = nvec; | ||
1183 | |||
1184 | pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias); | ||
1128 | its = domain->parent->host_data; | 1185 | its = domain->parent->host_data; |
1129 | 1186 | ||
1130 | its_dev = its_find_device(its, dev_id); | 1187 | its_dev = its_find_device(its, dev_alias.dev_id); |
1131 | if (WARN_ON(its_dev)) | 1188 | if (its_dev) { |
1132 | return -EINVAL; | 1189 | /* |
1190 | * We already have seen this ID, probably through | ||
1191 | * another alias (PCI bridge of some sort). No need to | ||
1192 | * create the device. | ||
1193 | */ | ||
1194 | dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id); | ||
1195 | goto out; | ||
1196 | } | ||
1133 | 1197 | ||
1134 | its_dev = its_create_device(its, dev_id, nvec); | 1198 | its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count); |
1135 | if (!its_dev) | 1199 | if (!its_dev) |
1136 | return -ENOMEM; | 1200 | return -ENOMEM; |
1137 | 1201 | ||
1138 | dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); | 1202 | dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", |
1139 | 1203 | dev_alias.count, ilog2(dev_alias.count)); | |
1204 | out: | ||
1140 | info->scratchpad[0].ptr = its_dev; | 1205 | info->scratchpad[0].ptr = its_dev; |
1141 | info->scratchpad[1].ptr = dev; | 1206 | info->scratchpad[1].ptr = dev; |
1142 | return 0; | 1207 | return 0; |
@@ -1255,6 +1320,34 @@ static const struct irq_domain_ops its_domain_ops = { | |||
1255 | .deactivate = its_irq_domain_deactivate, | 1320 | .deactivate = its_irq_domain_deactivate, |
1256 | }; | 1321 | }; |
1257 | 1322 | ||
1323 | static int its_force_quiescent(void __iomem *base) | ||
1324 | { | ||
1325 | u32 count = 1000000; /* 1s */ | ||
1326 | u32 val; | ||
1327 | |||
1328 | val = readl_relaxed(base + GITS_CTLR); | ||
1329 | if (val & GITS_CTLR_QUIESCENT) | ||
1330 | return 0; | ||
1331 | |||
1332 | /* Disable the generation of all interrupts to this ITS */ | ||
1333 | val &= ~GITS_CTLR_ENABLE; | ||
1334 | writel_relaxed(val, base + GITS_CTLR); | ||
1335 | |||
1336 | /* Poll GITS_CTLR and wait until ITS becomes quiescent */ | ||
1337 | while (1) { | ||
1338 | val = readl_relaxed(base + GITS_CTLR); | ||
1339 | if (val & GITS_CTLR_QUIESCENT) | ||
1340 | return 0; | ||
1341 | |||
1342 | count--; | ||
1343 | if (!count) | ||
1344 | return -EBUSY; | ||
1345 | |||
1346 | cpu_relax(); | ||
1347 | udelay(1); | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1258 | static int its_probe(struct device_node *node, struct irq_domain *parent) | 1351 | static int its_probe(struct device_node *node, struct irq_domain *parent) |
1259 | { | 1352 | { |
1260 | struct resource res; | 1353 | struct resource res; |
@@ -1283,6 +1376,13 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) | |||
1283 | goto out_unmap; | 1376 | goto out_unmap; |
1284 | } | 1377 | } |
1285 | 1378 | ||
1379 | err = its_force_quiescent(its_base); | ||
1380 | if (err) { | ||
1381 | pr_warn("%s: failed to quiesce, giving up\n", | ||
1382 | node->full_name); | ||
1383 | goto out_unmap; | ||
1384 | } | ||
1385 | |||
1286 | pr_info("ITS: %s\n", node->full_name); | 1386 | pr_info("ITS: %s\n", node->full_name); |
1287 | 1387 | ||
1288 | its = kzalloc(sizeof(*its), GFP_KERNEL); | 1388 | its = kzalloc(sizeof(*its), GFP_KERNEL); |
@@ -1323,7 +1423,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) | |||
1323 | writeq_relaxed(baser, its->base + GITS_CBASER); | 1423 | writeq_relaxed(baser, its->base + GITS_CBASER); |
1324 | tmp = readq_relaxed(its->base + GITS_CBASER); | 1424 | tmp = readq_relaxed(its->base + GITS_CBASER); |
1325 | writeq_relaxed(0, its->base + GITS_CWRITER); | 1425 | writeq_relaxed(0, its->base + GITS_CWRITER); |
1326 | writel_relaxed(1, its->base + GITS_CTLR); | 1426 | writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); |
1327 | 1427 | ||
1328 | if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { | 1428 | if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { |
1329 | pr_info("ITS: using cache flushing for cmd queue\n"); | 1429 | pr_info("ITS: using cache flushing for cmd queue\n"); |
@@ -1382,12 +1482,11 @@ static bool gic_rdists_supports_plpis(void) | |||
1382 | 1482 | ||
1383 | int its_cpu_init(void) | 1483 | int its_cpu_init(void) |
1384 | { | 1484 | { |
1385 | if (!gic_rdists_supports_plpis()) { | ||
1386 | pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); | ||
1387 | return -ENXIO; | ||
1388 | } | ||
1389 | |||
1390 | if (!list_empty(&its_nodes)) { | 1485 | if (!list_empty(&its_nodes)) { |
1486 | if (!gic_rdists_supports_plpis()) { | ||
1487 | pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); | ||
1488 | return -ENXIO; | ||
1489 | } | ||
1391 | its_cpu_init_lpis(); | 1490 | its_cpu_init_lpis(); |
1392 | its_cpu_init_collection(); | 1491 | its_cpu_init_collection(); |
1393 | } | 1492 | } |
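The largest ITS change above sizes the Device Table from the DeviceID width reported in GITS_TYPER instead of a single page, capped so the request stays within what the page allocator can satisfy (the other fixes in this file switch the ITS lock to irqsave variants, zero the ITT allocation, and quiesce the ITS before reprogramming it). The sizing rule on its own, a sketch with the constants assumed:

    static int device_table_order(unsigned int devbits, unsigned int entry_size)
    {
            int order = get_order((1UL << devbits) * entry_size);   /* log2 of pages */

            if (order >= MAX_ORDER)
                    order = MAX_ORDER - 1;   /* cap the request, accept a truncated table */

            return order;
    }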
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 1c6dea2fbc34..fd8850def1b8 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -466,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, | |||
466 | tlist |= 1 << (mpidr & 0xf); | 466 | tlist |= 1 << (mpidr & 0xf); |
467 | 467 | ||
468 | cpu = cpumask_next(cpu, mask); | 468 | cpu = cpumask_next(cpu, mask); |
469 | if (cpu == nr_cpu_ids) | 469 | if (cpu >= nr_cpu_ids) |
470 | goto out; | 470 | goto out; |
471 | 471 | ||
472 | mpidr = cpu_logical_map(cpu); | 472 | mpidr = cpu_logical_map(cpu); |
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 4634cf7d0ec3..471e1cdc1933 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -154,23 +154,25 @@ static inline unsigned int gic_irq(struct irq_data *d) | |||
154 | static void gic_mask_irq(struct irq_data *d) | 154 | static void gic_mask_irq(struct irq_data *d) |
155 | { | 155 | { |
156 | u32 mask = 1 << (gic_irq(d) % 32); | 156 | u32 mask = 1 << (gic_irq(d) % 32); |
157 | unsigned long flags; | ||
157 | 158 | ||
158 | raw_spin_lock(&irq_controller_lock); | 159 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
159 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); | 160 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); |
160 | if (gic_arch_extn.irq_mask) | 161 | if (gic_arch_extn.irq_mask) |
161 | gic_arch_extn.irq_mask(d); | 162 | gic_arch_extn.irq_mask(d); |
162 | raw_spin_unlock(&irq_controller_lock); | 163 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
163 | } | 164 | } |
164 | 165 | ||
165 | static void gic_unmask_irq(struct irq_data *d) | 166 | static void gic_unmask_irq(struct irq_data *d) |
166 | { | 167 | { |
167 | u32 mask = 1 << (gic_irq(d) % 32); | 168 | u32 mask = 1 << (gic_irq(d) % 32); |
169 | unsigned long flags; | ||
168 | 170 | ||
169 | raw_spin_lock(&irq_controller_lock); | 171 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
170 | if (gic_arch_extn.irq_unmask) | 172 | if (gic_arch_extn.irq_unmask) |
171 | gic_arch_extn.irq_unmask(d); | 173 | gic_arch_extn.irq_unmask(d); |
172 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); | 174 | writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); |
173 | raw_spin_unlock(&irq_controller_lock); | 175 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
174 | } | 176 | } |
175 | 177 | ||
176 | static void gic_eoi_irq(struct irq_data *d) | 178 | static void gic_eoi_irq(struct irq_data *d) |
@@ -188,6 +190,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) | |||
188 | { | 190 | { |
189 | void __iomem *base = gic_dist_base(d); | 191 | void __iomem *base = gic_dist_base(d); |
190 | unsigned int gicirq = gic_irq(d); | 192 | unsigned int gicirq = gic_irq(d); |
193 | unsigned long flags; | ||
191 | int ret; | 194 | int ret; |
192 | 195 | ||
193 | /* Interrupt configuration for SGIs can't be changed */ | 196 | /* Interrupt configuration for SGIs can't be changed */ |
@@ -199,14 +202,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type) | |||
199 | type != IRQ_TYPE_EDGE_RISING) | 202 | type != IRQ_TYPE_EDGE_RISING) |
200 | return -EINVAL; | 203 | return -EINVAL; |
201 | 204 | ||
202 | raw_spin_lock(&irq_controller_lock); | 205 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
203 | 206 | ||
204 | if (gic_arch_extn.irq_set_type) | 207 | if (gic_arch_extn.irq_set_type) |
205 | gic_arch_extn.irq_set_type(d, type); | 208 | gic_arch_extn.irq_set_type(d, type); |
206 | 209 | ||
207 | ret = gic_configure_irq(gicirq, type, base, NULL); | 210 | ret = gic_configure_irq(gicirq, type, base, NULL); |
208 | 211 | ||
209 | raw_spin_unlock(&irq_controller_lock); | 212 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
210 | 213 | ||
211 | return ret; | 214 | return ret; |
212 | } | 215 | } |
@@ -227,6 +230,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
227 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); | 230 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); |
228 | unsigned int cpu, shift = (gic_irq(d) % 4) * 8; | 231 | unsigned int cpu, shift = (gic_irq(d) % 4) * 8; |
229 | u32 val, mask, bit; | 232 | u32 val, mask, bit; |
233 | unsigned long flags; | ||
230 | 234 | ||
231 | if (!force) | 235 | if (!force) |
232 | cpu = cpumask_any_and(mask_val, cpu_online_mask); | 236 | cpu = cpumask_any_and(mask_val, cpu_online_mask); |
@@ -236,12 +240,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
236 | if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) | 240 | if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) |
237 | return -EINVAL; | 241 | return -EINVAL; |
238 | 242 | ||
239 | raw_spin_lock(&irq_controller_lock); | 243 | raw_spin_lock_irqsave(&irq_controller_lock, flags); |
240 | mask = 0xff << shift; | 244 | mask = 0xff << shift; |
241 | bit = gic_cpu_map[cpu] << shift; | 245 | bit = gic_cpu_map[cpu] << shift; |
242 | val = readl_relaxed(reg) & ~mask; | 246 | val = readl_relaxed(reg) & ~mask; |
243 | writel_relaxed(val | bit, reg); | 247 | writel_relaxed(val | bit, reg); |
244 | raw_spin_unlock(&irq_controller_lock); | 248 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); |
245 | 249 | ||
246 | return IRQ_SET_MASK_OK; | 250 | return IRQ_SET_MASK_OK; |
247 | } | 251 | } |
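
All four callbacks switch from raw_spin_lock() to the irqsave variant: if any of them can be entered with interrupts enabled while the same lock is also taken from hard-IRQ context, the plain lock could deadlock on a single CPU. A minimal sketch of the locking pattern in isolation (placeholder names, not the GIC driver):

static DEFINE_RAW_SPINLOCK(ctrl_lock);

static void update_ctrl_regs(void)
{
        unsigned long flags;

        /* disable local interrupts for the critical section and
         * restore the previous interrupt state afterwards */
        raw_spin_lock_irqsave(&ctrl_lock, flags);
        /* ... read-modify-write of controller registers ... */
        raw_spin_unlock_irqrestore(&ctrl_lock, flags);
}
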
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 5b76a173cd95..5897d8d8fa5a 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig | |||
@@ -526,6 +526,7 @@ config MTD_NAND_SUNXI | |||
526 | 526 | ||
527 | config MTD_NAND_HISI504 | 527 | config MTD_NAND_HISI504 |
528 | tristate "Support for NAND controller on Hisilicon SoC Hip04" | 528 | tristate "Support for NAND controller on Hisilicon SoC Hip04" |
529 | depends on HAS_DMA | ||
529 | help | 530 | help |
530 | Enables support for NAND controller on Hisilicon SoC Hip04. | 531 | Enables support for NAND controller on Hisilicon SoC Hip04. |
531 | 532 | ||
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 96b0b1d27df1..10b1f7a4fe50 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
@@ -480,6 +480,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) | |||
480 | nand_writel(info, NDCR, ndcr | int_mask); | 480 | nand_writel(info, NDCR, ndcr | int_mask); |
481 | } | 481 | } |
482 | 482 | ||
483 | static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len) | ||
484 | { | ||
485 | if (info->ecc_bch) { | ||
486 | int timeout; | ||
487 | |||
488 | /* | ||
489 | * According to the datasheet, when reading from NDDB | ||
489 | * with BCH enabled, after each 32-byte read, we | ||
491 | * have to make sure that the NDSR.RDDREQ bit is set. | ||
492 | * | ||
493 | * Drain the FIFO, 8 32-bit reads at a time, and skip | ||
494 | * the polling on the last read. | ||
495 | */ | ||
496 | while (len > 8) { | ||
497 | __raw_readsl(info->mmio_base + NDDB, data, 8); | ||
498 | |||
499 | for (timeout = 0; | ||
500 | !(nand_readl(info, NDSR) & NDSR_RDDREQ); | ||
501 | timeout++) { | ||
502 | if (timeout >= 5) { | ||
503 | dev_err(&info->pdev->dev, | ||
504 | "Timeout on RDDREQ while draining the FIFO\n"); | ||
505 | return; | ||
506 | } | ||
507 | |||
508 | mdelay(1); | ||
509 | } | ||
510 | |||
511 | data += 32; | ||
512 | len -= 8; | ||
513 | } | ||
514 | } | ||
515 | |||
516 | __raw_readsl(info->mmio_base + NDDB, data, len); | ||
517 | } | ||
518 | |||
483 | static void handle_data_pio(struct pxa3xx_nand_info *info) | 519 | static void handle_data_pio(struct pxa3xx_nand_info *info) |
484 | { | 520 | { |
485 | unsigned int do_bytes = min(info->data_size, info->chunk_size); | 521 | unsigned int do_bytes = min(info->data_size, info->chunk_size); |
@@ -496,14 +532,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info) | |||
496 | DIV_ROUND_UP(info->oob_size, 4)); | 532 | DIV_ROUND_UP(info->oob_size, 4)); |
497 | break; | 533 | break; |
498 | case STATE_PIO_READING: | 534 | case STATE_PIO_READING: |
499 | __raw_readsl(info->mmio_base + NDDB, | 535 | drain_fifo(info, |
500 | info->data_buff + info->data_buff_pos, | 536 | info->data_buff + info->data_buff_pos, |
501 | DIV_ROUND_UP(do_bytes, 4)); | 537 | DIV_ROUND_UP(do_bytes, 4)); |
502 | 538 | ||
503 | if (info->oob_size > 0) | 539 | if (info->oob_size > 0) |
504 | __raw_readsl(info->mmio_base + NDDB, | 540 | drain_fifo(info, |
505 | info->oob_buff + info->oob_buff_pos, | 541 | info->oob_buff + info->oob_buff_pos, |
506 | DIV_ROUND_UP(info->oob_size, 4)); | 542 | DIV_ROUND_UP(info->oob_size, 4)); |
507 | break; | 543 | break; |
508 | default: | 544 | default: |
509 | dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, | 545 | dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, |
@@ -1572,6 +1608,8 @@ static int alloc_nand_resource(struct platform_device *pdev) | |||
1572 | int ret, irq, cs; | 1608 | int ret, irq, cs; |
1573 | 1609 | ||
1574 | pdata = dev_get_platdata(&pdev->dev); | 1610 | pdata = dev_get_platdata(&pdev->dev); |
1611 | if (pdata->num_cs <= 0) | ||
1612 | return -ENODEV; | ||
1575 | info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + | 1613 | info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + |
1576 | sizeof(*host)) * pdata->num_cs, GFP_KERNEL); | 1614 | sizeof(*host)) * pdata->num_cs, GFP_KERNEL); |
1577 | if (!info) | 1615 | if (!info) |
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3c82e02e3dae..b0f69248cb71 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c | |||
@@ -579,6 +579,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) | |||
579 | skb->pkt_type = PACKET_BROADCAST; | 579 | skb->pkt_type = PACKET_BROADCAST; |
580 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 580 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
581 | 581 | ||
582 | skb_reset_mac_header(skb); | ||
583 | skb_reset_network_header(skb); | ||
584 | skb_reset_transport_header(skb); | ||
585 | |||
582 | can_skb_reserve(skb); | 586 | can_skb_reserve(skb); |
583 | can_skb_prv(skb)->ifindex = dev->ifindex; | 587 | can_skb_prv(skb)->ifindex = dev->ifindex; |
584 | 588 | ||
@@ -603,6 +607,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, | |||
603 | skb->pkt_type = PACKET_BROADCAST; | 607 | skb->pkt_type = PACKET_BROADCAST; |
604 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 608 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
605 | 609 | ||
610 | skb_reset_mac_header(skb); | ||
611 | skb_reset_network_header(skb); | ||
612 | skb_reset_transport_header(skb); | ||
613 | |||
606 | can_skb_reserve(skb); | 614 | can_skb_reserve(skb); |
607 | can_skb_prv(skb)->ifindex = dev->ifindex; | 615 | can_skb_prv(skb)->ifindex = dev->ifindex; |
608 | 616 | ||
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 2928f7003041..a316fa4b91ab 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c | |||
@@ -14,6 +14,7 @@ | |||
14 | * Copyright (C) 2015 Valeo S.A. | 14 | * Copyright (C) 2015 Valeo S.A. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/kernel.h> | ||
17 | #include <linux/completion.h> | 18 | #include <linux/completion.h> |
18 | #include <linux/module.h> | 19 | #include <linux/module.h> |
19 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
@@ -584,8 +585,15 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, | |||
584 | while (pos <= actual_len - MSG_HEADER_LEN) { | 585 | while (pos <= actual_len - MSG_HEADER_LEN) { |
585 | tmp = buf + pos; | 586 | tmp = buf + pos; |
586 | 587 | ||
587 | if (!tmp->len) | 588 | /* Handle messages crossing the USB endpoint max packet |
588 | break; | 589 | * size boundary. Check kvaser_usb_read_bulk_callback() |
590 | * for further details. | ||
591 | */ | ||
592 | if (tmp->len == 0) { | ||
593 | pos = round_up(pos, | ||
594 | dev->bulk_in->wMaxPacketSize); | ||
595 | continue; | ||
596 | } | ||
589 | 597 | ||
590 | if (pos + tmp->len > actual_len) { | 598 | if (pos + tmp->len > actual_len) { |
591 | dev_err(dev->udev->dev.parent, | 599 | dev_err(dev->udev->dev.parent, |
@@ -787,7 +795,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, | |||
787 | netdev_err(netdev, "Error transmitting URB\n"); | 795 | netdev_err(netdev, "Error transmitting URB\n"); |
788 | usb_unanchor_urb(urb); | 796 | usb_unanchor_urb(urb); |
789 | usb_free_urb(urb); | 797 | usb_free_urb(urb); |
790 | kfree(buf); | ||
791 | return err; | 798 | return err; |
792 | } | 799 | } |
793 | 800 | ||
@@ -1317,8 +1324,19 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) | |||
1317 | while (pos <= urb->actual_length - MSG_HEADER_LEN) { | 1324 | while (pos <= urb->actual_length - MSG_HEADER_LEN) { |
1318 | msg = urb->transfer_buffer + pos; | 1325 | msg = urb->transfer_buffer + pos; |
1319 | 1326 | ||
1320 | if (!msg->len) | 1327 | /* The Kvaser firmware can only read and write messages that |
1321 | break; | 1328 | * do not cross the USB's endpoint wMaxPacketSize boundary. |
1329 | * If a follow-up command crosses such boundary, firmware puts | ||
1330 | * a placeholder zero-length command in its place then aligns | ||
1331 | * the real command to the next max packet size. | ||
1332 | * | ||
1333 | * Handle such cases or we're going to miss a significant | ||
1334 | * number of events in case of a heavy rx load on the bus. | ||
1335 | */ | ||
1336 | if (msg->len == 0) { | ||
1337 | pos = round_up(pos, dev->bulk_in->wMaxPacketSize); | ||
1338 | continue; | ||
1339 | } | ||
1322 | 1340 | ||
1323 | if (pos + msg->len > urb->actual_length) { | 1341 | if (pos + msg->len > urb->actual_length) { |
1324 | dev_err(dev->udev->dev.parent, "Format error\n"); | 1342 | dev_err(dev->udev->dev.parent, "Format error\n"); |
@@ -1326,7 +1344,6 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) | |||
1326 | } | 1344 | } |
1327 | 1345 | ||
1328 | kvaser_usb_handle_message(dev, msg); | 1346 | kvaser_usb_handle_message(dev, msg); |
1329 | |||
1330 | pos += msg->len; | 1347 | pos += msg->len; |
1331 | } | 1348 | } |
1332 | 1349 | ||
@@ -1615,8 +1632,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1615 | struct urb *urb; | 1632 | struct urb *urb; |
1616 | void *buf; | 1633 | void *buf; |
1617 | struct kvaser_msg *msg; | 1634 | struct kvaser_msg *msg; |
1618 | int i, err; | 1635 | int i, err, ret = NETDEV_TX_OK; |
1619 | int ret = NETDEV_TX_OK; | ||
1620 | u8 *msg_tx_can_flags = NULL; /* GCC */ | 1636 | u8 *msg_tx_can_flags = NULL; /* GCC */ |
1621 | 1637 | ||
1622 | if (can_dropped_invalid_skb(netdev, skb)) | 1638 | if (can_dropped_invalid_skb(netdev, skb)) |
@@ -1634,7 +1650,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1634 | if (!buf) { | 1650 | if (!buf) { |
1635 | stats->tx_dropped++; | 1651 | stats->tx_dropped++; |
1636 | dev_kfree_skb(skb); | 1652 | dev_kfree_skb(skb); |
1637 | goto nobufmem; | 1653 | goto freeurb; |
1638 | } | 1654 | } |
1639 | 1655 | ||
1640 | msg = buf; | 1656 | msg = buf; |
@@ -1681,8 +1697,10 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1681 | /* This should never happen; it implies a flow control bug */ | 1697 | /* This should never happen; it implies a flow control bug */ |
1682 | if (!context) { | 1698 | if (!context) { |
1683 | netdev_warn(netdev, "cannot find free context\n"); | 1699 | netdev_warn(netdev, "cannot find free context\n"); |
1700 | |||
1701 | kfree(buf); | ||
1684 | ret = NETDEV_TX_BUSY; | 1702 | ret = NETDEV_TX_BUSY; |
1685 | goto releasebuf; | 1703 | goto freeurb; |
1686 | } | 1704 | } |
1687 | 1705 | ||
1688 | context->priv = priv; | 1706 | context->priv = priv; |
@@ -1719,16 +1737,12 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
1719 | else | 1737 | else |
1720 | netdev_warn(netdev, "Failed tx_urb %d\n", err); | 1738 | netdev_warn(netdev, "Failed tx_urb %d\n", err); |
1721 | 1739 | ||
1722 | goto releasebuf; | 1740 | goto freeurb; |
1723 | } | 1741 | } |
1724 | 1742 | ||
1725 | usb_free_urb(urb); | 1743 | ret = NETDEV_TX_OK; |
1726 | |||
1727 | return NETDEV_TX_OK; | ||
1728 | 1744 | ||
1729 | releasebuf: | 1745 | freeurb: |
1730 | kfree(buf); | ||
1731 | nobufmem: | ||
1732 | usb_free_urb(urb); | 1746 | usb_free_urb(urb); |
1733 | return ret; | 1747 | return ret; |
1734 | } | 1748 | } |
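
Both parsing loops now treat a zero-length message as firmware padding and realign to the next wMaxPacketSize boundary instead of abandoning the rest of the buffer. A small user-space model of that loop (the record layout and names are invented; it assumes, as the comment above describes, that padding only appears mid-packet, never exactly on a boundary):

#include <stdio.h>
#include <stdint.h>

#define MAXPKT 8
#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

struct rec { uint8_t len; uint8_t id; };

int main(void)
{
        uint8_t buf[] = { 4, 1, 0, 0, 0, 0, 0, 0,   /* rec 1 + padding */
                          6, 2, 0, 0, 0, 0, 0, 0,   /* rec 2 + padding */
                          2, 3 };                    /* rec 3 */
        size_t actual_len = sizeof(buf), pos = 0;

        while (pos + sizeof(struct rec) <= actual_len) {
                struct rec *r = (struct rec *)&buf[pos];

                if (r->len == 0) {              /* padding: realign */
                        pos = ROUND_UP(pos, MAXPKT);
                        continue;
                }
                printf("id %u, len %u, at offset %zu\n", r->id, r->len, pos);
                pos += r->len;
        }
        return 0;
}
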
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 962c3f027383..0bac0f14edc3 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c | |||
@@ -879,6 +879,10 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) | |||
879 | 879 | ||
880 | pdev->usb_if = ppdev->usb_if; | 880 | pdev->usb_if = ppdev->usb_if; |
881 | pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; | 881 | pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; |
882 | |||
883 | /* do a copy of the ctrlmode[_supported] too */ | ||
884 | dev->can.ctrlmode = ppdev->dev.can.ctrlmode; | ||
885 | dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported; | ||
882 | } | 886 | } |
883 | 887 | ||
884 | pdev->usb_if->dev[dev->ctrl_idx] = dev; | 888 | pdev->usb_if->dev[dev->ctrl_idx] = dev; |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 869d97fcf781..b927021c6c40 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | |||
@@ -593,7 +593,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata) | |||
593 | if (!xgene_ring_mgr_init(pdata)) | 593 | if (!xgene_ring_mgr_init(pdata)) |
594 | return -ENODEV; | 594 | return -ENODEV; |
595 | 595 | ||
596 | if (!efi_enabled(EFI_BOOT)) { | 596 | if (pdata->clk) { |
597 | clk_prepare_enable(pdata->clk); | 597 | clk_prepare_enable(pdata->clk); |
598 | clk_disable_unprepare(pdata->clk); | 598 | clk_disable_unprepare(pdata->clk); |
599 | clk_prepare_enable(pdata->clk); | 599 | clk_prepare_enable(pdata->clk); |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 4de62b210c85..635a83be7e5e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c | |||
@@ -1025,6 +1025,8 @@ static int xgene_enet_remove(struct platform_device *pdev) | |||
1025 | #ifdef CONFIG_ACPI | 1025 | #ifdef CONFIG_ACPI |
1026 | static const struct acpi_device_id xgene_enet_acpi_match[] = { | 1026 | static const struct acpi_device_id xgene_enet_acpi_match[] = { |
1027 | { "APMC0D05", }, | 1027 | { "APMC0D05", }, |
1028 | { "APMC0D30", }, | ||
1029 | { "APMC0D31", }, | ||
1028 | { } | 1030 | { } |
1029 | }; | 1031 | }; |
1030 | MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); | 1032 | MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); |
@@ -1033,6 +1035,8 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); | |||
1033 | #ifdef CONFIG_OF | 1035 | #ifdef CONFIG_OF |
1034 | static struct of_device_id xgene_enet_of_match[] = { | 1036 | static struct of_device_id xgene_enet_of_match[] = { |
1035 | {.compatible = "apm,xgene-enet",}, | 1037 | {.compatible = "apm,xgene-enet",}, |
1038 | {.compatible = "apm,xgene1-sgenet",}, | ||
1039 | {.compatible = "apm,xgene1-xgenet",}, | ||
1036 | {}, | 1040 | {}, |
1037 | }; | 1041 | }; |
1038 | 1042 | ||
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 21206d33b638..a7f2cc3e485e 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c | |||
@@ -486,7 +486,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) | |||
486 | { | 486 | { |
487 | struct bcm_enet_priv *priv; | 487 | struct bcm_enet_priv *priv; |
488 | struct net_device *dev; | 488 | struct net_device *dev; |
489 | int tx_work_done, rx_work_done; | 489 | int rx_work_done; |
490 | 490 | ||
491 | priv = container_of(napi, struct bcm_enet_priv, napi); | 491 | priv = container_of(napi, struct bcm_enet_priv, napi); |
492 | dev = priv->net_dev; | 492 | dev = priv->net_dev; |
@@ -498,14 +498,14 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) | |||
498 | ENETDMAC_IR, priv->tx_chan); | 498 | ENETDMAC_IR, priv->tx_chan); |
499 | 499 | ||
500 | /* reclaim sent skb */ | 500 | /* reclaim sent skb */ |
501 | tx_work_done = bcm_enet_tx_reclaim(dev, 0); | 501 | bcm_enet_tx_reclaim(dev, 0); |
502 | 502 | ||
503 | spin_lock(&priv->rx_lock); | 503 | spin_lock(&priv->rx_lock); |
504 | rx_work_done = bcm_enet_receive_queue(dev, budget); | 504 | rx_work_done = bcm_enet_receive_queue(dev, budget); |
505 | spin_unlock(&priv->rx_lock); | 505 | spin_unlock(&priv->rx_lock); |
506 | 506 | ||
507 | if (rx_work_done >= budget || tx_work_done > 0) { | 507 | if (rx_work_done >= budget) { |
508 | /* rx/tx queue is not yet empty/clean */ | 508 | /* rx queue is not yet empty/clean */ |
509 | return rx_work_done; | 509 | return rx_work_done; |
510 | } | 510 | } |
511 | 511 | ||
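
The poll routine now counts only RX work against the NAPI budget; TX reclaim is unbounded housekeeping and no longer keeps the poll from completing. A hedged sketch of that contract (helper names are placeholders, not this driver's functions):

static int example_poll(struct napi_struct *napi, int budget)
{
        int rx_done;

        reclaim_tx();                   /* not counted against the budget */
        rx_done = process_rx(budget);   /* bounded by the budget */

        if (rx_done < budget) {
                napi_complete(napi);    /* done: re-enable device IRQs */
                enable_rx_irq();
        }
        return rx_done;
}
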
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 676ffe093180..0469f72c6e7e 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c | |||
@@ -302,9 +302,6 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, | |||
302 | slot->skb = skb; | 302 | slot->skb = skb; |
303 | slot->dma_addr = dma_addr; | 303 | slot->dma_addr = dma_addr; |
304 | 304 | ||
305 | if (slot->dma_addr & 0xC0000000) | ||
306 | bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); | ||
307 | |||
308 | return 0; | 305 | return 0; |
309 | } | 306 | } |
310 | 307 | ||
@@ -505,8 +502,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) | |||
505 | ring->mmio_base); | 502 | ring->mmio_base); |
506 | goto err_dma_free; | 503 | goto err_dma_free; |
507 | } | 504 | } |
508 | if (ring->dma_base & 0xC0000000) | ||
509 | bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); | ||
510 | 505 | ||
511 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, | 506 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, |
512 | BGMAC_DMA_RING_TX); | 507 | BGMAC_DMA_RING_TX); |
@@ -536,8 +531,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) | |||
536 | err = -ENOMEM; | 531 | err = -ENOMEM; |
537 | goto err_dma_free; | 532 | goto err_dma_free; |
538 | } | 533 | } |
539 | if (ring->dma_base & 0xC0000000) | ||
540 | bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); | ||
541 | 534 | ||
542 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, | 535 | ring->unaligned = bgmac_dma_unaligned(bgmac, ring, |
543 | BGMAC_DMA_RING_RX); | 536 | BGMAC_DMA_RING_RX); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7155e1d2c208..bef750a09027 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -12722,6 +12722,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, | |||
12722 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, | 12722 | pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, |
12723 | PCICFG_VENDOR_ID_OFFSET); | 12723 | PCICFG_VENDOR_ID_OFFSET); |
12724 | 12724 | ||
12725 | /* Set PCIe reset type to fundamental for EEH recovery */ | ||
12726 | pdev->needs_freset = 1; | ||
12727 | |||
12725 | /* AER (Advanced Error reporting) configuration */ | 12728 | /* AER (Advanced Error reporting) configuration */ |
12726 | rc = pci_enable_pcie_error_reporting(pdev); | 12729 | rc = pci_enable_pcie_error_reporting(pdev); |
12727 | if (!rc) | 12730 | if (!rc) |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 149a0d70c108..b97122926d3a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | |||
@@ -73,15 +73,17 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |||
73 | if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) | 73 | if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) |
74 | return -EINVAL; | 74 | return -EINVAL; |
75 | 75 | ||
76 | reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); | ||
76 | if (wol->wolopts & WAKE_MAGICSECURE) { | 77 | if (wol->wolopts & WAKE_MAGICSECURE) { |
77 | bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), | 78 | bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), |
78 | UMAC_MPD_PW_MS); | 79 | UMAC_MPD_PW_MS); |
79 | bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), | 80 | bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), |
80 | UMAC_MPD_PW_LS); | 81 | UMAC_MPD_PW_LS); |
81 | reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); | ||
82 | reg |= MPD_PW_EN; | 82 | reg |= MPD_PW_EN; |
83 | bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); | 83 | } else { |
84 | reg &= ~MPD_PW_EN; | ||
84 | } | 85 | } |
86 | bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); | ||
85 | 87 | ||
86 | /* Flag the device and relevant IRQ as wakeup capable */ | 88 | /* Flag the device and relevant IRQ as wakeup capable */ |
87 | if (wol->wolopts) { | 89 | if (wol->wolopts) { |
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ad76b8e35a00..81d41539fcba 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -2113,17 +2113,17 @@ static const struct net_device_ops macb_netdev_ops = { | |||
2113 | }; | 2113 | }; |
2114 | 2114 | ||
2115 | #if defined(CONFIG_OF) | 2115 | #if defined(CONFIG_OF) |
2116 | static struct macb_config pc302gem_config = { | 2116 | static const struct macb_config pc302gem_config = { |
2117 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, | 2117 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, |
2118 | .dma_burst_length = 16, | 2118 | .dma_burst_length = 16, |
2119 | }; | 2119 | }; |
2120 | 2120 | ||
2121 | static struct macb_config sama5d3_config = { | 2121 | static const struct macb_config sama5d3_config = { |
2122 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, | 2122 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, |
2123 | .dma_burst_length = 16, | 2123 | .dma_burst_length = 16, |
2124 | }; | 2124 | }; |
2125 | 2125 | ||
2126 | static struct macb_config sama5d4_config = { | 2126 | static const struct macb_config sama5d4_config = { |
2127 | .caps = 0, | 2127 | .caps = 0, |
2128 | .dma_burst_length = 4, | 2128 | .dma_burst_length = 4, |
2129 | }; | 2129 | }; |
@@ -2154,7 +2154,7 @@ static void macb_configure_caps(struct macb *bp) | |||
2154 | if (bp->pdev->dev.of_node) { | 2154 | if (bp->pdev->dev.of_node) { |
2155 | match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); | 2155 | match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); |
2156 | if (match && match->data) { | 2156 | if (match && match->data) { |
2157 | config = (const struct macb_config *)match->data; | 2157 | config = match->data; |
2158 | 2158 | ||
2159 | bp->caps = config->caps; | 2159 | bp->caps = config->caps; |
2160 | /* | 2160 | /* |
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 31dc080f2437..ff85619a9732 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h | |||
@@ -351,7 +351,7 @@ | |||
351 | 351 | ||
352 | /* Bitfields in MID */ | 352 | /* Bitfields in MID */ |
353 | #define MACB_IDNUM_OFFSET 16 | 353 | #define MACB_IDNUM_OFFSET 16 |
354 | #define MACB_IDNUM_SIZE 16 | 354 | #define MACB_IDNUM_SIZE 12 |
355 | #define MACB_REV_OFFSET 0 | 355 | #define MACB_REV_OFFSET 0 |
356 | #define MACB_REV_SIZE 16 | 356 | #define MACB_REV_SIZE 16 |
357 | 357 | ||
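
The MID ID field is 12 bits wide, not 16; with the old width the extracted design ID also picked up bits belonging to the neighbouring part of the register. A quick self-contained illustration of the offset/size extraction (the register value is hypothetical and the macro only loosely mirrors macb.h):

#include <stdio.h>
#include <stdint.h>

#define IDNUM_OFFSET 16
#define IDNUM_SIZE   12
#define BFEXT(val, off, sz) (((val) >> (off)) & ((1u << (sz)) - 1))

int main(void)
{
        uint32_t mid = 0x7012000a;   /* hypothetical MID register */

        printf("idnum = 0x%x\n", BFEXT(mid, IDNUM_OFFSET, IDNUM_SIZE));
        /* with a 16-bit size the same read would yield 0x7012,
         * folding the register's top nibble into the ID */
        return 0;
}
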
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9bb6220663b2..99492b7e3713 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -1597,7 +1597,7 @@ fec_enet_interrupt(int irq, void *dev_id) | |||
1597 | writel(int_events, fep->hwp + FEC_IEVENT); | 1597 | writel(int_events, fep->hwp + FEC_IEVENT); |
1598 | fec_enet_collect_events(fep, int_events); | 1598 | fec_enet_collect_events(fep, int_events); |
1599 | 1599 | ||
1600 | if (fep->work_tx || fep->work_rx) { | 1600 | if ((fep->work_tx || fep->work_rx) && fep->link) { |
1601 | ret = IRQ_HANDLED; | 1601 | ret = IRQ_HANDLED; |
1602 | 1602 | ||
1603 | if (napi_schedule_prep(&fep->napi)) { | 1603 | if (napi_schedule_prep(&fep->napi)) { |
@@ -3383,7 +3383,6 @@ fec_drv_remove(struct platform_device *pdev) | |||
3383 | regulator_disable(fep->reg_phy); | 3383 | regulator_disable(fep->reg_phy); |
3384 | if (fep->ptp_clock) | 3384 | if (fep->ptp_clock) |
3385 | ptp_clock_unregister(fep->ptp_clock); | 3385 | ptp_clock_unregister(fep->ptp_clock); |
3386 | fec_enet_clk_enable(ndev, false); | ||
3387 | of_node_put(fep->phy_node); | 3386 | of_node_put(fep->phy_node); |
3388 | free_netdev(ndev); | 3387 | free_netdev(ndev); |
3389 | 3388 | ||
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 178e54028d10..7bf3682cdf47 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -747,6 +747,18 @@ static int gfar_parse_group(struct device_node *np, | |||
747 | return 0; | 747 | return 0; |
748 | } | 748 | } |
749 | 749 | ||
750 | static int gfar_of_group_count(struct device_node *np) | ||
751 | { | ||
752 | struct device_node *child; | ||
753 | int num = 0; | ||
754 | |||
755 | for_each_available_child_of_node(np, child) | ||
756 | if (!of_node_cmp(child->name, "queue-group")) | ||
757 | num++; | ||
758 | |||
759 | return num; | ||
760 | } | ||
761 | |||
750 | static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | 762 | static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) |
751 | { | 763 | { |
752 | const char *model; | 764 | const char *model; |
@@ -784,7 +796,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | |||
784 | num_rx_qs = 1; | 796 | num_rx_qs = 1; |
785 | } else { /* MQ_MG_MODE */ | 797 | } else { /* MQ_MG_MODE */ |
786 | /* get the actual number of supported groups */ | 798 | /* get the actual number of supported groups */ |
787 | unsigned int num_grps = of_get_available_child_count(np); | 799 | unsigned int num_grps = gfar_of_group_count(np); |
788 | 800 | ||
789 | if (num_grps == 0 || num_grps > MAXGROUPS) { | 801 | if (num_grps == 0 || num_grps > MAXGROUPS) { |
790 | dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", | 802 | dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", |
@@ -851,7 +863,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | |||
851 | 863 | ||
852 | /* Parse and initialize group specific information */ | 864 | /* Parse and initialize group specific information */ |
853 | if (priv->mode == MQ_MG_MODE) { | 865 | if (priv->mode == MQ_MG_MODE) { |
854 | for_each_child_of_node(np, child) { | 866 | for_each_available_child_of_node(np, child) { |
867 | if (of_node_cmp(child->name, "queue-group")) | ||
868 | continue; | ||
869 | |||
855 | err = gfar_parse_group(child, priv, model); | 870 | err = gfar_parse_group(child, priv, model); |
856 | if (err) | 871 | if (err) |
857 | goto err_grp_init; | 872 | goto err_grp_init; |
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 209ee1b27f8d..5d093dc0f5f5 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c | |||
@@ -92,6 +92,7 @@ static const char version[] = | |||
92 | #include "smc91x.h" | 92 | #include "smc91x.h" |
93 | 93 | ||
94 | #if defined(CONFIG_ASSABET_NEPONSET) | 94 | #if defined(CONFIG_ASSABET_NEPONSET) |
95 | #include <mach/assabet.h> | ||
95 | #include <mach/neponset.h> | 96 | #include <mach/neponset.h> |
96 | #endif | 97 | #endif |
97 | 98 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index fb846ebba1d9..f9b42f11950f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -272,6 +272,37 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) | |||
272 | struct stmmac_priv *priv = NULL; | 272 | struct stmmac_priv *priv = NULL; |
273 | struct plat_stmmacenet_data *plat_dat = NULL; | 273 | struct plat_stmmacenet_data *plat_dat = NULL; |
274 | const char *mac = NULL; | 274 | const char *mac = NULL; |
275 | int irq, wol_irq, lpi_irq; | ||
276 | |||
277 | /* Get IRQ information early so we can ask for a deferred | ||
278 | * probe if needed, before we go too far with resource allocation. | ||
279 | */ | ||
280 | irq = platform_get_irq_byname(pdev, "macirq"); | ||
281 | if (irq < 0) { | ||
282 | if (irq != -EPROBE_DEFER) { | ||
283 | dev_err(dev, | ||
284 | "MAC IRQ configuration information not found\n"); | ||
285 | } | ||
286 | return irq; | ||
287 | } | ||
288 | |||
289 | /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq | ||
290 | * The external wake up irq can be passed through the platform code | ||
291 | * named as "eth_wake_irq" | ||
292 | * | ||
293 | * If the wake up interrupt is not passed from the platform, | ||
294 | * the driver will continue to use the mac irq (ndev->irq) | ||
295 | */ | ||
296 | wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); | ||
297 | if (wol_irq < 0) { | ||
298 | if (wol_irq == -EPROBE_DEFER) | ||
299 | return -EPROBE_DEFER; | ||
300 | wol_irq = irq; | ||
301 | } | ||
302 | |||
303 | lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); | ||
304 | if (lpi_irq == -EPROBE_DEFER) | ||
305 | return -EPROBE_DEFER; | ||
275 | 306 | ||
276 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 307 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
277 | addr = devm_ioremap_resource(dev, res); | 308 | addr = devm_ioremap_resource(dev, res); |
@@ -323,39 +354,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) | |||
323 | return PTR_ERR(priv); | 354 | return PTR_ERR(priv); |
324 | } | 355 | } |
325 | 356 | ||
357 | /* Copy IRQ values to priv structure which is now available */ | ||
358 | priv->dev->irq = irq; | ||
359 | priv->wol_irq = wol_irq; | ||
360 | priv->lpi_irq = lpi_irq; | ||
361 | |||
326 | /* Get MAC address if available (DT) */ | 362 | /* Get MAC address if available (DT) */ |
327 | if (mac) | 363 | if (mac) |
328 | memcpy(priv->dev->dev_addr, mac, ETH_ALEN); | 364 | memcpy(priv->dev->dev_addr, mac, ETH_ALEN); |
329 | 365 | ||
330 | /* Get the MAC information */ | ||
331 | priv->dev->irq = platform_get_irq_byname(pdev, "macirq"); | ||
332 | if (priv->dev->irq < 0) { | ||
333 | if (priv->dev->irq != -EPROBE_DEFER) { | ||
334 | netdev_err(priv->dev, | ||
335 | "MAC IRQ configuration information not found\n"); | ||
336 | } | ||
337 | return priv->dev->irq; | ||
338 | } | ||
339 | |||
340 | /* | ||
341 | * On some platforms e.g. SPEAr the wake up irq differs from the mac irq | ||
342 | * The external wake up irq can be passed through the platform code | ||
343 | * named as "eth_wake_irq" | ||
344 | * | ||
345 | * In case the wake up interrupt is not passed from the platform | ||
346 | * so the driver will continue to use the mac irq (ndev->irq) | ||
347 | */ | ||
348 | priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); | ||
349 | if (priv->wol_irq < 0) { | ||
350 | if (priv->wol_irq == -EPROBE_DEFER) | ||
351 | return -EPROBE_DEFER; | ||
352 | priv->wol_irq = priv->dev->irq; | ||
353 | } | ||
354 | |||
355 | priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); | ||
356 | if (priv->lpi_irq == -EPROBE_DEFER) | ||
357 | return -EPROBE_DEFER; | ||
358 | |||
359 | platform_set_drvdata(pdev, priv->dev); | 366 | platform_set_drvdata(pdev, priv->dev); |
360 | 367 | ||
361 | pr_debug("STMMAC platform driver registration completed"); | 368 | pr_debug("STMMAC platform driver registration completed"); |
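
The probe now fetches all three IRQs before touching any other resource, so an -EPROBE_DEFER from the IRQ lookup is returned before registers are mapped, clocks enabled, or the netdev allocated. Minimal shape of that ordering (a sketch with placeholder steps, not the full probe):

static int probe_sketch(struct platform_device *pdev)
{
        int irq;

        /* ask for IRQs first: cheap, and may legitimately defer */
        irq = platform_get_irq_byname(pdev, "macirq");
        if (irq < 0)
                return irq;        /* propagates -EPROBE_DEFER */

        /* only now map registers, enable clocks, allocate the netdev... */
        return 0;
}
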
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index f1ee71e22241..7d394846afc2 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -1730,11 +1730,11 @@ static int team_set_mac_address(struct net_device *dev, void *p) | |||
1730 | if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) | 1730 | if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) |
1731 | return -EADDRNOTAVAIL; | 1731 | return -EADDRNOTAVAIL; |
1732 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 1732 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
1733 | rcu_read_lock(); | 1733 | mutex_lock(&team->lock); |
1734 | list_for_each_entry_rcu(port, &team->port_list, list) | 1734 | list_for_each_entry(port, &team->port_list, list) |
1735 | if (team->ops.port_change_dev_addr) | 1735 | if (team->ops.port_change_dev_addr) |
1736 | team->ops.port_change_dev_addr(team, port); | 1736 | team->ops.port_change_dev_addr(team, port); |
1737 | rcu_read_unlock(); | 1737 | mutex_unlock(&team->lock); |
1738 | return 0; | 1738 | return 0; |
1739 | } | 1739 | } |
1740 | 1740 | ||
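
The port walk switches from an RCU read-side section to the team mutex: the per-port callback can end up in driver code that may sleep, which is not allowed under rcu_read_lock(). A hedged sketch of the resulting pattern (the callback name is a placeholder):

static void change_all_port_addrs(struct team *team)
{
        struct team_port *port;

        mutex_lock(&team->lock);        /* sleepable context is fine here */
        list_for_each_entry(port, &team->port_list, list)
                port_change_dev_addr(team, port);   /* may sleep */
        mutex_unlock(&team->lock);
}
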
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f38227afe099..3aa8648080c8 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -340,12 +340,11 @@ static void xenvif_get_ethtool_stats(struct net_device *dev, | |||
340 | unsigned int num_queues = vif->num_queues; | 340 | unsigned int num_queues = vif->num_queues; |
341 | int i; | 341 | int i; |
342 | unsigned int queue_index; | 342 | unsigned int queue_index; |
343 | struct xenvif_stats *vif_stats; | ||
344 | 343 | ||
345 | for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { | 344 | for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { |
346 | unsigned long accum = 0; | 345 | unsigned long accum = 0; |
347 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { | 346 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { |
348 | vif_stats = &vif->queues[queue_index].stats; | 347 | void *vif_stats = &vif->queues[queue_index].stats; |
349 | accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); | 348 | accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); |
350 | } | 349 | } |
351 | data[i] = accum; | 350 | data[i] = accum; |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index c4d68d768408..cab9f5257f57 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
@@ -1349,7 +1349,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s | |||
1349 | { | 1349 | { |
1350 | unsigned int offset = skb_headlen(skb); | 1350 | unsigned int offset = skb_headlen(skb); |
1351 | skb_frag_t frags[MAX_SKB_FRAGS]; | 1351 | skb_frag_t frags[MAX_SKB_FRAGS]; |
1352 | int i; | 1352 | int i, f; |
1353 | struct ubuf_info *uarg; | 1353 | struct ubuf_info *uarg; |
1354 | struct sk_buff *nskb = skb_shinfo(skb)->frag_list; | 1354 | struct sk_buff *nskb = skb_shinfo(skb)->frag_list; |
1355 | 1355 | ||
@@ -1389,23 +1389,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s | |||
1389 | frags[i].page_offset = 0; | 1389 | frags[i].page_offset = 0; |
1390 | skb_frag_size_set(&frags[i], len); | 1390 | skb_frag_size_set(&frags[i], len); |
1391 | } | 1391 | } |
1392 | /* swap out with old one */ | ||
1393 | memcpy(skb_shinfo(skb)->frags, | ||
1394 | frags, | ||
1395 | i * sizeof(skb_frag_t)); | ||
1396 | skb_shinfo(skb)->nr_frags = i; | ||
1397 | skb->truesize += i * PAGE_SIZE; | ||
1398 | 1392 | ||
1399 | /* remove traces of mapped pages and frag_list */ | 1393 | /* Copied all the bits from the frag list -- free it. */ |
1400 | skb_frag_list_init(skb); | 1394 | skb_frag_list_init(skb); |
1395 | xenvif_skb_zerocopy_prepare(queue, nskb); | ||
1396 | kfree_skb(nskb); | ||
1397 | |||
1398 | /* Release all the original (foreign) frags. */ | ||
1399 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) | ||
1400 | skb_frag_unref(skb, f); | ||
1401 | uarg = skb_shinfo(skb)->destructor_arg; | 1401 | uarg = skb_shinfo(skb)->destructor_arg; |
1402 | /* increase inflight counter to offset decrement in callback */ | 1402 | /* increase inflight counter to offset decrement in callback */ |
1403 | atomic_inc(&queue->inflight_packets); | 1403 | atomic_inc(&queue->inflight_packets); |
1404 | uarg->callback(uarg, true); | 1404 | uarg->callback(uarg, true); |
1405 | skb_shinfo(skb)->destructor_arg = NULL; | 1405 | skb_shinfo(skb)->destructor_arg = NULL; |
1406 | 1406 | ||
1407 | xenvif_skb_zerocopy_prepare(queue, nskb); | 1407 | /* Fill the skb with the new (local) frags. */ |
1408 | kfree_skb(nskb); | 1408 | memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t)); |
1409 | skb_shinfo(skb)->nr_frags = i; | ||
1410 | skb->truesize += i * PAGE_SIZE; | ||
1409 | 1411 | ||
1410 | return 0; | 1412 | return 0; |
1411 | } | 1413 | } |
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 38d1c51f58b1..7bcaeec876c0 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig | |||
@@ -84,8 +84,7 @@ config OF_RESOLVE | |||
84 | bool | 84 | bool |
85 | 85 | ||
86 | config OF_OVERLAY | 86 | config OF_OVERLAY |
87 | bool | 87 | bool "Device Tree overlays" |
88 | depends on OF | ||
89 | select OF_DYNAMIC | 88 | select OF_DYNAMIC |
90 | select OF_RESOLVE | 89 | select OF_RESOLVE |
91 | 90 | ||
diff --git a/drivers/of/base.c b/drivers/of/base.c index 0a8aeb8523fe..adb8764861c0 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -714,16 +714,17 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent, | |||
714 | const char *path) | 714 | const char *path) |
715 | { | 715 | { |
716 | struct device_node *child; | 716 | struct device_node *child; |
717 | int len = strchrnul(path, '/') - path; | 717 | int len; |
718 | int term; | 718 | const char *end; |
719 | 719 | ||
720 | end = strchr(path, ':'); | ||
721 | if (!end) | ||
722 | end = strchrnul(path, '/'); | ||
723 | |||
724 | len = end - path; | ||
720 | if (!len) | 725 | if (!len) |
721 | return NULL; | 726 | return NULL; |
722 | 727 | ||
723 | term = strchrnul(path, ':') - path; | ||
724 | if (term < len) | ||
725 | len = term; | ||
726 | |||
727 | __for_each_child_of_node(parent, child) { | 728 | __for_each_child_of_node(parent, child) { |
728 | const char *name = strrchr(child->full_name, '/'); | 729 | const char *name = strrchr(child->full_name, '/'); |
729 | if (WARN(!name, "malformed device_node %s\n", child->full_name)) | 730 | if (WARN(!name, "malformed device_node %s\n", child->full_name)) |
@@ -768,8 +769,12 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt | |||
768 | 769 | ||
769 | /* The path could begin with an alias */ | 770 | /* The path could begin with an alias */ |
770 | if (*path != '/') { | 771 | if (*path != '/') { |
771 | char *p = strchrnul(path, '/'); | 772 | int len; |
772 | int len = separator ? separator - path : p - path; | 773 | const char *p = separator; |
774 | |||
775 | if (!p) | ||
776 | p = strchrnul(path, '/'); | ||
777 | len = p - path; | ||
773 | 778 | ||
774 | /* of_aliases must not be NULL */ | 779 | /* of_aliases must not be NULL */ |
775 | if (!of_aliases) | 780 | if (!of_aliases) |
@@ -794,6 +799,8 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt | |||
794 | path++; /* Increment past '/' delimiter */ | 799 | path++; /* Increment past '/' delimiter */ |
795 | np = __of_find_node_by_path(np, path); | 800 | np = __of_find_node_by_path(np, path); |
796 | path = strchrnul(path, '/'); | 801 | path = strchrnul(path, '/'); |
802 | if (separator && separator < path) | ||
803 | break; | ||
797 | } | 804 | } |
798 | raw_spin_unlock_irqrestore(&devtree_lock, flags); | 805 | raw_spin_unlock_irqrestore(&devtree_lock, flags); |
799 | return np; | 806 | return np; |
@@ -1886,8 +1893,10 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) | |||
1886 | name = of_get_property(of_chosen, "linux,stdout-path", NULL); | 1893 | name = of_get_property(of_chosen, "linux,stdout-path", NULL); |
1887 | if (IS_ENABLED(CONFIG_PPC) && !name) | 1894 | if (IS_ENABLED(CONFIG_PPC) && !name) |
1888 | name = of_get_property(of_aliases, "stdout", NULL); | 1895 | name = of_get_property(of_aliases, "stdout", NULL); |
1889 | if (name) | 1896 | if (name) { |
1890 | of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); | 1897 | of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); |
1898 | add_preferred_console("stdout-path", 0, NULL); | ||
1899 | } | ||
1891 | } | 1900 | } |
1892 | 1901 | ||
1893 | if (!of_aliases) | 1902 | if (!of_aliases) |
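
With these changes a device-tree path may carry an option string after the first ':', and the per-component walk stops at the separator instead of treating it as part of a node name. A usage sketch (the alias and options below are illustrative, not taken from a real tree):

static void stdout_path_example(void)
{
        const char *opts;
        struct device_node *np;

        np = of_find_node_opts_by_path("serial0:115200n8", &opts);
        if (np) {
                pr_info("console options: %s\n", opts); /* "115200n8" */
                of_node_put(np);
        }
}
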
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 352b4f28f82c..dee9270ba547 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/string.h> | 19 | #include <linux/string.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/idr.h> | ||
22 | 23 | ||
23 | #include "of_private.h" | 24 | #include "of_private.h" |
24 | 25 | ||
@@ -85,7 +86,7 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov, | |||
85 | struct device_node *target, struct device_node *child) | 86 | struct device_node *target, struct device_node *child) |
86 | { | 87 | { |
87 | const char *cname; | 88 | const char *cname; |
88 | struct device_node *tchild, *grandchild; | 89 | struct device_node *tchild; |
89 | int ret = 0; | 90 | int ret = 0; |
90 | 91 | ||
91 | cname = kbasename(child->full_name); | 92 | cname = kbasename(child->full_name); |
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 0cf9a236d438..aba8946cac46 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c | |||
@@ -92,6 +92,11 @@ static void __init of_selftest_find_node_by_name(void) | |||
92 | "option path test failed\n"); | 92 | "option path test failed\n"); |
93 | of_node_put(np); | 93 | of_node_put(np); |
94 | 94 | ||
95 | np = of_find_node_opts_by_path("/testcase-data:test/option", &options); | ||
96 | selftest(np && !strcmp("test/option", options), | ||
97 | "option path test, subcase #1 failed\n"); | ||
98 | of_node_put(np); | ||
99 | |||
95 | np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); | 100 | np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); |
96 | selftest(np, "NULL option path test failed\n"); | 101 | selftest(np, "NULL option path test failed\n"); |
97 | of_node_put(np); | 102 | of_node_put(np); |
@@ -102,6 +107,12 @@ static void __init of_selftest_find_node_by_name(void) | |||
102 | "option alias path test failed\n"); | 107 | "option alias path test failed\n"); |
103 | of_node_put(np); | 108 | of_node_put(np); |
104 | 109 | ||
110 | np = of_find_node_opts_by_path("testcase-alias:test/alias/option", | ||
111 | &options); | ||
112 | selftest(np && !strcmp("test/alias/option", options), | ||
113 | "option alias path test, subcase #1 failed\n"); | ||
114 | of_node_put(np); | ||
115 | |||
105 | np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL); | 116 | np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL); |
106 | selftest(np, "NULL option alias path test failed\n"); | 117 | selftest(np, "NULL option alias path test failed\n"); |
107 | of_node_put(np); | 118 | of_node_put(np); |
@@ -378,9 +389,9 @@ static void __init of_selftest_property_string(void) | |||
378 | rc = of_property_match_string(np, "phandle-list-names", "first"); | 389 | rc = of_property_match_string(np, "phandle-list-names", "first"); |
379 | selftest(rc == 0, "first expected:0 got:%i\n", rc); | 390 | selftest(rc == 0, "first expected:0 got:%i\n", rc); |
380 | rc = of_property_match_string(np, "phandle-list-names", "second"); | 391 | rc = of_property_match_string(np, "phandle-list-names", "second"); |
381 | selftest(rc == 1, "second expected:0 got:%i\n", rc); | 392 | selftest(rc == 1, "second expected:1 got:%i\n", rc); |
382 | rc = of_property_match_string(np, "phandle-list-names", "third"); | 393 | rc = of_property_match_string(np, "phandle-list-names", "third"); |
383 | selftest(rc == 2, "third expected:0 got:%i\n", rc); | 394 | selftest(rc == 2, "third expected:2 got:%i\n", rc); |
384 | rc = of_property_match_string(np, "phandle-list-names", "fourth"); | 395 | rc = of_property_match_string(np, "phandle-list-names", "fourth"); |
385 | selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); | 396 | selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); |
386 | rc = of_property_match_string(np, "missing-property", "blah"); | 397 | rc = of_property_match_string(np, "missing-property", "blah"); |
@@ -478,7 +489,6 @@ static void __init of_selftest_changeset(void) | |||
478 | struct device_node *n1, *n2, *n21, *nremove, *parent, *np; | 489 | struct device_node *n1, *n2, *n21, *nremove, *parent, *np; |
479 | struct of_changeset chgset; | 490 | struct of_changeset chgset; |
480 | 491 | ||
481 | of_changeset_init(&chgset); | ||
482 | n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1"); | 492 | n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1"); |
483 | selftest(n1, "testcase setup failure\n"); | 493 | selftest(n1, "testcase setup failure\n"); |
484 | n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2"); | 494 | n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2"); |
@@ -979,7 +989,7 @@ static int of_path_platform_device_exists(const char *path) | |||
979 | return pdev != NULL; | 989 | return pdev != NULL; |
980 | } | 990 | } |
981 | 991 | ||
982 | #if IS_ENABLED(CONFIG_I2C) | 992 | #if IS_BUILTIN(CONFIG_I2C) |
983 | 993 | ||
984 | /* get the i2c client device instantiated at the path */ | 994 | /* get the i2c client device instantiated at the path */ |
985 | static struct i2c_client *of_path_to_i2c_client(const char *path) | 995 | static struct i2c_client *of_path_to_i2c_client(const char *path) |
@@ -1445,7 +1455,7 @@ static void of_selftest_overlay_11(void) | |||
1445 | return; | 1455 | return; |
1446 | } | 1456 | } |
1447 | 1457 | ||
1448 | #if IS_ENABLED(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) | 1458 | #if IS_BUILTIN(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) |
1449 | 1459 | ||
1450 | struct selftest_i2c_bus_data { | 1460 | struct selftest_i2c_bus_data { |
1451 | struct platform_device *pdev; | 1461 | struct platform_device *pdev; |
@@ -1584,7 +1594,7 @@ static struct i2c_driver selftest_i2c_dev_driver = { | |||
1584 | .id_table = selftest_i2c_dev_id, | 1594 | .id_table = selftest_i2c_dev_id, |
1585 | }; | 1595 | }; |
1586 | 1596 | ||
1587 | #if IS_ENABLED(CONFIG_I2C_MUX) | 1597 | #if IS_BUILTIN(CONFIG_I2C_MUX) |
1588 | 1598 | ||
1589 | struct selftest_i2c_mux_data { | 1599 | struct selftest_i2c_mux_data { |
1590 | int nchans; | 1600 | int nchans; |
@@ -1695,7 +1705,7 @@ static int of_selftest_overlay_i2c_init(void) | |||
1695 | "could not register selftest i2c bus driver\n")) | 1705 | "could not register selftest i2c bus driver\n")) |
1696 | return ret; | 1706 | return ret; |
1697 | 1707 | ||
1698 | #if IS_ENABLED(CONFIG_I2C_MUX) | 1708 | #if IS_BUILTIN(CONFIG_I2C_MUX) |
1699 | ret = i2c_add_driver(&selftest_i2c_mux_driver); | 1709 | ret = i2c_add_driver(&selftest_i2c_mux_driver); |
1700 | if (selftest(ret == 0, | 1710 | if (selftest(ret == 0, |
1701 | "could not register selftest i2c mux driver\n")) | 1711 | "could not register selftest i2c mux driver\n")) |
@@ -1707,7 +1717,7 @@ static int of_selftest_overlay_i2c_init(void) | |||
1707 | 1717 | ||
1708 | static void of_selftest_overlay_i2c_cleanup(void) | 1718 | static void of_selftest_overlay_i2c_cleanup(void) |
1709 | { | 1719 | { |
1710 | #if IS_ENABLED(CONFIG_I2C_MUX) | 1720 | #if IS_BUILTIN(CONFIG_I2C_MUX) |
1711 | i2c_del_driver(&selftest_i2c_mux_driver); | 1721 | i2c_del_driver(&selftest_i2c_mux_driver); |
1712 | #endif | 1722 | #endif |
1713 | platform_driver_unregister(&selftest_i2c_bus_driver); | 1723 | platform_driver_unregister(&selftest_i2c_bus_driver); |
@@ -1814,7 +1824,7 @@ static void __init of_selftest_overlay(void) | |||
1814 | of_selftest_overlay_10(); | 1824 | of_selftest_overlay_10(); |
1815 | of_selftest_overlay_11(); | 1825 | of_selftest_overlay_11(); |
1816 | 1826 | ||
1817 | #if IS_ENABLED(CONFIG_I2C) | 1827 | #if IS_BUILTIN(CONFIG_I2C) |
1818 | if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n")) | 1828 | if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n")) |
1819 | goto out; | 1829 | goto out; |
1820 | 1830 | ||
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index aab55474dd0d..ee082c0366ec 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c | |||
@@ -127,7 +127,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) | |||
127 | return false; | 127 | return false; |
128 | } | 128 | } |
129 | 129 | ||
130 | static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, | 130 | static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, |
131 | int offset) | 131 | int offset) |
132 | { | 132 | { |
133 | struct xgene_pcie_port *port = bus->sysdata; | 133 | struct xgene_pcie_port *port = bus->sysdata; |
@@ -137,7 +137,7 @@ static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, | |||
137 | return NULL; | 137 | return NULL; |
138 | 138 | ||
139 | xgene_pcie_set_rtdid_reg(bus, devfn); | 139 | xgene_pcie_set_rtdid_reg(bus, devfn); |
140 | return xgene_pcie_get_cfg_base(bus); | 140 | return xgene_pcie_get_cfg_base(bus) + offset; |
141 | } | 141 | } |
142 | 142 | ||
143 | static struct pci_ops xgene_pcie_ops = { | 143 | static struct pci_ops xgene_pcie_ops = { |
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index aa012fb3834b..312f23a8429c 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -521,7 +521,8 @@ static ssize_t driver_override_store(struct device *dev, | |||
521 | struct pci_dev *pdev = to_pci_dev(dev); | 521 | struct pci_dev *pdev = to_pci_dev(dev); |
522 | char *driver_override, *old = pdev->driver_override, *cp; | 522 | char *driver_override, *old = pdev->driver_override, *cp; |
523 | 523 | ||
524 | if (count > PATH_MAX) | 524 | /* We need to keep extra room for a newline */ |
525 | if (count >= (PAGE_SIZE - 1)) | ||
525 | return -EINVAL; | 526 | return -EINVAL; |
526 | 527 | ||
527 | driver_override = kstrndup(buf, count, GFP_KERNEL); | 528 | driver_override = kstrndup(buf, count, GFP_KERNEL); |
@@ -549,7 +550,7 @@ static ssize_t driver_override_show(struct device *dev, | |||
549 | { | 550 | { |
550 | struct pci_dev *pdev = to_pci_dev(dev); | 551 | struct pci_dev *pdev = to_pci_dev(dev); |
551 | 552 | ||
552 | return sprintf(buf, "%s\n", pdev->driver_override); | 553 | return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); |
553 | } | 554 | } |
554 | static DEVICE_ATTR_RW(driver_override); | 555 | static DEVICE_ATTR_RW(driver_override); |
555 | 556 | ||
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index b899947d839d..1245dca79009 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -3444,13 +3444,6 @@ static umode_t regulator_attr_is_visible(struct kobject *kobj, | |||
3444 | if (attr == &dev_attr_requested_microamps.attr) | 3444 | if (attr == &dev_attr_requested_microamps.attr) |
3445 | return rdev->desc->type == REGULATOR_CURRENT ? mode : 0; | 3445 | return rdev->desc->type == REGULATOR_CURRENT ? mode : 0; |
3446 | 3446 | ||
3447 | /* all the other attributes exist to support constraints; | ||
3448 | * don't show them if there are no constraints, or if the | ||
3449 | * relevant supporting methods are missing. | ||
3450 | */ | ||
3451 | if (!rdev->constraints) | ||
3452 | return 0; | ||
3453 | |||
3454 | /* constraints need specific supporting methods */ | 3447 | /* constraints need specific supporting methods */ |
3455 | if (attr == &dev_attr_min_microvolts.attr || | 3448 | if (attr == &dev_attr_min_microvolts.attr || |
3456 | attr == &dev_attr_max_microvolts.attr) | 3449 | attr == &dev_attr_max_microvolts.attr) |
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c index bc6100103f7f..f0489cb9018b 100644 --- a/drivers/regulator/da9210-regulator.c +++ b/drivers/regulator/da9210-regulator.c | |||
@@ -152,6 +152,15 @@ static int da9210_i2c_probe(struct i2c_client *i2c, | |||
152 | config.regmap = chip->regmap; | 152 | config.regmap = chip->regmap; |
153 | config.of_node = dev->of_node; | 153 | config.of_node = dev->of_node; |
154 | 154 | ||
155 | /* Mask all interrupt sources to deassert interrupt line */ | ||
156 | error = regmap_write(chip->regmap, DA9210_REG_MASK_A, ~0); | ||
157 | if (!error) | ||
158 | error = regmap_write(chip->regmap, DA9210_REG_MASK_B, ~0); | ||
159 | if (error) { | ||
160 | dev_err(&i2c->dev, "Failed to write to mask reg: %d\n", error); | ||
161 | return error; | ||
162 | } | ||
163 | |||
155 | rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config); | 164 | rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config); |
156 | if (IS_ERR(rdev)) { | 165 | if (IS_ERR(rdev)) { |
157 | dev_err(&i2c->dev, "Failed to register DA9210 regulator\n"); | 166 | dev_err(&i2c->dev, "Failed to register DA9210 regulator\n"); |
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 1f93b752a81c..3fd44353cc80 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c | |||
@@ -235,6 +235,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
235 | .vsel_mask = RK808_LDO_VSEL_MASK, | 235 | .vsel_mask = RK808_LDO_VSEL_MASK, |
236 | .enable_reg = RK808_LDO_EN_REG, | 236 | .enable_reg = RK808_LDO_EN_REG, |
237 | .enable_mask = BIT(0), | 237 | .enable_mask = BIT(0), |
238 | .enable_time = 400, | ||
238 | .owner = THIS_MODULE, | 239 | .owner = THIS_MODULE, |
239 | }, { | 240 | }, { |
240 | .name = "LDO_REG2", | 241 | .name = "LDO_REG2", |
@@ -249,6 +250,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
249 | .vsel_mask = RK808_LDO_VSEL_MASK, | 250 | .vsel_mask = RK808_LDO_VSEL_MASK, |
250 | .enable_reg = RK808_LDO_EN_REG, | 251 | .enable_reg = RK808_LDO_EN_REG, |
251 | .enable_mask = BIT(1), | 252 | .enable_mask = BIT(1), |
253 | .enable_time = 400, | ||
252 | .owner = THIS_MODULE, | 254 | .owner = THIS_MODULE, |
253 | }, { | 255 | }, { |
254 | .name = "LDO_REG3", | 256 | .name = "LDO_REG3", |
@@ -263,6 +265,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
263 | .vsel_mask = RK808_BUCK4_VSEL_MASK, | 265 | .vsel_mask = RK808_BUCK4_VSEL_MASK, |
264 | .enable_reg = RK808_LDO_EN_REG, | 266 | .enable_reg = RK808_LDO_EN_REG, |
265 | .enable_mask = BIT(2), | 267 | .enable_mask = BIT(2), |
268 | .enable_time = 400, | ||
266 | .owner = THIS_MODULE, | 269 | .owner = THIS_MODULE, |
267 | }, { | 270 | }, { |
268 | .name = "LDO_REG4", | 271 | .name = "LDO_REG4", |
@@ -277,6 +280,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
277 | .vsel_mask = RK808_LDO_VSEL_MASK, | 280 | .vsel_mask = RK808_LDO_VSEL_MASK, |
278 | .enable_reg = RK808_LDO_EN_REG, | 281 | .enable_reg = RK808_LDO_EN_REG, |
279 | .enable_mask = BIT(3), | 282 | .enable_mask = BIT(3), |
283 | .enable_time = 400, | ||
280 | .owner = THIS_MODULE, | 284 | .owner = THIS_MODULE, |
281 | }, { | 285 | }, { |
282 | .name = "LDO_REG5", | 286 | .name = "LDO_REG5", |
@@ -291,6 +295,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
291 | .vsel_mask = RK808_LDO_VSEL_MASK, | 295 | .vsel_mask = RK808_LDO_VSEL_MASK, |
292 | .enable_reg = RK808_LDO_EN_REG, | 296 | .enable_reg = RK808_LDO_EN_REG, |
293 | .enable_mask = BIT(4), | 297 | .enable_mask = BIT(4), |
298 | .enable_time = 400, | ||
294 | .owner = THIS_MODULE, | 299 | .owner = THIS_MODULE, |
295 | }, { | 300 | }, { |
296 | .name = "LDO_REG6", | 301 | .name = "LDO_REG6", |
@@ -305,6 +310,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
305 | .vsel_mask = RK808_LDO_VSEL_MASK, | 310 | .vsel_mask = RK808_LDO_VSEL_MASK, |
306 | .enable_reg = RK808_LDO_EN_REG, | 311 | .enable_reg = RK808_LDO_EN_REG, |
307 | .enable_mask = BIT(5), | 312 | .enable_mask = BIT(5), |
313 | .enable_time = 400, | ||
308 | .owner = THIS_MODULE, | 314 | .owner = THIS_MODULE, |
309 | }, { | 315 | }, { |
310 | .name = "LDO_REG7", | 316 | .name = "LDO_REG7", |
@@ -319,6 +325,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
319 | .vsel_mask = RK808_LDO_VSEL_MASK, | 325 | .vsel_mask = RK808_LDO_VSEL_MASK, |
320 | .enable_reg = RK808_LDO_EN_REG, | 326 | .enable_reg = RK808_LDO_EN_REG, |
321 | .enable_mask = BIT(6), | 327 | .enable_mask = BIT(6), |
328 | .enable_time = 400, | ||
322 | .owner = THIS_MODULE, | 329 | .owner = THIS_MODULE, |
323 | }, { | 330 | }, { |
324 | .name = "LDO_REG8", | 331 | .name = "LDO_REG8", |
@@ -333,6 +340,7 @@ static const struct regulator_desc rk808_reg[] = { | |||
333 | .vsel_mask = RK808_LDO_VSEL_MASK, | 340 | .vsel_mask = RK808_LDO_VSEL_MASK, |
334 | .enable_reg = RK808_LDO_EN_REG, | 341 | .enable_reg = RK808_LDO_EN_REG, |
335 | .enable_mask = BIT(7), | 342 | .enable_mask = BIT(7), |
343 | .enable_time = 400, | ||
336 | .owner = THIS_MODULE, | 344 | .owner = THIS_MODULE, |
337 | }, { | 345 | }, { |
338 | .name = "SWITCH_REG1", | 346 | .name = "SWITCH_REG1", |
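Every rk808 LDO descriptor gains .enable_time = 400, telling the regulator core that the rail needs roughly 400 microseconds to ramp after its enable bit is set; consumers enabling the regulator are delayed by that amount. A minimal regulator_desc sketch showing where the field sits (the ops, register macro and values are placeholders, the field names are the real ones from struct regulator_desc):

    static const struct regulator_desc demo_ldo = {
        .name        = "LDO_DEMO",
        .ops         = &demo_ldo_ops,   /* assumed defined elsewhere */
        .type        = REGULATOR_VOLTAGE,
        .owner       = THIS_MODULE,
        .enable_reg  = DEMO_LDO_EN_REG, /* placeholder register */
        .enable_mask = BIT(0),
        .enable_time = 400,             /* ramp time in microseconds */
    };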
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 4241eeab3386..f4cf6851fae9 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
@@ -849,6 +849,7 @@ static struct s3c_rtc_data const s3c2443_rtc_data = { | |||
849 | 849 | ||
850 | static struct s3c_rtc_data const s3c6410_rtc_data = { | 850 | static struct s3c_rtc_data const s3c6410_rtc_data = { |
851 | .max_user_freq = 32768, | 851 | .max_user_freq = 32768, |
852 | .needs_src_clk = true, | ||
852 | .irq_handler = s3c6410_rtc_irq, | 853 | .irq_handler = s3c6410_rtc_irq, |
853 | .set_freq = s3c6410_rtc_setfreq, | 854 | .set_freq = s3c6410_rtc_setfreq, |
854 | .enable_tick = s3c6410_rtc_enable_tick, | 855 | .enable_tick = s3c6410_rtc_enable_tick, |
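Marking the s3c6410 variant with needs_src_clk = true makes the driver treat the separate RTC source clock as mandatory on that SoC. In drivers organised around per-SoC data blocks, a flag like this typically gates an optional resource in probe; a hedged sketch of that pattern (the "rtc_src" clock name and the field names are assumptions, not taken from the hunk):

    /* Sketch: acquire an extra clock only when the SoC data says it is needed. */
    if (info->data->needs_src_clk) {
        info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src");
        if (IS_ERR(info->rtc_src_clk))
            return PTR_ERR(info->rtc_src_clk);
        ret = clk_prepare_enable(info->rtc_src_clk);
        if (ret)
            return ret;
    }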
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 96128cb009f3..da212813f2d5 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -547,7 +547,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
547 | * parse input | 547 | * parse input |
548 | */ | 548 | */ |
549 | num_of_segments = 0; | 549 | num_of_segments = 0; |
550 | for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) { | 550 | for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) { |
551 | for (j = i; (buf[j] != ':') && | 551 | for (j = i; (buf[j] != ':') && |
552 | (buf[j] != '\0') && | 552 | (buf[j] != '\0') && |
553 | (buf[j] != '\n') && | 553 | (buf[j] != '\n') && |
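The rewritten loop condition above moves the i < count test in front of the buf[i] dereferences, so the short-circuit behaviour of && guarantees the input buffer is never read past the length sysfs handed in. The rule in isolation:

    /* Bounds check first: buf[i] is only evaluated once i is known
     * to be inside the caller-supplied length. */
    for (i = 0; i < count && buf[i] != '\0' && buf[i] != '\n'; i++) {
        /* safe: i is in range before buf[i] is touched */
    }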
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c index 09db45296eed..7497ddde2dd6 100644 --- a/drivers/s390/block/scm_blk_cluster.c +++ b/drivers/s390/block/scm_blk_cluster.c | |||
@@ -92,7 +92,7 @@ bool scm_reserve_cluster(struct scm_request *scmrq) | |||
92 | add = 0; | 92 | add = 0; |
93 | continue; | 93 | continue; |
94 | } | 94 | } |
95 | for (pos = 0; pos <= iter->aob->request.msb_count; pos++) { | 95 | for (pos = 0; pos < iter->aob->request.msb_count; pos++) { |
96 | if (clusters_intersect(req, iter->request[pos]) && | 96 | if (clusters_intersect(req, iter->request[pos]) && |
97 | (rq_data_dir(req) == WRITE || | 97 | (rq_data_dir(req) == WRITE || |
98 | rq_data_dir(iter->request[pos]) == WRITE)) { | 98 | rq_data_dir(iter->request[pos]) == WRITE)) { |
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 62b58d38ce2e..60de66252fa2 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c | |||
@@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work) | |||
500 | struct sas_discovery_event *ev = to_sas_discovery_event(work); | 500 | struct sas_discovery_event *ev = to_sas_discovery_event(work); |
501 | struct asd_sas_port *port = ev->port; | 501 | struct asd_sas_port *port = ev->port; |
502 | struct sas_ha_struct *ha = port->ha; | 502 | struct sas_ha_struct *ha = port->ha; |
503 | struct domain_device *ddev = port->port_dev; | ||
503 | 504 | ||
504 | /* prevent revalidation from finding sata links in recovery */ | 505 | /* prevent revalidation from finding sata links in recovery */ |
505 | mutex_lock(&ha->disco_mutex); | 506 | mutex_lock(&ha->disco_mutex); |
@@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work) | |||
514 | SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, | 515 | SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, |
515 | task_pid_nr(current)); | 516 | task_pid_nr(current)); |
516 | 517 | ||
517 | if (port->port_dev) | 518 | if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE || |
518 | res = sas_ex_revalidate_domain(port->port_dev); | 519 | ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE)) |
520 | res = sas_ex_revalidate_domain(ddev); | ||
519 | 521 | ||
520 | SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", | 522 | SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", |
521 | port->id, task_pid_nr(current), res); | 523 | port->id, task_pid_nr(current), res); |
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 9af7841f2e8c..06de34001c66 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c | |||
@@ -764,17 +764,17 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master, | |||
764 | (unsigned long long)xfer->rx_dma); | 764 | (unsigned long long)xfer->rx_dma); |
765 | } | 765 | } |
766 | 766 | ||
767 | /* REVISIT: We're waiting for ENDRX before we start the next | 767 | /* REVISIT: We're waiting for RXBUFF before we start the next |
768 | * transfer because we need to handle some difficult timing | 768 | * transfer because we need to handle some difficult timing |
769 | * issues otherwise. If we wait for ENDTX in one transfer and | 769 | * issues otherwise. If we wait for TXBUFE in one transfer and |
770 | * then starts waiting for ENDRX in the next, it's difficult | 770 | * then starts waiting for RXBUFF in the next, it's difficult |
771 | * to tell the difference between the ENDRX interrupt we're | 771 | * to tell the difference between the RXBUFF interrupt we're |
772 | * actually waiting for and the ENDRX interrupt of the | 772 | * actually waiting for and the RXBUFF interrupt of the |
773 | * previous transfer. | 773 | * previous transfer. |
774 | * | 774 | * |
775 | * It should be doable, though. Just not now... | 775 | * It should be doable, though. Just not now... |
776 | */ | 776 | */ |
777 | spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES)); | 777 | spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES)); |
778 | spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); | 778 | spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); |
779 | } | 779 | } |
780 | 780 | ||
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index a0197fd4e95c..3ce39d10fafb 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c | |||
@@ -139,6 +139,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws) | |||
139 | 1, | 139 | 1, |
140 | DMA_MEM_TO_DEV, | 140 | DMA_MEM_TO_DEV, |
141 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 141 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
142 | if (!txdesc) | ||
143 | return NULL; | ||
144 | |||
142 | txdesc->callback = dw_spi_dma_tx_done; | 145 | txdesc->callback = dw_spi_dma_tx_done; |
143 | txdesc->callback_param = dws; | 146 | txdesc->callback_param = dws; |
144 | 147 | ||
@@ -184,6 +187,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws) | |||
184 | 1, | 187 | 1, |
185 | DMA_DEV_TO_MEM, | 188 | DMA_DEV_TO_MEM, |
186 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 189 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
190 | if (!rxdesc) | ||
191 | return NULL; | ||
192 | |||
187 | rxdesc->callback = dw_spi_dma_rx_done; | 193 | rxdesc->callback = dw_spi_dma_rx_done; |
188 | rxdesc->callback_param = dws; | 194 | rxdesc->callback_param = dws; |
189 | 195 | ||
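Both descriptor-prepare paths above now check for a NULL return before touching the descriptor: dmaengine prep calls can fail, for example when the channel has no free descriptors. The general shape of the check, as a sketch in which everything except the dmaengine calls is a placeholder:

    struct dma_async_tx_descriptor *desc;

    desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
                                   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    if (!desc)
        return NULL;                    /* let the caller fall back or retry */

    desc->callback = demo_dma_done;     /* hypothetical completion handler */
    desc->callback_param = priv;
    dmaengine_submit(desc);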
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c index 5ba331047cbe..6d331e0db331 100644 --- a/drivers/spi/spi-dw-pci.c +++ b/drivers/spi/spi-dw-pci.c | |||
@@ -36,13 +36,13 @@ struct spi_pci_desc { | |||
36 | 36 | ||
37 | static struct spi_pci_desc spi_pci_mid_desc_1 = { | 37 | static struct spi_pci_desc spi_pci_mid_desc_1 = { |
38 | .setup = dw_spi_mid_init, | 38 | .setup = dw_spi_mid_init, |
39 | .num_cs = 32, | 39 | .num_cs = 5, |
40 | .bus_num = 0, | 40 | .bus_num = 0, |
41 | }; | 41 | }; |
42 | 42 | ||
43 | static struct spi_pci_desc spi_pci_mid_desc_2 = { | 43 | static struct spi_pci_desc spi_pci_mid_desc_2 = { |
44 | .setup = dw_spi_mid_init, | 44 | .setup = dw_spi_mid_init, |
45 | .num_cs = 4, | 45 | .num_cs = 2, |
46 | .bus_num = 1, | 46 | .bus_num = 1, |
47 | }; | 47 | }; |
48 | 48 | ||
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 5a97a62b298a..4847afba89f4 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c | |||
@@ -621,14 +621,14 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws) | |||
621 | if (!dws->fifo_len) { | 621 | if (!dws->fifo_len) { |
622 | u32 fifo; | 622 | u32 fifo; |
623 | 623 | ||
624 | for (fifo = 2; fifo <= 256; fifo++) { | 624 | for (fifo = 1; fifo < 256; fifo++) { |
625 | dw_writew(dws, DW_SPI_TXFLTR, fifo); | 625 | dw_writew(dws, DW_SPI_TXFLTR, fifo); |
626 | if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) | 626 | if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) |
627 | break; | 627 | break; |
628 | } | 628 | } |
629 | dw_writew(dws, DW_SPI_TXFLTR, 0); | 629 | dw_writew(dws, DW_SPI_TXFLTR, 0); |
630 | 630 | ||
631 | dws->fifo_len = (fifo == 2) ? 0 : fifo - 1; | 631 | dws->fifo_len = (fifo == 1) ? 0 : fifo; |
632 | dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); | 632 | dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); |
633 | } | 633 | } |
634 | } | 634 | } |
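The FIFO probe above relies on the TX FIFO threshold register accepting only values from 0 up to depth minus one: it writes increasing candidates and stops at the first value that does not read back, which therefore equals the FIFO depth. Starting the loop at 1 and reporting fifo rather than fifo - 1 makes the detected size come out right. The idea reduced to a sketch (the accessors and the TXFLTR offset macro are placeholders):

    u32 fifo, depth;

    for (fifo = 1; fifo < 256; fifo++) {
        writel(fifo, base + TXFLTR);    /* try a threshold value */
        if (readl(base + TXFLTR) != fifo)
            break;                      /* first rejected value == FIFO depth */
    }
    writel(0, base + TXFLTR);           /* restore the default threshold */
    depth = (fifo == 1) ? 0 : fifo;     /* 0 means detection failed */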
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index c01567d53581..e649bc7d4c08 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c | |||
@@ -459,6 +459,13 @@ static int img_spfi_transfer_one(struct spi_master *master, | |||
459 | unsigned long flags; | 459 | unsigned long flags; |
460 | int ret; | 460 | int ret; |
461 | 461 | ||
462 | if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) { | ||
463 | dev_err(spfi->dev, | ||
464 | "Transfer length (%d) is greater than the max supported (%d)", | ||
465 | xfer->len, SPFI_TRANSACTION_TSIZE_MASK); | ||
466 | return -EINVAL; | ||
467 | } | ||
468 | |||
462 | /* | 469 | /* |
463 | * Stop all DMA and reset the controller if the previous transaction | 470 | * Stop all DMA and reset the controller if the previous transaction |
464 | * timed-out and never completed it's DMA. | 471 | * timed-out and never completed it's DMA. |
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 89ca162801da..ee513a85296b 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c | |||
@@ -534,12 +534,12 @@ static void giveback(struct pl022 *pl022) | |||
534 | pl022->cur_msg = NULL; | 534 | pl022->cur_msg = NULL; |
535 | pl022->cur_transfer = NULL; | 535 | pl022->cur_transfer = NULL; |
536 | pl022->cur_chip = NULL; | 536 | pl022->cur_chip = NULL; |
537 | spi_finalize_current_message(pl022->master); | ||
538 | 537 | ||
539 | /* disable the SPI/SSP operation */ | 538 | /* disable the SPI/SSP operation */ |
540 | writew((readw(SSP_CR1(pl022->virtbase)) & | 539 | writew((readw(SSP_CR1(pl022->virtbase)) & |
541 | (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); | 540 | (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); |
542 | 541 | ||
542 | spi_finalize_current_message(pl022->master); | ||
543 | } | 543 | } |
544 | 544 | ||
545 | /** | 545 | /** |
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 884a716e50cb..5c0616870358 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c | |||
@@ -101,6 +101,7 @@ struct ti_qspi { | |||
101 | #define QSPI_FLEN(n) ((n - 1) << 0) | 101 | #define QSPI_FLEN(n) ((n - 1) << 0) |
102 | 102 | ||
103 | /* STATUS REGISTER */ | 103 | /* STATUS REGISTER */ |
104 | #define BUSY 0x01 | ||
104 | #define WC 0x02 | 105 | #define WC 0x02 |
105 | 106 | ||
106 | /* INTERRUPT REGISTER */ | 107 | /* INTERRUPT REGISTER */ |
@@ -199,6 +200,21 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi) | |||
199 | ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG); | 200 | ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG); |
200 | } | 201 | } |
201 | 202 | ||
203 | static inline u32 qspi_is_busy(struct ti_qspi *qspi) | ||
204 | { | ||
205 | u32 stat; | ||
206 | unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT; | ||
207 | |||
208 | stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); | ||
209 | while ((stat & BUSY) && time_after(timeout, jiffies)) { | ||
210 | cpu_relax(); | ||
211 | stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); | ||
212 | } | ||
213 | |||
214 | WARN(stat & BUSY, "qspi busy\n"); | ||
215 | return stat & BUSY; | ||
216 | } | ||
217 | |||
202 | static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) | 218 | static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) |
203 | { | 219 | { |
204 | int wlen, count; | 220 | int wlen, count; |
@@ -211,6 +227,9 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) | |||
211 | wlen = t->bits_per_word >> 3; /* in bytes */ | 227 | wlen = t->bits_per_word >> 3; /* in bytes */ |
212 | 228 | ||
213 | while (count) { | 229 | while (count) { |
230 | if (qspi_is_busy(qspi)) | ||
231 | return -EBUSY; | ||
232 | |||
214 | switch (wlen) { | 233 | switch (wlen) { |
215 | case 1: | 234 | case 1: |
216 | dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n", | 235 | dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n", |
@@ -266,6 +285,9 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) | |||
266 | 285 | ||
267 | while (count) { | 286 | while (count) { |
268 | dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); | 287 | dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); |
288 | if (qspi_is_busy(qspi)) | ||
289 | return -EBUSY; | ||
290 | |||
269 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); | 291 | ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); |
270 | if (!wait_for_completion_timeout(&qspi->transfer_complete, | 292 | if (!wait_for_completion_timeout(&qspi->transfer_complete, |
271 | QSPI_COMPLETION_TIMEOUT)) { | 293 | QSPI_COMPLETION_TIMEOUT)) { |
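The new qspi_is_busy() helper polls the status register until the BUSY bit drops or a jiffies deadline expires, and both qspi_write_msg() and qspi_read_msg() now refuse to push the next word while the controller is still busy. The bounded busy-wait pattern on its own, with the timeout value and register offset as assumptions:

    unsigned long deadline = jiffies + msecs_to_jiffies(100);
    u32 stat = readl(base + STATUS_REG);

    while ((stat & BUSY) && time_after(deadline, jiffies)) {
        cpu_relax();                    /* relax the pipeline while spinning */
        stat = readl(base + STATUS_REG);
    }
    if (stat & BUSY)
        return -EBUSY;                  /* controller never went idle */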
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index af98b096af2f..175c9956cbe3 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
@@ -144,10 +144,9 @@ struct ffs_io_data { | |||
144 | bool read; | 144 | bool read; |
145 | 145 | ||
146 | struct kiocb *kiocb; | 146 | struct kiocb *kiocb; |
147 | const struct iovec *iovec; | 147 | struct iov_iter data; |
148 | unsigned long nr_segs; | 148 | const void *to_free; |
149 | char __user *buf; | 149 | char *buf; |
150 | size_t len; | ||
151 | 150 | ||
152 | struct mm_struct *mm; | 151 | struct mm_struct *mm; |
153 | struct work_struct work; | 152 | struct work_struct work; |
@@ -649,29 +648,10 @@ static void ffs_user_copy_worker(struct work_struct *work) | |||
649 | io_data->req->actual; | 648 | io_data->req->actual; |
650 | 649 | ||
651 | if (io_data->read && ret > 0) { | 650 | if (io_data->read && ret > 0) { |
652 | int i; | ||
653 | size_t pos = 0; | ||
654 | |||
655 | /* | ||
656 | * Since req->length may be bigger than io_data->len (after | ||
657 | * being rounded up to maxpacketsize), we may end up with more | ||
658 | * data then user space has space for. | ||
659 | */ | ||
660 | ret = min_t(int, ret, io_data->len); | ||
661 | |||
662 | use_mm(io_data->mm); | 651 | use_mm(io_data->mm); |
663 | for (i = 0; i < io_data->nr_segs; i++) { | 652 | ret = copy_to_iter(io_data->buf, ret, &io_data->data); |
664 | size_t len = min_t(size_t, ret - pos, | 653 | if (iov_iter_count(&io_data->data)) |
665 | io_data->iovec[i].iov_len); | 654 | ret = -EFAULT; |
666 | if (!len) | ||
667 | break; | ||
668 | if (unlikely(copy_to_user(io_data->iovec[i].iov_base, | ||
669 | &io_data->buf[pos], len))) { | ||
670 | ret = -EFAULT; | ||
671 | break; | ||
672 | } | ||
673 | pos += len; | ||
674 | } | ||
675 | unuse_mm(io_data->mm); | 655 | unuse_mm(io_data->mm); |
676 | } | 656 | } |
677 | 657 | ||
@@ -684,7 +664,7 @@ static void ffs_user_copy_worker(struct work_struct *work) | |||
684 | 664 | ||
685 | io_data->kiocb->private = NULL; | 665 | io_data->kiocb->private = NULL; |
686 | if (io_data->read) | 666 | if (io_data->read) |
687 | kfree(io_data->iovec); | 667 | kfree(io_data->to_free); |
688 | kfree(io_data->buf); | 668 | kfree(io_data->buf); |
689 | kfree(io_data); | 669 | kfree(io_data); |
690 | } | 670 | } |
@@ -743,6 +723,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) | |||
743 | * before the waiting completes, so do not assign to 'gadget' earlier | 723 | * before the waiting completes, so do not assign to 'gadget' earlier |
744 | */ | 724 | */ |
745 | struct usb_gadget *gadget = epfile->ffs->gadget; | 725 | struct usb_gadget *gadget = epfile->ffs->gadget; |
726 | size_t copied; | ||
746 | 727 | ||
747 | spin_lock_irq(&epfile->ffs->eps_lock); | 728 | spin_lock_irq(&epfile->ffs->eps_lock); |
748 | /* In the meantime, endpoint got disabled or changed. */ | 729 | /* In the meantime, endpoint got disabled or changed. */ |
@@ -750,34 +731,21 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) | |||
750 | spin_unlock_irq(&epfile->ffs->eps_lock); | 731 | spin_unlock_irq(&epfile->ffs->eps_lock); |
751 | return -ESHUTDOWN; | 732 | return -ESHUTDOWN; |
752 | } | 733 | } |
734 | data_len = iov_iter_count(&io_data->data); | ||
753 | /* | 735 | /* |
754 | * Controller may require buffer size to be aligned to | 736 | * Controller may require buffer size to be aligned to |
755 | * maxpacketsize of an out endpoint. | 737 | * maxpacketsize of an out endpoint. |
756 | */ | 738 | */ |
757 | data_len = io_data->read ? | 739 | if (io_data->read) |
758 | usb_ep_align_maybe(gadget, ep->ep, io_data->len) : | 740 | data_len = usb_ep_align_maybe(gadget, ep->ep, data_len); |
759 | io_data->len; | ||
760 | spin_unlock_irq(&epfile->ffs->eps_lock); | 741 | spin_unlock_irq(&epfile->ffs->eps_lock); |
761 | 742 | ||
762 | data = kmalloc(data_len, GFP_KERNEL); | 743 | data = kmalloc(data_len, GFP_KERNEL); |
763 | if (unlikely(!data)) | 744 | if (unlikely(!data)) |
764 | return -ENOMEM; | 745 | return -ENOMEM; |
765 | if (io_data->aio && !io_data->read) { | 746 | if (!io_data->read) { |
766 | int i; | 747 | copied = copy_from_iter(data, data_len, &io_data->data); |
767 | size_t pos = 0; | 748 | if (copied != data_len) { |
768 | for (i = 0; i < io_data->nr_segs; i++) { | ||
769 | if (unlikely(copy_from_user(&data[pos], | ||
770 | io_data->iovec[i].iov_base, | ||
771 | io_data->iovec[i].iov_len))) { | ||
772 | ret = -EFAULT; | ||
773 | goto error; | ||
774 | } | ||
775 | pos += io_data->iovec[i].iov_len; | ||
776 | } | ||
777 | } else { | ||
778 | if (!io_data->read && | ||
779 | unlikely(__copy_from_user(data, io_data->buf, | ||
780 | io_data->len))) { | ||
781 | ret = -EFAULT; | 749 | ret = -EFAULT; |
782 | goto error; | 750 | goto error; |
783 | } | 751 | } |
@@ -876,10 +844,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) | |||
876 | */ | 844 | */ |
877 | ret = ep->status; | 845 | ret = ep->status; |
878 | if (io_data->read && ret > 0) { | 846 | if (io_data->read && ret > 0) { |
879 | ret = min_t(size_t, ret, io_data->len); | 847 | ret = copy_to_iter(data, ret, &io_data->data); |
880 | 848 | if (unlikely(iov_iter_count(&io_data->data))) | |
881 | if (unlikely(copy_to_user(io_data->buf, | ||
882 | data, ret))) | ||
883 | ret = -EFAULT; | 849 | ret = -EFAULT; |
884 | } | 850 | } |
885 | } | 851 | } |
@@ -898,37 +864,6 @@ error: | |||
898 | return ret; | 864 | return ret; |
899 | } | 865 | } |
900 | 866 | ||
901 | static ssize_t | ||
902 | ffs_epfile_write(struct file *file, const char __user *buf, size_t len, | ||
903 | loff_t *ptr) | ||
904 | { | ||
905 | struct ffs_io_data io_data; | ||
906 | |||
907 | ENTER(); | ||
908 | |||
909 | io_data.aio = false; | ||
910 | io_data.read = false; | ||
911 | io_data.buf = (char * __user)buf; | ||
912 | io_data.len = len; | ||
913 | |||
914 | return ffs_epfile_io(file, &io_data); | ||
915 | } | ||
916 | |||
917 | static ssize_t | ||
918 | ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr) | ||
919 | { | ||
920 | struct ffs_io_data io_data; | ||
921 | |||
922 | ENTER(); | ||
923 | |||
924 | io_data.aio = false; | ||
925 | io_data.read = true; | ||
926 | io_data.buf = buf; | ||
927 | io_data.len = len; | ||
928 | |||
929 | return ffs_epfile_io(file, &io_data); | ||
930 | } | ||
931 | |||
932 | static int | 867 | static int |
933 | ffs_epfile_open(struct inode *inode, struct file *file) | 868 | ffs_epfile_open(struct inode *inode, struct file *file) |
934 | { | 869 | { |
@@ -965,67 +900,86 @@ static int ffs_aio_cancel(struct kiocb *kiocb) | |||
965 | return value; | 900 | return value; |
966 | } | 901 | } |
967 | 902 | ||
968 | static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb, | 903 | static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) |
969 | const struct iovec *iovec, | ||
970 | unsigned long nr_segs, loff_t loff) | ||
971 | { | 904 | { |
972 | struct ffs_io_data *io_data; | 905 | struct ffs_io_data io_data, *p = &io_data; |
906 | ssize_t res; | ||
973 | 907 | ||
974 | ENTER(); | 908 | ENTER(); |
975 | 909 | ||
976 | io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); | 910 | if (!is_sync_kiocb(kiocb)) { |
977 | if (unlikely(!io_data)) | 911 | p = kmalloc(sizeof(io_data), GFP_KERNEL); |
978 | return -ENOMEM; | 912 | if (unlikely(!p)) |
913 | return -ENOMEM; | ||
914 | p->aio = true; | ||
915 | } else { | ||
916 | p->aio = false; | ||
917 | } | ||
979 | 918 | ||
980 | io_data->aio = true; | 919 | p->read = false; |
981 | io_data->read = false; | 920 | p->kiocb = kiocb; |
982 | io_data->kiocb = kiocb; | 921 | p->data = *from; |
983 | io_data->iovec = iovec; | 922 | p->mm = current->mm; |
984 | io_data->nr_segs = nr_segs; | ||
985 | io_data->len = kiocb->ki_nbytes; | ||
986 | io_data->mm = current->mm; | ||
987 | 923 | ||
988 | kiocb->private = io_data; | 924 | kiocb->private = p; |
989 | 925 | ||
990 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); | 926 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); |
991 | 927 | ||
992 | return ffs_epfile_io(kiocb->ki_filp, io_data); | 928 | res = ffs_epfile_io(kiocb->ki_filp, p); |
929 | if (res == -EIOCBQUEUED) | ||
930 | return res; | ||
931 | if (p->aio) | ||
932 | kfree(p); | ||
933 | else | ||
934 | *from = p->data; | ||
935 | return res; | ||
993 | } | 936 | } |
994 | 937 | ||
995 | static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb, | 938 | static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) |
996 | const struct iovec *iovec, | ||
997 | unsigned long nr_segs, loff_t loff) | ||
998 | { | 939 | { |
999 | struct ffs_io_data *io_data; | 940 | struct ffs_io_data io_data, *p = &io_data; |
1000 | struct iovec *iovec_copy; | 941 | ssize_t res; |
1001 | 942 | ||
1002 | ENTER(); | 943 | ENTER(); |
1003 | 944 | ||
1004 | iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL); | 945 | if (!is_sync_kiocb(kiocb)) { |
1005 | if (unlikely(!iovec_copy)) | 946 | p = kmalloc(sizeof(io_data), GFP_KERNEL); |
1006 | return -ENOMEM; | 947 | if (unlikely(!p)) |
1007 | 948 | return -ENOMEM; | |
1008 | memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs); | 949 | p->aio = true; |
1009 | 950 | } else { | |
1010 | io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); | 951 | p->aio = false; |
1011 | if (unlikely(!io_data)) { | ||
1012 | kfree(iovec_copy); | ||
1013 | return -ENOMEM; | ||
1014 | } | 952 | } |
1015 | 953 | ||
1016 | io_data->aio = true; | 954 | p->read = true; |
1017 | io_data->read = true; | 955 | p->kiocb = kiocb; |
1018 | io_data->kiocb = kiocb; | 956 | if (p->aio) { |
1019 | io_data->iovec = iovec_copy; | 957 | p->to_free = dup_iter(&p->data, to, GFP_KERNEL); |
1020 | io_data->nr_segs = nr_segs; | 958 | if (!p->to_free) { |
1021 | io_data->len = kiocb->ki_nbytes; | 959 | kfree(p); |
1022 | io_data->mm = current->mm; | 960 | return -ENOMEM; |
961 | } | ||
962 | } else { | ||
963 | p->data = *to; | ||
964 | p->to_free = NULL; | ||
965 | } | ||
966 | p->mm = current->mm; | ||
1023 | 967 | ||
1024 | kiocb->private = io_data; | 968 | kiocb->private = p; |
1025 | 969 | ||
1026 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); | 970 | kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); |
1027 | 971 | ||
1028 | return ffs_epfile_io(kiocb->ki_filp, io_data); | 972 | res = ffs_epfile_io(kiocb->ki_filp, p); |
973 | if (res == -EIOCBQUEUED) | ||
974 | return res; | ||
975 | |||
976 | if (p->aio) { | ||
977 | kfree(p->to_free); | ||
978 | kfree(p); | ||
979 | } else { | ||
980 | *to = p->data; | ||
981 | } | ||
982 | return res; | ||
1029 | } | 983 | } |
1030 | 984 | ||
1031 | static int | 985 | static int |
@@ -1105,10 +1059,10 @@ static const struct file_operations ffs_epfile_operations = { | |||
1105 | .llseek = no_llseek, | 1059 | .llseek = no_llseek, |
1106 | 1060 | ||
1107 | .open = ffs_epfile_open, | 1061 | .open = ffs_epfile_open, |
1108 | .write = ffs_epfile_write, | 1062 | .write = new_sync_write, |
1109 | .read = ffs_epfile_read, | 1063 | .read = new_sync_read, |
1110 | .aio_write = ffs_epfile_aio_write, | 1064 | .write_iter = ffs_epfile_write_iter, |
1111 | .aio_read = ffs_epfile_aio_read, | 1065 | .read_iter = ffs_epfile_read_iter, |
1112 | .release = ffs_epfile_release, | 1066 | .release = ffs_epfile_release, |
1113 | .unlocked_ioctl = ffs_epfile_ioctl, | 1067 | .unlocked_ioctl = ffs_epfile_ioctl, |
1114 | }; | 1068 | }; |
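The functionfs endpoint files switch from the iovec-array aio hooks to read_iter/write_iter: user memory is described by a struct iov_iter, copy_to_iter()/copy_from_iter() replace the hand-rolled per-segment copy loops, is_sync_kiocb() separates the on-stack synchronous case from the kmalloc'ed async one, and dup_iter() keeps a private copy of the caller's iovec for async reads (freed later through to_free). A reduced sketch of the synchronous read side under those assumptions, where demo_fill_buffer() stands in for the actual endpoint transfer:

    static ssize_t demo_read_iter(struct kiocb *iocb, struct iov_iter *to)
    {
        size_t len = iov_iter_count(to);        /* space offered by the caller */
        char *buf;
        ssize_t ret;

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;

        ret = demo_fill_buffer(buf, len);       /* hypothetical data source */
        if (ret > 0)
            ret = copy_to_iter(buf, ret, to);   /* returns bytes actually copied */

        kfree(buf);
        return ret;
    }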
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index db49ec4c748e..200f9a584064 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c | |||
@@ -74,6 +74,8 @@ MODULE_DESCRIPTION (DRIVER_DESC); | |||
74 | MODULE_AUTHOR ("David Brownell"); | 74 | MODULE_AUTHOR ("David Brownell"); |
75 | MODULE_LICENSE ("GPL"); | 75 | MODULE_LICENSE ("GPL"); |
76 | 76 | ||
77 | static int ep_open(struct inode *, struct file *); | ||
78 | |||
77 | 79 | ||
78 | /*----------------------------------------------------------------------*/ | 80 | /*----------------------------------------------------------------------*/ |
79 | 81 | ||
@@ -283,14 +285,15 @@ static void epio_complete (struct usb_ep *ep, struct usb_request *req) | |||
283 | * still need dev->lock to use epdata->ep. | 285 | * still need dev->lock to use epdata->ep. |
284 | */ | 286 | */ |
285 | static int | 287 | static int |
286 | get_ready_ep (unsigned f_flags, struct ep_data *epdata) | 288 | get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write) |
287 | { | 289 | { |
288 | int val; | 290 | int val; |
289 | 291 | ||
290 | if (f_flags & O_NONBLOCK) { | 292 | if (f_flags & O_NONBLOCK) { |
291 | if (!mutex_trylock(&epdata->lock)) | 293 | if (!mutex_trylock(&epdata->lock)) |
292 | goto nonblock; | 294 | goto nonblock; |
293 | if (epdata->state != STATE_EP_ENABLED) { | 295 | if (epdata->state != STATE_EP_ENABLED && |
296 | (!is_write || epdata->state != STATE_EP_READY)) { | ||
294 | mutex_unlock(&epdata->lock); | 297 | mutex_unlock(&epdata->lock); |
295 | nonblock: | 298 | nonblock: |
296 | val = -EAGAIN; | 299 | val = -EAGAIN; |
@@ -305,18 +308,20 @@ nonblock: | |||
305 | 308 | ||
306 | switch (epdata->state) { | 309 | switch (epdata->state) { |
307 | case STATE_EP_ENABLED: | 310 | case STATE_EP_ENABLED: |
311 | return 0; | ||
312 | case STATE_EP_READY: /* not configured yet */ | ||
313 | if (is_write) | ||
314 | return 0; | ||
315 | // FALLTHRU | ||
316 | case STATE_EP_UNBOUND: /* clean disconnect */ | ||
308 | break; | 317 | break; |
309 | // case STATE_EP_DISABLED: /* "can't happen" */ | 318 | // case STATE_EP_DISABLED: /* "can't happen" */ |
310 | // case STATE_EP_READY: /* "can't happen" */ | ||
311 | default: /* error! */ | 319 | default: /* error! */ |
312 | pr_debug ("%s: ep %p not available, state %d\n", | 320 | pr_debug ("%s: ep %p not available, state %d\n", |
313 | shortname, epdata, epdata->state); | 321 | shortname, epdata, epdata->state); |
314 | // FALLTHROUGH | ||
315 | case STATE_EP_UNBOUND: /* clean disconnect */ | ||
316 | val = -ENODEV; | ||
317 | mutex_unlock(&epdata->lock); | ||
318 | } | 322 | } |
319 | return val; | 323 | mutex_unlock(&epdata->lock); |
324 | return -ENODEV; | ||
320 | } | 325 | } |
321 | 326 | ||
322 | static ssize_t | 327 | static ssize_t |
@@ -363,97 +368,6 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) | |||
363 | return value; | 368 | return value; |
364 | } | 369 | } |
365 | 370 | ||
366 | |||
367 | /* handle a synchronous OUT bulk/intr/iso transfer */ | ||
368 | static ssize_t | ||
369 | ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) | ||
370 | { | ||
371 | struct ep_data *data = fd->private_data; | ||
372 | void *kbuf; | ||
373 | ssize_t value; | ||
374 | |||
375 | if ((value = get_ready_ep (fd->f_flags, data)) < 0) | ||
376 | return value; | ||
377 | |||
378 | /* halt any endpoint by doing a "wrong direction" i/o call */ | ||
379 | if (usb_endpoint_dir_in(&data->desc)) { | ||
380 | if (usb_endpoint_xfer_isoc(&data->desc)) { | ||
381 | mutex_unlock(&data->lock); | ||
382 | return -EINVAL; | ||
383 | } | ||
384 | DBG (data->dev, "%s halt\n", data->name); | ||
385 | spin_lock_irq (&data->dev->lock); | ||
386 | if (likely (data->ep != NULL)) | ||
387 | usb_ep_set_halt (data->ep); | ||
388 | spin_unlock_irq (&data->dev->lock); | ||
389 | mutex_unlock(&data->lock); | ||
390 | return -EBADMSG; | ||
391 | } | ||
392 | |||
393 | /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */ | ||
394 | |||
395 | value = -ENOMEM; | ||
396 | kbuf = kmalloc (len, GFP_KERNEL); | ||
397 | if (unlikely (!kbuf)) | ||
398 | goto free1; | ||
399 | |||
400 | value = ep_io (data, kbuf, len); | ||
401 | VDEBUG (data->dev, "%s read %zu OUT, status %d\n", | ||
402 | data->name, len, (int) value); | ||
403 | if (value >= 0 && copy_to_user (buf, kbuf, value)) | ||
404 | value = -EFAULT; | ||
405 | |||
406 | free1: | ||
407 | mutex_unlock(&data->lock); | ||
408 | kfree (kbuf); | ||
409 | return value; | ||
410 | } | ||
411 | |||
412 | /* handle a synchronous IN bulk/intr/iso transfer */ | ||
413 | static ssize_t | ||
414 | ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | ||
415 | { | ||
416 | struct ep_data *data = fd->private_data; | ||
417 | void *kbuf; | ||
418 | ssize_t value; | ||
419 | |||
420 | if ((value = get_ready_ep (fd->f_flags, data)) < 0) | ||
421 | return value; | ||
422 | |||
423 | /* halt any endpoint by doing a "wrong direction" i/o call */ | ||
424 | if (!usb_endpoint_dir_in(&data->desc)) { | ||
425 | if (usb_endpoint_xfer_isoc(&data->desc)) { | ||
426 | mutex_unlock(&data->lock); | ||
427 | return -EINVAL; | ||
428 | } | ||
429 | DBG (data->dev, "%s halt\n", data->name); | ||
430 | spin_lock_irq (&data->dev->lock); | ||
431 | if (likely (data->ep != NULL)) | ||
432 | usb_ep_set_halt (data->ep); | ||
433 | spin_unlock_irq (&data->dev->lock); | ||
434 | mutex_unlock(&data->lock); | ||
435 | return -EBADMSG; | ||
436 | } | ||
437 | |||
438 | /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */ | ||
439 | |||
440 | value = -ENOMEM; | ||
441 | kbuf = memdup_user(buf, len); | ||
442 | if (IS_ERR(kbuf)) { | ||
443 | value = PTR_ERR(kbuf); | ||
444 | kbuf = NULL; | ||
445 | goto free1; | ||
446 | } | ||
447 | |||
448 | value = ep_io (data, kbuf, len); | ||
449 | VDEBUG (data->dev, "%s write %zu IN, status %d\n", | ||
450 | data->name, len, (int) value); | ||
451 | free1: | ||
452 | mutex_unlock(&data->lock); | ||
453 | kfree (kbuf); | ||
454 | return value; | ||
455 | } | ||
456 | |||
457 | static int | 371 | static int |
458 | ep_release (struct inode *inode, struct file *fd) | 372 | ep_release (struct inode *inode, struct file *fd) |
459 | { | 373 | { |
@@ -481,7 +395,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value) | |||
481 | struct ep_data *data = fd->private_data; | 395 | struct ep_data *data = fd->private_data; |
482 | int status; | 396 | int status; |
483 | 397 | ||
484 | if ((status = get_ready_ep (fd->f_flags, data)) < 0) | 398 | if ((status = get_ready_ep (fd->f_flags, data, false)) < 0) |
485 | return status; | 399 | return status; |
486 | 400 | ||
487 | spin_lock_irq (&data->dev->lock); | 401 | spin_lock_irq (&data->dev->lock); |
@@ -517,8 +431,8 @@ struct kiocb_priv { | |||
517 | struct mm_struct *mm; | 431 | struct mm_struct *mm; |
518 | struct work_struct work; | 432 | struct work_struct work; |
519 | void *buf; | 433 | void *buf; |
520 | const struct iovec *iv; | 434 | struct iov_iter to; |
521 | unsigned long nr_segs; | 435 | const void *to_free; |
522 | unsigned actual; | 436 | unsigned actual; |
523 | }; | 437 | }; |
524 | 438 | ||
@@ -541,35 +455,6 @@ static int ep_aio_cancel(struct kiocb *iocb) | |||
541 | return value; | 455 | return value; |
542 | } | 456 | } |
543 | 457 | ||
544 | static ssize_t ep_copy_to_user(struct kiocb_priv *priv) | ||
545 | { | ||
546 | ssize_t len, total; | ||
547 | void *to_copy; | ||
548 | int i; | ||
549 | |||
550 | /* copy stuff into user buffers */ | ||
551 | total = priv->actual; | ||
552 | len = 0; | ||
553 | to_copy = priv->buf; | ||
554 | for (i=0; i < priv->nr_segs; i++) { | ||
555 | ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total); | ||
556 | |||
557 | if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) { | ||
558 | if (len == 0) | ||
559 | len = -EFAULT; | ||
560 | break; | ||
561 | } | ||
562 | |||
563 | total -= this; | ||
564 | len += this; | ||
565 | to_copy += this; | ||
566 | if (total == 0) | ||
567 | break; | ||
568 | } | ||
569 | |||
570 | return len; | ||
571 | } | ||
572 | |||
573 | static void ep_user_copy_worker(struct work_struct *work) | 458 | static void ep_user_copy_worker(struct work_struct *work) |
574 | { | 459 | { |
575 | struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); | 460 | struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); |
@@ -578,13 +463,16 @@ static void ep_user_copy_worker(struct work_struct *work) | |||
578 | size_t ret; | 463 | size_t ret; |
579 | 464 | ||
580 | use_mm(mm); | 465 | use_mm(mm); |
581 | ret = ep_copy_to_user(priv); | 466 | ret = copy_to_iter(priv->buf, priv->actual, &priv->to); |
582 | unuse_mm(mm); | 467 | unuse_mm(mm); |
468 | if (!ret) | ||
469 | ret = -EFAULT; | ||
583 | 470 | ||
584 | /* completing the iocb can drop the ctx and mm, don't touch mm after */ | 471 | /* completing the iocb can drop the ctx and mm, don't touch mm after */ |
585 | aio_complete(iocb, ret, ret); | 472 | aio_complete(iocb, ret, ret); |
586 | 473 | ||
587 | kfree(priv->buf); | 474 | kfree(priv->buf); |
475 | kfree(priv->to_free); | ||
588 | kfree(priv); | 476 | kfree(priv); |
589 | } | 477 | } |
590 | 478 | ||
@@ -603,8 +491,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) | |||
603 | * don't need to copy anything to userspace, so we can | 491 | * don't need to copy anything to userspace, so we can |
604 | * complete the aio request immediately. | 492 | * complete the aio request immediately. |
605 | */ | 493 | */ |
606 | if (priv->iv == NULL || unlikely(req->actual == 0)) { | 494 | if (priv->to_free == NULL || unlikely(req->actual == 0)) { |
607 | kfree(req->buf); | 495 | kfree(req->buf); |
496 | kfree(priv->to_free); | ||
608 | kfree(priv); | 497 | kfree(priv); |
609 | iocb->private = NULL; | 498 | iocb->private = NULL; |
610 | /* aio_complete() reports bytes-transferred _and_ faults */ | 499 | /* aio_complete() reports bytes-transferred _and_ faults */ |
@@ -618,6 +507,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) | |||
618 | 507 | ||
619 | priv->buf = req->buf; | 508 | priv->buf = req->buf; |
620 | priv->actual = req->actual; | 509 | priv->actual = req->actual; |
510 | INIT_WORK(&priv->work, ep_user_copy_worker); | ||
621 | schedule_work(&priv->work); | 511 | schedule_work(&priv->work); |
622 | } | 512 | } |
623 | spin_unlock(&epdata->dev->lock); | 513 | spin_unlock(&epdata->dev->lock); |
@@ -626,38 +516,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) | |||
626 | put_ep(epdata); | 516 | put_ep(epdata); |
627 | } | 517 | } |
628 | 518 | ||
629 | static ssize_t | 519 | static ssize_t ep_aio(struct kiocb *iocb, |
630 | ep_aio_rwtail( | 520 | struct kiocb_priv *priv, |
631 | struct kiocb *iocb, | 521 | struct ep_data *epdata, |
632 | char *buf, | 522 | char *buf, |
633 | size_t len, | 523 | size_t len) |
634 | struct ep_data *epdata, | ||
635 | const struct iovec *iv, | ||
636 | unsigned long nr_segs | ||
637 | ) | ||
638 | { | 524 | { |
639 | struct kiocb_priv *priv; | 525 | struct usb_request *req; |
640 | struct usb_request *req; | 526 | ssize_t value; |
641 | ssize_t value; | ||
642 | 527 | ||
643 | priv = kmalloc(sizeof *priv, GFP_KERNEL); | ||
644 | if (!priv) { | ||
645 | value = -ENOMEM; | ||
646 | fail: | ||
647 | kfree(buf); | ||
648 | return value; | ||
649 | } | ||
650 | iocb->private = priv; | 528 | iocb->private = priv; |
651 | priv->iocb = iocb; | 529 | priv->iocb = iocb; |
652 | priv->iv = iv; | ||
653 | priv->nr_segs = nr_segs; | ||
654 | INIT_WORK(&priv->work, ep_user_copy_worker); | ||
655 | |||
656 | value = get_ready_ep(iocb->ki_filp->f_flags, epdata); | ||
657 | if (unlikely(value < 0)) { | ||
658 | kfree(priv); | ||
659 | goto fail; | ||
660 | } | ||
661 | 530 | ||
662 | kiocb_set_cancel_fn(iocb, ep_aio_cancel); | 531 | kiocb_set_cancel_fn(iocb, ep_aio_cancel); |
663 | get_ep(epdata); | 532 | get_ep(epdata); |
@@ -669,75 +538,154 @@ fail: | |||
669 | * allocate or submit those if the host disconnected. | 538 | * allocate or submit those if the host disconnected. |
670 | */ | 539 | */ |
671 | spin_lock_irq(&epdata->dev->lock); | 540 | spin_lock_irq(&epdata->dev->lock); |
672 | if (likely(epdata->ep)) { | 541 | value = -ENODEV; |
673 | req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); | 542 | if (unlikely(epdata->ep)) |
674 | if (likely(req)) { | 543 | goto fail; |
675 | priv->req = req; | ||
676 | req->buf = buf; | ||
677 | req->length = len; | ||
678 | req->complete = ep_aio_complete; | ||
679 | req->context = iocb; | ||
680 | value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); | ||
681 | if (unlikely(0 != value)) | ||
682 | usb_ep_free_request(epdata->ep, req); | ||
683 | } else | ||
684 | value = -EAGAIN; | ||
685 | } else | ||
686 | value = -ENODEV; | ||
687 | spin_unlock_irq(&epdata->dev->lock); | ||
688 | 544 | ||
689 | mutex_unlock(&epdata->lock); | 545 | req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); |
546 | value = -ENOMEM; | ||
547 | if (unlikely(!req)) | ||
548 | goto fail; | ||
690 | 549 | ||
691 | if (unlikely(value)) { | 550 | priv->req = req; |
692 | kfree(priv); | 551 | req->buf = buf; |
693 | put_ep(epdata); | 552 | req->length = len; |
694 | } else | 553 | req->complete = ep_aio_complete; |
695 | value = -EIOCBQUEUED; | 554 | req->context = iocb; |
555 | value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); | ||
556 | if (unlikely(0 != value)) { | ||
557 | usb_ep_free_request(epdata->ep, req); | ||
558 | goto fail; | ||
559 | } | ||
560 | spin_unlock_irq(&epdata->dev->lock); | ||
561 | return -EIOCBQUEUED; | ||
562 | |||
563 | fail: | ||
564 | spin_unlock_irq(&epdata->dev->lock); | ||
565 | kfree(priv->to_free); | ||
566 | kfree(priv); | ||
567 | put_ep(epdata); | ||
696 | return value; | 568 | return value; |
697 | } | 569 | } |
698 | 570 | ||
699 | static ssize_t | 571 | static ssize_t |
700 | ep_aio_read(struct kiocb *iocb, const struct iovec *iov, | 572 | ep_read_iter(struct kiocb *iocb, struct iov_iter *to) |
701 | unsigned long nr_segs, loff_t o) | ||
702 | { | 573 | { |
703 | struct ep_data *epdata = iocb->ki_filp->private_data; | 574 | struct file *file = iocb->ki_filp; |
704 | char *buf; | 575 | struct ep_data *epdata = file->private_data; |
576 | size_t len = iov_iter_count(to); | ||
577 | ssize_t value; | ||
578 | char *buf; | ||
705 | 579 | ||
706 | if (unlikely(usb_endpoint_dir_in(&epdata->desc))) | 580 | if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0) |
707 | return -EINVAL; | 581 | return value; |
708 | 582 | ||
709 | buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); | 583 | /* halt any endpoint by doing a "wrong direction" i/o call */ |
710 | if (unlikely(!buf)) | 584 | if (usb_endpoint_dir_in(&epdata->desc)) { |
711 | return -ENOMEM; | 585 | if (usb_endpoint_xfer_isoc(&epdata->desc) || |
586 | !is_sync_kiocb(iocb)) { | ||
587 | mutex_unlock(&epdata->lock); | ||
588 | return -EINVAL; | ||
589 | } | ||
590 | DBG (epdata->dev, "%s halt\n", epdata->name); | ||
591 | spin_lock_irq(&epdata->dev->lock); | ||
592 | if (likely(epdata->ep != NULL)) | ||
593 | usb_ep_set_halt(epdata->ep); | ||
594 | spin_unlock_irq(&epdata->dev->lock); | ||
595 | mutex_unlock(&epdata->lock); | ||
596 | return -EBADMSG; | ||
597 | } | ||
712 | 598 | ||
713 | return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs); | 599 | buf = kmalloc(len, GFP_KERNEL); |
600 | if (unlikely(!buf)) { | ||
601 | mutex_unlock(&epdata->lock); | ||
602 | return -ENOMEM; | ||
603 | } | ||
604 | if (is_sync_kiocb(iocb)) { | ||
605 | value = ep_io(epdata, buf, len); | ||
606 | if (value >= 0 && copy_to_iter(buf, value, to)) | ||
607 | value = -EFAULT; | ||
608 | } else { | ||
609 | struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); | ||
610 | value = -ENOMEM; | ||
611 | if (!priv) | ||
612 | goto fail; | ||
613 | priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL); | ||
614 | if (!priv->to_free) { | ||
615 | kfree(priv); | ||
616 | goto fail; | ||
617 | } | ||
618 | value = ep_aio(iocb, priv, epdata, buf, len); | ||
619 | if (value == -EIOCBQUEUED) | ||
620 | buf = NULL; | ||
621 | } | ||
622 | fail: | ||
623 | kfree(buf); | ||
624 | mutex_unlock(&epdata->lock); | ||
625 | return value; | ||
714 | } | 626 | } |
715 | 627 | ||
628 | static ssize_t ep_config(struct ep_data *, const char *, size_t); | ||
629 | |||
716 | static ssize_t | 630 | static ssize_t |
717 | ep_aio_write(struct kiocb *iocb, const struct iovec *iov, | 631 | ep_write_iter(struct kiocb *iocb, struct iov_iter *from) |
718 | unsigned long nr_segs, loff_t o) | ||
719 | { | 632 | { |
720 | struct ep_data *epdata = iocb->ki_filp->private_data; | 633 | struct file *file = iocb->ki_filp; |
721 | char *buf; | 634 | struct ep_data *epdata = file->private_data; |
722 | size_t len = 0; | 635 | size_t len = iov_iter_count(from); |
723 | int i = 0; | 636 | bool configured; |
637 | ssize_t value; | ||
638 | char *buf; | ||
639 | |||
640 | if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0) | ||
641 | return value; | ||
724 | 642 | ||
725 | if (unlikely(!usb_endpoint_dir_in(&epdata->desc))) | 643 | configured = epdata->state == STATE_EP_ENABLED; |
726 | return -EINVAL; | ||
727 | 644 | ||
728 | buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); | 645 | /* halt any endpoint by doing a "wrong direction" i/o call */ |
729 | if (unlikely(!buf)) | 646 | if (configured && !usb_endpoint_dir_in(&epdata->desc)) { |
647 | if (usb_endpoint_xfer_isoc(&epdata->desc) || | ||
648 | !is_sync_kiocb(iocb)) { | ||
649 | mutex_unlock(&epdata->lock); | ||
650 | return -EINVAL; | ||
651 | } | ||
652 | DBG (epdata->dev, "%s halt\n", epdata->name); | ||
653 | spin_lock_irq(&epdata->dev->lock); | ||
654 | if (likely(epdata->ep != NULL)) | ||
655 | usb_ep_set_halt(epdata->ep); | ||
656 | spin_unlock_irq(&epdata->dev->lock); | ||
657 | mutex_unlock(&epdata->lock); | ||
658 | return -EBADMSG; | ||
659 | } | ||
660 | |||
661 | buf = kmalloc(len, GFP_KERNEL); | ||
662 | if (unlikely(!buf)) { | ||
663 | mutex_unlock(&epdata->lock); | ||
730 | return -ENOMEM; | 664 | return -ENOMEM; |
665 | } | ||
731 | 666 | ||
732 | for (i=0; i < nr_segs; i++) { | 667 | if (unlikely(copy_from_iter(buf, len, from) != len)) { |
733 | if (unlikely(copy_from_user(&buf[len], iov[i].iov_base, | 668 | value = -EFAULT; |
734 | iov[i].iov_len) != 0)) { | 669 | goto out; |
735 | kfree(buf); | 670 | } |
736 | return -EFAULT; | 671 | |
672 | if (unlikely(!configured)) { | ||
673 | value = ep_config(epdata, buf, len); | ||
674 | } else if (is_sync_kiocb(iocb)) { | ||
675 | value = ep_io(epdata, buf, len); | ||
676 | } else { | ||
677 | struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); | ||
678 | value = -ENOMEM; | ||
679 | if (priv) { | ||
680 | value = ep_aio(iocb, priv, epdata, buf, len); | ||
681 | if (value == -EIOCBQUEUED) | ||
682 | buf = NULL; | ||
737 | } | 683 | } |
738 | len += iov[i].iov_len; | ||
739 | } | 684 | } |
740 | return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0); | 685 | out: |
686 | kfree(buf); | ||
687 | mutex_unlock(&epdata->lock); | ||
688 | return value; | ||
741 | } | 689 | } |
742 | 690 | ||
743 | /*----------------------------------------------------------------------*/ | 691 | /*----------------------------------------------------------------------*/ |
@@ -745,15 +693,15 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
745 | /* used after endpoint configuration */ | 693 | /* used after endpoint configuration */ |
746 | static const struct file_operations ep_io_operations = { | 694 | static const struct file_operations ep_io_operations = { |
747 | .owner = THIS_MODULE, | 695 | .owner = THIS_MODULE, |
748 | .llseek = no_llseek, | ||
749 | 696 | ||
750 | .read = ep_read, | 697 | .open = ep_open, |
751 | .write = ep_write, | ||
752 | .unlocked_ioctl = ep_ioctl, | ||
753 | .release = ep_release, | 698 | .release = ep_release, |
754 | 699 | .llseek = no_llseek, | |
755 | .aio_read = ep_aio_read, | 700 | .read = new_sync_read, |
756 | .aio_write = ep_aio_write, | 701 | .write = new_sync_write, |
702 | .unlocked_ioctl = ep_ioctl, | ||
703 | .read_iter = ep_read_iter, | ||
704 | .write_iter = ep_write_iter, | ||
757 | }; | 705 | }; |
758 | 706 | ||
759 | /* ENDPOINT INITIALIZATION | 707 | /* ENDPOINT INITIALIZATION |
@@ -770,17 +718,12 @@ static const struct file_operations ep_io_operations = { | |||
770 | * speed descriptor, then optional high speed descriptor. | 718 | * speed descriptor, then optional high speed descriptor. |
771 | */ | 719 | */ |
772 | static ssize_t | 720 | static ssize_t |
773 | ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | 721 | ep_config (struct ep_data *data, const char *buf, size_t len) |
774 | { | 722 | { |
775 | struct ep_data *data = fd->private_data; | ||
776 | struct usb_ep *ep; | 723 | struct usb_ep *ep; |
777 | u32 tag; | 724 | u32 tag; |
778 | int value, length = len; | 725 | int value, length = len; |
779 | 726 | ||
780 | value = mutex_lock_interruptible(&data->lock); | ||
781 | if (value < 0) | ||
782 | return value; | ||
783 | |||
784 | if (data->state != STATE_EP_READY) { | 727 | if (data->state != STATE_EP_READY) { |
785 | value = -EL2HLT; | 728 | value = -EL2HLT; |
786 | goto fail; | 729 | goto fail; |
@@ -791,9 +734,7 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
791 | goto fail0; | 734 | goto fail0; |
792 | 735 | ||
793 | /* we might need to change message format someday */ | 736 | /* we might need to change message format someday */ |
794 | if (copy_from_user (&tag, buf, 4)) { | 737 | memcpy(&tag, buf, 4); |
795 | goto fail1; | ||
796 | } | ||
797 | if (tag != 1) { | 738 | if (tag != 1) { |
798 | DBG(data->dev, "config %s, bad tag %d\n", data->name, tag); | 739 | DBG(data->dev, "config %s, bad tag %d\n", data->name, tag); |
799 | goto fail0; | 740 | goto fail0; |
@@ -806,19 +747,15 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
806 | */ | 747 | */ |
807 | 748 | ||
808 | /* full/low speed descriptor, then high speed */ | 749 | /* full/low speed descriptor, then high speed */ |
809 | if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) { | 750 | memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE); |
810 | goto fail1; | ||
811 | } | ||
812 | if (data->desc.bLength != USB_DT_ENDPOINT_SIZE | 751 | if (data->desc.bLength != USB_DT_ENDPOINT_SIZE |
813 | || data->desc.bDescriptorType != USB_DT_ENDPOINT) | 752 | || data->desc.bDescriptorType != USB_DT_ENDPOINT) |
814 | goto fail0; | 753 | goto fail0; |
815 | if (len != USB_DT_ENDPOINT_SIZE) { | 754 | if (len != USB_DT_ENDPOINT_SIZE) { |
816 | if (len != 2 * USB_DT_ENDPOINT_SIZE) | 755 | if (len != 2 * USB_DT_ENDPOINT_SIZE) |
817 | goto fail0; | 756 | goto fail0; |
818 | if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, | 757 | memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, |
819 | USB_DT_ENDPOINT_SIZE)) { | 758 | USB_DT_ENDPOINT_SIZE); |
820 | goto fail1; | ||
821 | } | ||
822 | if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE | 759 | if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE |
823 | || data->hs_desc.bDescriptorType | 760 | || data->hs_desc.bDescriptorType |
824 | != USB_DT_ENDPOINT) { | 761 | != USB_DT_ENDPOINT) { |
@@ -840,24 +777,20 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
840 | case USB_SPEED_LOW: | 777 | case USB_SPEED_LOW: |
841 | case USB_SPEED_FULL: | 778 | case USB_SPEED_FULL: |
842 | ep->desc = &data->desc; | 779 | ep->desc = &data->desc; |
843 | value = usb_ep_enable(ep); | ||
844 | if (value == 0) | ||
845 | data->state = STATE_EP_ENABLED; | ||
846 | break; | 780 | break; |
847 | case USB_SPEED_HIGH: | 781 | case USB_SPEED_HIGH: |
848 | /* fails if caller didn't provide that descriptor... */ | 782 | /* fails if caller didn't provide that descriptor... */ |
849 | ep->desc = &data->hs_desc; | 783 | ep->desc = &data->hs_desc; |
850 | value = usb_ep_enable(ep); | ||
851 | if (value == 0) | ||
852 | data->state = STATE_EP_ENABLED; | ||
853 | break; | 784 | break; |
854 | default: | 785 | default: |
855 | DBG(data->dev, "unconnected, %s init abandoned\n", | 786 | DBG(data->dev, "unconnected, %s init abandoned\n", |
856 | data->name); | 787 | data->name); |
857 | value = -EINVAL; | 788 | value = -EINVAL; |
789 | goto gone; | ||
858 | } | 790 | } |
791 | value = usb_ep_enable(ep); | ||
859 | if (value == 0) { | 792 | if (value == 0) { |
860 | fd->f_op = &ep_io_operations; | 793 | data->state = STATE_EP_ENABLED; |
861 | value = length; | 794 | value = length; |
862 | } | 795 | } |
863 | gone: | 796 | gone: |
@@ -867,14 +800,10 @@ fail: | |||
867 | data->desc.bDescriptorType = 0; | 800 | data->desc.bDescriptorType = 0; |
868 | data->hs_desc.bDescriptorType = 0; | 801 | data->hs_desc.bDescriptorType = 0; |
869 | } | 802 | } |
870 | mutex_unlock(&data->lock); | ||
871 | return value; | 803 | return value; |
872 | fail0: | 804 | fail0: |
873 | value = -EINVAL; | 805 | value = -EINVAL; |
874 | goto fail; | 806 | goto fail; |
875 | fail1: | ||
876 | value = -EFAULT; | ||
877 | goto fail; | ||
878 | } | 807 | } |
879 | 808 | ||
880 | static int | 809 | static int |
@@ -902,15 +831,6 @@ ep_open (struct inode *inode, struct file *fd) | |||
902 | return value; | 831 | return value; |
903 | } | 832 | } |
904 | 833 | ||
905 | /* used before endpoint configuration */ | ||
906 | static const struct file_operations ep_config_operations = { | ||
907 | .llseek = no_llseek, | ||
908 | |||
909 | .open = ep_open, | ||
910 | .write = ep_config, | ||
911 | .release = ep_release, | ||
912 | }; | ||
913 | |||
914 | /*----------------------------------------------------------------------*/ | 834 | /*----------------------------------------------------------------------*/ |
915 | 835 | ||
916 | /* EP0 IMPLEMENTATION can be partly in userspace. | 836 | /* EP0 IMPLEMENTATION can be partly in userspace. |
@@ -989,6 +909,10 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) | |||
989 | enum ep0_state state; | 909 | enum ep0_state state; |
990 | 910 | ||
991 | spin_lock_irq (&dev->lock); | 911 | spin_lock_irq (&dev->lock); |
912 | if (dev->state <= STATE_DEV_OPENED) { | ||
913 | retval = -EINVAL; | ||
914 | goto done; | ||
915 | } | ||
992 | 916 | ||
993 | /* report fd mode change before acting on it */ | 917 | /* report fd mode change before acting on it */ |
994 | if (dev->setup_abort) { | 918 | if (dev->setup_abort) { |
@@ -1187,8 +1111,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1187 | struct dev_data *dev = fd->private_data; | 1111 | struct dev_data *dev = fd->private_data; |
1188 | ssize_t retval = -ESRCH; | 1112 | ssize_t retval = -ESRCH; |
1189 | 1113 | ||
1190 | spin_lock_irq (&dev->lock); | ||
1191 | |||
1192 | /* report fd mode change before acting on it */ | 1114 | /* report fd mode change before acting on it */ |
1193 | if (dev->setup_abort) { | 1115 | if (dev->setup_abort) { |
1194 | dev->setup_abort = 0; | 1116 | dev->setup_abort = 0; |
@@ -1234,7 +1156,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1234 | } else | 1156 | } else |
1235 | DBG (dev, "fail %s, state %d\n", __func__, dev->state); | 1157 | DBG (dev, "fail %s, state %d\n", __func__, dev->state); |
1236 | 1158 | ||
1237 | spin_unlock_irq (&dev->lock); | ||
1238 | return retval; | 1159 | return retval; |
1239 | } | 1160 | } |
1240 | 1161 | ||
@@ -1281,6 +1202,9 @@ ep0_poll (struct file *fd, poll_table *wait) | |||
1281 | struct dev_data *dev = fd->private_data; | 1202 | struct dev_data *dev = fd->private_data; |
1282 | int mask = 0; | 1203 | int mask = 0; |
1283 | 1204 | ||
1205 | if (dev->state <= STATE_DEV_OPENED) | ||
1206 | return DEFAULT_POLLMASK; | ||
1207 | |||
1284 | poll_wait(fd, &dev->wait, wait); | 1208 | poll_wait(fd, &dev->wait, wait); |
1285 | 1209 | ||
1286 | spin_lock_irq (&dev->lock); | 1210 | spin_lock_irq (&dev->lock); |
@@ -1316,19 +1240,6 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value) | |||
1316 | return ret; | 1240 | return ret; |
1317 | } | 1241 | } |
1318 | 1242 | ||
1319 | /* used after device configuration */ | ||
1320 | static const struct file_operations ep0_io_operations = { | ||
1321 | .owner = THIS_MODULE, | ||
1322 | .llseek = no_llseek, | ||
1323 | |||
1324 | .read = ep0_read, | ||
1325 | .write = ep0_write, | ||
1326 | .fasync = ep0_fasync, | ||
1327 | .poll = ep0_poll, | ||
1328 | .unlocked_ioctl = dev_ioctl, | ||
1329 | .release = dev_release, | ||
1330 | }; | ||
1331 | |||
1332 | /*----------------------------------------------------------------------*/ | 1243 | /*----------------------------------------------------------------------*/ |
1333 | 1244 | ||
1334 | /* The in-kernel gadget driver handles most ep0 issues, in particular | 1245 | /* The in-kernel gadget driver handles most ep0 issues, in particular |
@@ -1650,7 +1561,7 @@ static int activate_ep_files (struct dev_data *dev) | |||
1650 | goto enomem1; | 1561 | goto enomem1; |
1651 | 1562 | ||
1652 | data->dentry = gadgetfs_create_file (dev->sb, data->name, | 1563 | data->dentry = gadgetfs_create_file (dev->sb, data->name, |
1653 | data, &ep_config_operations); | 1564 | data, &ep_io_operations); |
1654 | if (!data->dentry) | 1565 | if (!data->dentry) |
1655 | goto enomem2; | 1566 | goto enomem2; |
1656 | list_add_tail (&data->epfiles, &dev->epfiles); | 1567 | list_add_tail (&data->epfiles, &dev->epfiles); |
@@ -1852,6 +1763,14 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1852 | u32 tag; | 1763 | u32 tag; |
1853 | char *kbuf; | 1764 | char *kbuf; |
1854 | 1765 | ||
1766 | spin_lock_irq(&dev->lock); | ||
1767 | if (dev->state > STATE_DEV_OPENED) { | ||
1768 | value = ep0_write(fd, buf, len, ptr); | ||
1769 | spin_unlock_irq(&dev->lock); | ||
1770 | return value; | ||
1771 | } | ||
1772 | spin_unlock_irq(&dev->lock); | ||
1773 | |||
1855 | if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) | 1774 | if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) |
1856 | return -EINVAL; | 1775 | return -EINVAL; |
1857 | 1776 | ||
@@ -1925,7 +1844,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) | |||
1925 | * on, they can work ... except in cleanup paths that | 1844 | * on, they can work ... except in cleanup paths that |
1926 | * kick in after the ep0 descriptor is closed. | 1845 | * kick in after the ep0 descriptor is closed. |
1927 | */ | 1846 | */ |
1928 | fd->f_op = &ep0_io_operations; | ||
1929 | value = len; | 1847 | value = len; |
1930 | } | 1848 | } |
1931 | return value; | 1849 | return value; |
@@ -1956,12 +1874,14 @@ dev_open (struct inode *inode, struct file *fd) | |||
1956 | return value; | 1874 | return value; |
1957 | } | 1875 | } |
1958 | 1876 | ||
1959 | static const struct file_operations dev_init_operations = { | 1877 | static const struct file_operations ep0_operations = { |
1960 | .llseek = no_llseek, | 1878 | .llseek = no_llseek, |
1961 | 1879 | ||
1962 | .open = dev_open, | 1880 | .open = dev_open, |
1881 | .read = ep0_read, | ||
1963 | .write = dev_config, | 1882 | .write = dev_config, |
1964 | .fasync = ep0_fasync, | 1883 | .fasync = ep0_fasync, |
1884 | .poll = ep0_poll, | ||
1965 | .unlocked_ioctl = dev_ioctl, | 1885 | .unlocked_ioctl = dev_ioctl, |
1966 | .release = dev_release, | 1886 | .release = dev_release, |
1967 | }; | 1887 | }; |
@@ -2077,7 +1997,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent) | |||
2077 | goto Enomem; | 1997 | goto Enomem; |
2078 | 1998 | ||
2079 | dev->sb = sb; | 1999 | dev->sb = sb; |
2080 | dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations); | 2000 | dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations); |
2081 | if (!dev->dentry) { | 2001 | if (!dev->dentry) { |
2082 | put_dev(dev); | 2002 | put_dev(dev); |
2083 | goto Enomem; | 2003 | goto Enomem; |
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index f88bfdf5b6a0..2027a27546ef 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c | |||
@@ -868,12 +868,14 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, | |||
868 | func = vfio_pci_set_err_trigger; | 868 | func = vfio_pci_set_err_trigger; |
869 | break; | 869 | break; |
870 | } | 870 | } |
871 | break; | ||
871 | case VFIO_PCI_REQ_IRQ_INDEX: | 872 | case VFIO_PCI_REQ_IRQ_INDEX: |
872 | switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { | 873 | switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { |
873 | case VFIO_IRQ_SET_ACTION_TRIGGER: | 874 | case VFIO_IRQ_SET_ACTION_TRIGGER: |
874 | func = vfio_pci_set_req_trigger; | 875 | func = vfio_pci_set_req_trigger; |
875 | break; | 876 | break; |
876 | } | 877 | } |
878 | break; | ||
877 | } | 879 | } |
878 | 880 | ||
879 | if (!func) | 881 | if (!func) |
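The two break statements added in this hunk keep the outer switch from falling through from the error-IRQ case into the request-IRQ case. A small, self-contained C sketch of that fall-through hazard; the enum and handler names below are invented for illustration and are not taken from the VFIO code:

#include <stdio.h>

enum { IDX_ERR, IDX_REQ };

static const char *pick_handler(int index)
{
	const char *func = NULL;

	switch (index) {
	case IDX_ERR:
		switch (0 /* action flags, simplified */) {
		case 0:
			func = "err_trigger";
			break;
		}
		/*
		 * Without this break, control falls into IDX_REQ below and
		 * func is silently overwritten, which is the bug the hunk
		 * above fixes.
		 */
		break;
	case IDX_REQ:
		func = "req_trigger";
		break;
	}
	return func;
}

int main(void)
{
	printf("%s\n", pick_handler(IDX_ERR));	/* prints err_trigger */
	return 0;
}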
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index 32c0b6b28097..9362424c2340 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c | |||
@@ -599,6 +599,9 @@ static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint, | |||
599 | 599 | ||
600 | len = clcdfb_snprintf_mode(NULL, 0, mode); | 600 | len = clcdfb_snprintf_mode(NULL, 0, mode); |
601 | name = devm_kzalloc(dev, len + 1, GFP_KERNEL); | 601 | name = devm_kzalloc(dev, len + 1, GFP_KERNEL); |
602 | if (!name) | ||
603 | return -ENOMEM; | ||
604 | |||
602 | clcdfb_snprintf_mode(name, len + 1, mode); | 605 | clcdfb_snprintf_mode(name, len + 1, mode); |
603 | mode->name = name; | 606 | mode->name = name; |
604 | 607 | ||
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 95338593ebf4..868facdec638 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c | |||
@@ -624,9 +624,6 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize, | |||
624 | int num = 0, i, first = 1; | 624 | int num = 0, i, first = 1; |
625 | int ver, rev; | 625 | int ver, rev; |
626 | 626 | ||
627 | ver = edid[EDID_STRUCT_VERSION]; | ||
628 | rev = edid[EDID_STRUCT_REVISION]; | ||
629 | |||
630 | mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL); | 627 | mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL); |
631 | if (mode == NULL) | 628 | if (mode == NULL) |
632 | return NULL; | 629 | return NULL; |
@@ -637,6 +634,9 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize, | |||
637 | return NULL; | 634 | return NULL; |
638 | } | 635 | } |
639 | 636 | ||
637 | ver = edid[EDID_STRUCT_VERSION]; | ||
638 | rev = edid[EDID_STRUCT_REVISION]; | ||
639 | |||
640 | *dbsize = 0; | 640 | *dbsize = 0; |
641 | 641 | ||
642 | DPRINTK(" Detailed Timings\n"); | 642 | DPRINTK(" Detailed Timings\n"); |
diff --git a/drivers/video/fbdev/omap2/dss/display-sysfs.c b/drivers/video/fbdev/omap2/dss/display-sysfs.c index 5a2095a98ed8..12186557a9d4 100644 --- a/drivers/video/fbdev/omap2/dss/display-sysfs.c +++ b/drivers/video/fbdev/omap2/dss/display-sysfs.c | |||
@@ -28,44 +28,22 @@ | |||
28 | #include <video/omapdss.h> | 28 | #include <video/omapdss.h> |
29 | #include "dss.h" | 29 | #include "dss.h" |
30 | 30 | ||
31 | static struct omap_dss_device *to_dss_device_sysfs(struct device *dev) | 31 | static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf) |
32 | { | 32 | { |
33 | struct omap_dss_device *dssdev = NULL; | ||
34 | |||
35 | for_each_dss_dev(dssdev) { | ||
36 | if (dssdev->dev == dev) { | ||
37 | omap_dss_put_device(dssdev); | ||
38 | return dssdev; | ||
39 | } | ||
40 | } | ||
41 | |||
42 | return NULL; | ||
43 | } | ||
44 | |||
45 | static ssize_t display_name_show(struct device *dev, | ||
46 | struct device_attribute *attr, char *buf) | ||
47 | { | ||
48 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
49 | |||
50 | return snprintf(buf, PAGE_SIZE, "%s\n", | 33 | return snprintf(buf, PAGE_SIZE, "%s\n", |
51 | dssdev->name ? | 34 | dssdev->name ? |
52 | dssdev->name : ""); | 35 | dssdev->name : ""); |
53 | } | 36 | } |
54 | 37 | ||
55 | static ssize_t display_enabled_show(struct device *dev, | 38 | static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf) |
56 | struct device_attribute *attr, char *buf) | ||
57 | { | 39 | { |
58 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
59 | |||
60 | return snprintf(buf, PAGE_SIZE, "%d\n", | 40 | return snprintf(buf, PAGE_SIZE, "%d\n", |
61 | omapdss_device_is_enabled(dssdev)); | 41 | omapdss_device_is_enabled(dssdev)); |
62 | } | 42 | } |
63 | 43 | ||
64 | static ssize_t display_enabled_store(struct device *dev, | 44 | static ssize_t display_enabled_store(struct omap_dss_device *dssdev, |
65 | struct device_attribute *attr, | ||
66 | const char *buf, size_t size) | 45 | const char *buf, size_t size) |
67 | { | 46 | { |
68 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
69 | int r; | 47 | int r; |
70 | bool enable; | 48 | bool enable; |
71 | 49 | ||
@@ -90,19 +68,16 @@ static ssize_t display_enabled_store(struct device *dev, | |||
90 | return size; | 68 | return size; |
91 | } | 69 | } |
92 | 70 | ||
93 | static ssize_t display_tear_show(struct device *dev, | 71 | static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf) |
94 | struct device_attribute *attr, char *buf) | ||
95 | { | 72 | { |
96 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
97 | return snprintf(buf, PAGE_SIZE, "%d\n", | 73 | return snprintf(buf, PAGE_SIZE, "%d\n", |
98 | dssdev->driver->get_te ? | 74 | dssdev->driver->get_te ? |
99 | dssdev->driver->get_te(dssdev) : 0); | 75 | dssdev->driver->get_te(dssdev) : 0); |
100 | } | 76 | } |
101 | 77 | ||
102 | static ssize_t display_tear_store(struct device *dev, | 78 | static ssize_t display_tear_store(struct omap_dss_device *dssdev, |
103 | struct device_attribute *attr, const char *buf, size_t size) | 79 | const char *buf, size_t size) |
104 | { | 80 | { |
105 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
106 | int r; | 81 | int r; |
107 | bool te; | 82 | bool te; |
108 | 83 | ||
@@ -120,10 +95,8 @@ static ssize_t display_tear_store(struct device *dev, | |||
120 | return size; | 95 | return size; |
121 | } | 96 | } |
122 | 97 | ||
123 | static ssize_t display_timings_show(struct device *dev, | 98 | static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf) |
124 | struct device_attribute *attr, char *buf) | ||
125 | { | 99 | { |
126 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
127 | struct omap_video_timings t; | 100 | struct omap_video_timings t; |
128 | 101 | ||
129 | if (!dssdev->driver->get_timings) | 102 | if (!dssdev->driver->get_timings) |
@@ -137,10 +110,9 @@ static ssize_t display_timings_show(struct device *dev, | |||
137 | t.y_res, t.vfp, t.vbp, t.vsw); | 110 | t.y_res, t.vfp, t.vbp, t.vsw); |
138 | } | 111 | } |
139 | 112 | ||
140 | static ssize_t display_timings_store(struct device *dev, | 113 | static ssize_t display_timings_store(struct omap_dss_device *dssdev, |
141 | struct device_attribute *attr, const char *buf, size_t size) | 114 | const char *buf, size_t size) |
142 | { | 115 | { |
143 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
144 | struct omap_video_timings t = dssdev->panel.timings; | 116 | struct omap_video_timings t = dssdev->panel.timings; |
145 | int r, found; | 117 | int r, found; |
146 | 118 | ||
@@ -176,10 +148,8 @@ static ssize_t display_timings_store(struct device *dev, | |||
176 | return size; | 148 | return size; |
177 | } | 149 | } |
178 | 150 | ||
179 | static ssize_t display_rotate_show(struct device *dev, | 151 | static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf) |
180 | struct device_attribute *attr, char *buf) | ||
181 | { | 152 | { |
182 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
183 | int rotate; | 153 | int rotate; |
184 | if (!dssdev->driver->get_rotate) | 154 | if (!dssdev->driver->get_rotate) |
185 | return -ENOENT; | 155 | return -ENOENT; |
@@ -187,10 +157,9 @@ static ssize_t display_rotate_show(struct device *dev, | |||
187 | return snprintf(buf, PAGE_SIZE, "%u\n", rotate); | 157 | return snprintf(buf, PAGE_SIZE, "%u\n", rotate); |
188 | } | 158 | } |
189 | 159 | ||
190 | static ssize_t display_rotate_store(struct device *dev, | 160 | static ssize_t display_rotate_store(struct omap_dss_device *dssdev, |
191 | struct device_attribute *attr, const char *buf, size_t size) | 161 | const char *buf, size_t size) |
192 | { | 162 | { |
193 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
194 | int rot, r; | 163 | int rot, r; |
195 | 164 | ||
196 | if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) | 165 | if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) |
@@ -207,10 +176,8 @@ static ssize_t display_rotate_store(struct device *dev, | |||
207 | return size; | 176 | return size; |
208 | } | 177 | } |
209 | 178 | ||
210 | static ssize_t display_mirror_show(struct device *dev, | 179 | static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf) |
211 | struct device_attribute *attr, char *buf) | ||
212 | { | 180 | { |
213 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
214 | int mirror; | 181 | int mirror; |
215 | if (!dssdev->driver->get_mirror) | 182 | if (!dssdev->driver->get_mirror) |
216 | return -ENOENT; | 183 | return -ENOENT; |
@@ -218,10 +185,9 @@ static ssize_t display_mirror_show(struct device *dev, | |||
218 | return snprintf(buf, PAGE_SIZE, "%u\n", mirror); | 185 | return snprintf(buf, PAGE_SIZE, "%u\n", mirror); |
219 | } | 186 | } |
220 | 187 | ||
221 | static ssize_t display_mirror_store(struct device *dev, | 188 | static ssize_t display_mirror_store(struct omap_dss_device *dssdev, |
222 | struct device_attribute *attr, const char *buf, size_t size) | 189 | const char *buf, size_t size) |
223 | { | 190 | { |
224 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
225 | int r; | 191 | int r; |
226 | bool mirror; | 192 | bool mirror; |
227 | 193 | ||
@@ -239,10 +205,8 @@ static ssize_t display_mirror_store(struct device *dev, | |||
239 | return size; | 205 | return size; |
240 | } | 206 | } |
241 | 207 | ||
242 | static ssize_t display_wss_show(struct device *dev, | 208 | static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf) |
243 | struct device_attribute *attr, char *buf) | ||
244 | { | 209 | { |
245 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
246 | unsigned int wss; | 210 | unsigned int wss; |
247 | 211 | ||
248 | if (!dssdev->driver->get_wss) | 212 | if (!dssdev->driver->get_wss) |
@@ -253,10 +217,9 @@ static ssize_t display_wss_show(struct device *dev, | |||
253 | return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); | 217 | return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); |
254 | } | 218 | } |
255 | 219 | ||
256 | static ssize_t display_wss_store(struct device *dev, | 220 | static ssize_t display_wss_store(struct omap_dss_device *dssdev, |
257 | struct device_attribute *attr, const char *buf, size_t size) | 221 | const char *buf, size_t size) |
258 | { | 222 | { |
259 | struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); | ||
260 | u32 wss; | 223 | u32 wss; |
261 | int r; | 224 | int r; |
262 | 225 | ||
@@ -277,50 +240,94 @@ static ssize_t display_wss_store(struct device *dev, | |||
277 | return size; | 240 | return size; |
278 | } | 241 | } |
279 | 242 | ||
280 | static DEVICE_ATTR(display_name, S_IRUGO, display_name_show, NULL); | 243 | struct display_attribute { |
281 | static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR, | 244 | struct attribute attr; |
245 | ssize_t (*show)(struct omap_dss_device *, char *); | ||
246 | ssize_t (*store)(struct omap_dss_device *, const char *, size_t); | ||
247 | }; | ||
248 | |||
249 | #define DISPLAY_ATTR(_name, _mode, _show, _store) \ | ||
250 | struct display_attribute display_attr_##_name = \ | ||
251 | __ATTR(_name, _mode, _show, _store) | ||
252 | |||
253 | static DISPLAY_ATTR(name, S_IRUGO, display_name_show, NULL); | ||
254 | static DISPLAY_ATTR(display_name, S_IRUGO, display_name_show, NULL); | ||
255 | static DISPLAY_ATTR(enabled, S_IRUGO|S_IWUSR, | ||
282 | display_enabled_show, display_enabled_store); | 256 | display_enabled_show, display_enabled_store); |
283 | static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR, | 257 | static DISPLAY_ATTR(tear_elim, S_IRUGO|S_IWUSR, |
284 | display_tear_show, display_tear_store); | 258 | display_tear_show, display_tear_store); |
285 | static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR, | 259 | static DISPLAY_ATTR(timings, S_IRUGO|S_IWUSR, |
286 | display_timings_show, display_timings_store); | 260 | display_timings_show, display_timings_store); |
287 | static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR, | 261 | static DISPLAY_ATTR(rotate, S_IRUGO|S_IWUSR, |
288 | display_rotate_show, display_rotate_store); | 262 | display_rotate_show, display_rotate_store); |
289 | static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR, | 263 | static DISPLAY_ATTR(mirror, S_IRUGO|S_IWUSR, |
290 | display_mirror_show, display_mirror_store); | 264 | display_mirror_show, display_mirror_store); |
291 | static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR, | 265 | static DISPLAY_ATTR(wss, S_IRUGO|S_IWUSR, |
292 | display_wss_show, display_wss_store); | 266 | display_wss_show, display_wss_store); |
293 | 267 | ||
294 | static const struct attribute *display_sysfs_attrs[] = { | 268 | static struct attribute *display_sysfs_attrs[] = { |
295 | &dev_attr_display_name.attr, | 269 | &display_attr_name.attr, |
296 | &dev_attr_enabled.attr, | 270 | &display_attr_display_name.attr, |
297 | &dev_attr_tear_elim.attr, | 271 | &display_attr_enabled.attr, |
298 | &dev_attr_timings.attr, | 272 | &display_attr_tear_elim.attr, |
299 | &dev_attr_rotate.attr, | 273 | &display_attr_timings.attr, |
300 | &dev_attr_mirror.attr, | 274 | &display_attr_rotate.attr, |
301 | &dev_attr_wss.attr, | 275 | &display_attr_mirror.attr, |
276 | &display_attr_wss.attr, | ||
302 | NULL | 277 | NULL |
303 | }; | 278 | }; |
304 | 279 | ||
280 | static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr, | ||
281 | char *buf) | ||
282 | { | ||
283 | struct omap_dss_device *dssdev; | ||
284 | struct display_attribute *display_attr; | ||
285 | |||
286 | dssdev = container_of(kobj, struct omap_dss_device, kobj); | ||
287 | display_attr = container_of(attr, struct display_attribute, attr); | ||
288 | |||
289 | if (!display_attr->show) | ||
290 | return -ENOENT; | ||
291 | |||
292 | return display_attr->show(dssdev, buf); | ||
293 | } | ||
294 | |||
295 | static ssize_t display_attr_store(struct kobject *kobj, struct attribute *attr, | ||
296 | const char *buf, size_t size) | ||
297 | { | ||
298 | struct omap_dss_device *dssdev; | ||
299 | struct display_attribute *display_attr; | ||
300 | |||
301 | dssdev = container_of(kobj, struct omap_dss_device, kobj); | ||
302 | display_attr = container_of(attr, struct display_attribute, attr); | ||
303 | |||
304 | if (!display_attr->store) | ||
305 | return -ENOENT; | ||
306 | |||
307 | return display_attr->store(dssdev, buf, size); | ||
308 | } | ||
309 | |||
310 | static const struct sysfs_ops display_sysfs_ops = { | ||
311 | .show = display_attr_show, | ||
312 | .store = display_attr_store, | ||
313 | }; | ||
314 | |||
315 | static struct kobj_type display_ktype = { | ||
316 | .sysfs_ops = &display_sysfs_ops, | ||
317 | .default_attrs = display_sysfs_attrs, | ||
318 | }; | ||
319 | |||
305 | int display_init_sysfs(struct platform_device *pdev) | 320 | int display_init_sysfs(struct platform_device *pdev) |
306 | { | 321 | { |
307 | struct omap_dss_device *dssdev = NULL; | 322 | struct omap_dss_device *dssdev = NULL; |
308 | int r; | 323 | int r; |
309 | 324 | ||
310 | for_each_dss_dev(dssdev) { | 325 | for_each_dss_dev(dssdev) { |
311 | struct kobject *kobj = &dssdev->dev->kobj; | 326 | r = kobject_init_and_add(&dssdev->kobj, &display_ktype, |
312 | 327 | &pdev->dev.kobj, dssdev->alias); | |
313 | r = sysfs_create_files(kobj, display_sysfs_attrs); | ||
314 | if (r) { | 328 | if (r) { |
315 | DSSERR("failed to create sysfs files\n"); | 329 | DSSERR("failed to create sysfs files\n"); |
316 | goto err; | 330 | omap_dss_put_device(dssdev); |
317 | } | ||
318 | |||
319 | r = sysfs_create_link(&pdev->dev.kobj, kobj, dssdev->alias); | ||
320 | if (r) { | ||
321 | sysfs_remove_files(kobj, display_sysfs_attrs); | ||
322 | |||
323 | DSSERR("failed to create sysfs display link\n"); | ||
324 | goto err; | 331 | goto err; |
325 | } | 332 | } |
326 | } | 333 | } |
@@ -338,8 +345,12 @@ void display_uninit_sysfs(struct platform_device *pdev) | |||
338 | struct omap_dss_device *dssdev = NULL; | 345 | struct omap_dss_device *dssdev = NULL; |
339 | 346 | ||
340 | for_each_dss_dev(dssdev) { | 347 | for_each_dss_dev(dssdev) { |
341 | sysfs_remove_link(&pdev->dev.kobj, dssdev->alias); | 348 | if (kobject_name(&dssdev->kobj) == NULL) |
342 | sysfs_remove_files(&dssdev->dev->kobj, | 349 | continue; |
343 | display_sysfs_attrs); | 350 | |
351 | kobject_del(&dssdev->kobj); | ||
352 | kobject_put(&dssdev->kobj); | ||
353 | |||
354 | memset(&dssdev->kobj, 0, sizeof(dssdev->kobj)); | ||
344 | } | 355 | } |
345 | } | 356 | } |
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index b4bca2d4a7e5..70fba973a107 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
@@ -526,20 +526,26 @@ static unsigned int __startup_pirq(unsigned int irq) | |||
526 | pirq_query_unmask(irq); | 526 | pirq_query_unmask(irq); |
527 | 527 | ||
528 | rc = set_evtchn_to_irq(evtchn, irq); | 528 | rc = set_evtchn_to_irq(evtchn, irq); |
529 | if (rc != 0) { | 529 | if (rc) |
530 | pr_err("irq%d: Failed to set port to irq mapping (%d)\n", | 530 | goto err; |
531 | irq, rc); | 531 | |
532 | xen_evtchn_close(evtchn); | ||
533 | return 0; | ||
534 | } | ||
535 | bind_evtchn_to_cpu(evtchn, 0); | 532 | bind_evtchn_to_cpu(evtchn, 0); |
536 | info->evtchn = evtchn; | 533 | info->evtchn = evtchn; |
537 | 534 | ||
535 | rc = xen_evtchn_port_setup(info); | ||
536 | if (rc) | ||
537 | goto err; | ||
538 | |||
538 | out: | 539 | out: |
539 | unmask_evtchn(evtchn); | 540 | unmask_evtchn(evtchn); |
540 | eoi_pirq(irq_get_irq_data(irq)); | 541 | eoi_pirq(irq_get_irq_data(irq)); |
541 | 542 | ||
542 | return 0; | 543 | return 0; |
544 | |||
545 | err: | ||
546 | pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); | ||
547 | xen_evtchn_close(evtchn); | ||
548 | return 0; | ||
543 | } | 549 | } |
544 | 550 | ||
545 | static unsigned int startup_pirq(struct irq_data *data) | 551 | static unsigned int startup_pirq(struct irq_data *data) |
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c index 46ae0f9f02ad..75fe3d466515 100644 --- a/drivers/xen/xen-pciback/conf_space.c +++ b/drivers/xen/xen-pciback/conf_space.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include "conf_space.h" | 16 | #include "conf_space.h" |
17 | #include "conf_space_quirks.h" | 17 | #include "conf_space_quirks.h" |
18 | 18 | ||
19 | static bool permissive; | 19 | bool permissive; |
20 | module_param(permissive, bool, 0644); | 20 | module_param(permissive, bool, 0644); |
21 | 21 | ||
22 | /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, | 22 | /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, |
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h index e56c934ad137..2e1d73d1d5d0 100644 --- a/drivers/xen/xen-pciback/conf_space.h +++ b/drivers/xen/xen-pciback/conf_space.h | |||
@@ -64,6 +64,8 @@ struct config_field_entry { | |||
64 | void *data; | 64 | void *data; |
65 | }; | 65 | }; |
66 | 66 | ||
67 | extern bool permissive; | ||
68 | |||
67 | #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) | 69 | #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) |
68 | 70 | ||
69 | /* Add fields to a device - the add_fields macro expects to get a pointer to | 71 | /* Add fields to a device - the add_fields macro expects to get a pointer to |
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index c5ee82587e8c..2d7369391472 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c | |||
@@ -11,6 +11,10 @@ | |||
11 | #include "pciback.h" | 11 | #include "pciback.h" |
12 | #include "conf_space.h" | 12 | #include "conf_space.h" |
13 | 13 | ||
14 | struct pci_cmd_info { | ||
15 | u16 val; | ||
16 | }; | ||
17 | |||
14 | struct pci_bar_info { | 18 | struct pci_bar_info { |
15 | u32 val; | 19 | u32 val; |
16 | u32 len_val; | 20 | u32 len_val; |
@@ -20,22 +24,36 @@ struct pci_bar_info { | |||
20 | #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) | 24 | #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) |
21 | #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) | 25 | #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) |
22 | 26 | ||
23 | static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) | 27 | /* Bits guests are allowed to control in permissive mode. */ |
28 | #define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \ | ||
29 | PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \ | ||
30 | PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK) | ||
31 | |||
32 | static void *command_init(struct pci_dev *dev, int offset) | ||
24 | { | 33 | { |
25 | int i; | 34 | struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); |
26 | int ret; | 35 | int err; |
27 | 36 | ||
28 | ret = xen_pcibk_read_config_word(dev, offset, value, data); | 37 | if (!cmd) |
29 | if (!pci_is_enabled(dev)) | 38 | return ERR_PTR(-ENOMEM); |
30 | return ret; | 39 | |
31 | 40 | err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val); | |
32 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { | 41 | if (err) { |
33 | if (dev->resource[i].flags & IORESOURCE_IO) | 42 | kfree(cmd); |
34 | *value |= PCI_COMMAND_IO; | 43 | return ERR_PTR(err); |
35 | if (dev->resource[i].flags & IORESOURCE_MEM) | ||
36 | *value |= PCI_COMMAND_MEMORY; | ||
37 | } | 44 | } |
38 | 45 | ||
46 | return cmd; | ||
47 | } | ||
48 | |||
49 | static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) | ||
50 | { | ||
51 | int ret = pci_read_config_word(dev, offset, value); | ||
52 | const struct pci_cmd_info *cmd = data; | ||
53 | |||
54 | *value &= PCI_COMMAND_GUEST; | ||
55 | *value |= cmd->val & ~PCI_COMMAND_GUEST; | ||
56 | |||
39 | return ret; | 57 | return ret; |
40 | } | 58 | } |
41 | 59 | ||
@@ -43,6 +61,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) | |||
43 | { | 61 | { |
44 | struct xen_pcibk_dev_data *dev_data; | 62 | struct xen_pcibk_dev_data *dev_data; |
45 | int err; | 63 | int err; |
64 | u16 val; | ||
65 | struct pci_cmd_info *cmd = data; | ||
46 | 66 | ||
47 | dev_data = pci_get_drvdata(dev); | 67 | dev_data = pci_get_drvdata(dev); |
48 | if (!pci_is_enabled(dev) && is_enable_cmd(value)) { | 68 | if (!pci_is_enabled(dev) && is_enable_cmd(value)) { |
@@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) | |||
83 | } | 103 | } |
84 | } | 104 | } |
85 | 105 | ||
106 | cmd->val = value; | ||
107 | |||
108 | if (!permissive && (!dev_data || !dev_data->permissive)) | ||
109 | return 0; | ||
110 | |||
111 | /* Only allow the guest to control certain bits. */ | ||
112 | err = pci_read_config_word(dev, offset, &val); | ||
113 | if (err || val == value) | ||
114 | return err; | ||
115 | |||
116 | value &= PCI_COMMAND_GUEST; | ||
117 | value |= val & ~PCI_COMMAND_GUEST; | ||
118 | |||
86 | return pci_write_config_word(dev, offset, value); | 119 | return pci_write_config_word(dev, offset, value); |
87 | } | 120 | } |
88 | 121 | ||
@@ -282,6 +315,8 @@ static const struct config_field header_common[] = { | |||
282 | { | 315 | { |
283 | .offset = PCI_COMMAND, | 316 | .offset = PCI_COMMAND, |
284 | .size = 2, | 317 | .size = 2, |
318 | .init = command_init, | ||
319 | .release = bar_release, | ||
285 | .u.w.read = command_read, | 320 | .u.w.read = command_read, |
286 | .u.w.write = command_write, | 321 | .u.w.write = command_write, |
287 | }, | 322 | }, |
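The command_read()/command_write() changes above only let the guest observe and set the bits in PCI_COMMAND_GUEST; the remaining command bits come from the value the backend tracks or reads back from hardware. A standalone userspace sketch of that mask merge, with the guest mask reduced to a single bit purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define PCI_COMMAND_IO      0x0001
#define PCI_COMMAND_MEMORY  0x0002
#define PCI_COMMAND_MASTER  0x0004
/* illustrative stand-in for the PCI_COMMAND_GUEST mask above */
#define CMD_GUEST_MASK      PCI_COMMAND_MASTER

/* guest-controlled bits come from guest_val, everything else from host_val */
static uint16_t merge_cmd(uint16_t guest_val, uint16_t host_val)
{
	return (guest_val & CMD_GUEST_MASK) | (host_val & ~CMD_GUEST_MASK);
}

int main(void)
{
	uint16_t guest = PCI_COMMAND_MASTER;                  /* guest enables bus mastering */
	uint16_t host = PCI_COMMAND_IO | PCI_COMMAND_MEMORY;  /* host keeps decoding enabled */

	/* prints 0x0007: mastering from the guest, decoding bits from the host */
	printf("value written to hardware: 0x%04x\n", merge_cmd(guest, host));
	return 0;
}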
diff --git a/fs/locks.c b/fs/locks.c index f1bad681fc1c..528fedfda15e 100644 --- a/fs/locks.c +++ b/fs/locks.c | |||
@@ -1728,7 +1728,7 @@ static int generic_delete_lease(struct file *filp, void *owner) | |||
1728 | break; | 1728 | break; |
1729 | } | 1729 | } |
1730 | } | 1730 | } |
1731 | trace_generic_delete_lease(inode, fl); | 1731 | trace_generic_delete_lease(inode, victim); |
1732 | if (victim) | 1732 | if (victim) |
1733 | error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); | 1733 | error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); |
1734 | spin_unlock(&ctx->flc_lock); | 1734 | spin_unlock(&ctx->flc_lock); |
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 469086b9f99b..0c3f303baf32 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
@@ -1907,6 +1907,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, | |||
1907 | struct the_nilfs *nilfs) | 1907 | struct the_nilfs *nilfs) |
1908 | { | 1908 | { |
1909 | struct nilfs_inode_info *ii, *n; | 1909 | struct nilfs_inode_info *ii, *n; |
1910 | int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE); | ||
1910 | int defer_iput = false; | 1911 | int defer_iput = false; |
1911 | 1912 | ||
1912 | spin_lock(&nilfs->ns_inode_lock); | 1913 | spin_lock(&nilfs->ns_inode_lock); |
@@ -1919,10 +1920,10 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci, | |||
1919 | brelse(ii->i_bh); | 1920 | brelse(ii->i_bh); |
1920 | ii->i_bh = NULL; | 1921 | ii->i_bh = NULL; |
1921 | list_del_init(&ii->i_dirty); | 1922 | list_del_init(&ii->i_dirty); |
1922 | if (!ii->vfs_inode.i_nlink) { | 1923 | if (!ii->vfs_inode.i_nlink || during_mount) { |
1923 | /* | 1924 | /* |
1924 | * Defer calling iput() to avoid a deadlock | 1925 | * Defer calling iput() to avoid deadlocks if |
1925 | * over I_SYNC flag for inodes with i_nlink == 0 | 1926 | * i_nlink == 0 or mount is not yet finished. |
1926 | */ | 1927 | */ |
1927 | list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); | 1928 | list_add_tail(&ii->i_dirty, &sci->sc_iput_queue); |
1928 | defer_iput = true; | 1929 | defer_iput = true; |
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 9a66ff79ff27..d2f97ecca6a5 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c | |||
@@ -143,7 +143,8 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, | |||
143 | !(marks_mask & FS_ISDIR & ~marks_ignored_mask)) | 143 | !(marks_mask & FS_ISDIR & ~marks_ignored_mask)) |
144 | return false; | 144 | return false; |
145 | 145 | ||
146 | if (event_mask & marks_mask & ~marks_ignored_mask) | 146 | if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask & |
147 | ~marks_ignored_mask) | ||
147 | return true; | 148 | return true; |
148 | 149 | ||
149 | return false; | 150 | return false; |
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 8490c64d34fe..460c6c37e683 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -502,7 +502,7 @@ static inline int ocfs2_writes_unwritten_extents(struct ocfs2_super *osb) | |||
502 | 502 | ||
503 | static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb) | 503 | static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb) |
504 | { | 504 | { |
505 | if (osb->s_feature_ro_compat & OCFS2_FEATURE_RO_COMPAT_APPEND_DIO) | 505 | if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_APPEND_DIO) |
506 | return 1; | 506 | return 1; |
507 | return 0; | 507 | return 0; |
508 | } | 508 | } |
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 20e37a3ed26f..db64ce2d4667 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h | |||
@@ -102,11 +102,11 @@ | |||
102 | | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \ | 102 | | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \ |
103 | | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \ | 103 | | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE \ |
104 | | OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG \ | 104 | | OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG \ |
105 | | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO) | 105 | | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO \ |
106 | | OCFS2_FEATURE_INCOMPAT_APPEND_DIO) | ||
106 | #define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \ | 107 | #define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \ |
107 | | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \ | 108 | | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \ |
108 | | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA \ | 109 | | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA) |
109 | | OCFS2_FEATURE_RO_COMPAT_APPEND_DIO) | ||
110 | 110 | ||
111 | /* | 111 | /* |
112 | * Heartbeat-only devices are missing journals and other files. The | 112 | * Heartbeat-only devices are missing journals and other files. The |
@@ -179,6 +179,11 @@ | |||
179 | #define OCFS2_FEATURE_INCOMPAT_CLUSTERINFO 0x4000 | 179 | #define OCFS2_FEATURE_INCOMPAT_CLUSTERINFO 0x4000 |
180 | 180 | ||
181 | /* | 181 | /* |
182 | * Append Direct IO support | ||
183 | */ | ||
184 | #define OCFS2_FEATURE_INCOMPAT_APPEND_DIO 0x8000 | ||
185 | |||
186 | /* | ||
182 | * backup superblock flag is used to indicate that this volume | 187 | * backup superblock flag is used to indicate that this volume |
183 | * has backup superblocks. | 188 | * has backup superblocks. |
184 | */ | 189 | */ |
@@ -200,10 +205,6 @@ | |||
200 | #define OCFS2_FEATURE_RO_COMPAT_USRQUOTA 0x0002 | 205 | #define OCFS2_FEATURE_RO_COMPAT_USRQUOTA 0x0002 |
201 | #define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA 0x0004 | 206 | #define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA 0x0004 |
202 | 207 | ||
203 | /* | ||
204 | * Append Direct IO support | ||
205 | */ | ||
206 | #define OCFS2_FEATURE_RO_COMPAT_APPEND_DIO 0x0008 | ||
207 | 208 | ||
208 | /* The byte offset of the first backup block will be 1G. | 209 | /* The byte offset of the first backup block will be 1G. |
209 | * The following will be 4G, 16G, 64G, 256G and 1T. | 210 | * The following will be 4G, 16G, 64G, 256G and 1T. |
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index f2e47fd56751..613372375ada 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h | |||
@@ -208,40 +208,41 @@ | |||
208 | #define INTEL_VLV_D_IDS(info) \ | 208 | #define INTEL_VLV_D_IDS(info) \ |
209 | INTEL_VGA_DEVICE(0x0155, info) | 209 | INTEL_VGA_DEVICE(0x0155, info) |
210 | 210 | ||
211 | #define _INTEL_BDW_M(gt, id, info) \ | 211 | #define INTEL_BDW_GT12M_IDS(info) \ |
212 | INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info) | 212 | INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \ |
213 | #define _INTEL_BDW_D(gt, id, info) \ | 213 | INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \ |
214 | INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info) | 214 | INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \ |
215 | 215 | INTEL_VGA_DEVICE(0x160E, info), /* GT1 ULX */ \ | |
216 | #define _INTEL_BDW_M_IDS(gt, info) \ | 216 | INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \ |
217 | _INTEL_BDW_M(gt, 0x1602, info), /* Halo */ \ | 217 | INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \ |
218 | _INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \ | 218 | INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \ |
219 | _INTEL_BDW_M(gt, 0x160B, info), /* ULT */ \ | 219 | INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */ |
220 | _INTEL_BDW_M(gt, 0x160E, info) /* ULX */ | ||
221 | |||
222 | #define _INTEL_BDW_D_IDS(gt, info) \ | ||
223 | _INTEL_BDW_D(gt, 0x160A, info), /* Server */ \ | ||
224 | _INTEL_BDW_D(gt, 0x160D, info) /* Workstation */ | ||
225 | |||
226 | #define INTEL_BDW_GT12M_IDS(info) \ | ||
227 | _INTEL_BDW_M_IDS(1, info), \ | ||
228 | _INTEL_BDW_M_IDS(2, info) | ||
229 | 220 | ||
230 | #define INTEL_BDW_GT12D_IDS(info) \ | 221 | #define INTEL_BDW_GT12D_IDS(info) \ |
231 | _INTEL_BDW_D_IDS(1, info), \ | 222 | INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \ |
232 | _INTEL_BDW_D_IDS(2, info) | 223 | INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \ |
224 | INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \ | ||
225 | INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */ | ||
233 | 226 | ||
234 | #define INTEL_BDW_GT3M_IDS(info) \ | 227 | #define INTEL_BDW_GT3M_IDS(info) \ |
235 | _INTEL_BDW_M_IDS(3, info) | 228 | INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \ |
229 | INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \ | ||
230 | INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \ | ||
231 | INTEL_VGA_DEVICE(0x162E, info) /* ULX */ | ||
236 | 232 | ||
237 | #define INTEL_BDW_GT3D_IDS(info) \ | 233 | #define INTEL_BDW_GT3D_IDS(info) \ |
238 | _INTEL_BDW_D_IDS(3, info) | 234 | INTEL_VGA_DEVICE(0x162A, info), /* Server */ \ |
235 | INTEL_VGA_DEVICE(0x162D, info) /* Workstation */ | ||
239 | 236 | ||
240 | #define INTEL_BDW_RSVDM_IDS(info) \ | 237 | #define INTEL_BDW_RSVDM_IDS(info) \ |
241 | _INTEL_BDW_M_IDS(4, info) | 238 | INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \ |
239 | INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \ | ||
240 | INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \ | ||
241 | INTEL_VGA_DEVICE(0x163E, info) /* ULX */ | ||
242 | 242 | ||
243 | #define INTEL_BDW_RSVDD_IDS(info) \ | 243 | #define INTEL_BDW_RSVDD_IDS(info) \ |
244 | _INTEL_BDW_D_IDS(4, info) | 244 | INTEL_VGA_DEVICE(0x163A, info), /* Server */ \ |
245 | INTEL_VGA_DEVICE(0x163D, info) /* Workstation */ | ||
245 | 246 | ||
246 | #define INTEL_BDW_M_IDS(info) \ | 247 | #define INTEL_BDW_M_IDS(info) \ |
247 | INTEL_BDW_GT12M_IDS(info), \ | 248 | INTEL_BDW_GT12M_IDS(info), \ |
diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h index 2fbc804e1a45..226f77246a70 100644 --- a/include/dt-bindings/pinctrl/am33xx.h +++ b/include/dt-bindings/pinctrl/am33xx.h | |||
@@ -13,7 +13,8 @@ | |||
13 | 13 | ||
14 | #define PULL_DISABLE (1 << 3) | 14 | #define PULL_DISABLE (1 << 3) |
15 | #define INPUT_EN (1 << 5) | 15 | #define INPUT_EN (1 << 5) |
16 | #define SLEWCTRL_FAST (1 << 6) | 16 | #define SLEWCTRL_SLOW (1 << 6) |
17 | #define SLEWCTRL_FAST 0 | ||
17 | 18 | ||
18 | /* update macro depending on INPUT_EN and PULL_ENA */ | 19 | /* update macro depending on INPUT_EN and PULL_ENA */ |
19 | #undef PIN_OUTPUT | 20 | #undef PIN_OUTPUT |
diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h index 9c2e4f82381e..5f4d01898c9c 100644 --- a/include/dt-bindings/pinctrl/am43xx.h +++ b/include/dt-bindings/pinctrl/am43xx.h | |||
@@ -18,7 +18,8 @@ | |||
18 | #define PULL_DISABLE (1 << 16) | 18 | #define PULL_DISABLE (1 << 16) |
19 | #define PULL_UP (1 << 17) | 19 | #define PULL_UP (1 << 17) |
20 | #define INPUT_EN (1 << 18) | 20 | #define INPUT_EN (1 << 18) |
21 | #define SLEWCTRL_FAST (1 << 19) | 21 | #define SLEWCTRL_SLOW (1 << 19) |
22 | #define SLEWCTRL_FAST 0 | ||
22 | #define DS0_PULL_UP_DOWN_EN (1 << 27) | 23 | #define DS0_PULL_UP_DOWN_EN (1 << 27) |
23 | 24 | ||
24 | #define PIN_OUTPUT (PULL_DISABLE) | 25 | #define PIN_OUTPUT (PULL_DISABLE) |
diff --git a/include/linux/clk.h b/include/linux/clk.h index 8381bbfbc308..68c16a6bedb3 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h | |||
@@ -125,6 +125,19 @@ int clk_set_phase(struct clk *clk, int degrees); | |||
125 | */ | 125 | */ |
126 | int clk_get_phase(struct clk *clk); | 126 | int clk_get_phase(struct clk *clk); |
127 | 127 | ||
128 | /** | ||
129 | * clk_is_match - check if two clk's point to the same hardware clock | ||
130 | * @p: clk compared against q | ||
131 | * @q: clk compared against p | ||
132 | * | ||
133 | * Returns true if the two struct clk pointers both point to the same hardware | ||
134 | * clock node. Put differently, returns true if struct clk *p and struct clk *q | ||
135 | * share the same struct clk_core object. | ||
136 | * | ||
137 | * Returns false otherwise. Note that two NULL clks are treated as matching. | ||
138 | */ | ||
139 | bool clk_is_match(const struct clk *p, const struct clk *q); | ||
140 | |||
128 | #else | 141 | #else |
129 | 142 | ||
130 | static inline long clk_get_accuracy(struct clk *clk) | 143 | static inline long clk_get_accuracy(struct clk *clk) |
@@ -142,6 +155,11 @@ static inline long clk_get_phase(struct clk *clk) | |||
142 | return -ENOTSUPP; | 155 | return -ENOTSUPP; |
143 | } | 156 | } |
144 | 157 | ||
158 | static inline bool clk_is_match(const struct clk *p, const struct clk *q) | ||
159 | { | ||
160 | return p == q; | ||
161 | } | ||
162 | |||
145 | #endif | 163 | #endif |
146 | 164 | ||
147 | /** | 165 | /** |
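The clk_is_match() helper documented in the hunk above compares the underlying hardware clock (struct clk_core) rather than the struct clk handles, which can differ between clk_get() callers. A hypothetical driver-side sketch of how a consumer might use it; the function name and the "fck" consumer id are invented for illustration:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/device.h>

static bool foo_uses_ref_clk(struct device *dev, struct clk *ref)
{
	struct clk *fck;
	bool match;

	fck = clk_get(dev, "fck");	/* "fck" is an assumed consumer name */
	if (IS_ERR(fck))
		return false;

	/* pointer equality is not meaningful here; compare the clock nodes */
	match = clk_is_match(fck, ref);

	clk_put(fck);
	return match;
}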
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 800544bc7bfd..781974afff9f 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
@@ -166,6 +166,11 @@ | |||
166 | 166 | ||
167 | #define GITS_TRANSLATER 0x10040 | 167 | #define GITS_TRANSLATER 0x10040 |
168 | 168 | ||
169 | #define GITS_CTLR_ENABLE (1U << 0) | ||
170 | #define GITS_CTLR_QUIESCENT (1U << 31) | ||
171 | |||
172 | #define GITS_TYPER_DEVBITS_SHIFT 13 | ||
173 | #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) | ||
169 | #define GITS_TYPER_PTA (1UL << 19) | 174 | #define GITS_TYPER_PTA (1UL << 19) |
170 | 175 | ||
171 | #define GITS_CBASER_VALID (1UL << 63) | 176 | #define GITS_CBASER_VALID (1UL << 63) |
diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 72ba725ddf9c..5bb074431eb0 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | struct kmem_cache; | 6 | struct kmem_cache; |
7 | struct page; | 7 | struct page; |
8 | struct vm_struct; | ||
8 | 9 | ||
9 | #ifdef CONFIG_KASAN | 10 | #ifdef CONFIG_KASAN |
10 | 11 | ||
@@ -49,15 +50,11 @@ void kasan_krealloc(const void *object, size_t new_size); | |||
49 | void kasan_slab_alloc(struct kmem_cache *s, void *object); | 50 | void kasan_slab_alloc(struct kmem_cache *s, void *object); |
50 | void kasan_slab_free(struct kmem_cache *s, void *object); | 51 | void kasan_slab_free(struct kmem_cache *s, void *object); |
51 | 52 | ||
52 | #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) | ||
53 | |||
54 | int kasan_module_alloc(void *addr, size_t size); | 53 | int kasan_module_alloc(void *addr, size_t size); |
55 | void kasan_module_free(void *addr); | 54 | void kasan_free_shadow(const struct vm_struct *vm); |
56 | 55 | ||
57 | #else /* CONFIG_KASAN */ | 56 | #else /* CONFIG_KASAN */ |
58 | 57 | ||
59 | #define MODULE_ALIGN 1 | ||
60 | |||
61 | static inline void kasan_unpoison_shadow(const void *address, size_t size) {} | 58 | static inline void kasan_unpoison_shadow(const void *address, size_t size) {} |
62 | 59 | ||
63 | static inline void kasan_enable_current(void) {} | 60 | static inline void kasan_enable_current(void) {} |
@@ -82,7 +79,7 @@ static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {} | |||
82 | static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} | 79 | static inline void kasan_slab_free(struct kmem_cache *s, void *object) {} |
83 | 80 | ||
84 | static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } | 81 | static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } |
85 | static inline void kasan_module_free(void *addr) {} | 82 | static inline void kasan_free_shadow(const struct vm_struct *vm) {} |
86 | 83 | ||
87 | #endif /* CONFIG_KASAN */ | 84 | #endif /* CONFIG_KASAN */ |
88 | 85 | ||
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index f7556261fe3c..4d0cb9bba93e 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h | |||
@@ -84,4 +84,12 @@ void module_arch_cleanup(struct module *mod); | |||
84 | 84 | ||
85 | /* Any cleanup before freeing mod->module_init */ | 85 | /* Any cleanup before freeing mod->module_init */ |
86 | void module_arch_freeing_init(struct module *mod); | 86 | void module_arch_freeing_init(struct module *mod); |
87 | |||
88 | #ifdef CONFIG_KASAN | ||
89 | #include <linux/kasan.h> | ||
90 | #define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) | ||
91 | #else | ||
92 | #define MODULE_ALIGN PAGE_SIZE | ||
93 | #endif | ||
94 | |||
87 | #endif | 95 | #endif |
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index 8a860f096c35..611a691145c4 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h | |||
@@ -84,7 +84,7 @@ static inline int of_platform_populate(struct device_node *root, | |||
84 | static inline void of_platform_depopulate(struct device *parent) { } | 84 | static inline void of_platform_depopulate(struct device *parent) { } |
85 | #endif | 85 | #endif |
86 | 86 | ||
87 | #ifdef CONFIG_OF_DYNAMIC | 87 | #if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS) |
88 | extern void of_platform_register_reconfig_notifier(void); | 88 | extern void of_platform_register_reconfig_notifier(void); |
89 | #else | 89 | #else |
90 | static inline void of_platform_register_reconfig_notifier(void) { } | 90 | static inline void of_platform_register_reconfig_notifier(void) { } |
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index ed9489d893a4..856d34dde79b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -649,7 +649,7 @@ struct spi_transfer { | |||
649 | * sequence completes. On some systems, many such sequences can execute as | 649 | * sequence completes. On some systems, many such sequences can execute as |
650 | * as single programmed DMA transfer. On all systems, these messages are | 650 | * as single programmed DMA transfer. On all systems, these messages are |
651 | * queued, and might complete after transactions to other devices. Messages | 651 | * queued, and might complete after transactions to other devices. Messages |
652 | * sent to a given spi_device are alway executed in FIFO order. | 652 | * sent to a given spi_device are always executed in FIFO order. |
653 | * | 653 | * |
654 | * The code that submits an spi_message (and its spi_transfers) | 654 | * The code that submits an spi_message (and its spi_transfers) |
655 | * to the lower layers is responsible for managing its memory. | 655 | * to the lower layers is responsible for managing its memory. |
diff --git a/include/linux/uio.h b/include/linux/uio.h index 07a022641996..71880299ed48 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h | |||
@@ -98,6 +98,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, | |||
98 | size_t maxsize, size_t *start); | 98 | size_t maxsize, size_t *start); |
99 | int iov_iter_npages(const struct iov_iter *i, int maxpages); | 99 | int iov_iter_npages(const struct iov_iter *i, int maxpages); |
100 | 100 | ||
101 | const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); | ||
102 | |||
101 | static inline size_t iov_iter_count(struct iov_iter *i) | 103 | static inline size_t iov_iter_count(struct iov_iter *i) |
102 | { | 104 | { |
103 | return i->count; | 105 | return i->count; |
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 7d7acb35603d..0ec598381f97 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
@@ -17,6 +17,7 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ | |||
17 | #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ | 17 | #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ |
18 | #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ | 18 | #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ |
19 | #define VM_NO_GUARD 0x00000040 /* don't add guard page */ | 19 | #define VM_NO_GUARD 0x00000040 /* don't add guard page */ |
20 | #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ | ||
20 | /* bits [20..32] reserved for arch specific ioremap internals */ | 21 | /* bits [20..32] reserved for arch specific ioremap internals */ |
21 | 22 | ||
22 | /* | 23 | /* |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 74db135f9957..f597846ff605 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -70,7 +70,8 @@ enum { | |||
70 | /* data contains off-queue information when !WORK_STRUCT_PWQ */ | 70 | /* data contains off-queue information when !WORK_STRUCT_PWQ */ |
71 | WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, | 71 | WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT, |
72 | 72 | ||
73 | WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE), | 73 | __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE, |
74 | WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING), | ||
74 | 75 | ||
75 | /* | 76 | /* |
76 | * When a work item is off queue, its high bits point to the last | 77 | * When a work item is off queue, its high bits point to the last |
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 9eaaa7884586..decb9a095ae7 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
@@ -119,6 +119,22 @@ int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg, | |||
119 | const struct nft_data *data, | 119 | const struct nft_data *data, |
120 | enum nft_data_types type); | 120 | enum nft_data_types type); |
121 | 121 | ||
122 | |||
123 | /** | ||
124 | * struct nft_userdata - user defined data associated with an object | ||
125 | * | ||
126 | * @len: length of the data | ||
127 | * @data: content | ||
128 | * | ||
129 | * The presence of user data is indicated in an object specific fashion, | ||
130 | * so a length of zero can't occur and the value "len" indicates data | ||
131 | * of length len + 1. | ||
132 | */ | ||
133 | struct nft_userdata { | ||
134 | u8 len; | ||
135 | unsigned char data[0]; | ||
136 | }; | ||
137 | |||
122 | /** | 138 | /** |
123 | * struct nft_set_elem - generic representation of set elements | 139 | * struct nft_set_elem - generic representation of set elements |
124 | * | 140 | * |
@@ -380,7 +396,7 @@ static inline void *nft_expr_priv(const struct nft_expr *expr) | |||
380 | * @handle: rule handle | 396 | * @handle: rule handle |
381 | * @genmask: generation mask | 397 | * @genmask: generation mask |
382 | * @dlen: length of expression data | 398 | * @dlen: length of expression data |
383 | * @ulen: length of user data (used for comments) | 399 | * @udata: user data is appended to the rule |
384 | * @data: expression data | 400 | * @data: expression data |
385 | */ | 401 | */ |
386 | struct nft_rule { | 402 | struct nft_rule { |
@@ -388,7 +404,7 @@ struct nft_rule { | |||
388 | u64 handle:42, | 404 | u64 handle:42, |
389 | genmask:2, | 405 | genmask:2, |
390 | dlen:12, | 406 | dlen:12, |
391 | ulen:8; | 407 | udata:1; |
392 | unsigned char data[] | 408 | unsigned char data[] |
393 | __attribute__((aligned(__alignof__(struct nft_expr)))); | 409 | __attribute__((aligned(__alignof__(struct nft_expr)))); |
394 | }; | 410 | }; |
@@ -476,7 +492,7 @@ static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule) | |||
476 | return (struct nft_expr *)&rule->data[rule->dlen]; | 492 | return (struct nft_expr *)&rule->data[rule->dlen]; |
477 | } | 493 | } |
478 | 494 | ||
479 | static inline void *nft_userdata(const struct nft_rule *rule) | 495 | static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule) |
480 | { | 496 | { |
481 | return (void *)&rule->data[rule->dlen]; | 497 | return (void *)&rule->data[rule->dlen]; |
482 | } | 498 | } |
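The struct nft_userdata comment above stores length minus one in the 8-bit len field, so a zero-length blob cannot be represented and the field covers 1 to 256 bytes. A userspace sketch of that encoding; the struct is renamed to make clear it is only an illustration of the layout, not kernel code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct userdata_example {	/* mirrors the layout of struct nft_userdata */
	unsigned char len;
	unsigned char data[];
};

int main(void)
{
	const char *comment = "accept established traffic";
	size_t n = strlen(comment);
	struct userdata_example *u = malloc(sizeof(*u) + n);

	if (!u)
		return 1;
	u->len = n - 1;			/* store length - 1 */
	memcpy(u->data, comment, n);

	printf("stored len field: %u, decoded length: %u\n",
	       (unsigned int)u->len, (unsigned int)u->len + 1);
	free(u);
	return 0;
}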
diff --git a/include/soc/at91/at91sam9_ddrsdr.h b/include/soc/at91/at91sam9_ddrsdr.h index 0210797abf2e..dc10c52e0e91 100644 --- a/include/soc/at91/at91sam9_ddrsdr.h +++ b/include/soc/at91/at91sam9_ddrsdr.h | |||
@@ -92,7 +92,7 @@ | |||
92 | #define AT91_DDRSDRC_UPD_MR (3 << 20) /* Update load mode register and extended mode register */ | 92 | #define AT91_DDRSDRC_UPD_MR (3 << 20) /* Update load mode register and extended mode register */ |
93 | 93 | ||
94 | #define AT91_DDRSDRC_MDR 0x20 /* Memory Device Register */ | 94 | #define AT91_DDRSDRC_MDR 0x20 /* Memory Device Register */ |
95 | #define AT91_DDRSDRC_MD (3 << 0) /* Memory Device Type */ | 95 | #define AT91_DDRSDRC_MD (7 << 0) /* Memory Device Type */ |
96 | #define AT91_DDRSDRC_MD_SDR 0 | 96 | #define AT91_DDRSDRC_MD_SDR 0 |
97 | #define AT91_DDRSDRC_MD_LOW_POWER_SDR 1 | 97 | #define AT91_DDRSDRC_MD_LOW_POWER_SDR 1 |
98 | #define AT91_DDRSDRC_MD_LOW_POWER_DDR 3 | 98 | #define AT91_DDRSDRC_MD_LOW_POWER_DDR 3 |
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index e6efac23c7ea..07735822a28f 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h | |||
@@ -151,7 +151,7 @@ | |||
151 | /* add more to the end as needed */ | 151 | /* add more to the end as needed */ |
152 | 152 | ||
153 | #define fourcc_mod_code(vendor, val) \ | 153 | #define fourcc_mod_code(vendor, val) \ |
154 | ((((u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffL)) | 154 | ((((u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL)) |
155 | 155 | ||
156 | /* | 156 | /* |
157 | * Format Modifier tokens: | 157 | * Format Modifier tokens: |
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 6eed16b92a24..8d1be9073380 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h | |||
@@ -347,6 +347,9 @@ typedef struct drm_i915_irq_wait { | |||
347 | #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29 | 347 | #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29 |
348 | #define I915_PARAM_MMAP_VERSION 30 | 348 | #define I915_PARAM_MMAP_VERSION 30 |
349 | #define I915_PARAM_HAS_BSD2 31 | 349 | #define I915_PARAM_HAS_BSD2 31 |
350 | #define I915_PARAM_REVISION 32 | ||
351 | #define I915_PARAM_SUBSLICE_TOTAL 33 | ||
352 | #define I915_PARAM_EU_TOTAL 34 | ||
350 | 353 | ||
351 | typedef struct drm_i915_getparam { | 354 | typedef struct drm_i915_getparam { |
352 | int param; | 355 | int param; |
diff --git a/include/video/omapdss.h b/include/video/omapdss.h index 60de61fea8e3..c8ed15daad02 100644 --- a/include/video/omapdss.h +++ b/include/video/omapdss.h | |||
@@ -689,6 +689,7 @@ struct omapdss_dsi_ops { | |||
689 | }; | 689 | }; |
690 | 690 | ||
691 | struct omap_dss_device { | 691 | struct omap_dss_device { |
692 | struct kobject kobj; | ||
692 | struct device *dev; | 693 | struct device *dev; |
693 | 694 | ||
694 | struct module *owner; | 695 | struct module *owner; |
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index b78f21caf55a..b0f1c9e5d687 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h | |||
@@ -114,9 +114,9 @@ int __must_check __xenbus_register_backend(struct xenbus_driver *drv, | |||
114 | const char *mod_name); | 114 | const char *mod_name); |
115 | 115 | ||
116 | #define xenbus_register_frontend(drv) \ | 116 | #define xenbus_register_frontend(drv) \ |
117 | __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME); | 117 | __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME) |
118 | #define xenbus_register_backend(drv) \ | 118 | #define xenbus_register_backend(drv) \ |
119 | __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME); | 119 | __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME) |
120 | 120 | ||
121 | void xenbus_unregister_driver(struct xenbus_driver *drv); | 121 | void xenbus_unregister_driver(struct xenbus_driver *drv); |
122 | 122 | ||
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 1d1fe9361d29..fc7f4748d34a 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -548,9 +548,6 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr, | |||
548 | 548 | ||
549 | rcu_read_lock(); | 549 | rcu_read_lock(); |
550 | cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { | 550 | cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { |
551 | if (cp == root_cs) | ||
552 | continue; | ||
553 | |||
554 | /* skip the whole subtree if @cp doesn't have any CPU */ | 551 | /* skip the whole subtree if @cp doesn't have any CPU */ |
555 | if (cpumask_empty(cp->cpus_allowed)) { | 552 | if (cpumask_empty(cp->cpus_allowed)) { |
556 | pos_css = css_rightmost_descendant(pos_css); | 553 | pos_css = css_rightmost_descendant(pos_css); |
@@ -873,7 +870,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) | |||
873 | * If it becomes empty, inherit the effective mask of the | 870 | * If it becomes empty, inherit the effective mask of the |
874 | * parent, which is guaranteed to have some CPUs. | 871 | * parent, which is guaranteed to have some CPUs. |
875 | */ | 872 | */ |
876 | if (cpumask_empty(new_cpus)) | 873 | if (cgroup_on_dfl(cp->css.cgroup) && cpumask_empty(new_cpus)) |
877 | cpumask_copy(new_cpus, parent->effective_cpus); | 874 | cpumask_copy(new_cpus, parent->effective_cpus); |
878 | 875 | ||
879 | /* Skip the whole subtree if the cpumask remains the same. */ | 876 | /* Skip the whole subtree if the cpumask remains the same. */ |
@@ -1129,7 +1126,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) | |||
1129 | * If it becomes empty, inherit the effective mask of the | 1126 | * If it becomes empty, inherit the effective mask of the |
1130 | * parent, which is guaranteed to have some MEMs. | 1127 | * parent, which is guaranteed to have some MEMs. |
1131 | */ | 1128 | */ |
1132 | if (nodes_empty(*new_mems)) | 1129 | if (cgroup_on_dfl(cp->css.cgroup) && nodes_empty(*new_mems)) |
1133 | *new_mems = parent->effective_mems; | 1130 | *new_mems = parent->effective_mems; |
1134 | 1131 | ||
1135 | /* Skip the whole subtree if the nodemask remains the same. */ | 1132 | /* Skip the whole subtree if the nodemask remains the same. */ |
@@ -1979,7 +1976,9 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) | |||
1979 | 1976 | ||
1980 | spin_lock_irq(&callback_lock); | 1977 | spin_lock_irq(&callback_lock); |
1981 | cs->mems_allowed = parent->mems_allowed; | 1978 | cs->mems_allowed = parent->mems_allowed; |
1979 | cs->effective_mems = parent->mems_allowed; | ||
1982 | cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); | 1980 | cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); |
1981 | cpumask_copy(cs->effective_cpus, parent->cpus_allowed); | ||
1983 | spin_unlock_irq(&callback_lock); | 1982 | spin_unlock_irq(&callback_lock); |
1984 | out_unlock: | 1983 | out_unlock: |
1985 | mutex_unlock(&cpuset_mutex); | 1984 | mutex_unlock(&cpuset_mutex); |
diff --git a/kernel/module.c b/kernel/module.c index cc93cf68653c..b3d634ed06c9 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -56,7 +56,6 @@ | |||
56 | #include <linux/async.h> | 56 | #include <linux/async.h> |
57 | #include <linux/percpu.h> | 57 | #include <linux/percpu.h> |
58 | #include <linux/kmemleak.h> | 58 | #include <linux/kmemleak.h> |
59 | #include <linux/kasan.h> | ||
60 | #include <linux/jump_label.h> | 59 | #include <linux/jump_label.h> |
61 | #include <linux/pfn.h> | 60 | #include <linux/pfn.h> |
62 | #include <linux/bsearch.h> | 61 | #include <linux/bsearch.h> |
@@ -1814,7 +1813,6 @@ static void unset_module_init_ro_nx(struct module *mod) { } | |||
1814 | void __weak module_memfree(void *module_region) | 1813 | void __weak module_memfree(void *module_region) |
1815 | { | 1814 | { |
1816 | vfree(module_region); | 1815 | vfree(module_region); |
1817 | kasan_module_free(module_region); | ||
1818 | } | 1816 | } |
1819 | 1817 | ||
1820 | void __weak module_arch_cleanup(struct module *mod) | 1818 | void __weak module_arch_cleanup(struct module *mod) |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 45e5cb143d17..4f228024055b 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -1059,6 +1059,12 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer) | |||
1059 | 1059 | ||
1060 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | 1060 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; |
1061 | 1061 | ||
1062 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
1063 | static int ftrace_graph_active; | ||
1064 | #else | ||
1065 | # define ftrace_graph_active 0 | ||
1066 | #endif | ||
1067 | |||
1062 | #ifdef CONFIG_DYNAMIC_FTRACE | 1068 | #ifdef CONFIG_DYNAMIC_FTRACE |
1063 | 1069 | ||
1064 | static struct ftrace_ops *removed_ops; | 1070 | static struct ftrace_ops *removed_ops; |
@@ -2041,8 +2047,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) | |||
2041 | if (!ftrace_rec_count(rec)) | 2047 | if (!ftrace_rec_count(rec)) |
2042 | rec->flags = 0; | 2048 | rec->flags = 0; |
2043 | else | 2049 | else |
2044 | /* Just disable the record (keep REGS state) */ | 2050 | /* |
2045 | rec->flags &= ~FTRACE_FL_ENABLED; | 2051 | * Just disable the record, but keep the ops TRAMP |
2052 | * and REGS states. The _EN flags must be disabled though. | ||
2053 | */ | ||
2054 | rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | | ||
2055 | FTRACE_FL_REGS_EN); | ||
2046 | } | 2056 | } |
2047 | 2057 | ||
2048 | return FTRACE_UPDATE_MAKE_NOP; | 2058 | return FTRACE_UPDATE_MAKE_NOP; |
@@ -2688,24 +2698,36 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
2688 | 2698 | ||
2689 | static void ftrace_startup_sysctl(void) | 2699 | static void ftrace_startup_sysctl(void) |
2690 | { | 2700 | { |
2701 | int command; | ||
2702 | |||
2691 | if (unlikely(ftrace_disabled)) | 2703 | if (unlikely(ftrace_disabled)) |
2692 | return; | 2704 | return; |
2693 | 2705 | ||
2694 | /* Force update next time */ | 2706 | /* Force update next time */ |
2695 | saved_ftrace_func = NULL; | 2707 | saved_ftrace_func = NULL; |
2696 | /* ftrace_start_up is true if we want ftrace running */ | 2708 | /* ftrace_start_up is true if we want ftrace running */ |
2697 | if (ftrace_start_up) | 2709 | if (ftrace_start_up) { |
2698 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | 2710 | command = FTRACE_UPDATE_CALLS; |
2711 | if (ftrace_graph_active) | ||
2712 | command |= FTRACE_START_FUNC_RET; | ||
2713 | ftrace_startup_enable(command); | ||
2714 | } | ||
2699 | } | 2715 | } |
2700 | 2716 | ||
2701 | static void ftrace_shutdown_sysctl(void) | 2717 | static void ftrace_shutdown_sysctl(void) |
2702 | { | 2718 | { |
2719 | int command; | ||
2720 | |||
2703 | if (unlikely(ftrace_disabled)) | 2721 | if (unlikely(ftrace_disabled)) |
2704 | return; | 2722 | return; |
2705 | 2723 | ||
2706 | /* ftrace_start_up is true if ftrace is running */ | 2724 | /* ftrace_start_up is true if ftrace is running */ |
2707 | if (ftrace_start_up) | 2725 | if (ftrace_start_up) { |
2708 | ftrace_run_update_code(FTRACE_DISABLE_CALLS); | 2726 | command = FTRACE_DISABLE_CALLS; |
2727 | if (ftrace_graph_active) | ||
2728 | command |= FTRACE_STOP_FUNC_RET; | ||
2729 | ftrace_run_update_code(command); | ||
2730 | } | ||
2709 | } | 2731 | } |
2710 | 2732 | ||
2711 | static cycle_t ftrace_update_time; | 2733 | static cycle_t ftrace_update_time; |
@@ -5558,12 +5580,12 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
5558 | 5580 | ||
5559 | if (ftrace_enabled) { | 5581 | if (ftrace_enabled) { |
5560 | 5582 | ||
5561 | ftrace_startup_sysctl(); | ||
5562 | |||
5563 | /* we are starting ftrace again */ | 5583 | /* we are starting ftrace again */ |
5564 | if (ftrace_ops_list != &ftrace_list_end) | 5584 | if (ftrace_ops_list != &ftrace_list_end) |
5565 | update_ftrace_function(); | 5585 | update_ftrace_function(); |
5566 | 5586 | ||
5587 | ftrace_startup_sysctl(); | ||
5588 | |||
5567 | } else { | 5589 | } else { |
5568 | /* stopping ftrace calls (just send to ftrace_stub) */ | 5590 | /* stopping ftrace calls (just send to ftrace_stub) */ |
5569 | ftrace_trace_function = ftrace_stub; | 5591 | ftrace_trace_function = ftrace_stub; |
@@ -5590,8 +5612,6 @@ static struct ftrace_ops graph_ops = { | |||
5590 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) | 5612 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) |
5591 | }; | 5613 | }; |
5592 | 5614 | ||
5593 | static int ftrace_graph_active; | ||
5594 | |||
5595 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) | 5615 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) |
5596 | { | 5616 | { |
5597 | return 0; | 5617 | return 0; |
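The ftrace hunks above hoist ftrace_graph_active out of the function-graph-only section and give it a compile-time 0 fallback, so ftrace_startup_sysctl()/ftrace_shutdown_sysctl() can test it without any #ifdef at the call sites. A minimal userspace sketch of that pattern follows; the names and the flag bit are illustrative, not the kernel's:

    #include <stdio.h>

    /* Define this to mimic CONFIG_FUNCTION_GRAPH_TRACER being enabled. */
    /* #define HAVE_GRAPH_TRACER 1 */

    #ifdef HAVE_GRAPH_TRACER
    static int graph_active;            /* real state when the feature is built in */
    #else
    # define graph_active 0             /* constant; the compiler drops the dead branch */
    #endif

    #define UPDATE_CALLS   0x01         /* illustrative command bits */
    #define START_FUNC_RET 0x08

    static int startup_command(void)
    {
            int command = UPDATE_CALLS;

            if (graph_active)           /* no #ifdef needed at the call site */
                    command |= START_FUNC_RET;
            return command;
    }

    int main(void)
    {
            printf("command = %#x\n", startup_command());
            return 0;
    }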
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f28849394791..41ff75b478c6 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -2728,19 +2728,57 @@ bool flush_work(struct work_struct *work) | |||
2728 | } | 2728 | } |
2729 | EXPORT_SYMBOL_GPL(flush_work); | 2729 | EXPORT_SYMBOL_GPL(flush_work); |
2730 | 2730 | ||
2731 | struct cwt_wait { | ||
2732 | wait_queue_t wait; | ||
2733 | struct work_struct *work; | ||
2734 | }; | ||
2735 | |||
2736 | static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key) | ||
2737 | { | ||
2738 | struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); | ||
2739 | |||
2740 | if (cwait->work != key) | ||
2741 | return 0; | ||
2742 | return autoremove_wake_function(wait, mode, sync, key); | ||
2743 | } | ||
2744 | |||
2731 | static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) | 2745 | static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) |
2732 | { | 2746 | { |
2747 | static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); | ||
2733 | unsigned long flags; | 2748 | unsigned long flags; |
2734 | int ret; | 2749 | int ret; |
2735 | 2750 | ||
2736 | do { | 2751 | do { |
2737 | ret = try_to_grab_pending(work, is_dwork, &flags); | 2752 | ret = try_to_grab_pending(work, is_dwork, &flags); |
2738 | /* | 2753 | /* |
2739 | * If someone else is canceling, wait for the same event it | 2754 | * If someone else is already canceling, wait for it to |
2740 | * would be waiting for before retrying. | 2755 | * finish. flush_work() doesn't work for PREEMPT_NONE |
2756 | * because we may get scheduled between @work's completion | ||
2757 | * and the other canceling task resuming and clearing | ||
2758 | * CANCELING - flush_work() will return false immediately | ||
2759 | * as @work is no longer busy, try_to_grab_pending() will | ||
2760 | * return -ENOENT as @work is still being canceled and the | ||
2761 | * other canceling task won't be able to clear CANCELING as | ||
2762 | * we're hogging the CPU. | ||
2763 | * | ||
2764 | * Let's wait for completion using a waitqueue. As this | ||
2765 | * may lead to the thundering herd problem, use a custom | ||
2766 | * wake function which matches @work along with exclusive | ||
2767 | * wait and wakeup. | ||
2741 | */ | 2768 | */ |
2742 | if (unlikely(ret == -ENOENT)) | 2769 | if (unlikely(ret == -ENOENT)) { |
2743 | flush_work(work); | 2770 | struct cwt_wait cwait; |
2771 | |||
2772 | init_wait(&cwait.wait); | ||
2773 | cwait.wait.func = cwt_wakefn; | ||
2774 | cwait.work = work; | ||
2775 | |||
2776 | prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, | ||
2777 | TASK_UNINTERRUPTIBLE); | ||
2778 | if (work_is_canceling(work)) | ||
2779 | schedule(); | ||
2780 | finish_wait(&cancel_waitq, &cwait.wait); | ||
2781 | } | ||
2744 | } while (unlikely(ret < 0)); | 2782 | } while (unlikely(ret < 0)); |
2745 | 2783 | ||
2746 | /* tell other tasks trying to grab @work to back off */ | 2784 | /* tell other tasks trying to grab @work to back off */ |
@@ -2749,6 +2787,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) | |||
2749 | 2787 | ||
2750 | flush_work(work); | 2788 | flush_work(work); |
2751 | clear_work_data(work); | 2789 | clear_work_data(work); |
2790 | |||
2791 | /* | ||
2792 | * Paired with prepare_to_wait() above so that either | ||
2793 | * waitqueue_active() is visible here or !work_is_canceling() is | ||
2794 | * visible there. | ||
2795 | */ | ||
2796 | smp_mb(); | ||
2797 | if (waitqueue_active(&cancel_waitq)) | ||
2798 | __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); | ||
2799 | |||
2752 | return ret; | 2800 | return ret; |
2753 | } | 2801 | } |
2754 | 2802 | ||
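The new comment in __cancel_work_timer() explains why simply retrying flush_work() can livelock on PREEMPT_NONE, and why the fix instead sleeps on a shared waitqueue whose wake function only releases the waiter matching the work being cancelled. A rough userspace analogue of that "wake only the matching waiter" idea is sketched below; it is illustrative pthread code, not the kernel mechanism, and unlike the kernel it does not use exclusive wakeups:

    #include <pthread.h>

    struct waiter {
            const void     *work;           /* key, like cwt_wait.work */
            int             done;
            pthread_cond_t  cond;
            struct waiter  *next;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct waiter *waiters;

    static void wait_for(struct waiter *w, const void *work)
    {
            pthread_mutex_lock(&lock);
            w->work = work;
            w->done = 0;
            pthread_cond_init(&w->cond, NULL);
            w->next = waiters;
            waiters = w;
            while (!w->done)                        /* sleep until *this* work is done */
                    pthread_cond_wait(&w->cond, &lock);
            /* a real version would also unlink w from the list here */
            pthread_mutex_unlock(&lock);
    }

    static void wake(const void *work)
    {
            struct waiter *w;

            pthread_mutex_lock(&lock);
            for (w = waiters; w; w = w->next)
                    if (w->work == work) {          /* the cwt_wakefn() filter */
                            w->done = 1;
                            pthread_cond_signal(&w->cond);
                    }
            pthread_mutex_unlock(&lock);
    }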
diff --git a/lib/Makefile b/lib/Makefile index 87eb3bffc283..58f74d2dd396 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -24,7 +24,7 @@ obj-y += lockref.o | |||
24 | 24 | ||
25 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 25 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
26 | bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ | 26 | bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ |
27 | gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \ | 27 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ |
28 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ | 28 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ |
29 | percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o | 29 | percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o |
30 | obj-y += string_helpers.o | 30 | obj-y += string_helpers.o |
diff --git a/mm/iov_iter.c b/lib/iov_iter.c index 827732047da1..9d96e283520c 100644 --- a/mm/iov_iter.c +++ b/lib/iov_iter.c | |||
@@ -751,3 +751,18 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages) | |||
751 | return npages; | 751 | return npages; |
752 | } | 752 | } |
753 | EXPORT_SYMBOL(iov_iter_npages); | 753 | EXPORT_SYMBOL(iov_iter_npages); |
754 | |||
755 | const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) | ||
756 | { | ||
757 | *new = *old; | ||
758 | if (new->type & ITER_BVEC) | ||
759 | return new->bvec = kmemdup(new->bvec, | ||
760 | new->nr_segs * sizeof(struct bio_vec), | ||
761 | flags); | ||
762 | else | ||
763 | /* iovec and kvec have identical layout */ | ||
764 | return new->iov = kmemdup(new->iov, | ||
765 | new->nr_segs * sizeof(struct iovec), | ||
766 | flags); | ||
767 | } | ||
768 | EXPORT_SYMBOL(dup_iter); | ||
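The dup_iter() helper added here copies the iterator by value and then deep-copies whichever segment array it references (bvec vs. iovec/kvec, which share a layout), returning the duplicated buffer so the caller can check for allocation failure and free it later. A small userspace analogue of that "shallow-copy the struct, then duplicate the array it points at" pattern, with made-up types:

    #include <stdlib.h>
    #include <string.h>

    struct segment { void *base; size_t len; };

    struct iter {
            struct segment *segs;   /* points into caller-owned memory */
            unsigned long   nr_segs;
            size_t          count;
    };

    /* Duplicate @old into @new so the copy owns its own segment array.
     * Returns the new array (NULL on allocation failure), mirroring how
     * dup_iter() returns the kmemdup()'d buffer for the caller to check. */
    static void *iter_dup(struct iter *new, const struct iter *old)
    {
            size_t bytes = old->nr_segs * sizeof(*old->segs);

            *new = *old;                            /* shallow copy first */
            new->segs = malloc(bytes);
            if (new->segs)
                    memcpy(new->segs, old->segs, bytes);   /* then deep-copy the array */
            return new->segs;
    }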
diff --git a/lib/seq_buf.c b/lib/seq_buf.c index 88c0854bd752..5c94e1012a91 100644 --- a/lib/seq_buf.c +++ b/lib/seq_buf.c | |||
@@ -61,7 +61,7 @@ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args) | |||
61 | 61 | ||
62 | if (s->len < s->size) { | 62 | if (s->len < s->size) { |
63 | len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args); | 63 | len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args); |
64 | if (seq_buf_can_fit(s, len)) { | 64 | if (s->len + len < s->size) { |
65 | s->len += len; | 65 | s->len += len; |
66 | return 0; | 66 | return 0; |
67 | } | 67 | } |
@@ -118,7 +118,7 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary) | |||
118 | 118 | ||
119 | if (s->len < s->size) { | 119 | if (s->len < s->size) { |
120 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); | 120 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); |
121 | if (seq_buf_can_fit(s, ret)) { | 121 | if (s->len + ret < s->size) { |
122 | s->len += ret; | 122 | s->len += ret; |
123 | return 0; | 123 | return 0; |
124 | } | 124 | } |
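Both forms of the seq_buf check answer the same question: vsnprintf()/bstr_printf() return the length the output would have had, so the buffer only really grew if that length fits in the remaining space and the caller must otherwise roll back. A tiny userspace illustration of why the return value alone is not enough:

    #include <stdio.h>

    int main(void)
    {
            char buf[16];
            size_t used = 10;                       /* pretend 10 bytes are already used */
            int len;

            /* snprintf reports the full length it wanted, even when truncated. */
            len = snprintf(buf + used, sizeof(buf) - used, "0123456789");
            printf("wanted %d bytes, had %zu free -> %s\n",
                   len, sizeof(buf) - used,
                   used + len < sizeof(buf) ? "fits" : "overflowed, roll back");
            return 0;
    }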
diff --git a/mm/Makefile b/mm/Makefile index 3c1caa2693bd..15dbe9903c27 100644 --- a/mm/Makefile +++ b/mm/Makefile | |||
@@ -21,7 +21,7 @@ obj-y := filemap.o mempool.o oom_kill.o \ | |||
21 | mm_init.o mmu_context.o percpu.o slab_common.o \ | 21 | mm_init.o mmu_context.o percpu.o slab_common.o \ |
22 | compaction.o vmacache.o \ | 22 | compaction.o vmacache.o \ |
23 | interval_tree.o list_lru.o workingset.o \ | 23 | interval_tree.o list_lru.o workingset.o \ |
24 | iov_iter.o debug.o $(mmu-y) | 24 | debug.o $(mmu-y) |
25 | 25 | ||
26 | obj-y += init-mm.o | 26 | obj-y += init-mm.o |
27 | 27 | ||
diff --git a/mm/cma.c b/mm/cma.c --- a/mm/cma.c +++ b/mm/cma.c | |||
@@ -64,15 +64,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order) | |||
64 | return (1UL << (align_order - cma->order_per_bit)) - 1; | 64 | return (1UL << (align_order - cma->order_per_bit)) - 1; |
65 | } | 65 | } |
66 | 66 | ||
67 | /* | ||
68 | * Find a PFN aligned to the specified order and return an offset represented in | ||
69 | * order_per_bits. | ||
70 | */ | ||
67 | static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order) | 71 | static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order) |
68 | { | 72 | { |
69 | unsigned int alignment; | ||
70 | |||
71 | if (align_order <= cma->order_per_bit) | 73 | if (align_order <= cma->order_per_bit) |
72 | return 0; | 74 | return 0; |
73 | alignment = 1UL << (align_order - cma->order_per_bit); | 75 | |
74 | return ALIGN(cma->base_pfn, alignment) - | 76 | return (ALIGN(cma->base_pfn, (1UL << align_order)) |
75 | (cma->base_pfn >> cma->order_per_bit); | 77 | - cma->base_pfn) >> cma->order_per_bit; |
76 | } | 78 | } |
77 | 79 | ||
78 | static unsigned long cma_bitmap_maxno(struct cma *cma) | 80 | static unsigned long cma_bitmap_maxno(struct cma *cma) |
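The rewritten cma_bitmap_aligned_offset() computes how far, in bitmap bits, the first PFN aligned to the requested order sits from the region base; as the left-hand column suggests, the old expression mixed a PFN-space aligned value with a base already shifted into bitmap units, which goes wrong whenever order_per_bit is non-zero. The new formula, checked standalone with illustrative numbers:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    /* Same arithmetic as the hunk above: each bitmap bit covers
     * 2^order_per_bit pages. */
    static unsigned long aligned_offset(unsigned long base_pfn,
                                        unsigned int order_per_bit,
                                        unsigned int align_order)
    {
            if (align_order <= order_per_bit)
                    return 0;
            return (ALIGN(base_pfn, 1UL << align_order) - base_pfn) >> order_per_bit;
    }

    int main(void)
    {
            /* base 1060, 2 pages per bit, want 32-page alignment:
             * first aligned PFN is 1088 -> (1088 - 1060) / 2 = 14 bits in. */
            printf("%lu\n", aligned_offset(1060, 1, 5));
            return 0;
    }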
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index fc00c8cb5a82..626e93db28ba 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1295,8 +1295,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
1295 | * Avoid grouping on DSO/COW pages in specific and RO pages | 1295 | * Avoid grouping on DSO/COW pages in specific and RO pages |
1296 | * in general, RO pages shouldn't hurt as much anyway since | 1296 | * in general, RO pages shouldn't hurt as much anyway since |
1297 | * they can be in shared cache state. | 1297 | * they can be in shared cache state. |
1298 | * | ||
1299 | * FIXME! This checks "pmd_dirty()" as an approximation of | ||
1300 | * "is this a read-only page", since checking "pmd_write()" | ||
1301 | * is even more broken. We haven't actually turned this into | ||
1302 | * a writable page, so pmd_write() will always be false. | ||
1298 | */ | 1303 | */ |
1299 | if (!pmd_write(pmd)) | 1304 | if (!pmd_dirty(pmd)) |
1300 | flags |= TNF_NO_GROUP; | 1305 | flags |= TNF_NO_GROUP; |
1301 | 1306 | ||
1302 | /* | 1307 | /* |
@@ -1482,6 +1487,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
1482 | 1487 | ||
1483 | if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { | 1488 | if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { |
1484 | pmd_t entry; | 1489 | pmd_t entry; |
1490 | ret = 1; | ||
1485 | 1491 | ||
1486 | /* | 1492 | /* |
1487 | * Avoid trapping faults against the zero page. The read-only | 1493 | * Avoid trapping faults against the zero page. The read-only |
@@ -1490,11 +1496,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
1490 | */ | 1496 | */ |
1491 | if (prot_numa && is_huge_zero_pmd(*pmd)) { | 1497 | if (prot_numa && is_huge_zero_pmd(*pmd)) { |
1492 | spin_unlock(ptl); | 1498 | spin_unlock(ptl); |
1493 | return 0; | 1499 | return ret; |
1494 | } | 1500 | } |
1495 | 1501 | ||
1496 | if (!prot_numa || !pmd_protnone(*pmd)) { | 1502 | if (!prot_numa || !pmd_protnone(*pmd)) { |
1497 | ret = 1; | ||
1498 | entry = pmdp_get_and_clear_notify(mm, addr, pmd); | 1503 | entry = pmdp_get_and_clear_notify(mm, addr, pmd); |
1499 | entry = pmd_modify(entry, newprot); | 1504 | entry = pmd_modify(entry, newprot); |
1500 | ret = HPAGE_PMD_NR; | 1505 | ret = HPAGE_PMD_NR; |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 0a9ac6c26832..c41b2a0ee273 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -917,7 +917,6 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order) | |||
917 | __SetPageHead(page); | 917 | __SetPageHead(page); |
918 | __ClearPageReserved(page); | 918 | __ClearPageReserved(page); |
919 | for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { | 919 | for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { |
920 | __SetPageTail(p); | ||
921 | /* | 920 | /* |
922 | * For gigantic hugepages allocated through bootmem at | 921 | * For gigantic hugepages allocated through bootmem at |
923 | * boot, it's safer to be consistent with the not-gigantic | 922 | * boot, it's safer to be consistent with the not-gigantic |
@@ -933,6 +932,9 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order) | |||
933 | __ClearPageReserved(p); | 932 | __ClearPageReserved(p); |
934 | set_page_count(p, 0); | 933 | set_page_count(p, 0); |
935 | p->first_page = page; | 934 | p->first_page = page; |
935 | /* Make sure p->first_page is always valid for PageTail() */ | ||
936 | smp_wmb(); | ||
937 | __SetPageTail(p); | ||
936 | } | 938 | } |
937 | } | 939 | } |
938 | 940 | ||
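The hugetlb hunk moves __SetPageTail() after the p->first_page assignment and inserts smp_wmb() so that any CPU observing PageTail(p) also observes a valid first_page pointer. The same publish-then-flag ordering can be sketched with portable C11 atomics (the kernel uses its own barrier primitives, and the struct here is purely illustrative):

    #include <stdatomic.h>
    #include <stddef.h>

    struct page_like {
            void      *first_page;          /* payload, written first           */
            atomic_int tail;                /* flag readers key off, set last   */
    };

    static void publish(struct page_like *p, void *head)
    {
            p->first_page = head;                           /* 1. fill in the data   */
            atomic_store_explicit(&p->tail, 1,              /* 2. then set the flag, */
                                  memory_order_release);    /*    ordered after it   */
    }

    static void *reader(struct page_like *p)
    {
            if (atomic_load_explicit(&p->tail, memory_order_acquire))
                    return p->first_page;   /* guaranteed to see the pointer from step 1 */
            return NULL;
    }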
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 78fee632a7ee..936d81661c47 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/stacktrace.h> | 29 | #include <linux/stacktrace.h> |
30 | #include <linux/string.h> | 30 | #include <linux/string.h> |
31 | #include <linux/types.h> | 31 | #include <linux/types.h> |
32 | #include <linux/vmalloc.h> | ||
32 | #include <linux/kasan.h> | 33 | #include <linux/kasan.h> |
33 | 34 | ||
34 | #include "kasan.h" | 35 | #include "kasan.h" |
@@ -414,12 +415,19 @@ int kasan_module_alloc(void *addr, size_t size) | |||
414 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, | 415 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, |
415 | PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, | 416 | PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, |
416 | __builtin_return_address(0)); | 417 | __builtin_return_address(0)); |
417 | return ret ? 0 : -ENOMEM; | 418 | |
419 | if (ret) { | ||
420 | find_vm_area(addr)->flags |= VM_KASAN; | ||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | return -ENOMEM; | ||
418 | } | 425 | } |
419 | 426 | ||
420 | void kasan_module_free(void *addr) | 427 | void kasan_free_shadow(const struct vm_struct *vm) |
421 | { | 428 | { |
422 | vfree(kasan_mem_to_shadow(addr)); | 429 | if (vm->flags & VM_KASAN) |
430 | vfree(kasan_mem_to_shadow(vm->addr)); | ||
423 | } | 431 | } |
424 | 432 | ||
425 | static void register_global(struct kasan_global *global) | 433 | static void register_global(struct kasan_global *global) |
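This kasan change drops the explicit kasan_module_free() call from module_memfree() and instead tags the vm_struct with VM_KASAN at allocation time, so the generic remove_vm_area() path knows which areas carry a shadow region to release. A minimal userspace sketch of that "tag the allocation so one common free path can do the extra work" pattern; the flag name and structure below are illustrative, not the kernel's:

    #include <stdlib.h>

    #define AREA_HAS_SHADOW 0x1             /* plays the role of VM_KASAN */

    struct area {
            unsigned long flags;
            void         *mem;
            void         *shadow;           /* extra resource only some areas have */
    };

    static int area_alloc(struct area *a, size_t size, int want_shadow)
    {
            a->flags = 0;
            a->mem = malloc(size);
            if (!a->mem)
                    return -1;
            if (want_shadow) {
                    a->shadow = calloc(1, size / 8);
                    if (!a->shadow) {
                            free(a->mem);
                            return -1;
                    }
                    a->flags |= AREA_HAS_SHADOW;    /* remember for the free path */
            }
            return 0;
    }

    static void area_free(struct area *a)
    {
            if (a->flags & AREA_HAS_SHADOW)         /* one generic free path for all areas */
                    free(a->shadow);
            free(a->mem);
    }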
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9fe07692eaad..b34ef4a32a3b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -5232,7 +5232,9 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css) | |||
5232 | * on for the root memcg is enough. | 5232 | * on for the root memcg is enough. |
5233 | */ | 5233 | */ |
5234 | if (cgroup_on_dfl(root_css->cgroup)) | 5234 | if (cgroup_on_dfl(root_css->cgroup)) |
5235 | mem_cgroup_from_css(root_css)->use_hierarchy = true; | 5235 | root_mem_cgroup->use_hierarchy = true; |
5236 | else | ||
5237 | root_mem_cgroup->use_hierarchy = false; | ||
5236 | } | 5238 | } |
5237 | 5239 | ||
5238 | static u64 memory_current_read(struct cgroup_subsys_state *css, | 5240 | static u64 memory_current_read(struct cgroup_subsys_state *css, |
diff --git a/mm/memory.c b/mm/memory.c index 8068893697bb..411144f977b1 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -3072,8 +3072,13 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3072 | * Avoid grouping on DSO/COW pages in specific and RO pages | 3072 | * Avoid grouping on DSO/COW pages in specific and RO pages |
3073 | * in general, RO pages shouldn't hurt as much anyway since | 3073 | * in general, RO pages shouldn't hurt as much anyway since |
3074 | * they can be in shared cache state. | 3074 | * they can be in shared cache state. |
3075 | * | ||
3076 | * FIXME! This checks "pmd_dirty()" as an approximation of | ||
3077 | * "is this a read-only page", since checking "pmd_write()" | ||
3078 | * is even more broken. We haven't actually turned this into | ||
3079 | * a writable page, so pmd_write() will always be false. | ||
3075 | */ | 3080 | */ |
3076 | if (!pte_write(pte)) | 3081 | if (!pte_dirty(pte)) |
3077 | flags |= TNF_NO_GROUP; | 3082 | flags |= TNF_NO_GROUP; |
3078 | 3083 | ||
3079 | /* | 3084 | /* |
diff --git a/mm/mlock.c b/mm/mlock.c index 73cf0987088c..8a54cd214925 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -26,10 +26,10 @@ | |||
26 | 26 | ||
27 | int can_do_mlock(void) | 27 | int can_do_mlock(void) |
28 | { | 28 | { |
29 | if (capable(CAP_IPC_LOCK)) | ||
30 | return 1; | ||
31 | if (rlimit(RLIMIT_MEMLOCK) != 0) | 29 | if (rlimit(RLIMIT_MEMLOCK) != 0) |
32 | return 1; | 30 | return 1; |
31 | if (capable(CAP_IPC_LOCK)) | ||
32 | return 1; | ||
33 | return 0; | 33 | return 0; |
34 | } | 34 | } |
35 | EXPORT_SYMBOL(can_do_mlock); | 35 | EXPORT_SYMBOL(can_do_mlock); |
diff --git a/mm/nommu.c b/mm/nommu.c index 3e67e7538ecf..3fba2dc97c44 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -62,6 +62,7 @@ void *high_memory; | |||
62 | EXPORT_SYMBOL(high_memory); | 62 | EXPORT_SYMBOL(high_memory); |
63 | struct page *mem_map; | 63 | struct page *mem_map; |
64 | unsigned long max_mapnr; | 64 | unsigned long max_mapnr; |
65 | EXPORT_SYMBOL(max_mapnr); | ||
65 | unsigned long highest_memmap_pfn; | 66 | unsigned long highest_memmap_pfn; |
66 | struct percpu_counter vm_committed_as; | 67 | struct percpu_counter vm_committed_as; |
67 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ | 68 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7abfa70cdc1a..40e29429e7b0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2373,7 +2373,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, | |||
2373 | goto out; | 2373 | goto out; |
2374 | } | 2374 | } |
2375 | /* Exhausted what can be done so it's blamo time */ | 2375 | /* Exhausted what can be done so it's blamo time */ |
2376 | if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)) | 2376 | if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false) |
2377 | || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) | ||
2377 | *did_some_progress = 1; | 2378 | *did_some_progress = 1; |
2378 | out: | 2379 | out: |
2379 | oom_zonelist_unlock(ac->zonelist, gfp_mask); | 2380 | oom_zonelist_unlock(ac->zonelist, gfp_mask); |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 35b25e1340ca..49abccf29a29 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -1418,6 +1418,7 @@ struct vm_struct *remove_vm_area(const void *addr) | |||
1418 | spin_unlock(&vmap_area_lock); | 1418 | spin_unlock(&vmap_area_lock); |
1419 | 1419 | ||
1420 | vmap_debug_free_range(va->va_start, va->va_end); | 1420 | vmap_debug_free_range(va->va_start, va->va_end); |
1421 | kasan_free_shadow(vm); | ||
1421 | free_unmap_vmap_area(va); | 1422 | free_unmap_vmap_area(va); |
1422 | vm->size -= PAGE_SIZE; | 1423 | vm->size -= PAGE_SIZE; |
1423 | 1424 | ||
diff --git a/net/can/af_can.c b/net/can/af_can.c index 66e08040ced7..32d710eaf1fc 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c | |||
@@ -259,6 +259,9 @@ int can_send(struct sk_buff *skb, int loop) | |||
259 | goto inval_skb; | 259 | goto inval_skb; |
260 | } | 260 | } |
261 | 261 | ||
262 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
263 | |||
264 | skb_reset_mac_header(skb); | ||
262 | skb_reset_network_header(skb); | 265 | skb_reset_network_header(skb); |
263 | skb_reset_transport_header(skb); | 266 | skb_reset_transport_header(skb); |
264 | 267 | ||
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 2c8d98e728c0..145a50c4d566 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -659,27 +659,30 @@ EXPORT_SYMBOL(ip_defrag); | |||
659 | struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) | 659 | struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) |
660 | { | 660 | { |
661 | struct iphdr iph; | 661 | struct iphdr iph; |
662 | int netoff; | ||
662 | u32 len; | 663 | u32 len; |
663 | 664 | ||
664 | if (skb->protocol != htons(ETH_P_IP)) | 665 | if (skb->protocol != htons(ETH_P_IP)) |
665 | return skb; | 666 | return skb; |
666 | 667 | ||
667 | if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0) | 668 | netoff = skb_network_offset(skb); |
669 | |||
670 | if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0) | ||
668 | return skb; | 671 | return skb; |
669 | 672 | ||
670 | if (iph.ihl < 5 || iph.version != 4) | 673 | if (iph.ihl < 5 || iph.version != 4) |
671 | return skb; | 674 | return skb; |
672 | 675 | ||
673 | len = ntohs(iph.tot_len); | 676 | len = ntohs(iph.tot_len); |
674 | if (skb->len < len || len < (iph.ihl * 4)) | 677 | if (skb->len < netoff + len || len < (iph.ihl * 4)) |
675 | return skb; | 678 | return skb; |
676 | 679 | ||
677 | if (ip_is_fragment(&iph)) { | 680 | if (ip_is_fragment(&iph)) { |
678 | skb = skb_share_check(skb, GFP_ATOMIC); | 681 | skb = skb_share_check(skb, GFP_ATOMIC); |
679 | if (skb) { | 682 | if (skb) { |
680 | if (!pskb_may_pull(skb, iph.ihl*4)) | 683 | if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) |
681 | return skb; | 684 | return skb; |
682 | if (pskb_trim_rcsum(skb, len)) | 685 | if (pskb_trim_rcsum(skb, netoff + len)) |
683 | return skb; | 686 | return skb; |
684 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); | 687 | memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); |
685 | if (ip_defrag(skb, user)) | 688 | if (ip_defrag(skb, user)) |
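The ip_check_defrag() fix reads the IP header at skb_network_offset() instead of offset 0 and folds that offset into every length check, which matters when the caller hands in a buffer whose link-layer header still precedes the network header. The arithmetic behind the length check, with illustrative numbers (the skb fields are stand-ins here, not the real API):

    #include <stdio.h>

    int main(void)
    {
            unsigned int buf_len = 100;     /* bytes actually present in the skb      */
            unsigned int netoff  = 14;      /* Ethernet header before the IP header   */
            unsigned int tot_len = 96;      /* length claimed by iph.tot_len          */

            /* Old check ignored the link-layer header, so a packet that is
             * really truncated (14 + 96 > 100) would still be accepted. */
            printf("old check passes: %d\n", tot_len <= buf_len);
            /* New check from the hunk above includes the offset. */
            printf("new check passes: %d\n", netoff + tot_len <= buf_len);
            return 0;
    }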
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 31d8c71986b4..5cd99271d3a6 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -432,17 +432,32 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf | |||
432 | kfree_skb(skb); | 432 | kfree_skb(skb); |
433 | } | 433 | } |
434 | 434 | ||
435 | static bool ipv4_pktinfo_prepare_errqueue(const struct sock *sk, | 435 | /* IPv4 supports cmsg on all imcp errors and some timestamps |
436 | const struct sk_buff *skb, | 436 | * |
437 | int ee_origin) | 437 | * Timestamp code paths do not initialize the fields expected by cmsg: |
438 | * the PKTINFO fields in skb->cb[]. Fill those in here. | ||
439 | */ | ||
440 | static bool ipv4_datagram_support_cmsg(const struct sock *sk, | ||
441 | struct sk_buff *skb, | ||
442 | int ee_origin) | ||
438 | { | 443 | { |
439 | struct in_pktinfo *info = PKTINFO_SKB_CB(skb); | 444 | struct in_pktinfo *info; |
445 | |||
446 | if (ee_origin == SO_EE_ORIGIN_ICMP) | ||
447 | return true; | ||
440 | 448 | ||
441 | if ((ee_origin != SO_EE_ORIGIN_TIMESTAMPING) || | 449 | if (ee_origin == SO_EE_ORIGIN_LOCAL) |
442 | (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || | 450 | return false; |
451 | |||
452 | /* Support IP_PKTINFO on tstamp packets if requested, to correlate | ||
453 | * timestamp with egress dev. Not possible for packets without dev | ||
454 | * or without payload (SOF_TIMESTAMPING_OPT_TSONLY). | ||
455 | */ | ||
456 | if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || | ||
443 | (!skb->dev)) | 457 | (!skb->dev)) |
444 | return false; | 458 | return false; |
445 | 459 | ||
460 | info = PKTINFO_SKB_CB(skb); | ||
446 | info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; | 461 | info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; |
447 | info->ipi_ifindex = skb->dev->ifindex; | 462 | info->ipi_ifindex = skb->dev->ifindex; |
448 | return true; | 463 | return true; |
@@ -483,7 +498,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
483 | 498 | ||
484 | serr = SKB_EXT_ERR(skb); | 499 | serr = SKB_EXT_ERR(skb); |
485 | 500 | ||
486 | if (sin && skb->len) { | 501 | if (sin && serr->port) { |
487 | sin->sin_family = AF_INET; | 502 | sin->sin_family = AF_INET; |
488 | sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) + | 503 | sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) + |
489 | serr->addr_offset); | 504 | serr->addr_offset); |
@@ -496,9 +511,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
496 | sin = &errhdr.offender; | 511 | sin = &errhdr.offender; |
497 | memset(sin, 0, sizeof(*sin)); | 512 | memset(sin, 0, sizeof(*sin)); |
498 | 513 | ||
499 | if (skb->len && | 514 | if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) { |
500 | (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP || | ||
501 | ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin))) { | ||
502 | sin->sin_family = AF_INET; | 515 | sin->sin_family = AF_INET; |
503 | sin->sin_addr.s_addr = ip_hdr(skb)->saddr; | 516 | sin->sin_addr.s_addr = ip_hdr(skb)->saddr; |
504 | if (inet_sk(sk)->cmsg_flags) | 517 | if (inet_sk(sk)->cmsg_flags) |
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index e9f66e1cda50..208d5439e59b 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
@@ -259,6 +259,9 @@ int ping_init_sock(struct sock *sk) | |||
259 | kgid_t low, high; | 259 | kgid_t low, high; |
260 | int ret = 0; | 260 | int ret = 0; |
261 | 261 | ||
262 | if (sk->sk_family == AF_INET6) | ||
263 | sk->sk_ipv6only = 1; | ||
264 | |||
262 | inet_get_ping_group_range_net(net, &low, &high); | 265 | inet_get_ping_group_range_net(net, &low, &high); |
263 | if (gid_lte(low, group) && gid_lte(group, high)) | 266 | if (gid_lte(low, group) && gid_lte(group, high)) |
264 | return 0; | 267 | return 0; |
@@ -305,6 +308,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, | |||
305 | if (addr_len < sizeof(*addr)) | 308 | if (addr_len < sizeof(*addr)) |
306 | return -EINVAL; | 309 | return -EINVAL; |
307 | 310 | ||
311 | if (addr->sin_family != AF_INET && | ||
312 | !(addr->sin_family == AF_UNSPEC && | ||
313 | addr->sin_addr.s_addr == htonl(INADDR_ANY))) | ||
314 | return -EAFNOSUPPORT; | ||
315 | |||
308 | pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", | 316 | pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", |
309 | sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); | 317 | sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); |
310 | 318 | ||
@@ -330,7 +338,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, | |||
330 | return -EINVAL; | 338 | return -EINVAL; |
331 | 339 | ||
332 | if (addr->sin6_family != AF_INET6) | 340 | if (addr->sin6_family != AF_INET6) |
333 | return -EINVAL; | 341 | return -EAFNOSUPPORT; |
334 | 342 | ||
335 | pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", | 343 | pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", |
336 | sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); | 344 | sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); |
@@ -716,7 +724,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m | |||
716 | if (msg->msg_namelen < sizeof(*usin)) | 724 | if (msg->msg_namelen < sizeof(*usin)) |
717 | return -EINVAL; | 725 | return -EINVAL; |
718 | if (usin->sin_family != AF_INET) | 726 | if (usin->sin_family != AF_INET) |
719 | return -EINVAL; | 727 | return -EAFNOSUPPORT; |
720 | daddr = usin->sin_addr.s_addr; | 728 | daddr = usin->sin_addr.s_addr; |
721 | /* no remote port */ | 729 | /* no remote port */ |
722 | } else { | 730 | } else { |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 9d72a0fcd928..995a2259bcfc 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -835,17 +835,13 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, | |||
835 | int large_allowed) | 835 | int large_allowed) |
836 | { | 836 | { |
837 | struct tcp_sock *tp = tcp_sk(sk); | 837 | struct tcp_sock *tp = tcp_sk(sk); |
838 | u32 new_size_goal, size_goal, hlen; | 838 | u32 new_size_goal, size_goal; |
839 | 839 | ||
840 | if (!large_allowed || !sk_can_gso(sk)) | 840 | if (!large_allowed || !sk_can_gso(sk)) |
841 | return mss_now; | 841 | return mss_now; |
842 | 842 | ||
843 | /* Maybe we should/could use sk->sk_prot->max_header here ? */ | 843 | /* Note : tcp_tso_autosize() will eventually split this later */ |
844 | hlen = inet_csk(sk)->icsk_af_ops->net_header_len + | 844 | new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER; |
845 | inet_csk(sk)->icsk_ext_hdr_len + | ||
846 | tp->tcp_header_len; | ||
847 | |||
848 | new_size_goal = sk->sk_gso_max_size - 1 - hlen; | ||
849 | new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal); | 845 | new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal); |
850 | 846 | ||
851 | /* We try hard to avoid divides here */ | 847 | /* We try hard to avoid divides here */ |
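The tcp.c hunk stops estimating per-connection header space and simply reserves MAX_TCP_HEADER below sk_gso_max_size, leaving any finer splitting to tcp_tso_autosize() later. Back-of-the-envelope arithmetic for the new computation, using made-up values for the two socket constants and rounding down to whole segments roughly as the divide-avoiding code after the hunk does:

    #include <stdio.h>

    int main(void)
    {
            unsigned int gso_max_size   = 65536;    /* illustrative sk_gso_max_size */
            unsigned int max_tcp_header = 320;      /* illustrative MAX_TCP_HEADER  */
            unsigned int mss  = 1448;
            unsigned int goal = gso_max_size - 1 - max_tcp_header;

            printf("size goal: %u bytes (%u segments of %u)\n",
                   goal / mss * mss, goal / mss, mss);
            return 0;
    }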
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index c215be70cac0..ace8daca5c83 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -325,14 +325,34 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu) | |||
325 | kfree_skb(skb); | 325 | kfree_skb(skb); |
326 | } | 326 | } |
327 | 327 | ||
328 | static void ip6_datagram_prepare_pktinfo_errqueue(struct sk_buff *skb) | 328 | /* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL. |
329 | * | ||
330 | * At one point, excluding local errors was a quick test to identify icmp/icmp6 | ||
331 | * errors. This is no longer true, but the test remained, so the v6 stack, | ||
332 | * unlike v4, also honors cmsg requests on all wifi and timestamp errors. | ||
333 | * | ||
334 | * Timestamp code paths do not initialize the fields expected by cmsg: | ||
335 | * the PKTINFO fields in skb->cb[]. Fill those in here. | ||
336 | */ | ||
337 | static bool ip6_datagram_support_cmsg(struct sk_buff *skb, | ||
338 | struct sock_exterr_skb *serr) | ||
329 | { | 339 | { |
330 | int ifindex = skb->dev ? skb->dev->ifindex : -1; | 340 | if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP || |
341 | serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) | ||
342 | return true; | ||
343 | |||
344 | if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) | ||
345 | return false; | ||
346 | |||
347 | if (!skb->dev) | ||
348 | return false; | ||
331 | 349 | ||
332 | if (skb->protocol == htons(ETH_P_IPV6)) | 350 | if (skb->protocol == htons(ETH_P_IPV6)) |
333 | IP6CB(skb)->iif = ifindex; | 351 | IP6CB(skb)->iif = skb->dev->ifindex; |
334 | else | 352 | else |
335 | PKTINFO_SKB_CB(skb)->ipi_ifindex = ifindex; | 353 | PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex; |
354 | |||
355 | return true; | ||
336 | } | 356 | } |
337 | 357 | ||
338 | /* | 358 | /* |
@@ -369,7 +389,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
369 | 389 | ||
370 | serr = SKB_EXT_ERR(skb); | 390 | serr = SKB_EXT_ERR(skb); |
371 | 391 | ||
372 | if (sin && skb->len) { | 392 | if (sin && serr->port) { |
373 | const unsigned char *nh = skb_network_header(skb); | 393 | const unsigned char *nh = skb_network_header(skb); |
374 | sin->sin6_family = AF_INET6; | 394 | sin->sin6_family = AF_INET6; |
375 | sin->sin6_flowinfo = 0; | 395 | sin->sin6_flowinfo = 0; |
@@ -394,14 +414,11 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) | |||
394 | memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); | 414 | memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); |
395 | sin = &errhdr.offender; | 415 | sin = &errhdr.offender; |
396 | memset(sin, 0, sizeof(*sin)); | 416 | memset(sin, 0, sizeof(*sin)); |
397 | if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL && skb->len) { | 417 | |
418 | if (ip6_datagram_support_cmsg(skb, serr)) { | ||
398 | sin->sin6_family = AF_INET6; | 419 | sin->sin6_family = AF_INET6; |
399 | if (np->rxopt.all) { | 420 | if (np->rxopt.all) |
400 | if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP && | ||
401 | serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6) | ||
402 | ip6_datagram_prepare_pktinfo_errqueue(skb); | ||
403 | ip6_datagram_recv_common_ctl(sk, msg, skb); | 421 | ip6_datagram_recv_common_ctl(sk, msg, skb); |
404 | } | ||
405 | if (skb->protocol == htons(ETH_P_IPV6)) { | 422 | if (skb->protocol == htons(ETH_P_IPV6)) { |
406 | sin->sin6_addr = ipv6_hdr(skb)->saddr; | 423 | sin->sin6_addr = ipv6_hdr(skb)->saddr; |
407 | if (np->rxopt.all) | 424 | if (np->rxopt.all) |
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index bd46f736f61d..a2dfff6ff227 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c | |||
@@ -102,9 +102,10 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
102 | 102 | ||
103 | if (msg->msg_name) { | 103 | if (msg->msg_name) { |
104 | DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name); | 104 | DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name); |
105 | if (msg->msg_namelen < sizeof(struct sockaddr_in6) || | 105 | if (msg->msg_namelen < sizeof(*u)) |
106 | u->sin6_family != AF_INET6) { | ||
107 | return -EINVAL; | 106 | return -EINVAL; |
107 | if (u->sin6_family != AF_INET6) { | ||
108 | return -EAFNOSUPPORT; | ||
108 | } | 109 | } |
109 | if (sk->sk_bound_dev_if && | 110 | if (sk->sk_bound_dev_if && |
110 | sk->sk_bound_dev_if != u->sin6_scope_id) { | 111 | sk->sk_bound_dev_if != u->sin6_scope_id) { |
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index c47ffd7a0a70..d93ceeb3ef04 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c | |||
@@ -896,6 +896,8 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, | |||
896 | IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); | 896 | IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); |
897 | return; | 897 | return; |
898 | } | 898 | } |
899 | if (!(flags & IP_VS_CONN_F_TEMPLATE)) | ||
900 | kfree(param->pe_data); | ||
899 | } | 901 | } |
900 | 902 | ||
901 | if (opt) | 903 | if (opt) |
@@ -1169,6 +1171,7 @@ static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end) | |||
1169 | (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) | 1171 | (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) |
1170 | ); | 1172 | ); |
1171 | #endif | 1173 | #endif |
1174 | ip_vs_pe_put(param.pe); | ||
1172 | return 0; | 1175 | return 0; |
1173 | /* Error exit */ | 1176 | /* Error exit */ |
1174 | out: | 1177 | out: |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 199fd0f27b0e..6ab777912237 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -227,7 +227,7 @@ nft_rule_deactivate_next(struct net *net, struct nft_rule *rule) | |||
227 | 227 | ||
228 | static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) | 228 | static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) |
229 | { | 229 | { |
230 | rule->genmask = 0; | 230 | rule->genmask &= ~(1 << gencursor_next(net)); |
231 | } | 231 | } |
232 | 232 | ||
233 | static int | 233 | static int |
@@ -1711,9 +1711,12 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, | |||
1711 | } | 1711 | } |
1712 | nla_nest_end(skb, list); | 1712 | nla_nest_end(skb, list); |
1713 | 1713 | ||
1714 | if (rule->ulen && | 1714 | if (rule->udata) { |
1715 | nla_put(skb, NFTA_RULE_USERDATA, rule->ulen, nft_userdata(rule))) | 1715 | struct nft_userdata *udata = nft_userdata(rule); |
1716 | goto nla_put_failure; | 1716 | if (nla_put(skb, NFTA_RULE_USERDATA, udata->len + 1, |
1717 | udata->data) < 0) | ||
1718 | goto nla_put_failure; | ||
1719 | } | ||
1717 | 1720 | ||
1718 | nlmsg_end(skb, nlh); | 1721 | nlmsg_end(skb, nlh); |
1719 | return 0; | 1722 | return 0; |
@@ -1896,11 +1899,12 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
1896 | struct nft_table *table; | 1899 | struct nft_table *table; |
1897 | struct nft_chain *chain; | 1900 | struct nft_chain *chain; |
1898 | struct nft_rule *rule, *old_rule = NULL; | 1901 | struct nft_rule *rule, *old_rule = NULL; |
1902 | struct nft_userdata *udata; | ||
1899 | struct nft_trans *trans = NULL; | 1903 | struct nft_trans *trans = NULL; |
1900 | struct nft_expr *expr; | 1904 | struct nft_expr *expr; |
1901 | struct nft_ctx ctx; | 1905 | struct nft_ctx ctx; |
1902 | struct nlattr *tmp; | 1906 | struct nlattr *tmp; |
1903 | unsigned int size, i, n, ulen = 0; | 1907 | unsigned int size, i, n, ulen = 0, usize = 0; |
1904 | int err, rem; | 1908 | int err, rem; |
1905 | bool create; | 1909 | bool create; |
1906 | u64 handle, pos_handle; | 1910 | u64 handle, pos_handle; |
@@ -1968,12 +1972,19 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
1968 | n++; | 1972 | n++; |
1969 | } | 1973 | } |
1970 | } | 1974 | } |
1975 | /* Check for overflow of dlen field */ | ||
1976 | err = -EFBIG; | ||
1977 | if (size >= 1 << 12) | ||
1978 | goto err1; | ||
1971 | 1979 | ||
1972 | if (nla[NFTA_RULE_USERDATA]) | 1980 | if (nla[NFTA_RULE_USERDATA]) { |
1973 | ulen = nla_len(nla[NFTA_RULE_USERDATA]); | 1981 | ulen = nla_len(nla[NFTA_RULE_USERDATA]); |
1982 | if (ulen > 0) | ||
1983 | usize = sizeof(struct nft_userdata) + ulen; | ||
1984 | } | ||
1974 | 1985 | ||
1975 | err = -ENOMEM; | 1986 | err = -ENOMEM; |
1976 | rule = kzalloc(sizeof(*rule) + size + ulen, GFP_KERNEL); | 1987 | rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL); |
1977 | if (rule == NULL) | 1988 | if (rule == NULL) |
1978 | goto err1; | 1989 | goto err1; |
1979 | 1990 | ||
@@ -1981,10 +1992,13 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
1981 | 1992 | ||
1982 | rule->handle = handle; | 1993 | rule->handle = handle; |
1983 | rule->dlen = size; | 1994 | rule->dlen = size; |
1984 | rule->ulen = ulen; | 1995 | rule->udata = ulen ? 1 : 0; |
1985 | 1996 | ||
1986 | if (ulen) | 1997 | if (ulen) { |
1987 | nla_memcpy(nft_userdata(rule), nla[NFTA_RULE_USERDATA], ulen); | 1998 | udata = nft_userdata(rule); |
1999 | udata->len = ulen - 1; | ||
2000 | nla_memcpy(udata->data, nla[NFTA_RULE_USERDATA], ulen); | ||
2001 | } | ||
1988 | 2002 | ||
1989 | expr = nft_expr_first(rule); | 2003 | expr = nft_expr_first(rule); |
1990 | for (i = 0; i < n; i++) { | 2004 | for (i = 0; i < n; i++) { |
@@ -2031,12 +2045,6 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
2031 | 2045 | ||
2032 | err3: | 2046 | err3: |
2033 | list_del_rcu(&rule->list); | 2047 | list_del_rcu(&rule->list); |
2034 | if (trans) { | ||
2035 | list_del_rcu(&nft_trans_rule(trans)->list); | ||
2036 | nft_rule_clear(net, nft_trans_rule(trans)); | ||
2037 | nft_trans_destroy(trans); | ||
2038 | chain->use++; | ||
2039 | } | ||
2040 | err2: | 2048 | err2: |
2041 | nf_tables_rule_destroy(&ctx, rule); | 2049 | nf_tables_rule_destroy(&ctx, rule); |
2042 | err1: | 2050 | err1: |
@@ -3612,12 +3620,11 @@ static int nf_tables_commit(struct sk_buff *skb) | |||
3612 | &te->elem, | 3620 | &te->elem, |
3613 | NFT_MSG_DELSETELEM, 0); | 3621 | NFT_MSG_DELSETELEM, 0); |
3614 | te->set->ops->get(te->set, &te->elem); | 3622 | te->set->ops->get(te->set, &te->elem); |
3615 | te->set->ops->remove(te->set, &te->elem); | ||
3616 | nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); | 3623 | nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); |
3617 | if (te->elem.flags & NFT_SET_MAP) { | 3624 | if (te->set->flags & NFT_SET_MAP && |
3618 | nft_data_uninit(&te->elem.data, | 3625 | !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END)) |
3619 | te->set->dtype); | 3626 | nft_data_uninit(&te->elem.data, te->set->dtype); |
3620 | } | 3627 | te->set->ops->remove(te->set, &te->elem); |
3621 | nft_trans_destroy(trans); | 3628 | nft_trans_destroy(trans); |
3622 | break; | 3629 | break; |
3623 | } | 3630 | } |
@@ -3658,7 +3665,7 @@ static int nf_tables_abort(struct sk_buff *skb) | |||
3658 | { | 3665 | { |
3659 | struct net *net = sock_net(skb->sk); | 3666 | struct net *net = sock_net(skb->sk); |
3660 | struct nft_trans *trans, *next; | 3667 | struct nft_trans *trans, *next; |
3661 | struct nft_set *set; | 3668 | struct nft_trans_elem *te; |
3662 | 3669 | ||
3663 | list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { | 3670 | list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { |
3664 | switch (trans->msg_type) { | 3671 | switch (trans->msg_type) { |
@@ -3719,9 +3726,13 @@ static int nf_tables_abort(struct sk_buff *skb) | |||
3719 | break; | 3726 | break; |
3720 | case NFT_MSG_NEWSETELEM: | 3727 | case NFT_MSG_NEWSETELEM: |
3721 | nft_trans_elem_set(trans)->nelems--; | 3728 | nft_trans_elem_set(trans)->nelems--; |
3722 | set = nft_trans_elem_set(trans); | 3729 | te = (struct nft_trans_elem *)trans->data; |
3723 | set->ops->get(set, &nft_trans_elem(trans)); | 3730 | te->set->ops->get(te->set, &te->elem); |
3724 | set->ops->remove(set, &nft_trans_elem(trans)); | 3731 | nft_data_uninit(&te->elem.key, NFT_DATA_VALUE); |
3732 | if (te->set->flags & NFT_SET_MAP && | ||
3733 | !(te->elem.flags & NFT_SET_ELEM_INTERVAL_END)) | ||
3734 | nft_data_uninit(&te->elem.data, te->set->dtype); | ||
3735 | te->set->ops->remove(te->set, &te->elem); | ||
3725 | nft_trans_destroy(trans); | 3736 | nft_trans_destroy(trans); |
3726 | break; | 3737 | break; |
3727 | case NFT_MSG_DELSETELEM: | 3738 | case NFT_MSG_DELSETELEM: |
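Among the nf_tables_api.c changes, the userdata rework replaces rule->ulen with a presence bit plus a struct nft_userdata header stored in front of the data; the hunks store ulen - 1 on creation and emit udata->len + 1 when dumping, which reads like the usual trick for fitting lengths 1..256 into an 8-bit field. The exact struct layout is not shown in this diff, so the sketch below assumes a one-byte length field:

    #include <assert.h>
    #include <string.h>

    struct userdata {                       /* assumed layout, not taken from the diff */
            unsigned char len;              /* stores the real length minus one        */
            unsigned char data[];
    };

    static void store(struct userdata *u, const void *src, unsigned int ulen)
    {
            assert(ulen >= 1 && ulen <= 256);       /* 1..256 all fit in one byte */
            u->len = ulen - 1;
            memcpy(u->data, src, ulen);
    }

    static unsigned int stored_len(const struct userdata *u)
    {
            return u->len + 1;              /* what the dump path adds back */
    }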
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 1279cd85663e..213584cf04b3 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
@@ -123,7 +123,7 @@ static void | |||
123 | nft_target_set_tgchk_param(struct xt_tgchk_param *par, | 123 | nft_target_set_tgchk_param(struct xt_tgchk_param *par, |
124 | const struct nft_ctx *ctx, | 124 | const struct nft_ctx *ctx, |
125 | struct xt_target *target, void *info, | 125 | struct xt_target *target, void *info, |
126 | union nft_entry *entry, u8 proto, bool inv) | 126 | union nft_entry *entry, u16 proto, bool inv) |
127 | { | 127 | { |
128 | par->net = ctx->net; | 128 | par->net = ctx->net; |
129 | par->table = ctx->table->name; | 129 | par->table = ctx->table->name; |
@@ -137,7 +137,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, | |||
137 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; | 137 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; |
138 | break; | 138 | break; |
139 | case NFPROTO_BRIDGE: | 139 | case NFPROTO_BRIDGE: |
140 | entry->ebt.ethproto = proto; | 140 | entry->ebt.ethproto = (__force __be16)proto; |
141 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; | 141 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; |
142 | break; | 142 | break; |
143 | } | 143 | } |
@@ -171,7 +171,7 @@ static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1] | |||
171 | [NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 }, | 171 | [NFTA_RULE_COMPAT_FLAGS] = { .type = NLA_U32 }, |
172 | }; | 172 | }; |
173 | 173 | ||
174 | static int nft_parse_compat(const struct nlattr *attr, u8 *proto, bool *inv) | 174 | static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv) |
175 | { | 175 | { |
176 | struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1]; | 176 | struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1]; |
177 | u32 flags; | 177 | u32 flags; |
@@ -203,7 +203,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
203 | struct xt_target *target = expr->ops->data; | 203 | struct xt_target *target = expr->ops->data; |
204 | struct xt_tgchk_param par; | 204 | struct xt_tgchk_param par; |
205 | size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO])); | 205 | size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO])); |
206 | u8 proto = 0; | 206 | u16 proto = 0; |
207 | bool inv = false; | 207 | bool inv = false; |
208 | union nft_entry e = {}; | 208 | union nft_entry e = {}; |
209 | int ret; | 209 | int ret; |
@@ -334,7 +334,7 @@ static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = { | |||
334 | static void | 334 | static void |
335 | nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, | 335 | nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, |
336 | struct xt_match *match, void *info, | 336 | struct xt_match *match, void *info, |
337 | union nft_entry *entry, u8 proto, bool inv) | 337 | union nft_entry *entry, u16 proto, bool inv) |
338 | { | 338 | { |
339 | par->net = ctx->net; | 339 | par->net = ctx->net; |
340 | par->table = ctx->table->name; | 340 | par->table = ctx->table->name; |
@@ -348,7 +348,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, | |||
348 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; | 348 | entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; |
349 | break; | 349 | break; |
350 | case NFPROTO_BRIDGE: | 350 | case NFPROTO_BRIDGE: |
351 | entry->ebt.ethproto = proto; | 351 | entry->ebt.ethproto = (__force __be16)proto; |
352 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; | 352 | entry->ebt.invflags = inv ? EBT_IPROTO : 0; |
353 | break; | 353 | break; |
354 | } | 354 | } |
@@ -385,7 +385,7 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
385 | struct xt_match *match = expr->ops->data; | 385 | struct xt_match *match = expr->ops->data; |
386 | struct xt_mtchk_param par; | 386 | struct xt_mtchk_param par; |
387 | size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); | 387 | size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); |
388 | u8 proto = 0; | 388 | u16 proto = 0; |
389 | bool inv = false; | 389 | bool inv = false; |
390 | union nft_entry e = {}; | 390 | union nft_entry e = {}; |
391 | int ret; | 391 | int ret; |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 5bf1e968a728..f8db7064d81c 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -3123,11 +3123,18 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, | |||
3123 | return 0; | 3123 | return 0; |
3124 | } | 3124 | } |
3125 | 3125 | ||
3126 | static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what) | 3126 | static void packet_dev_mclist_delete(struct net_device *dev, |
3127 | struct packet_mclist **mlp) | ||
3127 | { | 3128 | { |
3128 | for ( ; i; i = i->next) { | 3129 | struct packet_mclist *ml; |
3129 | if (i->ifindex == dev->ifindex) | 3130 | |
3130 | packet_dev_mc(dev, i, what); | 3131 | while ((ml = *mlp) != NULL) { |
3132 | if (ml->ifindex == dev->ifindex) { | ||
3133 | packet_dev_mc(dev, ml, -1); | ||
3134 | *mlp = ml->next; | ||
3135 | kfree(ml); | ||
3136 | } else | ||
3137 | mlp = &ml->next; | ||
3131 | } | 3138 | } |
3132 | } | 3139 | } |
3133 | 3140 | ||
@@ -3204,12 +3211,11 @@ static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) | |||
3204 | packet_dev_mc(dev, ml, -1); | 3211 | packet_dev_mc(dev, ml, -1); |
3205 | kfree(ml); | 3212 | kfree(ml); |
3206 | } | 3213 | } |
3207 | rtnl_unlock(); | 3214 | break; |
3208 | return 0; | ||
3209 | } | 3215 | } |
3210 | } | 3216 | } |
3211 | rtnl_unlock(); | 3217 | rtnl_unlock(); |
3212 | return -EADDRNOTAVAIL; | 3218 | return 0; |
3213 | } | 3219 | } |
3214 | 3220 | ||
3215 | static void packet_flush_mclist(struct sock *sk) | 3221 | static void packet_flush_mclist(struct sock *sk) |
@@ -3559,7 +3565,7 @@ static int packet_notifier(struct notifier_block *this, | |||
3559 | switch (msg) { | 3565 | switch (msg) { |
3560 | case NETDEV_UNREGISTER: | 3566 | case NETDEV_UNREGISTER: |
3561 | if (po->mclist) | 3567 | if (po->mclist) |
3562 | packet_dev_mclist(dev, po->mclist, -1); | 3568 | packet_dev_mclist_delete(dev, &po->mclist); |
3563 | /* fallthrough */ | 3569 | /* fallthrough */ |
3564 | 3570 | ||
3565 | case NETDEV_DOWN: | 3571 | case NETDEV_DOWN: |
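packet_dev_mclist_delete() walks the list through a pointer-to-pointer so matching entries can be unlinked and freed in place without tracking a separate "previous" node; the old helper only dropped the device reference and left stale entries linked. The idiom in isolation, as plain C:

    #include <stdlib.h>

    struct node {
            int          ifindex;
            struct node *next;
    };

    /* Remove and free every node matching @ifindex, keeping the rest linked.
     * *pp always points at the "next" field (or list head) that may need
     * rewriting, so no special case for the first element is required. */
    static void delete_matching(struct node **pp, int ifindex)
    {
            struct node *n;

            while ((n = *pp) != NULL) {
                    if (n->ifindex == ifindex) {
                            *pp = n->next;          /* unlink ... */
                            free(n);                /* ... and free */
                    } else {
                            pp = &n->next;          /* only advance when keeping n */
                    }
            }
    }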
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c index 5394b6be46ec..0610efa83d72 100644 --- a/net/rxrpc/ar-error.c +++ b/net/rxrpc/ar-error.c | |||
@@ -42,7 +42,8 @@ void rxrpc_UDP_error_report(struct sock *sk) | |||
42 | _leave("UDP socket errqueue empty"); | 42 | _leave("UDP socket errqueue empty"); |
43 | return; | 43 | return; |
44 | } | 44 | } |
45 | if (!skb->len) { | 45 | serr = SKB_EXT_ERR(skb); |
46 | if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) { | ||
46 | _leave("UDP empty message"); | 47 | _leave("UDP empty message"); |
47 | kfree_skb(skb); | 48 | kfree_skb(skb); |
48 | return; | 49 | return; |
@@ -50,7 +51,6 @@ void rxrpc_UDP_error_report(struct sock *sk) | |||
50 | 51 | ||
51 | rxrpc_new_skb(skb); | 52 | rxrpc_new_skb(skb); |
52 | 53 | ||
53 | serr = SKB_EXT_ERR(skb); | ||
54 | addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset); | 54 | addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset); |
55 | port = serr->port; | 55 | port = serr->port; |
56 | 56 | ||
diff --git a/net/tipc/link.c b/net/tipc/link.c index a4cf364316de..14f09b3cb87c 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -464,10 +464,11 @@ void tipc_link_reset(struct tipc_link *l_ptr) | |||
464 | /* Clean up all queues, except inputq: */ | 464 | /* Clean up all queues, except inputq: */ |
465 | __skb_queue_purge(&l_ptr->outqueue); | 465 | __skb_queue_purge(&l_ptr->outqueue); |
466 | __skb_queue_purge(&l_ptr->deferred_queue); | 466 | __skb_queue_purge(&l_ptr->deferred_queue); |
467 | skb_queue_splice_init(&l_ptr->wakeupq, &l_ptr->inputq); | 467 | if (!owner->inputq) |
468 | if (!skb_queue_empty(&l_ptr->inputq)) | 468 | owner->inputq = &l_ptr->inputq; |
469 | skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq); | ||
470 | if (!skb_queue_empty(owner->inputq)) | ||
469 | owner->action_flags |= TIPC_MSG_EVT; | 471 | owner->action_flags |= TIPC_MSG_EVT; |
470 | owner->inputq = &l_ptr->inputq; | ||
471 | l_ptr->next_out = NULL; | 472 | l_ptr->next_out = NULL; |
472 | l_ptr->unacked_window = 0; | 473 | l_ptr->unacked_window = 0; |
473 | l_ptr->checkpoint = 1; | 474 | l_ptr->checkpoint = 1; |
diff --git a/sound/core/control.c b/sound/core/control.c index 35324a8e83c8..eeb691d1911f 100644 --- a/sound/core/control.c +++ b/sound/core/control.c | |||
@@ -1170,6 +1170,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file, | |||
1170 | 1170 | ||
1171 | if (info->count < 1) | 1171 | if (info->count < 1) |
1172 | return -EINVAL; | 1172 | return -EINVAL; |
1173 | if (!*info->id.name) | ||
1174 | return -EINVAL; | ||
1175 | if (strnlen(info->id.name, sizeof(info->id.name)) >= sizeof(info->id.name)) | ||
1176 | return -EINVAL; | ||
1173 | access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : | 1177 | access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : |
1174 | (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| | 1178 | (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| |
1175 | SNDRV_CTL_ELEM_ACCESS_INACTIVE| | 1179 | SNDRV_CTL_ELEM_ACCESS_INACTIVE| |
diff --git a/sound/firewire/dice/dice-interface.h b/sound/firewire/dice/dice-interface.h index de7602bd69b5..27b044f84c81 100644 --- a/sound/firewire/dice/dice-interface.h +++ b/sound/firewire/dice/dice-interface.h | |||
@@ -299,23 +299,23 @@ | |||
299 | #define RX_ISOCHRONOUS 0x008 | 299 | #define RX_ISOCHRONOUS 0x008 |
300 | 300 | ||
301 | /* | 301 | /* |
302 | * Index of first quadlet to be interpreted; read/write. If > 0, that many | ||
303 | * quadlets at the beginning of each data block will be ignored, and all the | ||
304 | * audio and MIDI quadlets will follow. | ||
305 | */ | ||
306 | #define RX_SEQ_START 0x00c | ||
307 | |||
308 | /* | ||
302 | * The number of audio channels; read-only. There will be one quadlet per | 309 | * The number of audio channels; read-only. There will be one quadlet per |
303 | * channel. | 310 | * channel. |
304 | */ | 311 | */ |
305 | #define RX_NUMBER_AUDIO 0x00c | 312 | #define RX_NUMBER_AUDIO 0x010 |
306 | 313 | ||
307 | /* | 314 | /* |
308 | * The number of MIDI ports, 0-8; read-only. If > 0, there will be one | 315 | * The number of MIDI ports, 0-8; read-only. If > 0, there will be one |
309 | * additional quadlet in each data block, following the audio quadlets. | 316 | * additional quadlet in each data block, following the audio quadlets. |
310 | */ | 317 | */ |
311 | #define RX_NUMBER_MIDI 0x010 | 318 | #define RX_NUMBER_MIDI 0x014 |
312 | |||
313 | /* | ||
314 | * Index of first quadlet to be interpreted; read/write. If > 0, that many | ||
315 | * quadlets at the beginning of each data block will be ignored, and all the | ||
316 | * audio and MIDI quadlets will follow. | ||
317 | */ | ||
318 | #define RX_SEQ_START 0x014 | ||
319 | 319 | ||
320 | /* | 320 | /* |
321 | * Names of all audio channels; read-only. Quadlets are byte-swapped. Names | 321 | * Names of all audio channels; read-only. Quadlets are byte-swapped. Names |
diff --git a/sound/firewire/dice/dice-proc.c b/sound/firewire/dice/dice-proc.c index ecfe20fd4de5..f5c1d1bced59 100644 --- a/sound/firewire/dice/dice-proc.c +++ b/sound/firewire/dice/dice-proc.c | |||
@@ -99,9 +99,9 @@ static void dice_proc_read(struct snd_info_entry *entry, | |||
99 | } tx; | 99 | } tx; |
100 | struct { | 100 | struct { |
101 | u32 iso; | 101 | u32 iso; |
102 | u32 seq_start; | ||
102 | u32 number_audio; | 103 | u32 number_audio; |
103 | u32 number_midi; | 104 | u32 number_midi; |
104 | u32 seq_start; | ||
105 | char names[RX_NAMES_SIZE]; | 105 | char names[RX_NAMES_SIZE]; |
106 | u32 ac3_caps; | 106 | u32 ac3_caps; |
107 | u32 ac3_enable; | 107 | u32 ac3_enable; |
@@ -204,10 +204,10 @@ static void dice_proc_read(struct snd_info_entry *entry, | |||
204 | break; | 204 | break; |
205 | snd_iprintf(buffer, "rx %u:\n", stream); | 205 | snd_iprintf(buffer, "rx %u:\n", stream); |
206 | snd_iprintf(buffer, " iso channel: %d\n", (int)buf.rx.iso); | 206 | snd_iprintf(buffer, " iso channel: %d\n", (int)buf.rx.iso); |
207 | snd_iprintf(buffer, " sequence start: %u\n", buf.rx.seq_start); | ||
207 | snd_iprintf(buffer, " audio channels: %u\n", | 208 | snd_iprintf(buffer, " audio channels: %u\n", |
208 | buf.rx.number_audio); | 209 | buf.rx.number_audio); |
209 | snd_iprintf(buffer, " midi ports: %u\n", buf.rx.number_midi); | 210 | snd_iprintf(buffer, " midi ports: %u\n", buf.rx.number_midi); |
210 | snd_iprintf(buffer, " sequence start: %u\n", buf.rx.seq_start); | ||
211 | if (quadlets >= 68) { | 211 | if (quadlets >= 68) { |
212 | dice_proc_fixup_string(buf.rx.names, RX_NAMES_SIZE); | 212 | dice_proc_fixup_string(buf.rx.names, RX_NAMES_SIZE); |
213 | snd_iprintf(buffer, " names: %s\n", buf.rx.names); | 213 | snd_iprintf(buffer, " names: %s\n", buf.rx.names); |
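
/*
 * Editor's note: compact summary of the rx register block after the two
 * dice hunks above (offsets are taken straight from the new #defines in
 * dice-interface.h; the DEMO_ names are illustrative only):
 */
enum {
	DEMO_RX_ISOCHRONOUS  = 0x008,
	DEMO_RX_SEQ_START    = 0x00c,	/* now ahead of the channel counts */
	DEMO_RX_NUMBER_AUDIO = 0x010,
	DEMO_RX_NUMBER_MIDI  = 0x014,
};
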
diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c index 5f17b77ee152..f0e4d502d604 100644 --- a/sound/firewire/iso-resources.c +++ b/sound/firewire/iso-resources.c | |||
@@ -26,7 +26,7 @@ | |||
26 | int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit) | 26 | int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit) |
27 | { | 27 | { |
28 | r->channels_mask = ~0uLL; | 28 | r->channels_mask = ~0uLL; |
29 | r->unit = fw_unit_get(unit); | 29 | r->unit = unit; |
30 | mutex_init(&r->mutex); | 30 | mutex_init(&r->mutex); |
31 | r->allocated = false; | 31 | r->allocated = false; |
32 | 32 | ||
@@ -42,7 +42,6 @@ void fw_iso_resources_destroy(struct fw_iso_resources *r) | |||
42 | { | 42 | { |
43 | WARN_ON(r->allocated); | 43 | WARN_ON(r->allocated); |
44 | mutex_destroy(&r->mutex); | 44 | mutex_destroy(&r->mutex); |
45 | fw_unit_put(r->unit); | ||
46 | } | 45 | } |
47 | EXPORT_SYMBOL(fw_iso_resources_destroy); | 46 | EXPORT_SYMBOL(fw_iso_resources_destroy); |
48 | 47 | ||
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index a2ce773bdc62..17c2637d842c 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c | |||
@@ -1164,7 +1164,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, | |||
1164 | } | 1164 | } |
1165 | } | 1165 | } |
1166 | 1166 | ||
1167 | if (!bus->no_response_fallback) | 1167 | if (bus->no_response_fallback) |
1168 | return -1; | 1168 | return -1; |
1169 | 1169 | ||
1170 | if (!chip->polling_mode && chip->poll_count < 2) { | 1170 | if (!chip->polling_mode && chip->poll_count < 2) { |
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index b680b4ec6331..fe18071bf93a 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c | |||
@@ -692,7 +692,23 @@ static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx) | |||
692 | { | 692 | { |
693 | unsigned int caps = query_amp_caps(codec, nid, dir); | 693 | unsigned int caps = query_amp_caps(codec, nid, dir); |
694 | int val = get_amp_val_to_activate(codec, nid, dir, caps, false); | 694 | int val = get_amp_val_to_activate(codec, nid, dir, caps, false); |
695 | snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val); | 695 | |
696 | if (get_wcaps(codec, nid) & AC_WCAP_STEREO) | ||
697 | snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val); | ||
698 | else | ||
699 | snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val); | ||
700 | } | ||
701 | |||
702 | /* update the amp, doing it in stereo or mono depending on the NID */ | ||
703 | static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx, | ||
704 | unsigned int mask, unsigned int val) | ||
705 | { | ||
706 | if (get_wcaps(codec, nid) & AC_WCAP_STEREO) | ||
707 | return snd_hda_codec_amp_stereo(codec, nid, dir, idx, | ||
708 | mask, val); | ||
709 | else | ||
710 | return snd_hda_codec_amp_update(codec, nid, 0, dir, idx, | ||
711 | mask, val); | ||
696 | } | 712 | } |
697 | 713 | ||
698 | /* calculate amp value mask we can modify; | 714 | /* calculate amp value mask we can modify; |
@@ -732,7 +748,7 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir, | |||
732 | return; | 748 | return; |
733 | 749 | ||
734 | val &= mask; | 750 | val &= mask; |
735 | snd_hda_codec_amp_stereo(codec, nid, dir, idx, mask, val); | 751 | update_amp(codec, nid, dir, idx, mask, val); |
736 | } | 752 | } |
737 | 753 | ||
738 | static void activate_amp_out(struct hda_codec *codec, struct nid_path *path, | 754 | static void activate_amp_out(struct hda_codec *codec, struct nid_path *path, |
@@ -4424,13 +4440,11 @@ static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix) | |||
4424 | has_amp = nid_has_mute(codec, mix, HDA_INPUT); | 4440 | has_amp = nid_has_mute(codec, mix, HDA_INPUT); |
4425 | for (i = 0; i < nums; i++) { | 4441 | for (i = 0; i < nums; i++) { |
4426 | if (has_amp) | 4442 | if (has_amp) |
4427 | snd_hda_codec_amp_stereo(codec, mix, | 4443 | update_amp(codec, mix, HDA_INPUT, i, |
4428 | HDA_INPUT, i, | 4444 | 0xff, HDA_AMP_MUTE); |
4429 | 0xff, HDA_AMP_MUTE); | ||
4430 | else if (nid_has_volume(codec, conn[i], HDA_OUTPUT)) | 4445 | else if (nid_has_volume(codec, conn[i], HDA_OUTPUT)) |
4431 | snd_hda_codec_amp_stereo(codec, conn[i], | 4446 | update_amp(codec, conn[i], HDA_OUTPUT, 0, |
4432 | HDA_OUTPUT, 0, | 4447 | 0xff, HDA_AMP_MUTE); |
4433 | 0xff, HDA_AMP_MUTE); | ||
4434 | } | 4448 | } |
4435 | } | 4449 | } |
4436 | 4450 | ||
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 1589c9bcce3e..dd2b3d92071f 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c | |||
@@ -393,6 +393,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = { | |||
393 | SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), | 393 | SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), |
394 | SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), | 394 | SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), |
395 | SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), | 395 | SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), |
396 | SND_PCI_QUIRK(0x106b, 0x5600, "MacBookAir 5,2", CS420X_MBP81), | ||
396 | SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42), | 397 | SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42), |
397 | SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE), | 398 | SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE), |
398 | {} /* terminator */ | 399 | {} /* terminator */ |
@@ -584,6 +585,7 @@ static int patch_cs420x(struct hda_codec *codec) | |||
584 | return -ENOMEM; | 585 | return -ENOMEM; |
585 | 586 | ||
586 | spec->gen.automute_hook = cs_automute; | 587 | spec->gen.automute_hook = cs_automute; |
588 | codec->single_adc_amp = 1; | ||
587 | 589 | ||
588 | snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl, | 590 | snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl, |
589 | cs420x_fixups); | 591 | cs420x_fixups); |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index fd3ed18670e9..da67ea8645a6 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -223,6 +223,7 @@ enum { | |||
223 | CXT_PINCFG_LENOVO_TP410, | 223 | CXT_PINCFG_LENOVO_TP410, |
224 | CXT_PINCFG_LEMOTE_A1004, | 224 | CXT_PINCFG_LEMOTE_A1004, |
225 | CXT_PINCFG_LEMOTE_A1205, | 225 | CXT_PINCFG_LEMOTE_A1205, |
226 | CXT_PINCFG_COMPAQ_CQ60, | ||
226 | CXT_FIXUP_STEREO_DMIC, | 227 | CXT_FIXUP_STEREO_DMIC, |
227 | CXT_FIXUP_INC_MIC_BOOST, | 228 | CXT_FIXUP_INC_MIC_BOOST, |
228 | CXT_FIXUP_HEADPHONE_MIC_PIN, | 229 | CXT_FIXUP_HEADPHONE_MIC_PIN, |
@@ -660,6 +661,15 @@ static const struct hda_fixup cxt_fixups[] = { | |||
660 | .type = HDA_FIXUP_PINS, | 661 | .type = HDA_FIXUP_PINS, |
661 | .v.pins = cxt_pincfg_lemote, | 662 | .v.pins = cxt_pincfg_lemote, |
662 | }, | 663 | }, |
664 | [CXT_PINCFG_COMPAQ_CQ60] = { | ||
665 | .type = HDA_FIXUP_PINS, | ||
666 | .v.pins = (const struct hda_pintbl[]) { | ||
667 | /* 0x17 was falsely set up as a mic; it should be 0x1d */ | ||
668 | { 0x17, 0x400001f0 }, | ||
669 | { 0x1d, 0x97a70120 }, | ||
670 | { } | ||
671 | } | ||
672 | }, | ||
663 | [CXT_FIXUP_STEREO_DMIC] = { | 673 | [CXT_FIXUP_STEREO_DMIC] = { |
664 | .type = HDA_FIXUP_FUNC, | 674 | .type = HDA_FIXUP_FUNC, |
665 | .v.func = cxt_fixup_stereo_dmic, | 675 | .v.func = cxt_fixup_stereo_dmic, |
@@ -769,6 +779,7 @@ static const struct hda_model_fixup cxt5047_fixup_models[] = { | |||
769 | }; | 779 | }; |
770 | 780 | ||
771 | static const struct snd_pci_quirk cxt5051_fixups[] = { | 781 | static const struct snd_pci_quirk cxt5051_fixups[] = { |
782 | SND_PCI_QUIRK(0x103c, 0x360b, "Compaq CQ60", CXT_PINCFG_COMPAQ_CQ60), | ||
772 | SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200), | 783 | SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200), |
773 | {} | 784 | {} |
774 | }; | 785 | }; |
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c index 75870c0ea2c9..91eb3aef7f02 100644 --- a/sound/soc/fsl/fsl_spdif.c +++ b/sound/soc/fsl/fsl_spdif.c | |||
@@ -1049,7 +1049,7 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv, | |||
1049 | enum spdif_txrate index, bool round) | 1049 | enum spdif_txrate index, bool round) |
1050 | { | 1050 | { |
1051 | const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 }; | 1051 | const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 }; |
1052 | bool is_sysclk = clk == spdif_priv->sysclk; | 1052 | bool is_sysclk = clk_is_match(clk, spdif_priv->sysclk); |
1053 | u64 rate_ideal, rate_actual, sub; | 1053 | u64 rate_ideal, rate_actual, sub; |
1054 | u32 sysclk_dfmin, sysclk_dfmax; | 1054 | u32 sysclk_dfmin, sysclk_dfmax; |
1055 | u32 txclk_df, sysclk_df, arate; | 1055 | u32 txclk_df, sysclk_df, arate; |
@@ -1143,7 +1143,7 @@ static int fsl_spdif_probe_txclk(struct fsl_spdif_priv *spdif_priv, | |||
1143 | spdif_priv->txclk_src[index], rate[index]); | 1143 | spdif_priv->txclk_src[index], rate[index]); |
1144 | dev_dbg(&pdev->dev, "use txclk df %d for %dHz sample rate\n", | 1144 | dev_dbg(&pdev->dev, "use txclk df %d for %dHz sample rate\n", |
1145 | spdif_priv->txclk_df[index], rate[index]); | 1145 | spdif_priv->txclk_df[index], rate[index]); |
1146 | if (spdif_priv->txclk[index] == spdif_priv->sysclk) | 1146 | if (clk_is_match(spdif_priv->txclk[index], spdif_priv->sysclk)) |
1147 | dev_dbg(&pdev->dev, "use sysclk df %d for %dHz sample rate\n", | 1147 | dev_dbg(&pdev->dev, "use sysclk df %d for %dHz sample rate\n", |
1148 | spdif_priv->sysclk_df[index], rate[index]); | 1148 | spdif_priv->sysclk_df[index], rate[index]); |
1149 | dev_dbg(&pdev->dev, "the best rate for %dHz sample rate is %dHz\n", | 1149 | dev_dbg(&pdev->dev, "the best rate for %dHz sample rate is %dHz\n", |
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c index def7d8260c4e..d19483081f9b 100644 --- a/sound/soc/kirkwood/kirkwood-i2s.c +++ b/sound/soc/kirkwood/kirkwood-i2s.c | |||
@@ -579,7 +579,7 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev) | |||
579 | if (PTR_ERR(priv->extclk) == -EPROBE_DEFER) | 579 | if (PTR_ERR(priv->extclk) == -EPROBE_DEFER) |
580 | return -EPROBE_DEFER; | 580 | return -EPROBE_DEFER; |
581 | } else { | 581 | } else { |
582 | if (priv->extclk == priv->clk) { | 582 | if (clk_is_match(priv->extclk, priv->clk)) { |
583 | devm_clk_put(&pdev->dev, priv->extclk); | 583 | devm_clk_put(&pdev->dev, priv->extclk); |
584 | priv->extclk = ERR_PTR(-EINVAL); | 584 | priv->extclk = ERR_PTR(-EINVAL); |
585 | } else { | 585 | } else { |
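
/*
 * Editor's note: a hedged sketch of why the fsl_spdif and kirkwood hunks
 * above switch from '==' to clk_is_match(): with per-user clk handles, two
 * clk_get() calls for the same hardware clock may return distinct
 * struct clk pointers, so a pointer comparison no longer tells whether
 * they wrap the same clock.  The helper below is illustrative, not code
 * from either driver.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>

static void demo_drop_duplicate_clk(struct device *dev,
				    struct clk *clk, struct clk **extclk)
{
	if (!IS_ERR(*extclk) && clk_is_match(*extclk, clk)) {
		/* both handles refer to the same hardware clock */
		devm_clk_put(dev, *extclk);
		*extclk = ERR_PTR(-EINVAL);
	}
}
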
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 67d476548dcf..07f984d5f516 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h | |||
@@ -1773,6 +1773,36 @@ YAMAHA_DEVICE(0x7010, "UB99"), | |||
1773 | } | 1773 | } |
1774 | } | 1774 | } |
1775 | }, | 1775 | }, |
1776 | { | ||
1777 | USB_DEVICE(0x0582, 0x0159), | ||
1778 | .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { | ||
1779 | /* .vendor_name = "Roland", */ | ||
1780 | /* .product_name = "UA-22", */ | ||
1781 | .ifnum = QUIRK_ANY_INTERFACE, | ||
1782 | .type = QUIRK_COMPOSITE, | ||
1783 | .data = (const struct snd_usb_audio_quirk[]) { | ||
1784 | { | ||
1785 | .ifnum = 0, | ||
1786 | .type = QUIRK_AUDIO_STANDARD_INTERFACE | ||
1787 | }, | ||
1788 | { | ||
1789 | .ifnum = 1, | ||
1790 | .type = QUIRK_AUDIO_STANDARD_INTERFACE | ||
1791 | }, | ||
1792 | { | ||
1793 | .ifnum = 2, | ||
1794 | .type = QUIRK_MIDI_FIXED_ENDPOINT, | ||
1795 | .data = & (const struct snd_usb_midi_endpoint_info) { | ||
1796 | .out_cables = 0x0001, | ||
1797 | .in_cables = 0x0001 | ||
1798 | } | ||
1799 | }, | ||
1800 | { | ||
1801 | .ifnum = -1 | ||
1802 | } | ||
1803 | } | ||
1804 | } | ||
1805 | }, | ||
1776 | /* this catches most recent vendor-specific Roland devices */ | 1806 | /* this catches most recent vendor-specific Roland devices */ |
1777 | { | 1807 | { |
1778 | .match_flags = USB_DEVICE_ID_MATCH_VENDOR | | 1808 | .match_flags = USB_DEVICE_ID_MATCH_VENDOR | |
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile index 3ed7c0476d48..2e2ba2efa0d9 100644 --- a/tools/power/cpupower/Makefile +++ b/tools/power/cpupower/Makefile | |||
@@ -209,7 +209,7 @@ $(OUTPUT)%.o: %.c | |||
209 | 209 | ||
210 | $(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ) | 210 | $(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ) |
211 | $(ECHO) " CC " $@ | 211 | $(ECHO) " CC " $@ |
212 | $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -Wl,-rpath=./ -lrt -lpci -L$(OUTPUT) -o $@ | 212 | $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -lrt -lpci -L$(OUTPUT) -o $@ |
213 | $(QUIET) $(STRIPCMD) $@ | 213 | $(QUIET) $(STRIPCMD) $@ |
214 | 214 | ||
215 | $(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC) | 215 | $(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC) |
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c index e238c9559caf..8d5d1d2ee7c1 100644 --- a/tools/testing/selftests/exec/execveat.c +++ b/tools/testing/selftests/exec/execveat.c | |||
@@ -30,7 +30,7 @@ static int execveat_(int fd, const char *path, char **argv, char **envp, | |||
30 | #ifdef __NR_execveat | 30 | #ifdef __NR_execveat |
31 | return syscall(__NR_execveat, fd, path, argv, envp, flags); | 31 | return syscall(__NR_execveat, fd, path, argv, envp, flags); |
32 | #else | 32 | #else |
33 | errno = -ENOSYS; | 33 | errno = ENOSYS; |
34 | return -1; | 34 | return -1; |
35 | #endif | 35 | #endif |
36 | } | 36 | } |
@@ -234,6 +234,14 @@ static int run_tests(void) | |||
234 | int fd_cloexec = open_or_die("execveat", O_RDONLY|O_CLOEXEC); | 234 | int fd_cloexec = open_or_die("execveat", O_RDONLY|O_CLOEXEC); |
235 | int fd_script_cloexec = open_or_die("script", O_RDONLY|O_CLOEXEC); | 235 | int fd_script_cloexec = open_or_die("script", O_RDONLY|O_CLOEXEC); |
236 | 236 | ||
237 | /* Check if we have execveat at all, and bail early if not */ | ||
238 | errno = 0; | ||
239 | execveat_(-1, NULL, NULL, NULL, 0); | ||
240 | if (errno == ENOSYS) { | ||
241 | printf("[FAIL] ENOSYS calling execveat - no kernel support?\n"); | ||
242 | return 1; | ||
243 | } | ||
244 | |||
237 | /* Change file position to confirm it doesn't affect anything */ | 245 | /* Change file position to confirm it doesn't affect anything */ |
238 | lseek(fd, 10, SEEK_SET); | 246 | lseek(fd, 10, SEEK_SET); |
239 | 247 | ||
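
/*
 * Editor's note: a stand-alone sketch (assumed helper names, not the
 * selftest's own) of the probe the execveat hunk above adds: call the
 * syscall once with harmless arguments and treat ENOSYS as "no kernel
 * support", since any other errno still proves the syscall entry exists.
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

static int demo_have_execveat(void)
{
#ifdef __NR_execveat
	errno = 0;
	syscall(__NR_execveat, -1, "", (char **)0, (char **)0, 0);
	return errno != ENOSYS;	/* ENOENT/EBADF etc. still mean it exists */
#else
	return 0;	/* headers too old to know the syscall number */
#endif
}

int main(void)
{
	if (!demo_have_execveat()) {
		printf("execveat not supported by this kernel\n");
		return 1;
	}
	printf("execveat available\n");
	return 0;
}
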