262 files changed, 4682 insertions, 2842 deletions
diff --git a/Documentation/devicetree/bindings/i2c/i2c-st.txt b/Documentation/devicetree/bindings/i2c/i2c-st.txt
index 437e0db3823c..4c26fda3844a 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-st.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-st.txt
@@ -31,7 +31,7 @@ i2c0: i2c@fed40000 {
 	compatible	= "st,comms-ssc4-i2c";
 	reg		= <0xfed40000 0x110>;
 	interrupts	= <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
-	clocks		= <&CLK_S_ICN_REG_0>;
+	clocks		= <&clk_s_a0_ls CLK_ICN_REG>;
 	clock-names	= "ssc";
 	clock-frequency = <400000>;
 	pinctrl-names	= "default";
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index 9f4e3824e71e..9f41d05be3be 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -47,6 +47,7 @@ dallas,ds3232		Extremely Accurate I²C RTC with Integrated Crystal and SRAM
 dallas,ds4510		CPU Supervisor with Nonvolatile Memory and Programmable I/O
 dallas,ds75		Digital Thermometer and Thermostat
 dlg,da9053		DA9053: flexible system level PMIC with multicore support
+dlg,da9063		DA9063: system PMIC for quad-core application processors
 epson,rx8025		High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
 epson,rx8581		I2C-BUS INTERFACE REAL TIME CLOCK MODULE
 fsl,mag3110		MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
new file mode 100644
index 000000000000..cd29083e16ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
@@ -0,0 +1,41 @@
+* Renesas VMSA-Compatible IOMMU
+
+The IPMMU is an IOMMU implementation compatible with the ARM VMSA page tables.
+It provides address translation for bus masters outside of the CPU, each
+connected to the IPMMU through a port called micro-TLB.
+
+
+Required Properties:
+
+  - compatible: Must contain "renesas,ipmmu-vmsa".
+  - reg: Base address and size of the IPMMU registers.
+  - interrupts: Specifiers for the MMU fault interrupts. For instances that
+    support secure mode two interrupts must be specified, for non-secure and
+    secure mode, in that order. For instances that don't support secure mode a
+    single interrupt must be specified.
+
+  - #iommu-cells: Must be 1.
+
+Each bus master connected to an IPMMU must reference the IPMMU in its device
+node with the following property:
+
+  - iommus: A reference to the IPMMU in two cells. The first cell is a phandle
+    to the IPMMU and the second cell the number of the micro-TLB that the
+    device is connected to.
+
+
+Example: R8A7791 IPMMU-MX and VSP1-D0 bus master
+
+	ipmmu_mx: mmu@fe951000 {
+		compatible = "renesas,ipmmu-vmsa";
+		reg = <0 0xfe951000 0 0x1000>;
+		interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 221 IRQ_TYPE_LEVEL_HIGH>;
+		#iommu-cells = <1>;
+	};
+
+	vsp1@fe928000 {
+		...
+		iommus = <&ipmmu_mx 13>;
+		...
+	};
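
For a driver-side view of the binding above: a one-cell "iommus" specifier can be decoded with the generic OF phandle helpers. A minimal sketch, not the actual ipmmu-vmsa code (the helper name example_get_utlb is made up):

	#include <linux/of.h>

	/* Hypothetical helper: decode "iommus = <&ipmmu_mx 13>" on a bus
	 * master node. With #iommu-cells = <1>, args.args[0] is the
	 * micro-TLB number the master is wired to. */
	static int example_get_utlb(struct device_node *master)
	{
		struct of_phandle_args args;
		int ret;

		ret = of_parse_phandle_with_args(master, "iommus", "#iommu-cells",
						 0, &args);
		if (ret < 0)
			return ret;

		of_node_put(args.np);
		return args.args[0];
	}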
diff --git a/MAINTAINERS b/MAINTAINERS
index 2ebb056cbe0a..2821eaadcdf3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -708,6 +708,16 @@ X:	drivers/iio/*/adjd*
 F:	drivers/staging/iio/*/ad*
 F:	staging/iio/trigger/iio-trig-bfin-timer.c
 
+ANDROID DRIVERS
+M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M:	Arve Hjønnevåg <arve@android.com>
+M:	Riley Andrews <riandrews@android.com>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
+L:	devel@driverdev.osuosl.org
+S:	Supported
+F:	drivers/android/
+F:	drivers/staging/android/
+
 AOA (Apple Onboard Audio) ALSA DRIVER
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linuxppc-dev@lists.ozlabs.org
@@ -1582,6 +1592,7 @@ M:	Will Deacon <will.deacon@arm.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	drivers/iommu/arm-smmu.c
+F:	drivers/iommu/io-pgtable-arm.c
 
 ARM64 PORT (AARCH64 ARCHITECTURE)
 M:	Catalin Marinas <catalin.marinas@arm.com>
@@ -10166,6 +10177,7 @@ USERSPACE I/O (UIO)
 M:	"Hans J. Koch" <hjk@hansjkoch.de>
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
 F:	Documentation/DocBook/uio-howto.tmpl
 F:	drivers/uio/
 F:	include/linux/uio*.h
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 98838a05ba6d..9d0ac091a52a 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -156,6 +156,8 @@ retry:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 6f7e3a68803a..563cb27e37f5 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -161,6 +161,8 @@ good_area:
 
 	if (fault & VM_FAULT_OOM)
 		goto out_of_memory;
+	else if (fault & VM_FAULT_SIGSEGV)
+		goto bad_area;
 	else if (fault & VM_FAULT_SIGBUS)
 		goto do_sigbus;
 
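
The alpha and arc hunks are two instances of the same tree-wide pattern: the new VM_FAULT_SIGSEGV return code from the generic fault handler must be routed to the bad-area path rather than falling through to BUG(). A sketch of the shape each architecture's do_page_fault() ends up with (generic mm API circa this kernel; the labels are per-arch conventions):

	int fault = handle_mm_fault(mm, vma, address, flags);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;	/* let the OOM machinery decide */
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;		/* new: deliver SIGSEGV */
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;		/* deliver SIGBUS */
		BUG();				/* unhandled error bit */
	}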
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 8c1febd7e3f2..c108bb451337 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -166,12 +166,12 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		ethphy1: ethernet-phy@0 {
-			reg = <0>;
+		ethphy1: ethernet-phy@1 {
+			reg = <1>;
 		};
 
-		ethphy2: ethernet-phy@1 {
-			reg = <1>;
+		ethphy2: ethernet-phy@2 {
+			reg = <2>;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 7b4099fcf817..d5c4669224b1 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -17,14 +17,6 @@
 
 	aliases {
 		ethernet0 = &emac;
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
-		serial6 = &uart6;
-		serial7 = &uart7;
 	};
 
 	chosen {
@@ -39,6 +31,14 @@
 				 <&ahb_gates 44>;
 			status = "disabled";
 		};
+
+		framebuffer@1 {
+			compatible = "allwinner,simple-framebuffer", "simple-framebuffer";
+			allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
+			clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
+				 <&ahb_gates 44>, <&ahb_gates 46>;
+			status = "disabled";
+		};
 	};
 
 	cpus {
@@ -438,8 +438,8 @@
 			reg-names = "phy_ctrl", "pmu1", "pmu2";
 			clocks = <&usb_clk 8>;
 			clock-names = "usb_phy";
-			resets = <&usb_clk 1>, <&usb_clk 2>;
-			reset-names = "usb1_reset", "usb2_reset";
+			resets = <&usb_clk 0>, <&usb_clk 1>, <&usb_clk 2>;
+			reset-names = "usb0_reset", "usb1_reset", "usb2_reset";
 			status = "disabled";
 		};
 
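
The extra "usb0_reset" entry matters to the USB PHY driver, which looks reset lines up by name through the reset controller API. A hedged consumer sketch (not the sun4i PHY driver itself; dev is the probing device):

	#include <linux/reset.h>

	/* Sketch: grab and release the reset line added above. */
	struct reset_control *rst = devm_reset_control_get(dev, "usb0_reset");

	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_deassert(rst);	/* take the PHY out of reset */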
diff --git a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
index fe3c559ca6a8..bfa742817690 100644
--- a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
@@ -55,6 +55,12 @@
 	model = "Olimex A10s-Olinuxino Micro";
 	compatible = "olimex,a10s-olinuxino-micro", "allwinner,sun5i-a10s";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart2;
+		serial2 = &uart3;
+	};
+
 	soc@01c00000 {
 		emac: ethernet@01c0b000 {
 			pinctrl-names = "default";
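
The sunxi changes in this and the following patches all implement one policy: serial aliases move out of the SoC .dtsi files and into the board files, so a UART only gets a stable ttyS number if the board actually wires it up. The alias-to-index mapping is resolved by the UART driver, roughly like this (sketch using the generic OF helper; pdev and port are illustrative names):

	/* Sketch: how a serial driver maps the "serialN" alias to ttySN. */
	int id = of_alias_get_id(pdev->dev.of_node, "serial");

	if (id >= 0)
		port->line = id;	/* probes as /dev/ttyS<id> */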
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 1b76667f3182..2e7d8263799d 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -18,10 +18,6 @@
 
 	aliases {
 		ethernet0 = &emac;
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
 	};
 
 	chosen {
@@ -390,8 +386,8 @@
 			reg-names = "phy_ctrl", "pmu1";
 			clocks = <&usb_clk 8>;
 			clock-names = "usb_phy";
-			resets = <&usb_clk 1>;
-			reset-names = "usb1_reset";
+			resets = <&usb_clk 0>, <&usb_clk 1>;
+			reset-names = "usb0_reset", "usb1_reset";
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
index eeed1f236ee8..c7be3abd9fcc 100644
--- a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
+++ b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
@@ -53,6 +53,10 @@
 	model = "HSG H702";
 	compatible = "hsg,h702", "allwinner,sun5i-a13";
 
+	aliases {
+		serial0 = &uart1;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
index 916ee8bb826f..3decefb3c37a 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
@@ -54,6 +54,10 @@
 	model = "Olimex A13-Olinuxino Micro";
 	compatible = "olimex,a13-olinuxino-micro", "allwinner,sun5i-a13";
 
+	aliases {
+		serial0 = &uart1;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
index e31d291d14cb..b421f7fa197b 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
@@ -55,6 +55,10 @@
 	model = "Olimex A13-Olinuxino";
 	compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13";
 
+	aliases {
+		serial0 = &uart1;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index c35217ea1f64..c556688f8b8b 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -16,11 +16,6 @@
 / {
 	interrupt-parent = <&intc>;
 
-	aliases {
-		serial0 = &uart1;
-		serial1 = &uart3;
-	};
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -349,8 +344,8 @@
 			reg-names = "phy_ctrl", "pmu1";
 			clocks = <&usb_clk 8>;
 			clock-names = "usb_phy";
-			resets = <&usb_clk 1>;
-			reset-names = "usb1_reset";
+			resets = <&usb_clk 0>, <&usb_clk 1>;
+			reset-names = "usb0_reset", "usb1_reset";
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index f47156b6572b..1e7e7bcf8307 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -53,12 +53,6 @@
 	interrupt-parent = <&gic>;
 
 	aliases {
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
 		ethernet0 = &gmac;
 	};
 
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
index 1cf1214cc068..bd7b15add697 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
@@ -55,6 +55,12 @@
 	model = "LeMaker Banana Pi";
 	compatible = "lemaker,bananapi", "allwinner,sun7i-a20";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart3;
+		serial2 = &uart7;
+	};
+
 	soc@01c00000 {
 		spi0: spi@01c05000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
index 0e4bfa3b2b85..0bcefcbbb756 100644
--- a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
+++ b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
@@ -19,6 +19,14 @@
 	model = "Merrii A20 Hummingbird";
 	compatible = "merrii,a20-hummingbird", "allwinner,sun7i-a20";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart2;
+		serial2 = &uart3;
+		serial3 = &uart4;
+		serial4 = &uart5;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
index 9d669cdf031d..66cc77707198 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
@@ -20,6 +20,9 @@
 	compatible = "olimex,a20-olinuxino-micro", "allwinner,sun7i-a20";
 
 	aliases {
+		serial0 = &uart0;
+		serial1 = &uart6;
+		serial2 = &uart7;
 		spi0 = &spi1;
 		spi1 = &spi2;
 	};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index e21ce5992d56..89749ce34a84 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -54,14 +54,6 @@
 
 	aliases {
 		ethernet0 = &gmac;
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
-		serial6 = &uart6;
-		serial7 = &uart7;
 	};
 
 	chosen {
diff --git a/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts b/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
index 7f2117ce6985..32ad80804dbb 100644
--- a/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
+++ b/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
@@ -55,6 +55,10 @@
 	model = "Ippo Q8H Dual Core Tablet (v5)";
 	compatible = "ippo,q8h-v5", "allwinner,sun8i-a23";
 
+	aliases {
+		serial0 = &r_uart;
+	};
+
 	chosen {
 		bootargs = "earlyprintk console=ttyS0,115200";
 	};
diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi
index 0746cd1024d7..86584fcf5e32 100644
--- a/arch/arm/boot/dts/sun8i-a23.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23.dtsi
@@ -52,15 +52,6 @@
 / {
 	interrupt-parent = <&gic>;
 
-	aliases {
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &r_uart;
-	};
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts
index 506948f582ee..11ec71072e81 100644
--- a/arch/arm/boot/dts/sun9i-a80-optimus.dts
+++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts
@@ -54,6 +54,11 @@
 	model = "Merrii A80 Optimus Board";
 	compatible = "merrii,a80-optimus", "allwinner,sun9i-a80";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart4;
+	};
+
 	chosen {
 		bootargs = "earlyprintk console=ttyS0,115200";
 	};
diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
index 494714f67b57..9ef4438206a9 100644
--- a/arch/arm/boot/dts/sun9i-a80.dtsi
+++ b/arch/arm/boot/dts/sun9i-a80.dtsi
@@ -52,16 +52,6 @@
 / {
 	interrupt-parent = <&gic>;
 
-	aliases {
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
-		serial6 = &r_uart;
-	};
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 66ce17655bb9..7b0152321b20 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -38,6 +38,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr = HCR_GUEST_MASK;
 }
 
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hcr;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+	vcpu->arch.hcr = hcr;
+}
+
 static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return 1;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 254e0650e48b..04b4ea0b550a 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -125,9 +125,6 @@ struct kvm_vcpu_arch {
 	 * Anything that is not used directly from assembly code goes
 	 * here.
 	 */
-	/* dcache set/way operation pending */
-	int last_pcpu;
-	cpumask_t require_dcache_flush;
 
 	/* Don't run the guest on this vcpu */
 	bool pause;
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 63e0ecc04901..1bca8f8af442 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -44,6 +44,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 
@@ -161,13 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
 }
 
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
-					     unsigned long size,
-					     bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+					       unsigned long size,
+					       bool ipa_uncached)
 {
-	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-		kvm_flush_dcache_to_poc((void *)hva, size);
-
 	/*
 	 * If we are going to insert an instruction page and the icache is
 	 * either VIPT or PIPT, there is a potential problem where the host
@@ -179,18 +177,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
 	 *
 	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
 	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 *
+	 * We need to do this through a kernel mapping (using the
+	 * user-space mapping has proved to be the wrong
+	 * solution). For that, we need to kmap one page at a time,
+	 * and iterate over the range.
 	 */
-	if (icache_is_pipt()) {
-		__cpuc_coherent_user_range(hva, hva + size);
-	} else if (!icache_is_vivt_asid_tagged()) {
+
+	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
+
+	VM_BUG_ON(size & PAGE_MASK);
+
+	if (!need_flush && !icache_is_pipt())
+		goto vipt_cache;
+
+	while (size) {
+		void *va = kmap_atomic_pfn(pfn);
+
+		if (need_flush)
+			kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+		if (icache_is_pipt())
+			__cpuc_coherent_user_range((unsigned long)va,
+						   (unsigned long)va + PAGE_SIZE);
+
+		size -= PAGE_SIZE;
+		pfn++;
+
+		kunmap_atomic(va);
+	}
+
+vipt_cache:
+	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
 		/* any kind of VIPT cache */
 		__flush_icache_all();
 	}
 }
 
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+	void *va = kmap_atomic(pte_page(pte));
+
+	kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+	kunmap_atomic(va);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	unsigned long size = PMD_SIZE;
+	pfn_t pfn = pmd_pfn(pmd);
+
+	while (size) {
+		void *va = kmap_atomic_pfn(pfn);
+
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+		pfn++;
+		size -= PAGE_SIZE;
+
+		kunmap_atomic(va);
+	}
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+}
+
 #define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))
 
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 #endif	/* !__ASSEMBLY__ */
 
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 2d6d91001062..0b0d58a905c4 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -281,15 +281,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->cpu = cpu;
 	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 
-	/*
-	 * Check whether this vcpu requires the cache to be flushed on
-	 * this physical CPU. This is a consequence of doing dcache
-	 * operations by set/way on this vcpu. We do it here to be in
-	 * a non-preemptible section.
-	 */
-	if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
-		flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
-
 	kvm_arm_set_running_vcpu(vcpu);
 }
 
@@ -541,7 +532,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 
 		vcpu->mode = OUTSIDE_GUEST_MODE;
-		vcpu->arch.last_pcpu = smp_processor_id();
 		kvm_guest_exit();
 		trace_kvm_exit(*vcpu_pc(vcpu));
 		/*
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 7928dbdf2102..f3d88dc388bc 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
 	return true;
 }
 
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
 			const struct coproc_params *p,
 			const struct coproc_reg *r)
 {
-	unsigned long val;
-	int cpu;
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);
 
-	cpu = get_cpu();
-
-	cpumask_setall(&vcpu->arch.require_dcache_flush);
-	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-	/* If we were already preempted, take the long way around */
-	if (cpu != vcpu->arch.last_pcpu) {
-		flush_cache_all();
-		goto done;
-	}
-
-	val = *vcpu_reg(vcpu, p->Rt1);
-
-	switch (p->CRm) {
-	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-	case 14:		/* DCCISW */
-		asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
-		break;
-
-	case 10:		/* DCCSW */
-		asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
-		break;
-	}
-
-done:
-	put_cpu();
-
+	kvm_set_way_flush(vcpu);
 	return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set.  If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
+ *
+ * Used by the cpu-specific code.
  */
-static bool access_vm_reg(struct kvm_vcpu *vcpu,
-			  const struct coproc_params *p,
-			  const struct coproc_reg *r)
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+		   const struct coproc_params *p,
+		   const struct coproc_reg *r)
 {
+	bool was_enabled = vcpu_has_cache_enabled(vcpu);
+
 	BUG_ON(!p->is_write);
 
 	vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
 	if (p->is_64bit)
 		vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
 
-	return true;
-}
-
-/*
- * SCTLR accessor. Only called as long as HCR_TVM is set.  If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- *
- * Used by the cpu-specific code.
- */
-bool access_sctlr(struct kvm_vcpu *vcpu,
-		  const struct coproc_params *p,
-		  const struct coproc_reg *r)
-{
-	access_vm_reg(vcpu, p, r);
-
-	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
-		vcpu->arch.hcr &= ~HCR_TVM;
-		stage2_flush_vm(vcpu->kvm);
-	}
-
+	kvm_toggle_cache(vcpu, was_enabled);
 	return true;
 }
 
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 1a44bbe39643..88d24a3a9778 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -153,8 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 #define is64		.is_64 = true
 #define is32		.is_64 = false
 
-bool access_sctlr(struct kvm_vcpu *vcpu,
+bool access_vm_reg(struct kvm_vcpu *vcpu,
 		  const struct coproc_params *p,
 		  const struct coproc_reg *r);
 
 #endif	/* __ARM_KVM_COPROC_LOCAL_H__ */
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index e6f4ae48bda9..a7136757d373 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -34,7 +34,7 @@
 static const struct coproc_reg a15_regs[] = {
 	/* SCTLR: swapped by interrupt.S. */
 	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-			access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
+			access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
 };
 
 static struct kvm_coproc_target_table a15_target_table = {
diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c
index 17fc7cd479d3..b19e46d1b2c0 100644
--- a/arch/arm/kvm/coproc_a7.c
+++ b/arch/arm/kvm/coproc_a7.c
@@ -37,7 +37,7 @@
 static const struct coproc_reg a7_regs[] = {
 	/* SCTLR: swapped by interrupt.S. */
 	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-			access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
+			access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
 };
 
 static struct kvm_coproc_target_table a7_target_table = {
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 1dc9778a00af..136662547ca6 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -58,6 +58,26 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
+/*
+ * D-Cache management functions. They take the page table entries by
+ * value, as they are flushing the cache using the kernel mapping (or
+ * kmap on 32bit).
+ */
+static void kvm_flush_dcache_pte(pte_t pte)
+{
+	__kvm_flush_dcache_pte(pte);
+}
+
+static void kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	__kvm_flush_dcache_pmd(pmd);
+}
+
+static void kvm_flush_dcache_pud(pud_t pud)
+{
+	__kvm_flush_dcache_pud(pud);
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 				  int min, int max)
 {
@@ -119,6 +139,26 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 	put_page(virt_to_page(pmd));
 }
 
+/*
+ * Unmapping vs dcache management:
+ *
+ * If a guest maps certain memory pages as uncached, all writes will
+ * bypass the data cache and go directly to RAM.  However, the CPUs
+ * can still speculate reads (not writes) and fill cache lines with
+ * data.
+ *
+ * Those cache lines will be *clean* cache lines though, so a
+ * clean+invalidate operation is equivalent to an invalidate
+ * operation, because no cache lines are marked dirty.
+ *
+ * Those clean cache lines could be filled prior to an uncached write
+ * by the guest, and the cache coherent IO subsystem would therefore
+ * end up writing old data to disk.
+ *
+ * This is why right after unmapping a page/section and invalidating
+ * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
+ * the IO subsystem will never hit in the cache.
+ */
 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 		       phys_addr_t addr, phys_addr_t end)
 {
@@ -128,9 +168,16 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 	start_pte = pte = pte_offset_kernel(pmd, addr);
 	do {
 		if (!pte_none(*pte)) {
+			pte_t old_pte = *pte;
+
 			kvm_set_pte(pte, __pte(0));
-			put_page(virt_to_page(pte));
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+			/* No need to invalidate the cache for device mappings */
+			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+				kvm_flush_dcache_pte(old_pte);
+
+			put_page(virt_to_page(pte));
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 
@@ -149,8 +196,13 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
 		next = kvm_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
 			if (kvm_pmd_huge(*pmd)) {
+				pmd_t old_pmd = *pmd;
+
 				pmd_clear(pmd);
 				kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+				kvm_flush_dcache_pmd(old_pmd);
+
 				put_page(virt_to_page(pmd));
 			} else {
 				unmap_ptes(kvm, pmd, addr, next);
@@ -173,8 +225,13 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
 		next = kvm_pud_addr_end(addr, end);
 		if (!pud_none(*pud)) {
 			if (pud_huge(*pud)) {
+				pud_t old_pud = *pud;
+
 				pud_clear(pud);
 				kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+				kvm_flush_dcache_pud(old_pud);
+
 				put_page(virt_to_page(pud));
 			} else {
 				unmap_pmds(kvm, pud, addr, next);
@@ -209,10 +266,9 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte)) {
-			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-			kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
-		}
+		if (!pte_none(*pte) &&
+		    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
@@ -226,12 +282,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
 	do {
 		next = kvm_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
-			if (kvm_pmd_huge(*pmd)) {
-				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-				kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
-			} else {
+			if (kvm_pmd_huge(*pmd))
+				kvm_flush_dcache_pmd(*pmd);
+			else
 				stage2_flush_ptes(kvm, pmd, addr, next);
-			}
 		}
 	} while (pmd++, addr = next, addr != end);
 }
@@ -246,12 +300,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
 	do {
 		next = kvm_pud_addr_end(addr, end);
 		if (!pud_none(*pud)) {
-			if (pud_huge(*pud)) {
-				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-				kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
-			} else {
+			if (pud_huge(*pud))
+				kvm_flush_dcache_pud(*pud);
+			else
 				stage2_flush_pmds(kvm, pud, addr, next);
-			}
 		}
 	} while (pud++, addr = next, addr != end);
 }
@@ -278,7 +330,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
  * Go through the stage 2 page tables and invalidate any cache lines
  * backing memory already mapped to the VM.
  */
-void stage2_flush_vm(struct kvm *kvm)
+static void stage2_flush_vm(struct kvm *kvm)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
@@ -905,6 +957,12 @@ static bool kvm_is_device_pfn(unsigned long pfn)
 	return !pfn_valid(pfn);
 }
 
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+				      unsigned long size, bool uncached)
+{
+	__coherent_cache_guest_page(vcpu, pfn, size, uncached);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
@@ -994,8 +1052,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pmd_writable(&new_pmd);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
-					  fault_ipa_uncached);
+		coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
 		pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1003,8 +1060,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pte_writable(&new_pte);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
-					  fault_ipa_uncached);
+		coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
 			pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
 	}
@@ -1411,3 +1467,71 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	unmap_stage2_range(kvm, gpa, size);
 	spin_unlock(&kvm->mmu_lock);
 }
+
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ *
+ * Main problems:
+ * - S/W ops are local to a CPU (not broadcast)
+ * - We have line migration behind our back (speculation)
+ * - System caches don't support S/W at all (damn!)
+ *
+ * In the face of the above, the best we can do is to try and convert
+ * S/W ops to VA ops. Because the guest is not allowed to infer the
+ * S/W to PA mapping, it can only use S/W to nuke the whole cache,
+ * which is a rather good thing for us.
+ *
+ * Also, it is only used when turning caches on/off ("The expected
+ * usage of the cache maintenance instructions that operate by set/way
+ * is associated with the cache maintenance instructions associated
+ * with the powerdown and powerup of caches, if this is required by
+ * the implementation.").
+ *
+ * We use the following policy:
+ *
+ * - If we trap a S/W operation, we enable VM trapping to detect
+ *   caches being turned on/off, and do a full clean.
+ *
+ * - We flush the caches on both caches being turned on and off.
+ *
+ * - Once the caches are enabled, we stop trapping VM ops.
+ */
+void kvm_set_way_flush(struct kvm_vcpu *vcpu)
+{
+	unsigned long hcr = vcpu_get_hcr(vcpu);
+
+	/*
+	 * If this is the first time we do a S/W operation
+	 * (i.e. HCR_TVM not set) flush the whole memory, and set the
+	 * VM trapping.
+	 *
+	 * Otherwise, rely on the VM trapping to wait for the MMU +
+	 * Caches to be turned off. At that point, we'll be able to
+	 * clean the caches again.
+	 */
+	if (!(hcr & HCR_TVM)) {
+		trace_kvm_set_way_flush(*vcpu_pc(vcpu),
+					vcpu_has_cache_enabled(vcpu));
+		stage2_flush_vm(vcpu->kvm);
+		vcpu_set_hcr(vcpu, hcr | HCR_TVM);
+	}
+}
+
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
+{
+	bool now_enabled = vcpu_has_cache_enabled(vcpu);
+
+	/*
+	 * If switching the MMU+caches on, need to invalidate the caches.
+	 * If switching it off, need to clean the caches.
+	 * Clean + invalidate does the trick always.
+	 */
+	if (now_enabled != was_enabled)
+		stage2_flush_vm(vcpu->kvm);
+
+	/* Caches are now on, stop trapping VM ops (until a S/W op) */
+	if (now_enabled)
+		vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
+
+	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
+}
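
Taken together, kvm_set_way_flush() and kvm_toggle_cache() form a small state machine driven by the trap handlers in coproc.c above. An illustrative host-side trace for a guest that cleans by set/way, turns its caches off, and later back on (a walk-through of the code shown here, not additional kernel code; comments mark the guest action):

	/* 1. Guest issues DCCSW -> trap -> access_dcsw() */
	kvm_set_way_flush(vcpu);	/* HCR_TVM was clear: full clean, start trapping VM regs */

	/* 2. Guest clears SCTLR.M/C -> trap -> access_vm_reg() */
	kvm_toggle_cache(vcpu, true);	/* caches went off: clean+invalidate */

	/* 3. Guest sets SCTLR.M/C -> trap -> access_vm_reg() */
	kvm_toggle_cache(vcpu, false);	/* caches back on: flush again, drop HCR_TVM */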
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index b1d640f78623..b6a6e7102201 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -223,6 +223,45 @@ TRACE_EVENT(kvm_hvc,
 		  __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
 
+TRACE_EVENT(kvm_set_way_flush,
+	TP_PROTO(unsigned long vcpu_pc, bool cache),
+	TP_ARGS(vcpu_pc, cache),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,	vcpu_pc	)
+		__field(	bool,		cache	)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_pc	= vcpu_pc;
+		__entry->cache		= cache;
+	),
+
+	TP_printk("S/W flush at 0x%016lx (cache %s)",
+		  __entry->vcpu_pc, __entry->cache ? "on" : "off")
+);
+
+TRACE_EVENT(kvm_toggle_cache,
+	TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
+	TP_ARGS(vcpu_pc, was, now),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,	vcpu_pc	)
+		__field(	bool,		was	)
+		__field(	bool,		now	)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_pc	= vcpu_pc;
+		__entry->was		= was;
+		__entry->now		= now;
+	),
+
+	TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
+		  __entry->vcpu_pc, __entry->was ? "on" : "off",
+		  __entry->now ? "on" : "off")
+);
+
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index caa21e9b8cd9..ccef8806bb58 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -190,6 +190,13 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
 	arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
 
 	/*
+	 * We should switch the PL310 to I/O coherency mode only if
+	 * I/O coherency is actually enabled.
+	 */
+	if (!coherency_available())
+		return;
+
+	/*
 	 * Add the PL310 property "arm,io-coherent". This makes sure the
 	 * outer sync operation is not used, which allows to
 	 * workaround the system erratum that causes deadlocks when
diff --git a/arch/arm/mach-shmobile/board-ape6evm.c b/arch/arm/mach-shmobile/board-ape6evm.c
index 66f67816a844..444f22d370f0 100644
--- a/arch/arm/mach-shmobile/board-ape6evm.c
+++ b/arch/arm/mach-shmobile/board-ape6evm.c
@@ -18,6 +18,8 @@
 #include <linux/gpio_keys.h>
 #include <linux/input.h>
 #include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
 #include <linux/kernel.h>
 #include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>
@@ -273,6 +275,22 @@ static void __init ape6evm_add_standard_devices(void)
 					  sizeof(ape6evm_leds_pdata));
 }
 
+static void __init ape6evm_legacy_init_time(void)
+{
+	/* Do not invoke DT-based timers via clocksource_of_init() */
+}
+
+static void __init ape6evm_legacy_init_irq(void)
+{
+	void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
+	void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
+
+	gic_init(0, 29, gic_dist_base, gic_cpu_base);
+
+	/* Do not invoke DT-based interrupt code via irqchip_init() */
+}
+
+
 static const char *ape6evm_boards_compat_dt[] __initdata = {
 	"renesas,ape6evm",
 	NULL,
@@ -280,7 +298,9 @@ static const char *ape6evm_boards_compat_dt[] __initdata = {
 
 DT_MACHINE_START(APE6EVM_DT, "ape6evm")
 	.init_early	= shmobile_init_delay,
+	.init_irq	= ape6evm_legacy_init_irq,
 	.init_machine	= ape6evm_add_standard_devices,
 	.init_late	= shmobile_init_late,
 	.dt_compat	= ape6evm_boards_compat_dt,
+	.init_time	= ape6evm_legacy_init_time,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index f8197eb6e566..65b128dd4072 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -21,6 +21,8 @@
 #include <linux/input.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
 #include <linux/kernel.h>
 #include <linux/leds.h>
 #include <linux/mfd/tmio.h>
@@ -811,6 +813,16 @@ static void __init lager_init(void)
 					  lager_ksz8041_fixup);
 }
 
+static void __init lager_legacy_init_irq(void)
+{
+	void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
+	void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
+
+	gic_init(0, 29, gic_dist_base, gic_cpu_base);
+
+	/* Do not invoke DT-based interrupt code via irqchip_init() */
+}
+
 static const char * const lager_boards_compat_dt[] __initconst = {
 	"renesas,lager",
 	NULL,
@@ -819,6 +831,7 @@ static const char * const lager_boards_compat_dt[] __initconst = {
 DT_MACHINE_START(LAGER_DT, "lager")
 	.smp		= smp_ops(r8a7790_smp_ops),
 	.init_early	= shmobile_init_delay,
+	.init_irq	= lager_legacy_init_irq,
 	.init_time	= rcar_gen2_timer_init,
 	.init_machine	= lager_init,
 	.init_late	= shmobile_init_late,
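
Both board files use the identical legacy bring-up: ioremap the GIC distributor and CPU interface, then hand them to gic_init() directly instead of letting irqchip_init() probe the DT. For orientation, the entry point they rely on has roughly this shape in <linux/irqchip/arm-gic.h>; treat the exact prototype as an assumption for this tree:

	/* Assumed declarations backing the gic_init(0, 29, dist, cpu) calls above. */
	void gic_init_bases(unsigned int gic_nr, int irq_start,
			    void __iomem *dist_base, void __iomem *cpu_base,
			    u32 percpu_offset, struct device_node *node);

	static inline void gic_init(unsigned int nr, int start,
				    void __iomem *dist, void __iomem *cpu)
	{
		gic_init_bases(nr, start, dist, cpu, 0, NULL);
	}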
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
index 3dd6edd9bd1d..cc9470dfb1ce 100644
--- a/arch/arm/mach-shmobile/setup-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -133,7 +133,9 @@ void __init rcar_gen2_timer_init(void)
 #ifdef CONFIG_COMMON_CLK
 	rcar_gen2_clocks_init(mode);
 #endif
+#ifdef CONFIG_ARCH_SHMOBILE_MULTI
 	clocksource_of_init();
+#endif
 }
 
 struct memory_reserve_config {
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c index f1d027aa7a81..0edf2a6d2bbe 100644 --- a/arch/arm/mach-shmobile/timer.c +++ b/arch/arm/mach-shmobile/timer.c | |||
@@ -70,6 +70,18 @@ void __init shmobile_init_delay(void) | |||
70 | if (!max_freq) | 70 | if (!max_freq) |
71 | return; | 71 | return; |
72 | 72 | ||
73 | #ifdef CONFIG_ARCH_SHMOBILE_LEGACY | ||
74 | /* Non-multiplatform r8a73a4 SoC cannot use arch timer due | ||
75 | * to GIC being initialized from C and arch timer via DT */ | ||
76 | if (of_machine_is_compatible("renesas,r8a73a4")) | ||
77 | has_arch_timer = false; | ||
78 | |||
79 | /* Non-multiplatform r8a7790 SoC cannot use arch timer due | ||
80 | * to GIC being initialized from C and arch timer via DT */ | ||
81 | if (of_machine_is_compatible("renesas,r8a7790")) | ||
82 | has_arch_timer = false; | ||
83 | #endif | ||
84 | |||
73 | if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) { | 85 | if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) { |
74 | if (is_a7_a8_a9) | 86 | if (is_a7_a8_a9) |
75 | shmobile_setup_delay_hz(max_freq, 1, 3); | 87 | shmobile_setup_delay_hz(max_freq, 1, 3); |
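The two checks above are identical in shape; a minimal sketch of the same guard folded into one hypothetical helper (the function name is illustrative, not part of the patch):

#include <linux/of.h>

/* Legacy (non-multiplatform) kernels initialize the GIC from C but the
 * arch timer via DT, so the arch timer cannot be used on these SoCs and
 * the delay loop must be calibrated instead. */
static bool shmobile_arch_timer_usable(void)
{
#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
	if (of_machine_is_compatible("renesas,r8a73a4") ||
	    of_machine_is_compatible("renesas,r8a7790"))
		return false;
#endif
	return IS_ENABLED(CONFIG_ARM_ARCH_TIMER);
}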
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 7864797609b3..a673c7f7e208 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -1940,13 +1940,32 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) | |||
1940 | } | 1940 | } |
1941 | EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); | 1941 | EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); |
1942 | 1942 | ||
1943 | static int __arm_iommu_attach_device(struct device *dev, | ||
1944 | struct dma_iommu_mapping *mapping) | ||
1945 | { | ||
1946 | int err; | ||
1947 | |||
1948 | err = iommu_attach_device(mapping->domain, dev); | ||
1949 | if (err) | ||
1950 | return err; | ||
1951 | |||
1952 | kref_get(&mapping->kref); | ||
1953 | dev->archdata.mapping = mapping; | ||
1954 | |||
1955 | pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); | ||
1956 | return 0; | ||
1957 | } | ||
1958 | |||
1943 | /** | 1959 | /** |
1944 | * arm_iommu_attach_device | 1960 | * arm_iommu_attach_device |
1945 | * @dev: valid struct device pointer | 1961 | * @dev: valid struct device pointer |
1946 | * @mapping: io address space mapping structure (returned from | 1962 | * @mapping: io address space mapping structure (returned from |
1947 | * arm_iommu_create_mapping) | 1963 | * arm_iommu_create_mapping) |
1948 | * | 1964 | * |
1949 | * Attaches specified io address space mapping to the provided device, | 1965 | * Attaches specified io address space mapping to the provided device. |
1966 | * This replaces the dma operations (dma_map_ops pointer) with the | ||
1967 | * IOMMU aware version. | ||
1968 | * | ||
1950 | * More than one client might be attached to the same io address space | 1969 | * More than one client might be attached to the same io address space |
1951 | * mapping. | 1970 | * mapping. |
1952 | */ | 1971 | */ |
@@ -1955,25 +1974,16 @@ int arm_iommu_attach_device(struct device *dev, | |||
1955 | { | 1974 | { |
1956 | int err; | 1975 | int err; |
1957 | 1976 | ||
1958 | err = iommu_attach_device(mapping->domain, dev); | 1977 | err = __arm_iommu_attach_device(dev, mapping); |
1959 | if (err) | 1978 | if (err) |
1960 | return err; | 1979 | return err; |
1961 | 1980 | ||
1962 | kref_get(&mapping->kref); | 1981 | set_dma_ops(dev, &iommu_ops); |
1963 | dev->archdata.mapping = mapping; | ||
1964 | |||
1965 | pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); | ||
1966 | return 0; | 1982 | return 0; |
1967 | } | 1983 | } |
1968 | EXPORT_SYMBOL_GPL(arm_iommu_attach_device); | 1984 | EXPORT_SYMBOL_GPL(arm_iommu_attach_device); |
1969 | 1985 | ||
1970 | /** | 1986 | static void __arm_iommu_detach_device(struct device *dev) |
1971 | * arm_iommu_detach_device | ||
1972 | * @dev: valid struct device pointer | ||
1973 | * | ||
1974 | * Detaches the provided device from a previously attached map. | ||
1975 | */ | ||
1976 | void arm_iommu_detach_device(struct device *dev) | ||
1977 | { | 1987 | { |
1978 | struct dma_iommu_mapping *mapping; | 1988 | struct dma_iommu_mapping *mapping; |
1979 | 1989 | ||
@@ -1989,6 +1999,19 @@ void arm_iommu_detach_device(struct device *dev) | |||
1989 | 1999 | ||
1990 | pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); | 2000 | pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); |
1991 | } | 2001 | } |
2002 | |||
2003 | /** | ||
2004 | * arm_iommu_detach_device | ||
2005 | * @dev: valid struct device pointer | ||
2006 | * | ||
2007 | * Detaches the provided device from a previously attached map. | ||
2008 | * This voids the dma operations (dma_map_ops pointer) | ||
2009 | */ | ||
2010 | void arm_iommu_detach_device(struct device *dev) | ||
2011 | { | ||
2012 | __arm_iommu_detach_device(dev); | ||
2013 | set_dma_ops(dev, NULL); | ||
2014 | } | ||
1992 | EXPORT_SYMBOL_GPL(arm_iommu_detach_device); | 2015 | EXPORT_SYMBOL_GPL(arm_iommu_detach_device); |
1993 | 2016 | ||
1994 | static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) | 2017 | static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) |
@@ -2011,7 +2034,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, | |||
2011 | return false; | 2034 | return false; |
2012 | } | 2035 | } |
2013 | 2036 | ||
2014 | if (arm_iommu_attach_device(dev, mapping)) { | 2037 | if (__arm_iommu_attach_device(dev, mapping)) { |
2015 | pr_warn("Failed to attached device %s to IOMMU_mapping\n", | 2038 | pr_warn("Failed to attached device %s to IOMMU_mapping\n", |
2016 | dev_name(dev)); | 2039 | dev_name(dev)); |
2017 | arm_iommu_release_mapping(mapping); | 2040 | arm_iommu_release_mapping(mapping); |
@@ -2025,7 +2048,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) | |||
2025 | { | 2048 | { |
2026 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | 2049 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; |
2027 | 2050 | ||
2028 | arm_iommu_detach_device(dev); | 2051 | __arm_iommu_detach_device(dev); |
2029 | arm_iommu_release_mapping(mapping); | 2052 | arm_iommu_release_mapping(mapping); |
2030 | } | 2053 | } |
2031 | 2054 | ||
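For context, a hedged usage sketch of the exported API after this split: a driver attaches its device to an IOMMU-backed DMA window, and arm_iommu_attach_device() (unlike the new double-underscore helper) also installs the IOMMU-aware dma_map_ops. The base address, window size, and function name below are invented examples, not taken from this patch:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

static int example_iommu_attach(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int err;

	/* 1 MB of IO virtual address space starting at 0x10000000 */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x10000000, SZ_1M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	err = arm_iommu_attach_device(dev, mapping);
	if (err)
		arm_iommu_release_mapping(mapping);
	return err;
}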
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index b1f9a20a3677..528c3fd2d4c1 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -349,7 +349,6 @@ config ARM64_VA_BITS_42 | |||
349 | 349 | ||
350 | config ARM64_VA_BITS_48 | 350 | config ARM64_VA_BITS_48 |
351 | bool "48-bit" | 351 | bool "48-bit" |
352 | depends on !ARM_SMMU | ||
353 | 352 | ||
354 | endchoice | 353 | endchoice |
355 | 354 | ||
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 865a7e28ea2d..3cb4c856b10d 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h | |||
@@ -45,6 +45,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) | |||
45 | vcpu->arch.hcr_el2 &= ~HCR_RW; | 45 | vcpu->arch.hcr_el2 &= ~HCR_RW; |
46 | } | 46 | } |
47 | 47 | ||
48 | static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu) | ||
49 | { | ||
50 | return vcpu->arch.hcr_el2; | ||
51 | } | ||
52 | |||
53 | static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr) | ||
54 | { | ||
55 | vcpu->arch.hcr_el2 = hcr; | ||
56 | } | ||
57 | |||
48 | static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) | 58 | static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) |
49 | { | 59 | { |
50 | return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc; | 60 | return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc; |
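The accessors exist so code shared between the 32-bit and 64-bit ports can manipulate trap bits without knowing the field name (hcr on arm, hcr_el2 here). A one-line sketch of the intended use, with an illustrative helper name:

static inline void vcpu_stop_trapping_vm_regs(struct kvm_vcpu *vcpu)
{
	/* clear HCR_TVM through the accessors, not hcr_el2 directly */
	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
}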
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 0b7dfdb931df..acd101a9014d 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -116,9 +116,6 @@ struct kvm_vcpu_arch { | |||
116 | * Anything that is not used directly from assembly code goes | 116 | * Anything that is not used directly from assembly code goes |
117 | * here. | 117 | * here. |
118 | */ | 118 | */ |
119 | /* dcache set/way operation pending */ | ||
120 | int last_pcpu; | ||
121 | cpumask_t require_dcache_flush; | ||
122 | 119 | ||
123 | /* Don't run the guest */ | 120 | /* Don't run the guest */ |
124 | bool pause; | 121 | bool pause; |
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 14a74f136272..adcf49547301 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h | |||
@@ -243,24 +243,46 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) | |||
243 | return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101; | 243 | return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101; |
244 | } | 244 | } |
245 | 245 | ||
246 | static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, | 246 | static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, |
247 | unsigned long size, | 247 | unsigned long size, |
248 | bool ipa_uncached) | 248 | bool ipa_uncached) |
249 | { | 249 | { |
250 | void *va = page_address(pfn_to_page(pfn)); | ||
251 | |||
250 | if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) | 252 | if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) |
251 | kvm_flush_dcache_to_poc((void *)hva, size); | 253 | kvm_flush_dcache_to_poc(va, size); |
252 | 254 | ||
253 | if (!icache_is_aliasing()) { /* PIPT */ | 255 | if (!icache_is_aliasing()) { /* PIPT */ |
254 | flush_icache_range(hva, hva + size); | 256 | flush_icache_range((unsigned long)va, |
257 | (unsigned long)va + size); | ||
255 | } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ | 258 | } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ |
256 | /* any kind of VIPT cache */ | 259 | /* any kind of VIPT cache */ |
257 | __flush_icache_all(); | 260 | __flush_icache_all(); |
258 | } | 261 | } |
259 | } | 262 | } |
260 | 263 | ||
264 | static inline void __kvm_flush_dcache_pte(pte_t pte) | ||
265 | { | ||
266 | struct page *page = pte_page(pte); | ||
267 | kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE); | ||
268 | } | ||
269 | |||
270 | static inline void __kvm_flush_dcache_pmd(pmd_t pmd) | ||
271 | { | ||
272 | struct page *page = pmd_page(pmd); | ||
273 | kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE); | ||
274 | } | ||
275 | |||
276 | static inline void __kvm_flush_dcache_pud(pud_t pud) | ||
277 | { | ||
278 | struct page *page = pud_page(pud); | ||
279 | kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE); | ||
280 | } | ||
281 | |||
261 | #define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x)) | 282 | #define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x)) |
262 | 283 | ||
263 | void stage2_flush_vm(struct kvm *kvm); | 284 | void kvm_set_way_flush(struct kvm_vcpu *vcpu); |
285 | void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); | ||
264 | 286 | ||
265 | #endif /* __ASSEMBLY__ */ | 287 | #endif /* __ASSEMBLY__ */ |
266 | #endif /* __ARM64_KVM_MMU_H__ */ | 288 | #endif /* __ARM64_KVM_MMU_H__ */ |
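The new per-level helpers flush by pfn through the kernel linear mapping rather than by a possibly-unmapped userspace hva. A sketch of how a stage-2 teardown path would be expected to call one of them; the companion mmu.c hunk is not shown here, so treat this as an assumption about the caller, not a quotation:

static void stage2_unmap_pte_sketch(pte_t *ptep)
{
	pte_t old_pte = *ptep;

	kvm_set_pte(ptep, __pte(0));		/* unmap first */
	if (pte_present(old_pte))		/* then clean to PoC by pfn */
		__kvm_flush_dcache_pte(old_pte);
}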
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 3d7c2df89946..f31e8bb2bc5b 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -69,68 +69,31 @@ static u32 get_ccsidr(u32 csselr) | |||
69 | return ccsidr; | 69 | return ccsidr; |
70 | } | 70 | } |
71 | 71 | ||
72 | static void do_dc_cisw(u32 val) | 72 | /* |
73 | { | 73 | * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). |
74 | asm volatile("dc cisw, %x0" : : "r" (val)); | 74 | */ |
75 | dsb(ish); | ||
76 | } | ||
77 | |||
78 | static void do_dc_csw(u32 val) | ||
79 | { | ||
80 | asm volatile("dc csw, %x0" : : "r" (val)); | ||
81 | dsb(ish); | ||
82 | } | ||
83 | |||
84 | /* See note at ARM ARM B1.14.4 */ | ||
85 | static bool access_dcsw(struct kvm_vcpu *vcpu, | 75 | static bool access_dcsw(struct kvm_vcpu *vcpu, |
86 | const struct sys_reg_params *p, | 76 | const struct sys_reg_params *p, |
87 | const struct sys_reg_desc *r) | 77 | const struct sys_reg_desc *r) |
88 | { | 78 | { |
89 | unsigned long val; | ||
90 | int cpu; | ||
91 | |||
92 | if (!p->is_write) | 79 | if (!p->is_write) |
93 | return read_from_write_only(vcpu, p); | 80 | return read_from_write_only(vcpu, p); |
94 | 81 | ||
95 | cpu = get_cpu(); | 82 | kvm_set_way_flush(vcpu); |
96 | |||
97 | cpumask_setall(&vcpu->arch.require_dcache_flush); | ||
98 | cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); | ||
99 | |||
100 | /* If we were already preempted, take the long way around */ | ||
101 | if (cpu != vcpu->arch.last_pcpu) { | ||
102 | flush_cache_all(); | ||
103 | goto done; | ||
104 | } | ||
105 | |||
106 | val = *vcpu_reg(vcpu, p->Rt); | ||
107 | |||
108 | switch (p->CRm) { | ||
109 | case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ | ||
110 | case 14: /* DCCISW */ | ||
111 | do_dc_cisw(val); | ||
112 | break; | ||
113 | |||
114 | case 10: /* DCCSW */ | ||
115 | do_dc_csw(val); | ||
116 | break; | ||
117 | } | ||
118 | |||
119 | done: | ||
120 | put_cpu(); | ||
121 | |||
122 | return true; | 83 | return true; |
123 | } | 84 | } |
124 | 85 | ||
125 | /* | 86 | /* |
126 | * Generic accessor for VM registers. Only called as long as HCR_TVM | 87 | * Generic accessor for VM registers. Only called as long as HCR_TVM |
127 | * is set. | 88 | * is set. If the guest enables the MMU, we stop trapping the VM |
89 | * sys_regs and leave it in complete control of the caches. | ||
128 | */ | 90 | */ |
129 | static bool access_vm_reg(struct kvm_vcpu *vcpu, | 91 | static bool access_vm_reg(struct kvm_vcpu *vcpu, |
130 | const struct sys_reg_params *p, | 92 | const struct sys_reg_params *p, |
131 | const struct sys_reg_desc *r) | 93 | const struct sys_reg_desc *r) |
132 | { | 94 | { |
133 | unsigned long val; | 95 | unsigned long val; |
96 | bool was_enabled = vcpu_has_cache_enabled(vcpu); | ||
134 | 97 | ||
135 | BUG_ON(!p->is_write); | 98 | BUG_ON(!p->is_write); |
136 | 99 | ||
@@ -143,25 +106,7 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu, | |||
143 | vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; | 106 | vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; |
144 | } | 107 | } |
145 | 108 | ||
146 | return true; | 109 | kvm_toggle_cache(vcpu, was_enabled); |
147 | } | ||
148 | |||
149 | /* | ||
150 | * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the | ||
151 | * guest enables the MMU, we stop trapping the VM sys_regs and leave | ||
152 | * it in complete control of the caches. | ||
153 | */ | ||
154 | static bool access_sctlr(struct kvm_vcpu *vcpu, | ||
155 | const struct sys_reg_params *p, | ||
156 | const struct sys_reg_desc *r) | ||
157 | { | ||
158 | access_vm_reg(vcpu, p, r); | ||
159 | |||
160 | if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */ | ||
161 | vcpu->arch.hcr_el2 &= ~HCR_TVM; | ||
162 | stage2_flush_vm(vcpu->kvm); | ||
163 | } | ||
164 | |||
165 | return true; | 110 | return true; |
166 | } | 111 | } |
167 | 112 | ||
@@ -377,7 +322,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { | |||
377 | NULL, reset_mpidr, MPIDR_EL1 }, | 322 | NULL, reset_mpidr, MPIDR_EL1 }, |
378 | /* SCTLR_EL1 */ | 323 | /* SCTLR_EL1 */ |
379 | { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), | 324 | { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), |
380 | access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 }, | 325 | access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, |
381 | /* CPACR_EL1 */ | 326 | /* CPACR_EL1 */ |
382 | { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010), | 327 | { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010), |
383 | NULL, reset_val, CPACR_EL1, 0 }, | 328 | NULL, reset_val, CPACR_EL1, 0 }, |
@@ -657,7 +602,7 @@ static const struct sys_reg_desc cp14_64_regs[] = { | |||
657 | * register). | 602 | * register). |
658 | */ | 603 | */ |
659 | static const struct sys_reg_desc cp15_regs[] = { | 604 | static const struct sys_reg_desc cp15_regs[] = { |
660 | { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR }, | 605 | { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR }, |
661 | { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, | 606 | { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, |
662 | { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, | 607 | { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, |
663 | { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, | 608 | { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, |
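access_dcsw() no longer performs the set/way operation at all: it records that a flush is pending via kvm_set_way_flush(), and access_vm_reg() calls kvm_toggle_cache() whenever cacheability may have changed. A sketch of the toggle helper's expected behaviour (its real body lives in the shared arch/arm/kvm/mmu.c, outside the hunks shown here, so this is an assumption rather than a quotation):

void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
	bool now_enabled = vcpu_has_cache_enabled(vcpu);

	/* clean+invalidate the whole stage-2 range on any on/off change */
	if (now_enabled != was_enabled)
		stage2_flush_vm(vcpu->kvm);

	/* caches are on: the guest owns them, stop trapping VM registers */
	if (now_enabled)
		vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
}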
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c index 0eca93327195..d223a8b57c1e 100644 --- a/arch/avr32/mm/fault.c +++ b/arch/avr32/mm/fault.c | |||
@@ -142,6 +142,8 @@ good_area: | |||
142 | if (unlikely(fault & VM_FAULT_ERROR)) { | 142 | if (unlikely(fault & VM_FAULT_ERROR)) { |
143 | if (fault & VM_FAULT_OOM) | 143 | if (fault & VM_FAULT_OOM) |
144 | goto out_of_memory; | 144 | goto out_of_memory; |
145 | else if (fault & VM_FAULT_SIGSEGV) | ||
146 | goto bad_area; | ||
145 | else if (fault & VM_FAULT_SIGBUS) | 147 | else if (fault & VM_FAULT_SIGBUS) |
146 | goto do_sigbus; | 148 | goto do_sigbus; |
147 | BUG(); | 149 | BUG(); |
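The same three-line dispatch is repeated verbatim across the architecture fault handlers that follow: the mm core can now return VM_FAULT_SIGSEGV (for example when stack expansion fails), and each arch must route it to its SIGSEGV path instead of falling through to BUG(). The shape, shown once as a self-contained sketch with the usual per-arch goto targets stubbed out:

static void fault_error_dispatch(unsigned int fault)
{
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;		/* the new case */
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();				/* unknown error bit */
	}
	return;
out_of_memory:
	return;	/* arch-specific OOM handling */
bad_area:
	return;	/* arch-specific: force SIGSEGV (SEGV_MAPERR) */
do_sigbus:
	return;	/* arch-specific: force SIGBUS */
}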
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c index 1790f22e71a2..2686a7aa8ec8 100644 --- a/arch/cris/mm/fault.c +++ b/arch/cris/mm/fault.c | |||
@@ -176,6 +176,8 @@ retry: | |||
176 | if (unlikely(fault & VM_FAULT_ERROR)) { | 176 | if (unlikely(fault & VM_FAULT_ERROR)) { |
177 | if (fault & VM_FAULT_OOM) | 177 | if (fault & VM_FAULT_OOM) |
178 | goto out_of_memory; | 178 | goto out_of_memory; |
179 | else if (fault & VM_FAULT_SIGSEGV) | ||
180 | goto bad_area; | ||
179 | else if (fault & VM_FAULT_SIGBUS) | 181 | else if (fault & VM_FAULT_SIGBUS) |
180 | goto do_sigbus; | 182 | goto do_sigbus; |
181 | BUG(); | 183 | BUG(); |
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c index 9a66372fc7c7..ec4917ddf678 100644 --- a/arch/frv/mm/fault.c +++ b/arch/frv/mm/fault.c | |||
@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear | |||
168 | if (unlikely(fault & VM_FAULT_ERROR)) { | 168 | if (unlikely(fault & VM_FAULT_ERROR)) { |
169 | if (fault & VM_FAULT_OOM) | 169 | if (fault & VM_FAULT_OOM) |
170 | goto out_of_memory; | 170 | goto out_of_memory; |
171 | else if (fault & VM_FAULT_SIGSEGV) | ||
172 | goto bad_area; | ||
171 | else if (fault & VM_FAULT_SIGBUS) | 173 | else if (fault & VM_FAULT_SIGBUS) |
172 | goto do_sigbus; | 174 | goto do_sigbus; |
173 | BUG(); | 175 | BUG(); |
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 7225dad87094..ba5ba7accd0d 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c | |||
@@ -172,6 +172,8 @@ retry: | |||
172 | */ | 172 | */ |
173 | if (fault & VM_FAULT_OOM) { | 173 | if (fault & VM_FAULT_OOM) { |
174 | goto out_of_memory; | 174 | goto out_of_memory; |
175 | } else if (fault & VM_FAULT_SIGSEGV) { | ||
176 | goto bad_area; | ||
175 | } else if (fault & VM_FAULT_SIGBUS) { | 177 | } else if (fault & VM_FAULT_SIGBUS) { |
176 | signal = SIGBUS; | 178 | signal = SIGBUS; |
177 | goto bad_area; | 179 | goto bad_area; |
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c index e9c6a8014bd6..e3d4d4890104 100644 --- a/arch/m32r/mm/fault.c +++ b/arch/m32r/mm/fault.c | |||
@@ -200,6 +200,8 @@ good_area: | |||
200 | if (unlikely(fault & VM_FAULT_ERROR)) { | 200 | if (unlikely(fault & VM_FAULT_ERROR)) { |
201 | if (fault & VM_FAULT_OOM) | 201 | if (fault & VM_FAULT_OOM) |
202 | goto out_of_memory; | 202 | goto out_of_memory; |
203 | else if (fault & VM_FAULT_SIGSEGV) | ||
204 | goto bad_area; | ||
203 | else if (fault & VM_FAULT_SIGBUS) | 205 | else if (fault & VM_FAULT_SIGBUS) |
204 | goto do_sigbus; | 206 | goto do_sigbus; |
205 | BUG(); | 207 | BUG(); |
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 2bd7487440c4..b2f04aee46ec 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c | |||
@@ -145,6 +145,8 @@ good_area: | |||
145 | if (unlikely(fault & VM_FAULT_ERROR)) { | 145 | if (unlikely(fault & VM_FAULT_ERROR)) { |
146 | if (fault & VM_FAULT_OOM) | 146 | if (fault & VM_FAULT_OOM) |
147 | goto out_of_memory; | 147 | goto out_of_memory; |
148 | else if (fault & VM_FAULT_SIGSEGV) | ||
149 | goto map_err; | ||
148 | else if (fault & VM_FAULT_SIGBUS) | 150 | else if (fault & VM_FAULT_SIGBUS) |
149 | goto bus_err; | 151 | goto bus_err; |
150 | BUG(); | 152 | BUG(); |
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c index 332680e5ebf2..2de5dc695a87 100644 --- a/arch/metag/mm/fault.c +++ b/arch/metag/mm/fault.c | |||
@@ -141,6 +141,8 @@ good_area: | |||
141 | if (unlikely(fault & VM_FAULT_ERROR)) { | 141 | if (unlikely(fault & VM_FAULT_ERROR)) { |
142 | if (fault & VM_FAULT_OOM) | 142 | if (fault & VM_FAULT_OOM) |
143 | goto out_of_memory; | 143 | goto out_of_memory; |
144 | else if (fault & VM_FAULT_SIGSEGV) | ||
145 | goto bad_area; | ||
144 | else if (fault & VM_FAULT_SIGBUS) | 146 | else if (fault & VM_FAULT_SIGBUS) |
145 | goto do_sigbus; | 147 | goto do_sigbus; |
146 | BUG(); | 148 | BUG(); |
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index fa4cf52aa7a6..d46a5ebb7570 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c | |||
@@ -224,6 +224,8 @@ good_area: | |||
224 | if (unlikely(fault & VM_FAULT_ERROR)) { | 224 | if (unlikely(fault & VM_FAULT_ERROR)) { |
225 | if (fault & VM_FAULT_OOM) | 225 | if (fault & VM_FAULT_OOM) |
226 | goto out_of_memory; | 226 | goto out_of_memory; |
227 | else if (fault & VM_FAULT_SIGSEGV) | ||
228 | goto bad_area; | ||
227 | else if (fault & VM_FAULT_SIGBUS) | 229 | else if (fault & VM_FAULT_SIGBUS) |
228 | goto do_sigbus; | 230 | goto do_sigbus; |
229 | BUG(); | 231 | BUG(); |
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index becc42bb1849..70ab5d664332 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c | |||
@@ -158,6 +158,8 @@ good_area: | |||
158 | if (unlikely(fault & VM_FAULT_ERROR)) { | 158 | if (unlikely(fault & VM_FAULT_ERROR)) { |
159 | if (fault & VM_FAULT_OOM) | 159 | if (fault & VM_FAULT_OOM) |
160 | goto out_of_memory; | 160 | goto out_of_memory; |
161 | else if (fault & VM_FAULT_SIGSEGV) | ||
162 | goto bad_area; | ||
161 | else if (fault & VM_FAULT_SIGBUS) | 163 | else if (fault & VM_FAULT_SIGBUS) |
162 | goto do_sigbus; | 164 | goto do_sigbus; |
163 | BUG(); | 165 | BUG(); |
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c index 3516cbdf1ee9..0c2cc5d39c8e 100644 --- a/arch/mn10300/mm/fault.c +++ b/arch/mn10300/mm/fault.c | |||
@@ -262,6 +262,8 @@ good_area: | |||
262 | if (unlikely(fault & VM_FAULT_ERROR)) { | 262 | if (unlikely(fault & VM_FAULT_ERROR)) { |
263 | if (fault & VM_FAULT_OOM) | 263 | if (fault & VM_FAULT_OOM) |
264 | goto out_of_memory; | 264 | goto out_of_memory; |
265 | else if (fault & VM_FAULT_SIGSEGV) | ||
266 | goto bad_area; | ||
265 | else if (fault & VM_FAULT_SIGBUS) | 267 | else if (fault & VM_FAULT_SIGBUS) |
266 | goto do_sigbus; | 268 | goto do_sigbus; |
267 | BUG(); | 269 | BUG(); |
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index 15a0bb5fc06d..34429d5a0ccd 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c | |||
@@ -135,6 +135,8 @@ survive: | |||
135 | if (unlikely(fault & VM_FAULT_ERROR)) { | 135 | if (unlikely(fault & VM_FAULT_ERROR)) { |
136 | if (fault & VM_FAULT_OOM) | 136 | if (fault & VM_FAULT_OOM) |
137 | goto out_of_memory; | 137 | goto out_of_memory; |
138 | else if (fault & VM_FAULT_SIGSEGV) | ||
139 | goto bad_area; | ||
138 | else if (fault & VM_FAULT_SIGBUS) | 140 | else if (fault & VM_FAULT_SIGBUS) |
139 | goto do_sigbus; | 141 | goto do_sigbus; |
140 | BUG(); | 142 | BUG(); |
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index 0703acf7d327..230ac20ae794 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c | |||
@@ -171,6 +171,8 @@ good_area: | |||
171 | if (unlikely(fault & VM_FAULT_ERROR)) { | 171 | if (unlikely(fault & VM_FAULT_ERROR)) { |
172 | if (fault & VM_FAULT_OOM) | 172 | if (fault & VM_FAULT_OOM) |
173 | goto out_of_memory; | 173 | goto out_of_memory; |
174 | else if (fault & VM_FAULT_SIGSEGV) | ||
175 | goto bad_area; | ||
174 | else if (fault & VM_FAULT_SIGBUS) | 176 | else if (fault & VM_FAULT_SIGBUS) |
175 | goto do_sigbus; | 177 | goto do_sigbus; |
176 | BUG(); | 178 | BUG(); |
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 3ca9c1131cfe..e5120e653240 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c | |||
@@ -256,6 +256,8 @@ good_area: | |||
256 | */ | 256 | */ |
257 | if (fault & VM_FAULT_OOM) | 257 | if (fault & VM_FAULT_OOM) |
258 | goto out_of_memory; | 258 | goto out_of_memory; |
259 | else if (fault & VM_FAULT_SIGSEGV) | ||
260 | goto bad_area; | ||
259 | else if (fault & VM_FAULT_SIGBUS) | 261 | else if (fault & VM_FAULT_SIGBUS) |
260 | goto bad_area; | 262 | goto bad_area; |
261 | BUG(); | 263 | BUG(); |
diff --git a/arch/powerpc/include/asm/fsl_pamu_stash.h b/arch/powerpc/include/asm/fsl_pamu_stash.h index caa1b21c25cd..38311c98eed9 100644 --- a/arch/powerpc/include/asm/fsl_pamu_stash.h +++ b/arch/powerpc/include/asm/fsl_pamu_stash.h | |||
@@ -32,8 +32,8 @@ enum pamu_stash_target { | |||
32 | */ | 32 | */ |
33 | 33 | ||
34 | struct pamu_stash_attribute { | 34 | struct pamu_stash_attribute { |
35 | u32 cpu; /* cpu number */ | 35 | u32 cpu; /* cpu number */ |
36 | u32 cache; /* cache to stash to: L1,L2,L3 */ | 36 | u32 cache; /* cache to stash to: L1,L2,L3 */ |
37 | }; | 37 | }; |
38 | 38 | ||
39 | #endif /* __FSL_PAMU_STASH_H */ | 39 | #endif /* __FSL_PAMU_STASH_H */ |
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index 5a236f082c78..1b5305d4bdab 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c | |||
@@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, | |||
76 | if (*flt & VM_FAULT_OOM) { | 76 | if (*flt & VM_FAULT_OOM) { |
77 | ret = -ENOMEM; | 77 | ret = -ENOMEM; |
78 | goto out_unlock; | 78 | goto out_unlock; |
79 | } else if (*flt & VM_FAULT_SIGBUS) { | 79 | } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { |
80 | ret = -EFAULT; | 80 | ret = -EFAULT; |
81 | goto out_unlock; | 81 | goto out_unlock; |
82 | } | 82 | } |
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index eb79907f34fa..6154b0a2b063 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c | |||
@@ -437,6 +437,8 @@ good_area: | |||
437 | */ | 437 | */ |
438 | fault = handle_mm_fault(mm, vma, address, flags); | 438 | fault = handle_mm_fault(mm, vma, address, flags); |
439 | if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { | 439 | if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { |
440 | if (fault & VM_FAULT_SIGSEGV) | ||
441 | goto bad_area; | ||
440 | rc = mm_fault_error(regs, address, fault); | 442 | rc = mm_fault_error(regs, address, fault); |
441 | if (rc >= MM_FAULT_RETURN) | 443 | if (rc >= MM_FAULT_RETURN) |
442 | goto bail; | 444 | goto bail; |
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index b700a329c31d..d2de7d5d7574 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c | |||
@@ -304,7 +304,7 @@ int pnv_save_sprs_for_winkle(void) | |||
304 | * all cpus at boot. Get these reg values of current cpu and use the | 304 | * all cpus at boot. Get these reg values of current cpu and use the |
305 | * same across all cpus. | 305 | * same across all cpus. |
306 | */ | 306 | */ |
307 | uint64_t lpcr_val = mfspr(SPRN_LPCR); | 307 | uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1; |
308 | uint64_t hid0_val = mfspr(SPRN_HID0); | 308 | uint64_t hid0_val = mfspr(SPRN_HID0); |
309 | uint64_t hid1_val = mfspr(SPRN_HID1); | 309 | uint64_t hid1_val = mfspr(SPRN_HID1); |
310 | uint64_t hid4_val = mfspr(SPRN_HID4); | 310 | uint64_t hid4_val = mfspr(SPRN_HID4); |
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 5b150f0c5df9..13c6e200b24e 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c | |||
@@ -337,6 +337,7 @@ static inline void disable_surveillance(void) | |||
337 | args.token = rtas_token("set-indicator"); | 337 | args.token = rtas_token("set-indicator"); |
338 | if (args.token == RTAS_UNKNOWN_SERVICE) | 338 | if (args.token == RTAS_UNKNOWN_SERVICE) |
339 | return; | 339 | return; |
340 | args.token = cpu_to_be32(args.token); | ||
340 | args.nargs = cpu_to_be32(3); | 341 | args.nargs = cpu_to_be32(3); |
341 | args.nret = cpu_to_be32(1); | 342 | args.nret = cpu_to_be32(1); |
342 | args.rets = &args.args[3]; | 343 | args.rets = &args.args[3]; |
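The missing conversion left args.token in CPU byte order while every other field was swapped, so on a little-endian kernel the big-endian RTAS firmware saw a garbage token. The invariant, restated as a fragment (the RTAS_UNKNOWN_SERVICE comparison must still happen before the swap, exactly as the fixed code does):

	int token = rtas_token("set-indicator");

	if (token == RTAS_UNKNOWN_SERVICE)	/* compare in CPU order */
		return;

	/* every field struct rtas_args hands to firmware is big-endian */
	args.token = cpu_to_be32(token);
	args.nargs = cpu_to_be32(3);
	args.nret  = cpu_to_be32(1);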
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 811937bb90be..9065d5aa3932 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault) | |||
374 | do_no_context(regs); | 374 | do_no_context(regs); |
375 | else | 375 | else |
376 | pagefault_out_of_memory(); | 376 | pagefault_out_of_memory(); |
377 | } else if (fault & VM_FAULT_SIGSEGV) { | ||
378 | /* Kernel mode? Handle exceptions or die */ | ||
379 | if (!user_mode(regs)) | ||
380 | do_no_context(regs); | ||
381 | else | ||
382 | do_sigsegv(regs, SEGV_MAPERR); | ||
377 | } else if (fault & VM_FAULT_SIGBUS) { | 383 | } else if (fault & VM_FAULT_SIGBUS) { |
378 | /* Kernel mode? Handle exceptions or die */ | 384 | /* Kernel mode? Handle exceptions or die */ |
379 | if (!user_mode(regs)) | 385 | if (!user_mode(regs)) |
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c index 52238983527d..6860beb2a280 100644 --- a/arch/score/mm/fault.c +++ b/arch/score/mm/fault.c | |||
@@ -114,6 +114,8 @@ good_area: | |||
114 | if (unlikely(fault & VM_FAULT_ERROR)) { | 114 | if (unlikely(fault & VM_FAULT_ERROR)) { |
115 | if (fault & VM_FAULT_OOM) | 115 | if (fault & VM_FAULT_OOM) |
116 | goto out_of_memory; | 116 | goto out_of_memory; |
117 | else if (fault & VM_FAULT_SIGSEGV) | ||
118 | goto bad_area; | ||
117 | else if (fault & VM_FAULT_SIGBUS) | 119 | else if (fault & VM_FAULT_SIGBUS) |
118 | goto do_sigbus; | 120 | goto do_sigbus; |
119 | BUG(); | 121 | BUG(); |
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 541dc6101508..a58fec9b55e0 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c | |||
@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, | |||
353 | } else { | 353 | } else { |
354 | if (fault & VM_FAULT_SIGBUS) | 354 | if (fault & VM_FAULT_SIGBUS) |
355 | do_sigbus(regs, error_code, address); | 355 | do_sigbus(regs, error_code, address); |
356 | else if (fault & VM_FAULT_SIGSEGV) | ||
357 | bad_area(regs, error_code, address); | ||
356 | else | 358 | else |
357 | BUG(); | 359 | BUG(); |
358 | } | 360 | } |
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 908e8c17c902..70d817154fe8 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c | |||
@@ -249,6 +249,8 @@ good_area: | |||
249 | if (unlikely(fault & VM_FAULT_ERROR)) { | 249 | if (unlikely(fault & VM_FAULT_ERROR)) { |
250 | if (fault & VM_FAULT_OOM) | 250 | if (fault & VM_FAULT_OOM) |
251 | goto out_of_memory; | 251 | goto out_of_memory; |
252 | else if (fault & VM_FAULT_SIGSEGV) | ||
253 | goto bad_area; | ||
252 | else if (fault & VM_FAULT_SIGBUS) | 254 | else if (fault & VM_FAULT_SIGBUS) |
253 | goto do_sigbus; | 255 | goto do_sigbus; |
254 | BUG(); | 256 | BUG(); |
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 18fcd7167095..479823249429 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c | |||
@@ -446,6 +446,8 @@ good_area: | |||
446 | if (unlikely(fault & VM_FAULT_ERROR)) { | 446 | if (unlikely(fault & VM_FAULT_ERROR)) { |
447 | if (fault & VM_FAULT_OOM) | 447 | if (fault & VM_FAULT_OOM) |
448 | goto out_of_memory; | 448 | goto out_of_memory; |
449 | else if (fault & VM_FAULT_SIGSEGV) | ||
450 | goto bad_area; | ||
449 | else if (fault & VM_FAULT_SIGBUS) | 451 | else if (fault & VM_FAULT_SIGBUS) |
450 | goto do_sigbus; | 452 | goto do_sigbus; |
451 | BUG(); | 453 | BUG(); |
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index 565e25a98334..0f61a73534e6 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c | |||
@@ -442,6 +442,8 @@ good_area: | |||
442 | if (unlikely(fault & VM_FAULT_ERROR)) { | 442 | if (unlikely(fault & VM_FAULT_ERROR)) { |
443 | if (fault & VM_FAULT_OOM) | 443 | if (fault & VM_FAULT_OOM) |
444 | goto out_of_memory; | 444 | goto out_of_memory; |
445 | else if (fault & VM_FAULT_SIGSEGV) | ||
446 | goto bad_area; | ||
445 | else if (fault & VM_FAULT_SIGBUS) | 447 | else if (fault & VM_FAULT_SIGBUS) |
446 | goto do_sigbus; | 448 | goto do_sigbus; |
447 | BUG(); | 449 | BUG(); |
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 5678c3571e7c..209617302df8 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c | |||
@@ -80,6 +80,8 @@ good_area: | |||
80 | if (unlikely(fault & VM_FAULT_ERROR)) { | 80 | if (unlikely(fault & VM_FAULT_ERROR)) { |
81 | if (fault & VM_FAULT_OOM) { | 81 | if (fault & VM_FAULT_OOM) { |
82 | goto out_of_memory; | 82 | goto out_of_memory; |
83 | } else if (fault & VM_FAULT_SIGSEGV) { | ||
84 | goto out; | ||
83 | } else if (fault & VM_FAULT_SIGBUS) { | 85 | } else if (fault & VM_FAULT_SIGBUS) { |
84 | err = -EACCES; | 86 | err = -EACCES; |
85 | goto out; | 87 | goto out; |
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index d999398928bc..ad754b4411f7 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -90,7 +90,7 @@ suffix-$(CONFIG_KERNEL_LZO) := lzo | |||
90 | suffix-$(CONFIG_KERNEL_LZ4) := lz4 | 90 | suffix-$(CONFIG_KERNEL_LZ4) := lz4 |
91 | 91 | ||
92 | RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \ | 92 | RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \ |
93 | perl $(srctree)/arch/x86/tools/calc_run_size.pl) | 93 | $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh) |
94 | quiet_cmd_mkpiggy = MKPIGGY $@ | 94 | quiet_cmd_mkpiggy = MKPIGGY $@ |
95 | cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false ) | 95 | cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false ) |
96 | 96 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 944bf019b74f..498b6d967138 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -2431,6 +2431,7 @@ __init int intel_pmu_init(void) | |||
2431 | break; | 2431 | break; |
2432 | 2432 | ||
2433 | case 55: /* 22nm Atom "Silvermont" */ | 2433 | case 55: /* 22nm Atom "Silvermont" */ |
2434 | case 76: /* 14nm Atom "Airmont" */ | ||
2434 | case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */ | 2435 | case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */ |
2435 | memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, | 2436 | memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, |
2436 | sizeof(hw_cache_event_ids)); | 2437 | sizeof(hw_cache_event_ids)); |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c index 6e434f8e5fc8..c4bb8b8e5017 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c | |||
@@ -142,7 +142,7 @@ static inline u64 rapl_scale(u64 v) | |||
142 | * or use ldexp(count, -32). | 142 | * or use ldexp(count, -32). |
143 | * Watts = Joules/Time delta | 143 | * Watts = Joules/Time delta |
144 | */ | 144 | */ |
145 | return v << (32 - __this_cpu_read(rapl_pmu->hw_unit)); | 145 | return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit); |
146 | } | 146 | } |
147 | 147 | ||
148 | static u64 rapl_event_update(struct perf_event *event) | 148 | static u64 rapl_event_update(struct perf_event *event) |
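The bug in isolation: rapl_pmu is a per-CPU pointer, so the per-CPU access must resolve the pointer first and dereference the member second; the old form applied the per-CPU offset around the member access and read from a bogus address. A minimal sketch with illustrative declarations:

#include <linux/percpu.h>

struct rapl_pmu_sketch {
	int hw_unit;
};

static DEFINE_PER_CPU(struct rapl_pmu_sketch *, rapl_pmu_sketch);

static int read_hw_unit(void)
{
	/* wrong:  __this_cpu_read(rapl_pmu_sketch->hw_unit) */
	return __this_cpu_read(rapl_pmu_sketch)->hw_unit;
}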
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 10b8d3eaaf15..c635b8b49e93 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c | |||
@@ -840,7 +840,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id | |||
840 | box->phys_id = phys_id; | 840 | box->phys_id = phys_id; |
841 | box->pci_dev = pdev; | 841 | box->pci_dev = pdev; |
842 | box->pmu = pmu; | 842 | box->pmu = pmu; |
843 | uncore_box_init(box); | ||
844 | pci_set_drvdata(pdev, box); | 843 | pci_set_drvdata(pdev, box); |
845 | 844 | ||
846 | raw_spin_lock(&uncore_box_lock); | 845 | raw_spin_lock(&uncore_box_lock); |
@@ -1004,10 +1003,8 @@ static int uncore_cpu_starting(int cpu) | |||
1004 | pmu = &type->pmus[j]; | 1003 | pmu = &type->pmus[j]; |
1005 | box = *per_cpu_ptr(pmu->box, cpu); | 1004 | box = *per_cpu_ptr(pmu->box, cpu); |
1006 | /* called by uncore_cpu_init? */ | 1005 | /* called by uncore_cpu_init? */ |
1007 | if (box && box->phys_id >= 0) { | 1006 | if (box && box->phys_id >= 0) |
1008 | uncore_box_init(box); | ||
1009 | continue; | 1007 | continue; |
1010 | } | ||
1011 | 1008 | ||
1012 | for_each_online_cpu(k) { | 1009 | for_each_online_cpu(k) { |
1013 | exist = *per_cpu_ptr(pmu->box, k); | 1010 | exist = *per_cpu_ptr(pmu->box, k); |
@@ -1023,10 +1020,8 @@ static int uncore_cpu_starting(int cpu) | |||
1023 | } | 1020 | } |
1024 | } | 1021 | } |
1025 | 1022 | ||
1026 | if (box) { | 1023 | if (box) |
1027 | box->phys_id = phys_id; | 1024 | box->phys_id = phys_id; |
1028 | uncore_box_init(box); | ||
1029 | } | ||
1030 | } | 1025 | } |
1031 | } | 1026 | } |
1032 | return 0; | 1027 | return 0; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index 863d9b02563e..6c8c1e7e69d8 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h | |||
@@ -257,6 +257,14 @@ static inline int uncore_num_counters(struct intel_uncore_box *box) | |||
257 | return box->pmu->type->num_counters; | 257 | return box->pmu->type->num_counters; |
258 | } | 258 | } |
259 | 259 | ||
260 | static inline void uncore_box_init(struct intel_uncore_box *box) | ||
261 | { | ||
262 | if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { | ||
263 | if (box->pmu->type->ops->init_box) | ||
264 | box->pmu->type->ops->init_box(box); | ||
265 | } | ||
266 | } | ||
267 | |||
260 | static inline void uncore_disable_box(struct intel_uncore_box *box) | 268 | static inline void uncore_disable_box(struct intel_uncore_box *box) |
261 | { | 269 | { |
262 | if (box->pmu->type->ops->disable_box) | 270 | if (box->pmu->type->ops->disable_box) |
@@ -265,6 +273,8 @@ static inline void uncore_disable_box(struct intel_uncore_box *box) | |||
265 | 273 | ||
266 | static inline void uncore_enable_box(struct intel_uncore_box *box) | 274 | static inline void uncore_enable_box(struct intel_uncore_box *box) |
267 | { | 275 | { |
276 | uncore_box_init(box); | ||
277 | |||
268 | if (box->pmu->type->ops->enable_box) | 278 | if (box->pmu->type->ops->enable_box) |
269 | box->pmu->type->ops->enable_box(box); | 279 | box->pmu->type->ops->enable_box(box); |
270 | } | 280 | } |
@@ -287,14 +297,6 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box, | |||
287 | return box->pmu->type->ops->read_counter(box, event); | 297 | return box->pmu->type->ops->read_counter(box, event); |
288 | } | 298 | } |
289 | 299 | ||
290 | static inline void uncore_box_init(struct intel_uncore_box *box) | ||
291 | { | ||
292 | if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { | ||
293 | if (box->pmu->type->ops->init_box) | ||
294 | box->pmu->type->ops->init_box(box); | ||
295 | } | ||
296 | } | ||
297 | |||
298 | static inline bool uncore_box_is_fake(struct intel_uncore_box *box) | 300 | static inline bool uncore_box_is_fake(struct intel_uncore_box *box) |
299 | { | 301 | { |
300 | return (box->phys_id < 0); | 302 | return (box->phys_id < 0); |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 4f0c0b954686..d52dcf0776ea 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -192,6 +192,9 @@ static void recalculate_apic_map(struct kvm *kvm) | |||
192 | u16 cid, lid; | 192 | u16 cid, lid; |
193 | u32 ldr, aid; | 193 | u32 ldr, aid; |
194 | 194 | ||
195 | if (!kvm_apic_present(vcpu)) | ||
196 | continue; | ||
197 | |||
195 | aid = kvm_apic_id(apic); | 198 | aid = kvm_apic_id(apic); |
196 | ldr = kvm_apic_get_reg(apic, APIC_LDR); | 199 | ldr = kvm_apic_get_reg(apic, APIC_LDR); |
197 | cid = apic_cluster_id(new, ldr); | 200 | cid = apic_cluster_id(new, ldr); |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 38dcec403b46..e3ff27a5b634 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -898,6 +898,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, | |||
898 | if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| | 898 | if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| |
899 | VM_FAULT_HWPOISON_LARGE)) | 899 | VM_FAULT_HWPOISON_LARGE)) |
900 | do_sigbus(regs, error_code, address, fault); | 900 | do_sigbus(regs, error_code, address, fault); |
901 | else if (fault & VM_FAULT_SIGSEGV) | ||
902 | bad_area_nosemaphore(regs, error_code, address); | ||
901 | else | 903 | else |
902 | BUG(); | 904 | BUG(); |
903 | } | 905 | } |
diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl deleted file mode 100644 index 23210baade2d..000000000000 --- a/arch/x86/tools/calc_run_size.pl +++ /dev/null | |||
@@ -1,39 +0,0 @@ | |||
1 | #!/usr/bin/perl | ||
2 | # | ||
3 | # Calculate the amount of space needed to run the kernel, including room for | ||
4 | # the .bss and .brk sections. | ||
5 | # | ||
6 | # Usage: | ||
7 | # objdump -h a.out | perl calc_run_size.pl | ||
8 | use strict; | ||
9 | |||
10 | my $mem_size = 0; | ||
11 | my $file_offset = 0; | ||
12 | |||
13 | my $sections=" *[0-9]+ \.(?:bss|brk) +"; | ||
14 | while (<>) { | ||
15 | if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) { | ||
16 | my $size = hex($1); | ||
17 | my $offset = hex($2); | ||
18 | $mem_size += $size; | ||
19 | if ($file_offset == 0) { | ||
20 | $file_offset = $offset; | ||
21 | } elsif ($file_offset != $offset) { | ||
22 | # BFD linker shows the same file offset in ELF. | ||
23 | # Gold linker shows them as consecutive. | ||
24 | next if ($file_offset + $mem_size == $offset + $size); | ||
25 | |||
26 | printf STDERR "file_offset: 0x%lx\n", $file_offset; | ||
27 | printf STDERR "mem_size: 0x%lx\n", $mem_size; | ||
28 | printf STDERR "offset: 0x%lx\n", $offset; | ||
29 | printf STDERR "size: 0x%lx\n", $size; | ||
30 | |||
31 | die ".bss and .brk are non-contiguous\n"; | ||
32 | } | ||
33 | } | ||
34 | } | ||
35 | |||
36 | if ($file_offset == 0) { | ||
37 | die "Never found .bss or .brk file offset\n"; | ||
38 | } | ||
39 | printf("%d\n", $mem_size + $file_offset); | ||
diff --git a/arch/x86/tools/calc_run_size.sh b/arch/x86/tools/calc_run_size.sh new file mode 100644 index 000000000000..1a4c17bb3910 --- /dev/null +++ b/arch/x86/tools/calc_run_size.sh | |||
@@ -0,0 +1,42 @@ | |||
1 | #!/bin/sh | ||
2 | # | ||
3 | # Calculate the amount of space needed to run the kernel, including room for | ||
4 | # the .bss and .brk sections. | ||
5 | # | ||
6 | # Usage: | ||
7 | # objdump -h a.out | sh calc_run_size.sh | ||
8 | |||
9 | NUM='\([0-9a-fA-F]*[ \t]*\)' | ||
10 | OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"$NUM$NUM$NUM$NUM"'.*/\1\4/p') | ||
11 | if [ -z "$OUT" ] ; then | ||
12 | echo "Never found .bss or .brk file offset" >&2 | ||
13 | exit 1 | ||
14 | fi | ||
15 | |||
16 | OUT=$(echo ${OUT# }) | ||
17 | sizeA=$(printf "%d" 0x${OUT%% *}) | ||
18 | OUT=${OUT#* } | ||
19 | offsetA=$(printf "%d" 0x${OUT%% *}) | ||
20 | OUT=${OUT#* } | ||
21 | sizeB=$(printf "%d" 0x${OUT%% *}) | ||
22 | OUT=${OUT#* } | ||
23 | offsetB=$(printf "%d" 0x${OUT%% *}) | ||
24 | |||
25 | run_size=$(( $offsetA + $sizeA + $sizeB )) | ||
26 | |||
27 | # BFD linker shows the same file offset in ELF. | ||
28 | if [ "$offsetA" -ne "$offsetB" ] ; then | ||
29 | # Gold linker shows them as consecutive. | ||
30 | endB=$(( $offsetB + $sizeB )) | ||
31 | if [ "$endB" != "$run_size" ] ; then | ||
32 | printf "sizeA: 0x%x\n" $sizeA >&2 | ||
33 | printf "offsetA: 0x%x\n" $offsetA >&2 | ||
34 | printf "sizeB: 0x%x\n" $sizeB >&2 | ||
35 | printf "offsetB: 0x%x\n" $offsetB >&2 | ||
36 | echo ".bss and .brk are non-contiguous" >&2 | ||
37 | exit 1 | ||
38 | fi | ||
39 | fi | ||
40 | |||
41 | printf "%d\n" $run_size | ||
42 | exit 0 | ||
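A worked check of the script's arithmetic with invented section numbers, in C for consistency with the rest of this series:

#include <stdio.h>

int main(void)
{
	/* pretend .bss is 0x200 bytes at file offset 0x1000 and .brk
	 * is 0x100 bytes; these values are made up for illustration */
	unsigned long sizeA = 0x200, offsetA = 0x1000, sizeB = 0x100;
	unsigned long run_size = offsetA + sizeA + sizeB;	/* 0x1300 */

	/* gold lays .brk out right after .bss, so its end must equal
	 * run_size, which is exactly what the script asserts */
	unsigned long offsetB = offsetA + sizeA;		/* 0x1200 */
	printf("run_size=0x%lx endB=0x%lx\n", run_size, offsetB + sizeB);
	return 0;
}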
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index b57c4f91f487..9e3571a6535c 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c | |||
@@ -117,6 +117,8 @@ good_area: | |||
117 | if (unlikely(fault & VM_FAULT_ERROR)) { | 117 | if (unlikely(fault & VM_FAULT_ERROR)) { |
118 | if (fault & VM_FAULT_OOM) | 118 | if (fault & VM_FAULT_OOM) |
119 | goto out_of_memory; | 119 | goto out_of_memory; |
120 | else if (fault & VM_FAULT_SIGSEGV) | ||
121 | goto bad_area; | ||
120 | else if (fault & VM_FAULT_SIGBUS) | 122 | else if (fault & VM_FAULT_SIGBUS) |
121 | goto do_sigbus; | 123 | goto do_sigbus; |
122 | BUG(); | 124 | BUG(); |
diff --git a/drivers/Kconfig b/drivers/Kconfig index 694d5a70d6ce..c70d6e45dc10 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig | |||
@@ -134,8 +134,6 @@ source "drivers/staging/Kconfig" | |||
134 | 134 | ||
135 | source "drivers/platform/Kconfig" | 135 | source "drivers/platform/Kconfig" |
136 | 136 | ||
137 | source "drivers/soc/Kconfig" | ||
138 | |||
139 | source "drivers/clk/Kconfig" | 137 | source "drivers/clk/Kconfig" |
140 | 138 | ||
141 | source "drivers/hwspinlock/Kconfig" | 139 | source "drivers/hwspinlock/Kconfig" |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3ec85dfce124..8a86b62466f7 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev) | |||
2098 | * If an image has a non-zero parent overlap, get a reference to its | 2098 | * If an image has a non-zero parent overlap, get a reference to its |
2099 | * parent. | 2099 | * parent. |
2100 | * | 2100 | * |
2101 | * We must get the reference before checking for the overlap to | ||
2102 | * coordinate properly with zeroing the parent overlap in | ||
2103 | * rbd_dev_v2_parent_info() when an image gets flattened. We | ||
2104 | * drop it again if there is no overlap. | ||
2105 | * | ||
2106 | * Returns true if the rbd device has a parent with a non-zero | 2101 | * Returns true if the rbd device has a parent with a non-zero |
2107 | * overlap and a reference for it was successfully taken, or | 2102 | * overlap and a reference for it was successfully taken, or |
2108 | * false otherwise. | 2103 | * false otherwise. |
2109 | */ | 2104 | */ |
2110 | static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) | 2105 | static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) |
2111 | { | 2106 | { |
2112 | int counter; | 2107 | int counter = 0; |
2113 | 2108 | ||
2114 | if (!rbd_dev->parent_spec) | 2109 | if (!rbd_dev->parent_spec) |
2115 | return false; | 2110 | return false; |
2116 | 2111 | ||
2117 | counter = atomic_inc_return_safe(&rbd_dev->parent_ref); | 2112 | down_read(&rbd_dev->header_rwsem); |
2118 | if (counter > 0 && rbd_dev->parent_overlap) | 2113 | if (rbd_dev->parent_overlap) |
2119 | return true; | 2114 | counter = atomic_inc_return_safe(&rbd_dev->parent_ref); |
2120 | 2115 | up_read(&rbd_dev->header_rwsem); | |
2121 | /* Image was flattened, but parent is not yet torn down */ | ||
2122 | 2116 | ||
2123 | if (counter < 0) | 2117 | if (counter < 0) |
2124 | rbd_warn(rbd_dev, "parent reference overflow"); | 2118 | rbd_warn(rbd_dev, "parent reference overflow"); |
2125 | 2119 | ||
2126 | return false; | 2120 | return counter > 0; |
2127 | } | 2121 | } |
2128 | 2122 | ||
2129 | /* | 2123 | /* |
@@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
4239 | */ | 4233 | */ |
4240 | if (rbd_dev->parent_overlap) { | 4234 | if (rbd_dev->parent_overlap) { |
4241 | rbd_dev->parent_overlap = 0; | 4235 | rbd_dev->parent_overlap = 0; |
4242 | smp_mb(); | ||
4243 | rbd_dev_parent_put(rbd_dev); | 4236 | rbd_dev_parent_put(rbd_dev); |
4244 | pr_info("%s: clone image has been flattened\n", | 4237 | pr_info("%s: clone image has been flattened\n", |
4245 | rbd_dev->disk->disk_name); | 4238 | rbd_dev->disk->disk_name); |
@@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
4285 | * treat it specially. | 4278 | * treat it specially. |
4286 | */ | 4279 | */ |
4287 | rbd_dev->parent_overlap = overlap; | 4280 | rbd_dev->parent_overlap = overlap; |
4288 | smp_mb(); | ||
4289 | if (!overlap) { | 4281 | if (!overlap) { |
4290 | 4282 | ||
4291 | /* A null parent_spec indicates it's the initial probe */ | 4283 | /* A null parent_spec indicates it's the initial probe */ |
@@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev) | |||
5114 | { | 5106 | { |
5115 | struct rbd_image_header *header; | 5107 | struct rbd_image_header *header; |
5116 | 5108 | ||
5117 | /* Drop parent reference unless it's already been done (or none) */ | 5109 | rbd_dev_parent_put(rbd_dev); |
5118 | |||
5119 | if (rbd_dev->parent_overlap) | ||
5120 | rbd_dev_parent_put(rbd_dev); | ||
5121 | 5110 | ||
5122 | /* Free dynamic fields from the header, then zero it out */ | 5111 | /* Free dynamic fields from the header, then zero it out */ |
5123 | 5112 | ||
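Why the smp_mb() calls could go: rbd_dev_parent_get() now tests parent_overlap and takes parent_ref with header_rwsem held for read, and the flatten path clears parent_overlap with the same rwsem held for write (the refresh path already owns it for write, which this hunk appears to rely on). The lock, not barrier pairing, orders the two sides; a sketch of the writer side under that assumption:

	/* flatten path, simplified; in the real code the surrounding
	 * refresh already holds header_rwsem for write */
	down_write(&rbd_dev->header_rwsem);
	rbd_dev->parent_overlap = 0;	/* readers hold the rwsem for read,
					 * so they never see this mid-update */
	up_write(&rbd_dev->header_rwsem);
	rbd_dev_parent_put(rbd_dev);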
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 633532a2e7ec..25bc47f3c1cf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include "kfd_priv.h" | 27 | #include "kfd_priv.h" |
28 | #include "kfd_device_queue_manager.h" | 28 | #include "kfd_device_queue_manager.h" |
29 | #include "kfd_pm4_headers.h" | ||
29 | 30 | ||
30 | #define MQD_SIZE_ALIGNED 768 | 31 | #define MQD_SIZE_ALIGNED 768 |
31 | 32 | ||
@@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, | |||
169 | kfd->shared_resources = *gpu_resources; | 170 | kfd->shared_resources = *gpu_resources; |
170 | 171 | ||
171 | /* calculate max size of mqds needed for queues */ | 172 | /* calculate max size of mqds needed for queues */ |
172 | size = max_num_of_processes * | 173 | size = max_num_of_queues_per_device * |
173 | max_num_of_queues_per_process * | 174 | kfd->device_info->mqd_size_aligned; |
174 | kfd->device_info->mqd_size_aligned; | ||
175 | 175 | ||
176 | /* add another 512KB for all other allocations on gart */ | 176 | /* add another 512KB for all other allocations on gart */ |
177 | size += 512 * 1024; | 177 | size += 512 * 1024; |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 30c8fda9622e..0d8694f015c1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | |||
@@ -183,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, | |||
183 | 183 | ||
184 | mutex_lock(&dqm->lock); | 184 | mutex_lock(&dqm->lock); |
185 | 185 | ||
186 | if (dqm->total_queue_count >= max_num_of_queues_per_device) { | ||
187 | pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", | ||
188 | dqm->total_queue_count); | ||
189 | mutex_unlock(&dqm->lock); | ||
190 | return -EPERM; | ||
191 | } | ||
192 | |||
186 | if (list_empty(&qpd->queues_list)) { | 193 | if (list_empty(&qpd->queues_list)) { |
187 | retval = allocate_vmid(dqm, qpd, q); | 194 | retval = allocate_vmid(dqm, qpd, q); |
188 | if (retval != 0) { | 195 | if (retval != 0) { |
@@ -207,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, | |||
207 | list_add(&q->list, &qpd->queues_list); | 214 | list_add(&q->list, &qpd->queues_list); |
208 | dqm->queue_count++; | 215 | dqm->queue_count++; |
209 | 216 | ||
217 | /* | ||
218 | * Unconditionally increment this counter, regardless of the queue's | ||
219 | * type or whether the queue is active. | ||
220 | */ | ||
221 | dqm->total_queue_count++; | ||
222 | pr_debug("Total of %d queues are accountable so far\n", | ||
223 | dqm->total_queue_count); | ||
224 | |||
210 | mutex_unlock(&dqm->lock); | 225 | mutex_unlock(&dqm->lock); |
211 | return 0; | 226 | return 0; |
212 | } | 227 | } |
@@ -326,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm, | |||
326 | if (list_empty(&qpd->queues_list)) | 341 | if (list_empty(&qpd->queues_list)) |
327 | deallocate_vmid(dqm, qpd, q); | 342 | deallocate_vmid(dqm, qpd, q); |
328 | dqm->queue_count--; | 343 | dqm->queue_count--; |
344 | |||
345 | /* | ||
346 | * Unconditionally decrement this counter, regardless of the queue's | ||
347 | * type | ||
348 | */ | ||
349 | dqm->total_queue_count--; | ||
350 | pr_debug("Total of %d queues are accountable so far\n", | ||
351 | dqm->total_queue_count); | ||
352 | |||
329 | out: | 353 | out: |
330 | mutex_unlock(&dqm->lock); | 354 | mutex_unlock(&dqm->lock); |
331 | return retval; | 355 | return retval; |
@@ -541,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm, | |||
541 | 565 | ||
542 | for (i = 0; i < pipes_num; i++) { | 566 | for (i = 0; i < pipes_num; i++) { |
543 | inx = i + first_pipe; | 567 | inx = i + first_pipe; |
568 | /* | ||
569 | * HPD buffer on GTT is allocated by amdkfd, no need to waste | ||
570 | * space in GTT for pipelines we don't initialize | ||
571 | */ | ||
544 | pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES; | 572 | pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES; |
545 | pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr); | 573 | pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr); |
546 | /* = log2(bytes/4)-1 */ | 574 | /* = log2(bytes/4)-1 */ |
547 | kfd2kgd->init_pipeline(dqm->dev->kgd, i, | 575 | kfd2kgd->init_pipeline(dqm->dev->kgd, inx, |
548 | CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr); | 576 | CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr); |
549 | } | 577 | } |
550 | 578 | ||
@@ -560,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm) | |||
560 | 588 | ||
561 | pr_debug("kfd: In %s\n", __func__); | 589 | pr_debug("kfd: In %s\n", __func__); |
562 | 590 | ||
563 | retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE); | 591 | retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm)); |
564 | if (retval != 0) | 592 | if (retval != 0) |
565 | return retval; | 593 | return retval; |
566 | 594 | ||
@@ -752,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, | |||
752 | pr_debug("kfd: In func %s\n", __func__); | 780 | pr_debug("kfd: In func %s\n", __func__); |
753 | 781 | ||
754 | mutex_lock(&dqm->lock); | 782 | mutex_lock(&dqm->lock); |
783 | if (dqm->total_queue_count >= max_num_of_queues_per_device) { | ||
784 | pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n", | ||
785 | dqm->total_queue_count); | ||
786 | mutex_unlock(&dqm->lock); | ||
787 | return -EPERM; | ||
788 | } | ||
789 | |||
790 | /* | ||
791 | * Unconditionally increment this counter, regardless of the queue's | ||
792 | * type or whether the queue is active. | ||
793 | */ | ||
794 | dqm->total_queue_count++; | ||
795 | pr_debug("Total of %d queues are accountable so far\n", | ||
796 | dqm->total_queue_count); | ||
797 | |||
755 | list_add(&kq->list, &qpd->priv_queue_list); | 798 | list_add(&kq->list, &qpd->priv_queue_list); |
756 | dqm->queue_count++; | 799 | dqm->queue_count++; |
757 | qpd->is_debug = true; | 800 | qpd->is_debug = true; |
@@ -775,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, | |||
775 | dqm->queue_count--; | 818 | dqm->queue_count--; |
776 | qpd->is_debug = false; | 819 | qpd->is_debug = false; |
777 | execute_queues_cpsch(dqm, false); | 820 | execute_queues_cpsch(dqm, false); |
821 | /* | ||
822 | * Unconditionally decrement this counter, regardless of the queue's | ||
823 | * type. | ||
824 | */ | ||
825 | dqm->total_queue_count++; | ||
826 | pr_debug("Total of %d queues are accountable so far\n", | ||
827 | dqm->total_queue_count); | ||
778 | mutex_unlock(&dqm->lock); | 828 | mutex_unlock(&dqm->lock); |
779 | } | 829 | } |
780 | 830 | ||
@@ -793,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, | |||
793 | 843 | ||
794 | mutex_lock(&dqm->lock); | 844 | mutex_lock(&dqm->lock); |
795 | 845 | ||
846 | if (dqm->total_queue_count >= max_num_of_queues_per_device) { | ||
847 | pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", | ||
848 | dqm->total_queue_count); | ||
849 | retval = -EPERM; | ||
850 | goto out; | ||
851 | } | ||
852 | |||
796 | mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP); | 853 | mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP); |
797 | if (mqd == NULL) { | 854 | if (mqd == NULL) { |
798 | mutex_unlock(&dqm->lock); | 855 | mutex_unlock(&dqm->lock); |
@@ -810,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, | |||
810 | retval = execute_queues_cpsch(dqm, false); | 867 | retval = execute_queues_cpsch(dqm, false); |
811 | } | 868 | } |
812 | 869 | ||
870 | /* | ||
871 | * Unconditionally increment this counter, regardless of the queue's | ||
872 | * type or whether the queue is active. | ||
873 | */ | ||
874 | dqm->total_queue_count++; | ||
875 | |||
876 | pr_debug("Total of %d queues are accountable so far\n", | ||
877 | dqm->total_queue_count); | ||
878 | |||
813 | out: | 879 | out: |
814 | mutex_unlock(&dqm->lock); | 880 | mutex_unlock(&dqm->lock); |
815 | return retval; | 881 | return retval; |
@@ -930,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, | |||
930 | 996 | ||
931 | mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); | 997 | mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); |
932 | 998 | ||
999 | /* | ||
1000 | * Unconditionally decrement this counter, regardless of the queue's | ||
1001 | * type. | ||
1002 | */ | ||
1003 | dqm->total_queue_count--; | ||
1004 | pr_debug("Total of %d queues are accountable so far\n", | ||
1005 | dqm->total_queue_count); | ||
1006 | |||
933 | mutex_unlock(&dqm->lock); | 1007 | mutex_unlock(&dqm->lock); |
934 | 1008 | ||
935 | return 0; | 1009 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index c3f189e8ae35..52035bf0c1cb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | |||
@@ -130,6 +130,7 @@ struct device_queue_manager { | |||
130 | struct list_head queues; | 130 | struct list_head queues; |
131 | unsigned int processes_count; | 131 | unsigned int processes_count; |
132 | unsigned int queue_count; | 132 | unsigned int queue_count; |
133 | unsigned int total_queue_count; | ||
133 | unsigned int next_pipe_to_allocate; | 134 | unsigned int next_pipe_to_allocate; |
134 | unsigned int *allocated_queues; | 135 | unsigned int *allocated_queues; |
135 | unsigned int vmid_bitmap; | 136 | unsigned int vmid_bitmap; |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index 95d5af138e6e..a8be6df85347 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c | |||
@@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444); | |||
50 | MODULE_PARM_DESC(sched_policy, | 50 | MODULE_PARM_DESC(sched_policy, |
51 | "Kernel cmdline parameter that defines the amdkfd scheduling policy"); | 51 | "Kernel cmdline parameter that defines the amdkfd scheduling policy"); |
52 | 52 | ||
53 | int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT; | 53 | int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT; |
54 | module_param(max_num_of_processes, int, 0444); | 54 | module_param(max_num_of_queues_per_device, int, 0444); |
55 | MODULE_PARM_DESC(max_num_of_processes, | 55 | MODULE_PARM_DESC(max_num_of_queues_per_device, |
56 | "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes"); | 56 | "Maximum number of supported queues per device (1 = Minimum, 4096 = default)"); |
57 | |||
58 | int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT; | ||
59 | module_param(max_num_of_queues_per_process, int, 0444); | ||
60 | MODULE_PARM_DESC(max_num_of_queues_per_process, | ||
61 | "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process"); | ||
62 | 57 | ||
63 | bool kgd2kfd_init(unsigned interface_version, | 58 | bool kgd2kfd_init(unsigned interface_version, |
64 | const struct kfd2kgd_calls *f2g, | 59 | const struct kfd2kgd_calls *f2g, |
@@ -100,16 +95,10 @@ static int __init kfd_module_init(void) | |||
100 | } | 95 | } |
101 | 96 | ||
102 | /* Verify module parameters */ | 97 | /* Verify module parameters */ |
103 | if ((max_num_of_processes < 0) || | 98 | if ((max_num_of_queues_per_device < 0) || |
104 | (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) { | 99 | (max_num_of_queues_per_device > |
105 | pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n"); | 100 | KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) { |
106 | return -1; | 101 | pr_err("kfd: max_num_of_queues_per_device must be between 0 and KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n"); |
107 | } | ||
108 | |||
109 | if ((max_num_of_queues_per_process < 0) || | ||
110 | (max_num_of_queues_per_process > | ||
111 | KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) { | ||
112 | pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n"); | ||
113 | return -1; | 102 | return -1; |
114 | } | 103 | } |
115 | 104 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c index 4c25ef504f79..6cfe7f1f18cf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | |||
@@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex); | |||
30 | 30 | ||
31 | int kfd_pasid_init(void) | 31 | int kfd_pasid_init(void) |
32 | { | 32 | { |
33 | pasid_limit = max_num_of_processes; | 33 | pasid_limit = KFD_MAX_NUM_OF_PROCESSES; |
34 | 34 | ||
35 | pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); | 35 | pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); |
36 | if (!pasid_bitmap) | 36 | if (!pasid_bitmap) |
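
With the max_num_of_processes parameter gone, the PASID bitmap size is a compile-time constant. A standalone sketch of the allocation arithmetic (userspace C, assuming an LP64 machine where sizeof(long) == 8):

	#include <stdio.h>

	#define BITS_PER_LONG (8 * sizeof(long))
	#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	int main(void)
	{
		/* 512 PASIDs -> 512/64 = 8 longs -> 64 zeroed bytes from kcalloc */
		printf("%zu longs, %zu bytes\n",
		       BITS_TO_LONGS(512), BITS_TO_LONGS(512) * sizeof(long));
		return 0;
	}
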
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index b3dc13c83169..96dc10e8904a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h | |||
@@ -52,20 +52,19 @@ | |||
52 | #define kfd_alloc_struct(ptr_to_struct) \ | 52 | #define kfd_alloc_struct(ptr_to_struct) \ |
53 | ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL)) | 53 | ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL)) |
54 | 54 | ||
55 | /* Kernel module parameter to specify maximum number of supported processes */ | ||
56 | extern int max_num_of_processes; | ||
57 | |||
58 | #define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32 | ||
59 | #define KFD_MAX_NUM_OF_PROCESSES 512 | 55 | #define KFD_MAX_NUM_OF_PROCESSES 512 |
56 | #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 | ||
60 | 57 | ||
61 | /* | 58 | /* |
62 | * Kernel module parameter to specify maximum number of supported queues | 59 | * Kernel module parameter to specify maximum number of supported queues per |
63 | * per process | 60 | * device |
64 | */ | 61 | */ |
65 | extern int max_num_of_queues_per_process; | 62 | extern int max_num_of_queues_per_device; |
66 | 63 | ||
67 | #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128 | 64 | #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096 |
68 | #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 | 65 | #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \ |
66 | (KFD_MAX_NUM_OF_PROCESSES * \ | ||
67 | KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) | ||
69 | 68 | ||
70 | #define KFD_KERNEL_QUEUE_SIZE 2048 | 69 | #define KFD_KERNEL_QUEUE_SIZE 2048 |
71 | 70 | ||
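
The new ceiling is just the product of the two per-process constants above; spelling out the arithmetic:

	/* 512 processes x 1024 queues per process = 524288 queues per device,
	 * the hard upper bound the module parameter is validated against */
	_Static_assert(512 * 1024 == 524288,
		       "KFD_MAX_NUM_OF_QUEUES_PER_DEVICE == 512 * 1024");
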
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 47526780d736..f37cf5efe642 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | |||
@@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm, | |||
54 | pr_debug("kfd: in %s\n", __func__); | 54 | pr_debug("kfd: in %s\n", __func__); |
55 | 55 | ||
56 | found = find_first_zero_bit(pqm->queue_slot_bitmap, | 56 | found = find_first_zero_bit(pqm->queue_slot_bitmap, |
57 | max_num_of_queues_per_process); | 57 | KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); |
58 | 58 | ||
59 | pr_debug("kfd: the new slot id %lu\n", found); | 59 | pr_debug("kfd: the new slot id %lu\n", found); |
60 | 60 | ||
61 | if (found >= max_num_of_queues_per_process) { | 61 | if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) { |
62 | pr_info("amdkfd: Can not open more queues for process with pasid %d\n", | 62 | pr_info("amdkfd: Can not open more queues for process with pasid %d\n", |
63 | pqm->process->pasid); | 63 | pqm->process->pasid); |
64 | return -ENOMEM; | 64 | return -ENOMEM; |
@@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p) | |||
76 | 76 | ||
77 | INIT_LIST_HEAD(&pqm->queues); | 77 | INIT_LIST_HEAD(&pqm->queues); |
78 | pqm->queue_slot_bitmap = | 78 | pqm->queue_slot_bitmap = |
79 | kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process, | 79 | kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, |
80 | BITS_PER_BYTE), GFP_KERNEL); | 80 | BITS_PER_BYTE), GFP_KERNEL); |
81 | if (pqm->queue_slot_bitmap == NULL) | 81 | if (pqm->queue_slot_bitmap == NULL) |
82 | return -ENOMEM; | 82 | return -ENOMEM; |
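
The per-process slot bitmap is likewise sized from the constant now: DIV_ROUND_UP(1024, 8) = 128 bytes, one bit per possible queue id. A condensed sketch of the claim step that pairs with the find_first_zero_bit() search in the next hunk (the set_bit() call is assumed from the surrounding driver code; it is not part of this diff):

	found = find_first_zero_bit(pqm->queue_slot_bitmap,
				    KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
	if (found < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
		set_bit(found, pqm->queue_slot_bitmap);  /* slot 'found' is taken */
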
@@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, | |||
203 | pqn->kq = NULL; | 203 | pqn->kq = NULL; |
204 | retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd, | 204 | retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd, |
205 | &q->properties.vmid); | 205 | &q->properties.vmid); |
206 | pr_debug("DQM returned %d for create_queue\n", retval); | ||
206 | print_queue(q); | 207 | print_queue(q); |
207 | break; | 208 | break; |
208 | case KFD_QUEUE_TYPE_DIQ: | 209 | case KFD_QUEUE_TYPE_DIQ: |
@@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, | |||
222 | } | 223 | } |
223 | 224 | ||
224 | if (retval != 0) { | 225 | if (retval != 0) { |
225 | pr_err("kfd: error dqm create queue\n"); | 226 | pr_debug("Error dqm create queue\n"); |
226 | goto err_create_queue; | 227 | goto err_create_queue; |
227 | } | 228 | } |
228 | 229 | ||
@@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm, | |||
241 | err_create_queue: | 242 | err_create_queue: |
242 | kfree(pqn); | 243 | kfree(pqn); |
243 | err_allocate_pqn: | 244 | err_allocate_pqn: |
245 | /* if the queues list is empty, unregister the process from the device */ | ||
244 | clear_bit(*qid, pqm->queue_slot_bitmap); | 246 | clear_bit(*qid, pqm->queue_slot_bitmap); |
247 | if (list_empty(&pqm->queues)) | ||
248 | dev->dqm->unregister_process(dev->dqm, &pdd->qpd); | ||
245 | return retval; | 249 | return retval; |
246 | } | 250 | } |
247 | 251 | ||
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index cf775a4449c1..dc386ebe5193 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_ | |||
145 | } | 145 | } |
146 | EXPORT_SYMBOL(drm_fb_helper_add_one_connector); | 146 | EXPORT_SYMBOL(drm_fb_helper_add_one_connector); |
147 | 147 | ||
148 | static void remove_from_modeset(struct drm_mode_set *set, | ||
149 | struct drm_connector *connector) | ||
150 | { | ||
151 | int i, j; | ||
152 | |||
153 | for (i = 0; i < set->num_connectors; i++) { | ||
154 | if (set->connectors[i] == connector) | ||
155 | break; | ||
156 | } | ||
157 | |||
158 | if (i == set->num_connectors) | ||
159 | return; | ||
160 | |||
161 | for (j = i + 1; j < set->num_connectors; j++) { | ||
162 | set->connectors[j - 1] = set->connectors[j]; | ||
163 | } | ||
164 | set->num_connectors--; | ||
165 | |||
166 | /* because i915 is pissy about this.. | ||
167 | * TODO: maybe we need to make sure we set it back to !=NULL somewhere? | ||
168 | */ | ||
169 | if (set->num_connectors == 0) | ||
170 | set->fb = NULL; | ||
171 | } | ||
172 | |||
148 | int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, | 173 | int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, |
149 | struct drm_connector *connector) | 174 | struct drm_connector *connector) |
150 | { | 175 | { |
@@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, | |||
167 | } | 192 | } |
168 | fb_helper->connector_count--; | 193 | fb_helper->connector_count--; |
169 | kfree(fb_helper_connector); | 194 | kfree(fb_helper_connector); |
195 | |||
196 | /* also cleanup dangling references to the connector: */ | ||
197 | for (i = 0; i < fb_helper->crtc_count; i++) | ||
198 | remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector); | ||
199 | |||
170 | return 0; | 200 | return 0; |
171 | } | 201 | } |
172 | EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); | 202 | EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); |
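
remove_from_modeset() above is an ordered array removal: locate the connector, shift the tail left one slot, shrink the count. The same idiom in isolation (a generic sketch, not driver code):

	#include <stddef.h>

	/* Remove 'victim' from a compact pointer array while preserving
	 * the order of the remaining elements; no-op if it is absent. */
	static void array_remove(void **arr, size_t *count, void *victim)
	{
		size_t i, j;

		for (i = 0; i < *count; i++)
			if (arr[i] == victim)
				break;
		if (i == *count)
			return;
		for (j = i + 1; j < *count; j++)
			arr[j - 1] = arr[j];
		(*count)--;
	}
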
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index d4762799351d..a9041d1a8ff0 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c | |||
@@ -32,6 +32,8 @@ | |||
32 | struct tda998x_priv { | 32 | struct tda998x_priv { |
33 | struct i2c_client *cec; | 33 | struct i2c_client *cec; |
34 | struct i2c_client *hdmi; | 34 | struct i2c_client *hdmi; |
35 | struct mutex mutex; | ||
36 | struct delayed_work dwork; | ||
35 | uint16_t rev; | 37 | uint16_t rev; |
36 | uint8_t current_page; | 38 | uint8_t current_page; |
37 | int dpms; | 39 | int dpms; |
@@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt) | |||
402 | uint8_t addr = REG2ADDR(reg); | 404 | uint8_t addr = REG2ADDR(reg); |
403 | int ret; | 405 | int ret; |
404 | 406 | ||
407 | mutex_lock(&priv->mutex); | ||
405 | ret = set_page(priv, reg); | 408 | ret = set_page(priv, reg); |
406 | if (ret < 0) | 409 | if (ret < 0) |
407 | return ret; | 410 | goto out; |
408 | 411 | ||
409 | ret = i2c_master_send(client, &addr, sizeof(addr)); | 412 | ret = i2c_master_send(client, &addr, sizeof(addr)); |
410 | if (ret < 0) | 413 | if (ret < 0) |
@@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt) | |||
414 | if (ret < 0) | 417 | if (ret < 0) |
415 | goto fail; | 418 | goto fail; |
416 | 419 | ||
417 | return ret; | 420 | goto out; |
418 | 421 | ||
419 | fail: | 422 | fail: |
420 | dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg); | 423 | dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg); |
424 | out: | ||
425 | mutex_unlock(&priv->mutex); | ||
421 | return ret; | 426 | return ret; |
422 | } | 427 | } |
423 | 428 | ||
@@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt) | |||
431 | buf[0] = REG2ADDR(reg); | 436 | buf[0] = REG2ADDR(reg); |
432 | memcpy(&buf[1], p, cnt); | 437 | memcpy(&buf[1], p, cnt); |
433 | 438 | ||
439 | mutex_lock(&priv->mutex); | ||
434 | ret = set_page(priv, reg); | 440 | ret = set_page(priv, reg); |
435 | if (ret < 0) | 441 | if (ret < 0) |
436 | return; | 442 | goto out; |
437 | 443 | ||
438 | ret = i2c_master_send(client, buf, cnt + 1); | 444 | ret = i2c_master_send(client, buf, cnt + 1); |
439 | if (ret < 0) | 445 | if (ret < 0) |
440 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); | 446 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); |
447 | out: | ||
448 | mutex_unlock(&priv->mutex); | ||
441 | } | 449 | } |
442 | 450 | ||
443 | static int | 451 | static int |
@@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val) | |||
459 | uint8_t buf[] = {REG2ADDR(reg), val}; | 467 | uint8_t buf[] = {REG2ADDR(reg), val}; |
460 | int ret; | 468 | int ret; |
461 | 469 | ||
470 | mutex_lock(&priv->mutex); | ||
462 | ret = set_page(priv, reg); | 471 | ret = set_page(priv, reg); |
463 | if (ret < 0) | 472 | if (ret < 0) |
464 | return; | 473 | goto out; |
465 | 474 | ||
466 | ret = i2c_master_send(client, buf, sizeof(buf)); | 475 | ret = i2c_master_send(client, buf, sizeof(buf)); |
467 | if (ret < 0) | 476 | if (ret < 0) |
468 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); | 477 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); |
478 | out: | ||
479 | mutex_unlock(&priv->mutex); | ||
469 | } | 480 | } |
470 | 481 | ||
471 | static void | 482 | static void |
@@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val) | |||
475 | uint8_t buf[] = {REG2ADDR(reg), val >> 8, val}; | 486 | uint8_t buf[] = {REG2ADDR(reg), val >> 8, val}; |
476 | int ret; | 487 | int ret; |
477 | 488 | ||
489 | mutex_lock(&priv->mutex); | ||
478 | ret = set_page(priv, reg); | 490 | ret = set_page(priv, reg); |
479 | if (ret < 0) | 491 | if (ret < 0) |
480 | return; | 492 | goto out; |
481 | 493 | ||
482 | ret = i2c_master_send(client, buf, sizeof(buf)); | 494 | ret = i2c_master_send(client, buf, sizeof(buf)); |
483 | if (ret < 0) | 495 | if (ret < 0) |
484 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); | 496 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); |
497 | out: | ||
498 | mutex_unlock(&priv->mutex); | ||
485 | } | 499 | } |
486 | 500 | ||
487 | static void | 501 | static void |
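
All four accessors (reg_read_range, reg_write_range, reg_write, reg_write16) now share one shape. What the new mutex actually protects is priv->current_page, the cached register page that set_page() programs into the chip: the page select and the transfer relying on it must not interleave with another accessor. The invariant, condensed (a sketch of the common pattern, not a fifth accessor; buf/len stand in for each caller's payload):

	mutex_lock(&priv->mutex);
	ret = set_page(priv, reg);         /* may reprogram the page register */
	if (ret < 0)
		goto out;
	ret = i2c_master_send(client, buf, len); /* only valid on that page */
out:
	mutex_unlock(&priv->mutex);
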
@@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv) | |||
536 | reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24); | 550 | reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24); |
537 | } | 551 | } |
538 | 552 | ||
553 | /* handle HDMI connect/disconnect */ | ||
554 | static void tda998x_hpd(struct work_struct *work) | ||
555 | { | ||
556 | struct delayed_work *dwork = to_delayed_work(work); | ||
557 | struct tda998x_priv *priv = | ||
558 | container_of(dwork, struct tda998x_priv, dwork); | ||
559 | |||
560 | if (priv->encoder && priv->encoder->dev) | ||
561 | drm_kms_helper_hotplug_event(priv->encoder->dev); | ||
562 | } | ||
563 | |||
539 | /* | 564 | /* |
540 | * only 2 interrupts may occur: screen plug/unplug and EDID read | 565 | * only 2 interrupts may occur: screen plug/unplug and EDID read |
541 | */ | 566 | */ |
@@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data) | |||
559 | priv->wq_edid_wait = 0; | 584 | priv->wq_edid_wait = 0; |
560 | wake_up(&priv->wq_edid); | 585 | wake_up(&priv->wq_edid); |
561 | } else if (cec != 0) { /* HPD change */ | 586 | } else if (cec != 0) { /* HPD change */ |
562 | if (priv->encoder && priv->encoder->dev) | 587 | schedule_delayed_work(&priv->dwork, HZ/10); |
563 | drm_helper_hpd_irq_event(priv->encoder->dev); | ||
564 | } | 588 | } |
565 | return IRQ_HANDLED; | 589 | return IRQ_HANDLED; |
566 | } | 590 | } |
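
Deferring the notification to delayed work debounces the HPD line: schedule_delayed_work() is a no-op while the work is still pending, so a burst of plug/unplug glitches collapses into a single hotplug event roughly 100 ms (HZ/10) after the first interrupt. The three touch points, condensed:

	INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);  /* probe: bind handler */
	schedule_delayed_work(&priv->dwork, HZ / 10);  /* IRQ: coalesces bursts */
	cancel_delayed_work_sync(&priv->dwork);        /* teardown, after free_irq */
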
@@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv) | |||
1170 | /* disable all IRQs and free the IRQ handler */ | 1194 | /* disable all IRQs and free the IRQ handler */ |
1171 | cec_write(priv, REG_CEC_RXSHPDINTENA, 0); | 1195 | cec_write(priv, REG_CEC_RXSHPDINTENA, 0); |
1172 | reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); | 1196 | reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); |
1173 | if (priv->hdmi->irq) | 1197 | if (priv->hdmi->irq) { |
1174 | free_irq(priv->hdmi->irq, priv); | 1198 | free_irq(priv->hdmi->irq, priv); |
1199 | cancel_delayed_work_sync(&priv->dwork); | ||
1200 | } | ||
1175 | 1201 | ||
1176 | i2c_unregister_device(priv->cec); | 1202 | i2c_unregister_device(priv->cec); |
1177 | } | 1203 | } |
@@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) | |||
1255 | struct device_node *np = client->dev.of_node; | 1281 | struct device_node *np = client->dev.of_node; |
1256 | u32 video; | 1282 | u32 video; |
1257 | int rev_lo, rev_hi, ret; | 1283 | int rev_lo, rev_hi, ret; |
1284 | unsigned short cec_addr; | ||
1258 | 1285 | ||
1259 | priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3); | 1286 | priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3); |
1260 | priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); | 1287 | priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); |
@@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) | |||
1262 | 1289 | ||
1263 | priv->current_page = 0xff; | 1290 | priv->current_page = 0xff; |
1264 | priv->hdmi = client; | 1291 | priv->hdmi = client; |
1265 | priv->cec = i2c_new_dummy(client->adapter, 0x34); | 1292 | /* CEC I2C address bound to TDA998x I2C addr by configuration pins */ |
1293 | cec_addr = 0x34 + (client->addr & 0x03); | ||
1294 | priv->cec = i2c_new_dummy(client->adapter, cec_addr); | ||
1266 | if (!priv->cec) | 1295 | if (!priv->cec) |
1267 | return -ENODEV; | 1296 | return -ENODEV; |
1268 | 1297 | ||
1269 | priv->dpms = DRM_MODE_DPMS_OFF; | 1298 | priv->dpms = DRM_MODE_DPMS_OFF; |
1270 | 1299 | ||
1300 | mutex_init(&priv->mutex); /* protect the page access */ | ||
1301 | |||
1271 | /* wake up the device: */ | 1302 | /* wake up the device: */ |
1272 | cec_write(priv, REG_CEC_ENAMODS, | 1303 | cec_write(priv, REG_CEC_ENAMODS, |
1273 | CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI); | 1304 | CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI); |
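
The same two configuration-pin bits select both the main HDMI address and the CEC core's address, so the latter can now be derived instead of hardcoded to 0x34. Assuming the usual 0x70..0x73 main-address strappings (the & 0x03 mask allows exactly four):

	/* main 0x70 -> CEC 0x34,  0x71 -> 0x35,
	 * main 0x72 -> CEC 0x36,  0x73 -> 0x37 */
	unsigned short cec_addr = 0x34 + (client->addr & 0x03);
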
@@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) | |||
1323 | if (client->irq) { | 1354 | if (client->irq) { |
1324 | int irqf_trigger; | 1355 | int irqf_trigger; |
1325 | 1356 | ||
1326 | /* init read EDID waitqueue */ | 1357 | /* init read EDID waitqueue and HPD work */
1327 | init_waitqueue_head(&priv->wq_edid); | 1358 | init_waitqueue_head(&priv->wq_edid); |
1359 | INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd); | ||
1328 | 1360 | ||
1329 | /* clear pending interrupts */ | 1361 | /* clear pending interrupts */ |
1330 | reg_read(priv, REG_INT_FLAGS_0); | 1362 | reg_read(priv, REG_INT_FLAGS_0); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 574057cd1d09..7643300828c3 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev) | |||
462 | } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { | 462 | } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { |
463 | dev_priv->pch_type = PCH_LPT; | 463 | dev_priv->pch_type = PCH_LPT; |
464 | DRM_DEBUG_KMS("Found LynxPoint PCH\n"); | 464 | DRM_DEBUG_KMS("Found LynxPoint PCH\n"); |
465 | WARN_ON(!IS_HASWELL(dev)); | 465 | WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); |
466 | WARN_ON(IS_HSW_ULT(dev)); | 466 | WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev)); |
467 | } else if (IS_BROADWELL(dev)) { | ||
468 | dev_priv->pch_type = PCH_LPT; | ||
469 | dev_priv->pch_id = | ||
470 | INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; | ||
471 | DRM_DEBUG_KMS("This is Broadwell, assuming " | ||
472 | "LynxPoint LP PCH\n"); | ||
473 | } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { | 467 | } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { |
474 | dev_priv->pch_type = PCH_LPT; | 468 | dev_priv->pch_type = PCH_LPT; |
475 | DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); | 469 | DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); |
476 | WARN_ON(!IS_HASWELL(dev)); | 470 | WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); |
477 | WARN_ON(!IS_HSW_ULT(dev)); | 471 | WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev)); |
478 | } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { | 472 | } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { |
479 | dev_priv->pch_type = PCH_SPT; | 473 | dev_priv->pch_type = PCH_SPT; |
480 | DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); | 474 | DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e9f891c432f8..9d7a7155bf02 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -2159,8 +2159,7 @@ struct drm_i915_cmd_table { | |||
2159 | #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ | 2159 | #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ |
2160 | (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) | 2160 | (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) |
2161 | #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ | 2161 | #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ |
2162 | ((INTEL_DEVID(dev) & 0xf) == 0x2 || \ | 2162 | ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ |
2163 | (INTEL_DEVID(dev) & 0xf) == 0x6 || \ | ||
2164 | (INTEL_DEVID(dev) & 0xf) == 0xe)) | 2163 | (INTEL_DEVID(dev) & 0xf) == 0xe)) |
2165 | #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ | 2164 | #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ |
2166 | (INTEL_DEVID(dev) & 0x00F0) == 0x0020) | 2165 | (INTEL_DEVID(dev) & 0x00F0) == 0x0020) |
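
Only the low nibble of the PCI device ID is tested here, so dropping 0x2 from the match stops IDs ending in 2 (apparently Halo parts, going by the fix) from being misreported as ULT. A sketch with example IDs, assumed for illustration:

	/* 0x1616 -> nibble 0x6 -> ULT
	 * 0x161e -> nibble 0xe -> ULT
	 * 0x1622 -> nibble 0x2 -> no longer ULT after this change */
	static inline int bdw_devid_is_ult(unsigned int devid)
	{
		unsigned int nibble = devid & 0xf;

		return nibble == 0x6 || nibble == 0xe;
	}
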
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 76354d3ba925..5f614828d365 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -3148,6 +3148,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, | |||
3148 | u32 size = i915_gem_obj_ggtt_size(obj); | 3148 | u32 size = i915_gem_obj_ggtt_size(obj); |
3149 | uint64_t val; | 3149 | uint64_t val; |
3150 | 3150 | ||
3151 | /* Adjust fence size to match tiled area */ | ||
3152 | if (obj->tiling_mode != I915_TILING_NONE) { | ||
3153 | uint32_t row_size = obj->stride * | ||
3154 | (obj->tiling_mode == I915_TILING_Y ? 32 : 8); | ||
3155 | size = (size / row_size) * row_size; | ||
3156 | } | ||
3157 | |||
3151 | val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & | 3158 | val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & |
3152 | 0xfffff000) << 32; | 3159 | 0xfffff000) << 32; |
3153 | val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; | 3160 | val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; |
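
The object's GGTT footprint can extend past the last full tile row, so the new code rounds the fenced size down to a whole number of rows before programming the register. Worked with assumed-for-illustration numbers (stride, tiling, and size abbreviate the obj-> fields used above):

	/* X-tiled (8 rows per tile), stride = 512 bytes:
	 *   row_size = 512 * 8 = 4096
	 *   size = 10000 -> (10000 / 4096) * 4096 = 8192
	 * integer division truncates size to a row_size multiple */
	uint32_t row_size = stride * (tiling == I915_TILING_Y ? 32 : 8);
	size = (size / row_size) * row_size;
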
@@ -4884,25 +4891,18 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4884 | for (i = 0; i < NUM_L3_SLICES(dev); i++) | 4891 | for (i = 0; i < NUM_L3_SLICES(dev); i++) |
4885 | i915_gem_l3_remap(&dev_priv->ring[RCS], i); | 4892 | i915_gem_l3_remap(&dev_priv->ring[RCS], i); |
4886 | 4893 | ||
4887 | /* | 4894 | ret = i915_ppgtt_init_hw(dev); |
4888 | * XXX: Contexts should only be initialized once. Doing a switch to the | ||
4889 | * default context switch however is something we'd like to do after | ||
4890 | * reset or thaw (the latter may not actually be necessary for HW, but | ||
4891 | * goes with our code better). Context switching requires rings (for | ||
4892 | * the do_switch), but before enabling PPGTT. So don't move this. | ||
4893 | */ | ||
4894 | ret = i915_gem_context_enable(dev_priv); | ||
4895 | if (ret && ret != -EIO) { | 4895 | if (ret && ret != -EIO) { |
4896 | DRM_ERROR("Context enable failed %d\n", ret); | 4896 | DRM_ERROR("PPGTT enable failed %d\n", ret); |
4897 | i915_gem_cleanup_ringbuffer(dev); | 4897 | i915_gem_cleanup_ringbuffer(dev); |
4898 | |||
4899 | return ret; | ||
4900 | } | 4898 | } |
4901 | 4899 | ||
4902 | ret = i915_ppgtt_init_hw(dev); | 4900 | ret = i915_gem_context_enable(dev_priv); |
4903 | if (ret && ret != -EIO) { | 4901 | if (ret && ret != -EIO) { |
4904 | DRM_ERROR("PPGTT enable failed %d\n", ret); | 4902 | DRM_ERROR("Context enable failed %d\n", ret); |
4905 | i915_gem_cleanup_ringbuffer(dev); | 4903 | i915_gem_cleanup_ringbuffer(dev); |
4904 | |||
4905 | return ret; | ||
4906 | } | 4906 | } |
4907 | 4907 | ||
4908 | return ret; | 4908 | return ret; |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 4d63839bd9b4..dfb783a8f2c3 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector) | |||
962 | 962 | ||
963 | WARN_ON(panel->backlight.max == 0); | 963 | WARN_ON(panel->backlight.max == 0); |
964 | 964 | ||
965 | if (panel->backlight.level == 0) { | 965 | if (panel->backlight.level <= panel->backlight.min) { |
966 | panel->backlight.level = panel->backlight.max; | 966 | panel->backlight.level = panel->backlight.max; |
967 | if (panel->backlight.device) | 967 | if (panel->backlight.device) |
968 | panel->backlight.device->props.brightness = | 968 | panel->backlight.device->props.brightness = |
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index a0133c74f4cf..42cd0cffe210 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c | |||
@@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev, | |||
816 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { | 816 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
817 | if (flags & R600_PTE_SYSTEM) { | 817 | if (flags & R600_PTE_SYSTEM) { |
818 | value = radeon_vm_map_gart(rdev, addr); | 818 | value = radeon_vm_map_gart(rdev, addr); |
819 | value &= 0xFFFFFFFFFFFFF000ULL; | ||
820 | } else if (flags & R600_PTE_VALID) { | 819 | } else if (flags & R600_PTE_VALID) { |
821 | value = addr; | 820 | value = addr; |
822 | } else { | 821 | } else { |
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c index 4be2bb7cbef3..ce787a9f12c0 100644 --- a/drivers/gpu/drm/radeon/ni_dma.c +++ b/drivers/gpu/drm/radeon/ni_dma.c | |||
@@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev, | |||
372 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { | 372 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
373 | if (flags & R600_PTE_SYSTEM) { | 373 | if (flags & R600_PTE_SYSTEM) { |
374 | value = radeon_vm_map_gart(rdev, addr); | 374 | value = radeon_vm_map_gart(rdev, addr); |
375 | value &= 0xFFFFFFFFFFFFF000ULL; | ||
376 | } else if (flags & R600_PTE_VALID) { | 375 | } else if (flags & R600_PTE_VALID) { |
377 | value = addr; | 376 | value = addr; |
378 | } else { | 377 | } else { |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 74f06d540591..279801ca5110 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev) | |||
644 | return r; | 644 | return r; |
645 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; | 645 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
646 | rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; | 646 | rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; |
647 | rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; | ||
647 | rdev->asic->gart.set_page = &r100_pci_gart_set_page; | 648 | rdev->asic->gart.set_page = &r100_pci_gart_set_page; |
648 | return radeon_gart_table_ram_alloc(rdev); | 649 | return radeon_gart_table_ram_alloc(rdev); |
649 | } | 650 | } |
@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev) | |||
681 | WREG32(RADEON_AIC_HI_ADDR, 0); | 682 | WREG32(RADEON_AIC_HI_ADDR, 0); |
682 | } | 683 | } |
683 | 684 | ||
685 | uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags) | ||
686 | { | ||
687 | return addr; | ||
688 | } | ||
689 | |||
684 | void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, | 690 | void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, |
685 | uint64_t addr, uint32_t flags) | 691 | uint64_t entry) |
686 | { | 692 | { |
687 | u32 *gtt = rdev->gart.ptr; | 693 | u32 *gtt = rdev->gart.ptr; |
688 | gtt[i] = cpu_to_le32(lower_32_bits(addr)); | 694 | gtt[i] = cpu_to_le32(lower_32_bits(entry)); |
689 | } | 695 | } |
690 | 696 | ||
691 | void r100_pci_gart_fini(struct radeon_device *rdev) | 697 | void r100_pci_gart_fini(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 064ad5569cca..08d68f3e13e9 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) | |||
73 | #define R300_PTE_WRITEABLE (1 << 2) | 73 | #define R300_PTE_WRITEABLE (1 << 2) |
74 | #define R300_PTE_READABLE (1 << 3) | 74 | #define R300_PTE_READABLE (1 << 3) |
75 | 75 | ||
76 | void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, | 76 | uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags) |
77 | uint64_t addr, uint32_t flags) | ||
78 | { | 77 | { |
79 | void __iomem *ptr = rdev->gart.ptr; | ||
80 | |||
81 | addr = (lower_32_bits(addr) >> 8) | | 78 | addr = (lower_32_bits(addr) >> 8) | |
82 | ((upper_32_bits(addr) & 0xff) << 24); | 79 | ((upper_32_bits(addr) & 0xff) << 24); |
83 | if (flags & RADEON_GART_PAGE_READ) | 80 | if (flags & RADEON_GART_PAGE_READ) |
@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, | |||
86 | addr |= R300_PTE_WRITEABLE; | 83 | addr |= R300_PTE_WRITEABLE; |
87 | if (!(flags & RADEON_GART_PAGE_SNOOP)) | 84 | if (!(flags & RADEON_GART_PAGE_SNOOP)) |
88 | addr |= R300_PTE_UNSNOOPED; | 85 | addr |= R300_PTE_UNSNOOPED; |
86 | return addr; | ||
87 | } | ||
88 | |||
89 | void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, | ||
90 | uint64_t entry) | ||
91 | { | ||
92 | void __iomem *ptr = rdev->gart.ptr; | ||
93 | |||
89 | /* on x86 we want this to be CPU endian; on powerpc | 94
90 | * without HW swappers, it'll get swapped on the way | 95
91 | * into VRAM - so no need for cpu_to_le32 on VRAM tables */ | 96 | * into VRAM - so no need for cpu_to_le32 on VRAM tables */ |
92 | writel(addr, ((void __iomem *)ptr) + (i * 4)); | 97 | writel(entry, ((void __iomem *)ptr) + (i * 4)); |
93 | } | 98 | } |
94 | 99 | ||
95 | int rv370_pcie_gart_init(struct radeon_device *rdev) | 100 | int rv370_pcie_gart_init(struct radeon_device *rdev) |
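
Now a pure function, rv370_pcie_gart_get_page_entry() packs a 40-bit bus address into a 32-bit PTE: drop the low 8 bits and stash address bits 39:32 in the top byte, leaving the low bits free for the R300_PTE_* flags. A worked example:

	/* addr = 0x1_2345_6000:
	 *   lower_32_bits(addr) >> 8           = 0x00234560
	 *   (upper_32_bits(addr) & 0xff) << 24 = 0x01000000
	 *   combined PTE (before flag bits)    = 0x01234560 */
	uint64_t addr = 0x123456000ULL;
	uint32_t pte = (uint32_t)((addr & 0xffffffffULL) >> 8) |
		       (uint32_t)(((addr >> 32) & 0xff) << 24);
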
@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev) | |||
109 | DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); | 114 | DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); |
110 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; | 115 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
111 | rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; | 116 | rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; |
117 | rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; | ||
112 | rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; | 118 | rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; |
113 | return radeon_gart_table_vram_alloc(rdev); | 119 | return radeon_gart_table_vram_alloc(rdev); |
114 | } | 120 | } |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 54529b837afa..3f2a8d3febca 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev); | |||
242 | * Dummy page | 242 | * Dummy page |
243 | */ | 243 | */ |
244 | struct radeon_dummy_page { | 244 | struct radeon_dummy_page { |
245 | uint64_t entry; | ||
245 | struct page *page; | 246 | struct page *page; |
246 | dma_addr_t addr; | 247 | dma_addr_t addr; |
247 | }; | 248 | }; |
@@ -645,7 +646,7 @@ struct radeon_gart { | |||
645 | unsigned num_cpu_pages; | 646 | unsigned num_cpu_pages; |
646 | unsigned table_size; | 647 | unsigned table_size; |
647 | struct page **pages; | 648 | struct page **pages; |
648 | dma_addr_t *pages_addr; | 649 | uint64_t *pages_entry; |
649 | bool ready; | 650 | bool ready; |
650 | }; | 651 | }; |
651 | 652 | ||
@@ -1847,8 +1848,9 @@ struct radeon_asic { | |||
1847 | /* gart */ | 1848 | /* gart */ |
1848 | struct { | 1849 | struct { |
1849 | void (*tlb_flush)(struct radeon_device *rdev); | 1850 | void (*tlb_flush)(struct radeon_device *rdev); |
1851 | uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags); | ||
1850 | void (*set_page)(struct radeon_device *rdev, unsigned i, | 1852 | void (*set_page)(struct radeon_device *rdev, unsigned i, |
1851 | uint64_t addr, uint32_t flags); | 1853 | uint64_t entry); |
1852 | } gart; | 1854 | } gart; |
1853 | struct { | 1855 | struct { |
1854 | int (*init)(struct radeon_device *rdev); | 1856 | int (*init)(struct radeon_device *rdev); |
@@ -2852,7 +2854,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v) | |||
2852 | #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) | 2854 | #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) |
2853 | #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) | 2855 | #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) |
2854 | #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) | 2856 | #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) |
2855 | #define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f)) | 2857 | #define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f)) |
2858 | #define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e)) | ||
2856 | #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) | 2859 | #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) |
2857 | #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) | 2860 | #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) |
2858 | #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count))) | 2861 | #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count))) |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 121aff6a3b41..ed0e10eee2dc 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev) | |||
159 | DRM_INFO("Forcing AGP to PCIE mode\n"); | 159 | DRM_INFO("Forcing AGP to PCIE mode\n"); |
160 | rdev->flags |= RADEON_IS_PCIE; | 160 | rdev->flags |= RADEON_IS_PCIE; |
161 | rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; | 161 | rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; |
162 | rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; | ||
162 | rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; | 163 | rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; |
163 | } else { | 164 | } else { |
164 | DRM_INFO("Forcing AGP to PCI mode\n"); | 165 | DRM_INFO("Forcing AGP to PCI mode\n"); |
165 | rdev->flags |= RADEON_IS_PCI; | 166 | rdev->flags |= RADEON_IS_PCI; |
166 | rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; | 167 | rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; |
168 | rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; | ||
167 | rdev->asic->gart.set_page = &r100_pci_gart_set_page; | 169 | rdev->asic->gart.set_page = &r100_pci_gart_set_page; |
168 | } | 170 | } |
169 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | 171 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = { | |||
199 | .mc_wait_for_idle = &r100_mc_wait_for_idle, | 201 | .mc_wait_for_idle = &r100_mc_wait_for_idle, |
200 | .gart = { | 202 | .gart = { |
201 | .tlb_flush = &r100_pci_gart_tlb_flush, | 203 | .tlb_flush = &r100_pci_gart_tlb_flush, |
204 | .get_page_entry = &r100_pci_gart_get_page_entry, | ||
202 | .set_page = &r100_pci_gart_set_page, | 205 | .set_page = &r100_pci_gart_set_page, |
203 | }, | 206 | }, |
204 | .ring = { | 207 | .ring = { |
@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = { | |||
265 | .mc_wait_for_idle = &r100_mc_wait_for_idle, | 268 | .mc_wait_for_idle = &r100_mc_wait_for_idle, |
266 | .gart = { | 269 | .gart = { |
267 | .tlb_flush = &r100_pci_gart_tlb_flush, | 270 | .tlb_flush = &r100_pci_gart_tlb_flush, |
271 | .get_page_entry = &r100_pci_gart_get_page_entry, | ||
268 | .set_page = &r100_pci_gart_set_page, | 272 | .set_page = &r100_pci_gart_set_page, |
269 | }, | 273 | }, |
270 | .ring = { | 274 | .ring = { |
@@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = { | |||
359 | .mc_wait_for_idle = &r300_mc_wait_for_idle, | 363 | .mc_wait_for_idle = &r300_mc_wait_for_idle, |
360 | .gart = { | 364 | .gart = { |
361 | .tlb_flush = &r100_pci_gart_tlb_flush, | 365 | .tlb_flush = &r100_pci_gart_tlb_flush, |
366 | .get_page_entry = &r100_pci_gart_get_page_entry, | ||
362 | .set_page = &r100_pci_gart_set_page, | 367 | .set_page = &r100_pci_gart_set_page, |
363 | }, | 368 | }, |
364 | .ring = { | 369 | .ring = { |
@@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = { | |||
425 | .mc_wait_for_idle = &r300_mc_wait_for_idle, | 430 | .mc_wait_for_idle = &r300_mc_wait_for_idle, |
426 | .gart = { | 431 | .gart = { |
427 | .tlb_flush = &rv370_pcie_gart_tlb_flush, | 432 | .tlb_flush = &rv370_pcie_gart_tlb_flush, |
433 | .get_page_entry = &rv370_pcie_gart_get_page_entry, | ||
428 | .set_page = &rv370_pcie_gart_set_page, | 434 | .set_page = &rv370_pcie_gart_set_page, |
429 | }, | 435 | }, |
430 | .ring = { | 436 | .ring = { |
@@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = { | |||
491 | .mc_wait_for_idle = &r300_mc_wait_for_idle, | 497 | .mc_wait_for_idle = &r300_mc_wait_for_idle, |
492 | .gart = { | 498 | .gart = { |
493 | .tlb_flush = &rv370_pcie_gart_tlb_flush, | 499 | .tlb_flush = &rv370_pcie_gart_tlb_flush, |
500 | .get_page_entry = &rv370_pcie_gart_get_page_entry, | ||
494 | .set_page = &rv370_pcie_gart_set_page, | 501 | .set_page = &rv370_pcie_gart_set_page, |
495 | }, | 502 | }, |
496 | .ring = { | 503 | .ring = { |
@@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = { | |||
557 | .mc_wait_for_idle = &rs400_mc_wait_for_idle, | 564 | .mc_wait_for_idle = &rs400_mc_wait_for_idle, |
558 | .gart = { | 565 | .gart = { |
559 | .tlb_flush = &rs400_gart_tlb_flush, | 566 | .tlb_flush = &rs400_gart_tlb_flush, |
567 | .get_page_entry = &rs400_gart_get_page_entry, | ||
560 | .set_page = &rs400_gart_set_page, | 568 | .set_page = &rs400_gart_set_page, |
561 | }, | 569 | }, |
562 | .ring = { | 570 | .ring = { |
@@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = { | |||
623 | .mc_wait_for_idle = &rs600_mc_wait_for_idle, | 631 | .mc_wait_for_idle = &rs600_mc_wait_for_idle, |
624 | .gart = { | 632 | .gart = { |
625 | .tlb_flush = &rs600_gart_tlb_flush, | 633 | .tlb_flush = &rs600_gart_tlb_flush, |
634 | .get_page_entry = &rs600_gart_get_page_entry, | ||
626 | .set_page = &rs600_gart_set_page, | 635 | .set_page = &rs600_gart_set_page, |
627 | }, | 636 | }, |
628 | .ring = { | 637 | .ring = { |
@@ -691,6 +700,7 @@ static struct radeon_asic rs690_asic = { | |||
691 | .mc_wait_for_idle = &rs690_mc_wait_for_idle, | 700 | .mc_wait_for_idle = &rs690_mc_wait_for_idle, |
692 | .gart = { | 701 | .gart = { |
693 | .tlb_flush = &rs400_gart_tlb_flush, | 702 | .tlb_flush = &rs400_gart_tlb_flush, |
703 | .get_page_entry = &rs400_gart_get_page_entry, | ||
694 | .set_page = &rs400_gart_set_page, | 704 | .set_page = &rs400_gart_set_page, |
695 | }, | 705 | }, |
696 | .ring = { | 706 | .ring = { |
@@ -759,6 +769,7 @@ static struct radeon_asic rv515_asic = { | |||
759 | .mc_wait_for_idle = &rv515_mc_wait_for_idle, | 769 | .mc_wait_for_idle = &rv515_mc_wait_for_idle, |
760 | .gart = { | 770 | .gart = { |
761 | .tlb_flush = &rv370_pcie_gart_tlb_flush, | 771 | .tlb_flush = &rv370_pcie_gart_tlb_flush, |
772 | .get_page_entry = &rv370_pcie_gart_get_page_entry, | ||
762 | .set_page = &rv370_pcie_gart_set_page, | 773 | .set_page = &rv370_pcie_gart_set_page, |
763 | }, | 774 | }, |
764 | .ring = { | 775 | .ring = { |
@@ -825,6 +836,7 @@ static struct radeon_asic r520_asic = { | |||
825 | .mc_wait_for_idle = &r520_mc_wait_for_idle, | 836 | .mc_wait_for_idle = &r520_mc_wait_for_idle, |
826 | .gart = { | 837 | .gart = { |
827 | .tlb_flush = &rv370_pcie_gart_tlb_flush, | 838 | .tlb_flush = &rv370_pcie_gart_tlb_flush, |
839 | .get_page_entry = &rv370_pcie_gart_get_page_entry, | ||
828 | .set_page = &rv370_pcie_gart_set_page, | 840 | .set_page = &rv370_pcie_gart_set_page, |
829 | }, | 841 | }, |
830 | .ring = { | 842 | .ring = { |
@@ -919,6 +931,7 @@ static struct radeon_asic r600_asic = { | |||
919 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 931 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
920 | .gart = { | 932 | .gart = { |
921 | .tlb_flush = &r600_pcie_gart_tlb_flush, | 933 | .tlb_flush = &r600_pcie_gart_tlb_flush, |
934 | .get_page_entry = &rs600_gart_get_page_entry, | ||
922 | .set_page = &rs600_gart_set_page, | 935 | .set_page = &rs600_gart_set_page, |
923 | }, | 936 | }, |
924 | .ring = { | 937 | .ring = { |
@@ -1004,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = { | |||
1004 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1017 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
1005 | .gart = { | 1018 | .gart = { |
1006 | .tlb_flush = &r600_pcie_gart_tlb_flush, | 1019 | .tlb_flush = &r600_pcie_gart_tlb_flush, |
1020 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1007 | .set_page = &rs600_gart_set_page, | 1021 | .set_page = &rs600_gart_set_page, |
1008 | }, | 1022 | }, |
1009 | .ring = { | 1023 | .ring = { |
@@ -1095,6 +1109,7 @@ static struct radeon_asic rs780_asic = { | |||
1095 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1109 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
1096 | .gart = { | 1110 | .gart = { |
1097 | .tlb_flush = &r600_pcie_gart_tlb_flush, | 1111 | .tlb_flush = &r600_pcie_gart_tlb_flush, |
1112 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1098 | .set_page = &rs600_gart_set_page, | 1113 | .set_page = &rs600_gart_set_page, |
1099 | }, | 1114 | }, |
1100 | .ring = { | 1115 | .ring = { |
@@ -1199,6 +1214,7 @@ static struct radeon_asic rv770_asic = { | |||
1199 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1214 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
1200 | .gart = { | 1215 | .gart = { |
1201 | .tlb_flush = &r600_pcie_gart_tlb_flush, | 1216 | .tlb_flush = &r600_pcie_gart_tlb_flush, |
1217 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1202 | .set_page = &rs600_gart_set_page, | 1218 | .set_page = &rs600_gart_set_page, |
1203 | }, | 1219 | }, |
1204 | .ring = { | 1220 | .ring = { |
@@ -1317,6 +1333,7 @@ static struct radeon_asic evergreen_asic = { | |||
1317 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1333 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
1318 | .gart = { | 1334 | .gart = { |
1319 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, | 1335 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, |
1336 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1320 | .set_page = &rs600_gart_set_page, | 1337 | .set_page = &rs600_gart_set_page, |
1321 | }, | 1338 | }, |
1322 | .ring = { | 1339 | .ring = { |
@@ -1409,6 +1426,7 @@ static struct radeon_asic sumo_asic = { | |||
1409 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1426 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
1410 | .gart = { | 1427 | .gart = { |
1411 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, | 1428 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, |
1429 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1412 | .set_page = &rs600_gart_set_page, | 1430 | .set_page = &rs600_gart_set_page, |
1413 | }, | 1431 | }, |
1414 | .ring = { | 1432 | .ring = { |
@@ -1500,6 +1518,7 @@ static struct radeon_asic btc_asic = { | |||
1500 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1518 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
1501 | .gart = { | 1519 | .gart = { |
1502 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, | 1520 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, |
1521 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1503 | .set_page = &rs600_gart_set_page, | 1522 | .set_page = &rs600_gart_set_page, |
1504 | }, | 1523 | }, |
1505 | .ring = { | 1524 | .ring = { |
@@ -1635,6 +1654,7 @@ static struct radeon_asic cayman_asic = { | |||
1635 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1654 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
1636 | .gart = { | 1655 | .gart = { |
1637 | .tlb_flush = &cayman_pcie_gart_tlb_flush, | 1656 | .tlb_flush = &cayman_pcie_gart_tlb_flush, |
1657 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1638 | .set_page = &rs600_gart_set_page, | 1658 | .set_page = &rs600_gart_set_page, |
1639 | }, | 1659 | }, |
1640 | .vm = { | 1660 | .vm = { |
@@ -1738,6 +1758,7 @@ static struct radeon_asic trinity_asic = { | |||
1738 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1758 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
1739 | .gart = { | 1759 | .gart = { |
1740 | .tlb_flush = &cayman_pcie_gart_tlb_flush, | 1760 | .tlb_flush = &cayman_pcie_gart_tlb_flush, |
1761 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1741 | .set_page = &rs600_gart_set_page, | 1762 | .set_page = &rs600_gart_set_page, |
1742 | }, | 1763 | }, |
1743 | .vm = { | 1764 | .vm = { |
@@ -1871,6 +1892,7 @@ static struct radeon_asic si_asic = { | |||
1871 | .get_gpu_clock_counter = &si_get_gpu_clock_counter, | 1892 | .get_gpu_clock_counter = &si_get_gpu_clock_counter, |
1872 | .gart = { | 1893 | .gart = { |
1873 | .tlb_flush = &si_pcie_gart_tlb_flush, | 1894 | .tlb_flush = &si_pcie_gart_tlb_flush, |
1895 | .get_page_entry = &rs600_gart_get_page_entry, | ||
1874 | .set_page = &rs600_gart_set_page, | 1896 | .set_page = &rs600_gart_set_page, |
1875 | }, | 1897 | }, |
1876 | .vm = { | 1898 | .vm = { |
@@ -2032,6 +2054,7 @@ static struct radeon_asic ci_asic = { | |||
2032 | .get_gpu_clock_counter = &cik_get_gpu_clock_counter, | 2054 | .get_gpu_clock_counter = &cik_get_gpu_clock_counter, |
2033 | .gart = { | 2055 | .gart = { |
2034 | .tlb_flush = &cik_pcie_gart_tlb_flush, | 2056 | .tlb_flush = &cik_pcie_gart_tlb_flush, |
2057 | .get_page_entry = &rs600_gart_get_page_entry, | ||
2035 | .set_page = &rs600_gart_set_page, | 2058 | .set_page = &rs600_gart_set_page, |
2036 | }, | 2059 | }, |
2037 | .vm = { | 2060 | .vm = { |
@@ -2139,6 +2162,7 @@ static struct radeon_asic kv_asic = { | |||
2139 | .get_gpu_clock_counter = &cik_get_gpu_clock_counter, | 2162 | .get_gpu_clock_counter = &cik_get_gpu_clock_counter, |
2140 | .gart = { | 2163 | .gart = { |
2141 | .tlb_flush = &cik_pcie_gart_tlb_flush, | 2164 | .tlb_flush = &cik_pcie_gart_tlb_flush, |
2165 | .get_page_entry = &rs600_gart_get_page_entry, | ||
2142 | .set_page = &rs600_gart_set_page, | 2166 | .set_page = &rs600_gart_set_page, |
2143 | }, | 2167 | }, |
2144 | .vm = { | 2168 | .vm = { |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 2a45d548d5ec..8d787d115653 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); | |||
67 | int r100_asic_reset(struct radeon_device *rdev); | 67 | int r100_asic_reset(struct radeon_device *rdev); |
68 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); | 68 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); |
69 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev); | 69 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev); |
70 | uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags); | ||
70 | void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, | 71 | void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, |
71 | uint64_t addr, uint32_t flags); | 72 | uint64_t entry); |
72 | void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); | 73 | void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); |
73 | int r100_irq_set(struct radeon_device *rdev); | 74 | int r100_irq_set(struct radeon_device *rdev); |
74 | int r100_irq_process(struct radeon_device *rdev); | 75 | int r100_irq_process(struct radeon_device *rdev); |
@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev, | |||
172 | struct radeon_fence *fence); | 173 | struct radeon_fence *fence); |
173 | extern int r300_cs_parse(struct radeon_cs_parser *p); | 174 | extern int r300_cs_parse(struct radeon_cs_parser *p); |
174 | extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); | 175 | extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); |
176 | extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags); | ||
175 | extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, | 177 | extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, |
176 | uint64_t addr, uint32_t flags); | 178 | uint64_t entry); |
177 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); | 179 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); |
178 | extern int rv370_get_pcie_lanes(struct radeon_device *rdev); | 180 | extern int rv370_get_pcie_lanes(struct radeon_device *rdev); |
179 | extern void r300_set_reg_safe(struct radeon_device *rdev); | 181 | extern void r300_set_reg_safe(struct radeon_device *rdev); |
@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev); | |||
208 | extern int rs400_suspend(struct radeon_device *rdev); | 210 | extern int rs400_suspend(struct radeon_device *rdev); |
209 | extern int rs400_resume(struct radeon_device *rdev); | 211 | extern int rs400_resume(struct radeon_device *rdev); |
210 | void rs400_gart_tlb_flush(struct radeon_device *rdev); | 212 | void rs400_gart_tlb_flush(struct radeon_device *rdev); |
213 | uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags); | ||
211 | void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, | 214 | void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, |
212 | uint64_t addr, uint32_t flags); | 215 | uint64_t entry); |
213 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 216 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
214 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 217 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
215 | int rs400_gart_init(struct radeon_device *rdev); | 218 | int rs400_gart_init(struct radeon_device *rdev); |
@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev); | |||
232 | void rs600_irq_disable(struct radeon_device *rdev); | 235 | void rs600_irq_disable(struct radeon_device *rdev); |
233 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); | 236 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); |
234 | void rs600_gart_tlb_flush(struct radeon_device *rdev); | 237 | void rs600_gart_tlb_flush(struct radeon_device *rdev); |
238 | uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags); | ||
235 | void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, | 239 | void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, |
236 | uint64_t addr, uint32_t flags); | 240 | uint64_t entry); |
237 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 241 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
238 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 242 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
239 | void rs600_bandwidth_update(struct radeon_device *rdev); | 243 | void rs600_bandwidth_update(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0ec65168f331..bd7519fdd3f4 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev) | |||
774 | rdev->dummy_page.page = NULL; | 774 | rdev->dummy_page.page = NULL; |
775 | return -ENOMEM; | 775 | return -ENOMEM; |
776 | } | 776 | } |
777 | rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr, | ||
778 | RADEON_GART_PAGE_DUMMY); | ||
777 | return 0; | 779 | return 0; |
778 | } | 780 | } |
779 | 781 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 84146d5901aa..5450fa95a47e 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev) | |||
165 | radeon_bo_unpin(rdev->gart.robj); | 165 | radeon_bo_unpin(rdev->gart.robj); |
166 | radeon_bo_unreserve(rdev->gart.robj); | 166 | radeon_bo_unreserve(rdev->gart.robj); |
167 | rdev->gart.table_addr = gpu_addr; | 167 | rdev->gart.table_addr = gpu_addr; |
168 | |||
169 | if (!r) { | ||
170 | int i; | ||
171 | |||
172 | /* We might have dropped some GART table updates while it wasn't | ||
173 | * mapped, restore all entries | ||
174 | */ | ||
175 | for (i = 0; i < rdev->gart.num_gpu_pages; i++) | ||
176 | radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]); | ||
177 | mb(); | ||
178 | radeon_gart_tlb_flush(rdev); | ||
179 | } | ||
180 | |||
168 | return r; | 181 | return r; |
169 | } | 182 | } |
170 | 183 | ||
@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | |||
228 | unsigned t; | 241 | unsigned t; |
229 | unsigned p; | 242 | unsigned p; |
230 | int i, j; | 243 | int i, j; |
231 | u64 page_base; | ||
232 | 244 | ||
233 | if (!rdev->gart.ready) { | 245 | if (!rdev->gart.ready) { |
234 | WARN(1, "trying to unbind memory from uninitialized GART !\n"); | 246 | WARN(1, "trying to unbind memory from uninitialized GART !\n"); |
@@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | |||
239 | for (i = 0; i < pages; i++, p++) { | 251 | for (i = 0; i < pages; i++, p++) { |
240 | if (rdev->gart.pages[p]) { | 252 | if (rdev->gart.pages[p]) { |
241 | rdev->gart.pages[p] = NULL; | 253 | rdev->gart.pages[p] = NULL; |
242 | rdev->gart.pages_addr[p] = rdev->dummy_page.addr; | ||
243 | page_base = rdev->gart.pages_addr[p]; | ||
244 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { | 254 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { |
255 | rdev->gart.pages_entry[t] = rdev->dummy_page.entry; | ||
245 | if (rdev->gart.ptr) { | 256 | if (rdev->gart.ptr) { |
246 | radeon_gart_set_page(rdev, t, page_base, | 257 | radeon_gart_set_page(rdev, t, |
247 | RADEON_GART_PAGE_DUMMY); | 258 | rdev->dummy_page.entry); |
248 | } | 259 | } |
249 | page_base += RADEON_GPU_PAGE_SIZE; | ||
250 | } | 260 | } |
251 | } | 261 | } |
252 | } | 262 | } |
@@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
274 | { | 284 | { |
275 | unsigned t; | 285 | unsigned t; |
276 | unsigned p; | 286 | unsigned p; |
277 | uint64_t page_base; | 287 | uint64_t page_base, page_entry; |
278 | int i, j; | 288 | int i, j; |
279 | 289 | ||
280 | if (!rdev->gart.ready) { | 290 | if (!rdev->gart.ready) { |
@@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
285 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); | 295 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); |
286 | 296 | ||
287 | for (i = 0; i < pages; i++, p++) { | 297 | for (i = 0; i < pages; i++, p++) { |
288 | rdev->gart.pages_addr[p] = dma_addr[i]; | ||
289 | rdev->gart.pages[p] = pagelist[i]; | 298 | rdev->gart.pages[p] = pagelist[i]; |
290 | if (rdev->gart.ptr) { | 299 | page_base = dma_addr[i]; |
291 | page_base = rdev->gart.pages_addr[p]; | 300 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { |
292 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { | 301 | page_entry = radeon_gart_get_page_entry(page_base, flags); |
293 | radeon_gart_set_page(rdev, t, page_base, flags); | 302 | rdev->gart.pages_entry[t] = page_entry; |
294 | page_base += RADEON_GPU_PAGE_SIZE; | 303 | if (rdev->gart.ptr) { |
304 | radeon_gart_set_page(rdev, t, page_entry); | ||
295 | } | 305 | } |
306 | page_base += RADEON_GPU_PAGE_SIZE; | ||
296 | } | 307 | } |
297 | } | 308 | } |
298 | mb(); | 309 | mb(); |
@@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev) | |||
334 | radeon_gart_fini(rdev); | 345 | radeon_gart_fini(rdev); |
335 | return -ENOMEM; | 346 | return -ENOMEM; |
336 | } | 347 | } |
337 | rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) * | 348 | rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) * |
338 | rdev->gart.num_cpu_pages); | 349 | rdev->gart.num_gpu_pages); |
339 | if (rdev->gart.pages_addr == NULL) { | 350 | if (rdev->gart.pages_entry == NULL) { |
340 | radeon_gart_fini(rdev); | 351 | radeon_gart_fini(rdev); |
341 | return -ENOMEM; | 352 | return -ENOMEM; |
342 | } | 353 | } |
343 | /* set GART entry to point to the dummy page by default */ | 354 | /* set GART entry to point to the dummy page by default */ |
344 | for (i = 0; i < rdev->gart.num_cpu_pages; i++) { | 355 | for (i = 0; i < rdev->gart.num_gpu_pages; i++) |
345 | rdev->gart.pages_addr[i] = rdev->dummy_page.addr; | 356 | rdev->gart.pages_entry[i] = rdev->dummy_page.entry; |
346 | } | ||
347 | return 0; | 357 | return 0; |
348 | } | 358 | } |
349 | 359 | ||
@@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev) | |||
356 | */ | 366 | */ |
357 | void radeon_gart_fini(struct radeon_device *rdev) | 367 | void radeon_gart_fini(struct radeon_device *rdev) |
358 | { | 368 | { |
359 | if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) { | 369 | if (rdev->gart.ready) { |
360 | /* unbind pages */ | 370 | /* unbind pages */ |
361 | radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); | 371 | radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); |
362 | } | 372 | } |
363 | rdev->gart.ready = false; | 373 | rdev->gart.ready = false; |
364 | vfree(rdev->gart.pages); | 374 | vfree(rdev->gart.pages); |
365 | vfree(rdev->gart.pages_addr); | 375 | vfree(rdev->gart.pages_entry); |
366 | rdev->gart.pages = NULL; | 376 | rdev->gart.pages = NULL; |
367 | rdev->gart.pages_addr = NULL; | 377 | rdev->gart.pages_entry = NULL; |
368 | 378 | ||
369 | radeon_dummy_page_fini(rdev); | 379 | radeon_dummy_page_fini(rdev); |
370 | } | 380 | } |
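
The radeon_gart.c changes above replace the per-page DMA-address array with a pages_entry shadow holding one fully encoded GART entry per GPU page. Because the shadow survives while the VRAM-backed table is unmapped, radeon_gart_table_vram_pin() can replay every entry after re-pinning instead of losing updates made in between. A minimal userspace sketch of that replay; sizes and the dummy-entry value are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_GPU_PAGES 8
#define DUMMY_ENTRY   0xdeadb000ULL   /* stands in for dummy_page.entry */

static uint64_t pages_entry[NUM_GPU_PAGES]; /* CPU-side shadow */
static uint64_t hw_table[NUM_GPU_PAGES];    /* stands in for gart.ptr */

static void gart_set_page(unsigned i, uint64_t entry)
{
	hw_table[i] = entry;              /* writeq() in the real driver */
}

static void gart_table_restore(void)
{
	/* Mirrors the new loop in radeon_gart_table_vram_pin(): updates
	 * made while the table was unmapped are replayed from the shadow. */
	for (unsigned i = 0; i < NUM_GPU_PAGES; i++)
		gart_set_page(i, pages_entry[i]);
	/* mb() and a TLB flush follow in the real driver */
}

int main(void)
{
	for (unsigned i = 0; i < NUM_GPU_PAGES; i++)
		pages_entry[i] = DUMMY_ENTRY;
	pages_entry[3] = 0x123456000ULL;  /* a binding done while unmapped */

	memset(hw_table, 0, sizeof(hw_table)); /* table just re-pinned */
	gart_table_restore();
	printf("hw_table[3] = 0x%llx\n", (unsigned long long)hw_table[3]);
	return 0;
}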
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 8bf87f1203cc..bef9a0953284 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c | |||
@@ -436,7 +436,7 @@ static int kgd_init_memory(struct kgd_dev *kgd) | |||
436 | static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, | 436 | static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, |
437 | uint32_t hpd_size, uint64_t hpd_gpu_addr) | 437 | uint32_t hpd_size, uint64_t hpd_gpu_addr) |
438 | { | 438 | { |
439 | uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1; | 439 | uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1; |
440 | uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC); | 440 | uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC); |
441 | 441 | ||
442 | lock_srbm(kgd, mec, pipe, 0, 0); | 442 | lock_srbm(kgd, mec, pipe, 0, 0); |
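
The radeon_kfd.c hunk is a one-character logic fix: the pre-increment bumped pipe_id before the division, and the modulo on the next line then saw the bumped value too, so pipe 0 of a MEC was addressed as pipe 1 and the last pipe of one MEC spilled into the next. A standalone demonstration, with CIK_PIPE_PER_MEC taken as 4 as in radeon_kfd.c:

#include <stdint.h>
#include <stdio.h>

#define CIK_PIPE_PER_MEC 4   /* as defined in radeon_kfd.c */

int main(void)
{
	uint32_t pipe_id = 3;         /* last pipe of the first MEC */

	/* Old form: the pre-increment bumps pipe_id before the division,
	 * and the modulo on the next line then sees the bumped value too. */
	uint32_t old_id   = pipe_id;
	uint32_t mec_old  = (++old_id / CIK_PIPE_PER_MEC) + 1;  /* 2 */
	uint32_t pipe_old = (old_id % CIK_PIPE_PER_MEC);        /* 0 */

	/* Fixed form: both values derive from the caller's pipe_id. */
	uint32_t mec_new  = (pipe_id / CIK_PIPE_PER_MEC) + 1;   /* 1 */
	uint32_t pipe_new = (pipe_id % CIK_PIPE_PER_MEC);       /* 3 */

	printf("old: MEC %u pipe %u, fixed: MEC %u pipe %u\n",
	       mec_old, pipe_old, mec_new, pipe_new);
	return 0;
}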
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index cde48c42b30a..06d2246d07f1 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c | |||
@@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr) | |||
587 | uint64_t result; | 587 | uint64_t result; |
588 | 588 | ||
589 | /* page table offset */ | 589 | /* page table offset */ |
590 | result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; | 590 | result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT]; |
591 | 591 | result &= ~RADEON_GPU_PAGE_MASK; | |
592 | /* in case cpu page size != gpu page size*/ | ||
593 | result |= addr & (~PAGE_MASK); | ||
594 | 592 | ||
595 | return result; | 593 | return result; |
596 | } | 594 | } |
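
With pages_entry[] holding encoded PTEs rather than raw DMA addresses, radeon_vm_map_gart() above switches to GPU-page indexing and masks off the low flag bits instead of OR-ing in a CPU sub-page offset; this is also why the explicit 0xFFFFFFFFFFFFF000 mask in si_dma_vm_write_pages() further down becomes redundant. A compact sketch of the new lookup, assuming 4 KiB GPU pages:

#include <stdint.h>
#include <stdio.h>

#define RADEON_GPU_PAGE_SHIFT 12         /* 4 KiB GPU pages assumed */
#define RADEON_GPU_PAGE_MASK  0xfffULL   /* page size - 1 */

int main(void)
{
	/* One encoded entry: page address 0x123456000 plus low flag bits. */
	uint64_t pages_entry[1] = { 0x123456000ULL | 0x3 };
	uint64_t addr = 0x0;                 /* offset into GART space */

	uint64_t result = pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
	result &= ~RADEON_GPU_PAGE_MASK;     /* strip the encoded flag bits */

	printf("0x%llx\n", (unsigned long long)result); /* 0x123456000 */
	return 0;
}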
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index c5799f16aa4b..34e3235f41d2 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev) | |||
212 | #define RS400_PTE_WRITEABLE (1 << 2) | 212 | #define RS400_PTE_WRITEABLE (1 << 2) |
213 | #define RS400_PTE_READABLE (1 << 3) | 213 | #define RS400_PTE_READABLE (1 << 3) |
214 | 214 | ||
215 | void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, | 215 | uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags) |
216 | uint64_t addr, uint32_t flags) | ||
217 | { | 216 | { |
218 | uint32_t entry; | 217 | uint32_t entry; |
219 | u32 *gtt = rdev->gart.ptr; | ||
220 | 218 | ||
221 | entry = (lower_32_bits(addr) & PAGE_MASK) | | 219 | entry = (lower_32_bits(addr) & PAGE_MASK) | |
222 | ((upper_32_bits(addr) & 0xff) << 4); | 220 | ((upper_32_bits(addr) & 0xff) << 4); |
@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, | |||
226 | entry |= RS400_PTE_WRITEABLE; | 224 | entry |= RS400_PTE_WRITEABLE; |
227 | if (!(flags & RADEON_GART_PAGE_SNOOP)) | 225 | if (!(flags & RADEON_GART_PAGE_SNOOP)) |
228 | entry |= RS400_PTE_UNSNOOPED; | 226 | entry |= RS400_PTE_UNSNOOPED; |
229 | entry = cpu_to_le32(entry); | 227 | return entry; |
230 | gtt[i] = entry; | 228 | } |
229 | |||
230 | void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, | ||
231 | uint64_t entry) | ||
232 | { | ||
233 | u32 *gtt = rdev->gart.ptr; | ||
234 | gtt[i] = cpu_to_le32(lower_32_bits(entry)); | ||
231 | } | 235 | } |
232 | 236 | ||
233 | int rs400_mc_wait_for_idle(struct radeon_device *rdev) | 237 | int rs400_mc_wait_for_idle(struct radeon_device *rdev) |
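
rs400_gart_get_page_entry() is now the pure encoding half: it folds the page address and the GART flags into the 32-bit PTE layout, while rs400_gart_set_page() merely stores a precomputed value. A self-contained sketch of the encoding; the GART_PAGE_* flag values below are illustrative placeholders, not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

#define RS400_PTE_UNSNOOPED  (1 << 0)
#define RS400_PTE_WRITEABLE  (1 << 2)
#define RS400_PTE_READABLE   (1 << 3)
#define GART_PAGE_READ   (1 << 0)    /* illustrative flag values */
#define GART_PAGE_WRITE  (1 << 1)
#define GART_PAGE_SNOOP  (1 << 2)

static uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
	uint32_t entry;

	/* 4 KiB pages assumed: low 12 bits dropped, address bits 32..39
	 * packed into entry bits 4..11, as in the hunk above. */
	entry = ((uint32_t)addr & ~0xfffu) |
		(((uint32_t)(addr >> 32) & 0xff) << 4);
	if (flags & GART_PAGE_READ)
		entry |= RS400_PTE_READABLE;
	if (flags & GART_PAGE_WRITE)
		entry |= RS400_PTE_WRITEABLE;
	if (!(flags & GART_PAGE_SNOOP))
		entry |= RS400_PTE_UNSNOOPED;
	return entry;
}

int main(void)
{
	uint64_t e = rs400_gart_get_page_entry(0x123456000ULL,
			GART_PAGE_READ | GART_PAGE_WRITE | GART_PAGE_SNOOP);
	printf("entry = 0x%llx\n", (unsigned long long)e); /* 0x2345601c */
	return 0;
}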
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 9acb1c3c005b..74bce91aecc1 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev) | |||
625 | radeon_gart_table_vram_free(rdev); | 625 | radeon_gart_table_vram_free(rdev); |
626 | } | 626 | } |
627 | 627 | ||
628 | void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, | 628 | uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags) |
629 | uint64_t addr, uint32_t flags) | ||
630 | { | 629 | { |
631 | void __iomem *ptr = (void *)rdev->gart.ptr; | ||
632 | |||
633 | addr = addr & 0xFFFFFFFFFFFFF000ULL; | 630 | addr = addr & 0xFFFFFFFFFFFFF000ULL; |
634 | addr |= R600_PTE_SYSTEM; | 631 | addr |= R600_PTE_SYSTEM; |
635 | if (flags & RADEON_GART_PAGE_VALID) | 632 | if (flags & RADEON_GART_PAGE_VALID) |
@@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, | |||
640 | addr |= R600_PTE_WRITEABLE; | 637 | addr |= R600_PTE_WRITEABLE; |
641 | if (flags & RADEON_GART_PAGE_SNOOP) | 638 | if (flags & RADEON_GART_PAGE_SNOOP) |
642 | addr |= R600_PTE_SNOOPED; | 639 | addr |= R600_PTE_SNOOPED; |
643 | writeq(addr, ptr + (i * 8)); | 640 | return addr; |
641 | } | ||
642 | |||
643 | void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, | ||
644 | uint64_t entry) | ||
645 | { | ||
646 | void __iomem *ptr = (void *)rdev->gart.ptr; | ||
647 | writeq(entry, ptr + (i * 8)); | ||
644 | } | 648 | } |
645 | 649 | ||
646 | int rs600_irq_set(struct radeon_device *rdev) | 650 | int rs600_irq_set(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index aa7b872b2c43..83207929fc62 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c | |||
@@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev, | |||
123 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { | 123 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
124 | if (flags & R600_PTE_SYSTEM) { | 124 | if (flags & R600_PTE_SYSTEM) { |
125 | value = radeon_vm_map_gart(rdev, addr); | 125 | value = radeon_vm_map_gart(rdev, addr); |
126 | value &= 0xFFFFFFFFFFFFF000ULL; | ||
127 | } else if (flags & R600_PTE_VALID) { | 126 | } else if (flags & R600_PTE_VALID) { |
128 | value = addr; | 127 | value = addr; |
129 | } else { | 128 | } else { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 7b5d22110f25..6c6b655defcf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv, | |||
406 | if (unlikely(ret != 0)) | 406 | if (unlikely(ret != 0)) |
407 | --dev_priv->num_3d_resources; | 407 | --dev_priv->num_3d_resources; |
408 | } else if (unhide_svga) { | 408 | } else if (unhide_svga) { |
409 | mutex_lock(&dev_priv->hw_mutex); | ||
410 | vmw_write(dev_priv, SVGA_REG_ENABLE, | 409 | vmw_write(dev_priv, SVGA_REG_ENABLE, |
411 | vmw_read(dev_priv, SVGA_REG_ENABLE) & | 410 | vmw_read(dev_priv, SVGA_REG_ENABLE) & |
412 | ~SVGA_REG_ENABLE_HIDE); | 411 | ~SVGA_REG_ENABLE_HIDE); |
413 | mutex_unlock(&dev_priv->hw_mutex); | ||
414 | } | 412 | } |
415 | 413 | ||
416 | mutex_unlock(&dev_priv->release_mutex); | 414 | mutex_unlock(&dev_priv->release_mutex); |
@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv, | |||
433 | mutex_lock(&dev_priv->release_mutex); | 431 | mutex_lock(&dev_priv->release_mutex); |
434 | if (unlikely(--dev_priv->num_3d_resources == 0)) | 432 | if (unlikely(--dev_priv->num_3d_resources == 0)) |
435 | vmw_release_device(dev_priv); | 433 | vmw_release_device(dev_priv); |
436 | else if (hide_svga) { | 434 | else if (hide_svga) |
437 | mutex_lock(&dev_priv->hw_mutex); | ||
438 | vmw_write(dev_priv, SVGA_REG_ENABLE, | 435 | vmw_write(dev_priv, SVGA_REG_ENABLE, |
439 | vmw_read(dev_priv, SVGA_REG_ENABLE) | | 436 | vmw_read(dev_priv, SVGA_REG_ENABLE) | |
440 | SVGA_REG_ENABLE_HIDE); | 437 | SVGA_REG_ENABLE_HIDE); |
441 | mutex_unlock(&dev_priv->hw_mutex); | ||
442 | } | ||
443 | 438 | ||
444 | n3d = (int32_t) dev_priv->num_3d_resources; | 439 | n3d = (int32_t) dev_priv->num_3d_resources; |
445 | mutex_unlock(&dev_priv->release_mutex); | 440 | mutex_unlock(&dev_priv->release_mutex); |
@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
600 | dev_priv->dev = dev; | 595 | dev_priv->dev = dev; |
601 | dev_priv->vmw_chipset = chipset; | 596 | dev_priv->vmw_chipset = chipset; |
602 | dev_priv->last_read_seqno = (uint32_t) -100; | 597 | dev_priv->last_read_seqno = (uint32_t) -100; |
603 | mutex_init(&dev_priv->hw_mutex); | ||
604 | mutex_init(&dev_priv->cmdbuf_mutex); | 598 | mutex_init(&dev_priv->cmdbuf_mutex); |
605 | mutex_init(&dev_priv->release_mutex); | 599 | mutex_init(&dev_priv->release_mutex); |
606 | mutex_init(&dev_priv->binding_mutex); | 600 | mutex_init(&dev_priv->binding_mutex); |
607 | rwlock_init(&dev_priv->resource_lock); | 601 | rwlock_init(&dev_priv->resource_lock); |
608 | ttm_lock_init(&dev_priv->reservation_sem); | 602 | ttm_lock_init(&dev_priv->reservation_sem); |
603 | spin_lock_init(&dev_priv->hw_lock); | ||
604 | spin_lock_init(&dev_priv->waiter_lock); | ||
605 | spin_lock_init(&dev_priv->cap_lock); | ||
609 | 606 | ||
610 | for (i = vmw_res_context; i < vmw_res_max; ++i) { | 607 | for (i = vmw_res_context; i < vmw_res_max; ++i) { |
611 | idr_init(&dev_priv->res_idr[i]); | 608 | idr_init(&dev_priv->res_idr[i]); |
@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
626 | 623 | ||
627 | dev_priv->enable_fb = enable_fbdev; | 624 | dev_priv->enable_fb = enable_fbdev; |
628 | 625 | ||
629 | mutex_lock(&dev_priv->hw_mutex); | ||
630 | |||
631 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); | 626 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); |
632 | svga_id = vmw_read(dev_priv, SVGA_REG_ID); | 627 | svga_id = vmw_read(dev_priv, SVGA_REG_ID); |
633 | if (svga_id != SVGA_ID_2) { | 628 | if (svga_id != SVGA_ID_2) { |
634 | ret = -ENOSYS; | 629 | ret = -ENOSYS; |
635 | DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); | 630 | DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); |
636 | mutex_unlock(&dev_priv->hw_mutex); | ||
637 | goto out_err0; | 631 | goto out_err0; |
638 | } | 632 | } |
639 | 633 | ||
@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
683 | dev_priv->prim_bb_mem = dev_priv->vram_size; | 677 | dev_priv->prim_bb_mem = dev_priv->vram_size; |
684 | 678 | ||
685 | ret = vmw_dma_masks(dev_priv); | 679 | ret = vmw_dma_masks(dev_priv); |
686 | if (unlikely(ret != 0)) { | 680 | if (unlikely(ret != 0)) |
687 | mutex_unlock(&dev_priv->hw_mutex); | ||
688 | goto out_err0; | 681 | goto out_err0; |
689 | } | ||
690 | 682 | ||
691 | /* | 683 | /* |
692 | * Limit back buffer size to VRAM size. Remove this once | 684 | * Limit back buffer size to VRAM size. Remove this once |
@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
695 | if (dev_priv->prim_bb_mem > dev_priv->vram_size) | 687 | if (dev_priv->prim_bb_mem > dev_priv->vram_size) |
696 | dev_priv->prim_bb_mem = dev_priv->vram_size; | 688 | dev_priv->prim_bb_mem = dev_priv->vram_size; |
697 | 689 | ||
698 | mutex_unlock(&dev_priv->hw_mutex); | ||
699 | |||
700 | vmw_print_capabilities(dev_priv->capabilities); | 690 | vmw_print_capabilities(dev_priv->capabilities); |
701 | 691 | ||
702 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { | 692 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
@@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev, | |||
1160 | if (unlikely(ret != 0)) | 1150 | if (unlikely(ret != 0)) |
1161 | return ret; | 1151 | return ret; |
1162 | vmw_kms_save_vga(dev_priv); | 1152 | vmw_kms_save_vga(dev_priv); |
1163 | mutex_lock(&dev_priv->hw_mutex); | ||
1164 | vmw_write(dev_priv, SVGA_REG_TRACES, 0); | 1153 | vmw_write(dev_priv, SVGA_REG_TRACES, 0); |
1165 | mutex_unlock(&dev_priv->hw_mutex); | ||
1166 | } | 1154 | } |
1167 | 1155 | ||
1168 | if (active) { | 1156 | if (active) { |
@@ -1196,9 +1184,7 @@ out_no_active_lock: | |||
1196 | if (!dev_priv->enable_fb) { | 1184 | if (!dev_priv->enable_fb) { |
1197 | vmw_kms_restore_vga(dev_priv); | 1185 | vmw_kms_restore_vga(dev_priv); |
1198 | vmw_3d_resource_dec(dev_priv, true); | 1186 | vmw_3d_resource_dec(dev_priv, true); |
1199 | mutex_lock(&dev_priv->hw_mutex); | ||
1200 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | 1187 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); |
1201 | mutex_unlock(&dev_priv->hw_mutex); | ||
1202 | } | 1188 | } |
1203 | return ret; | 1189 | return ret; |
1204 | } | 1190 | } |
@@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev, | |||
1233 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); | 1219 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); |
1234 | vmw_kms_restore_vga(dev_priv); | 1220 | vmw_kms_restore_vga(dev_priv); |
1235 | vmw_3d_resource_dec(dev_priv, true); | 1221 | vmw_3d_resource_dec(dev_priv, true); |
1236 | mutex_lock(&dev_priv->hw_mutex); | ||
1237 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | 1222 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); |
1238 | mutex_unlock(&dev_priv->hw_mutex); | ||
1239 | } | 1223 | } |
1240 | 1224 | ||
1241 | dev_priv->active_master = &dev_priv->fbdev_master; | 1225 | dev_priv->active_master = &dev_priv->fbdev_master; |
@@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev) | |||
1367 | struct drm_device *dev = pci_get_drvdata(pdev); | 1351 | struct drm_device *dev = pci_get_drvdata(pdev); |
1368 | struct vmw_private *dev_priv = vmw_priv(dev); | 1352 | struct vmw_private *dev_priv = vmw_priv(dev); |
1369 | 1353 | ||
1370 | mutex_lock(&dev_priv->hw_mutex); | ||
1371 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); | 1354 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); |
1372 | (void) vmw_read(dev_priv, SVGA_REG_ID); | 1355 | (void) vmw_read(dev_priv, SVGA_REG_ID); |
1373 | mutex_unlock(&dev_priv->hw_mutex); | ||
1374 | 1356 | ||
1375 | /** | 1357 | /** |
1376 | * Reclaim 3d reference held by fbdev and potentially | 1358 | * Reclaim 3d reference held by fbdev and potentially |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 4ee799b43d5d..d26a6daa9719 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -399,7 +399,8 @@ struct vmw_private { | |||
399 | uint32_t memory_size; | 399 | uint32_t memory_size; |
400 | bool has_gmr; | 400 | bool has_gmr; |
401 | bool has_mob; | 401 | bool has_mob; |
402 | struct mutex hw_mutex; | 402 | spinlock_t hw_lock; |
403 | spinlock_t cap_lock; | ||
403 | 404 | ||
404 | /* | 405 | /* |
405 | * VGA registers. | 406 | * VGA registers. |
@@ -449,8 +450,9 @@ struct vmw_private { | |||
449 | atomic_t marker_seq; | 450 | atomic_t marker_seq; |
450 | wait_queue_head_t fence_queue; | 451 | wait_queue_head_t fence_queue; |
451 | wait_queue_head_t fifo_queue; | 452 | wait_queue_head_t fifo_queue; |
452 | int fence_queue_waiters; /* Protected by hw_mutex */ | 453 | spinlock_t waiter_lock; |
453 | int goal_queue_waiters; /* Protected by hw_mutex */ | 454 | int fence_queue_waiters; /* Protected by waiter_lock */ |
455 | int goal_queue_waiters; /* Protected by waiter_lock */ | ||
454 | atomic_t fifo_queue_waiters; | 456 | atomic_t fifo_queue_waiters; |
455 | uint32_t last_read_seqno; | 457 | uint32_t last_read_seqno; |
456 | spinlock_t irq_lock; | 458 | spinlock_t irq_lock; |
@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master) | |||
553 | return (struct vmw_master *) master->driver_priv; | 555 | return (struct vmw_master *) master->driver_priv; |
554 | } | 556 | } |
555 | 557 | ||
558 | /* | ||
559 | * The locking here is fine-grained, so that it is performed once | ||
560 | * for every read and write operation. This is of course costly, but we | ||
561 | * don't perform much register access in the timing-critical paths anyway. | ||
562 | * Instead we have the extra benefit of being sure that we don't forget | ||
563 | * the hw lock around register accesses. | ||
564 | */ | ||
556 | static inline void vmw_write(struct vmw_private *dev_priv, | 565 | static inline void vmw_write(struct vmw_private *dev_priv, |
557 | unsigned int offset, uint32_t value) | 566 | unsigned int offset, uint32_t value) |
558 | { | 567 | { |
568 | unsigned long irq_flags; | ||
569 | |||
570 | spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); | ||
559 | outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); | 571 | outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); |
560 | outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); | 572 | outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); |
573 | spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); | ||
561 | } | 574 | } |
562 | 575 | ||
563 | static inline uint32_t vmw_read(struct vmw_private *dev_priv, | 576 | static inline uint32_t vmw_read(struct vmw_private *dev_priv, |
564 | unsigned int offset) | 577 | unsigned int offset) |
565 | { | 578 | { |
566 | uint32_t val; | 579 | unsigned long irq_flags; |
580 | u32 val; | ||
567 | 581 | ||
582 | spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); | ||
568 | outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); | 583 | outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); |
569 | val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); | 584 | val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); |
585 | spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); | ||
586 | |||
570 | return val; | 587 | return val; |
571 | } | 588 | } |
572 | 589 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index b7594cb758af..945f1e0dad92 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | |||
@@ -35,7 +35,7 @@ struct vmw_fence_manager { | |||
35 | struct vmw_private *dev_priv; | 35 | struct vmw_private *dev_priv; |
36 | spinlock_t lock; | 36 | spinlock_t lock; |
37 | struct list_head fence_list; | 37 | struct list_head fence_list; |
38 | struct work_struct work, ping_work; | 38 | struct work_struct work; |
39 | u32 user_fence_size; | 39 | u32 user_fence_size; |
40 | u32 fence_size; | 40 | u32 fence_size; |
41 | u32 event_fence_action_size; | 41 | u32 event_fence_action_size; |
@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f) | |||
134 | return "svga"; | 134 | return "svga"; |
135 | } | 135 | } |
136 | 136 | ||
137 | static void vmw_fence_ping_func(struct work_struct *work) | ||
138 | { | ||
139 | struct vmw_fence_manager *fman = | ||
140 | container_of(work, struct vmw_fence_manager, ping_work); | ||
141 | |||
142 | vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC); | ||
143 | } | ||
144 | |||
145 | static bool vmw_fence_enable_signaling(struct fence *f) | 137 | static bool vmw_fence_enable_signaling(struct fence *f) |
146 | { | 138 | { |
147 | struct vmw_fence_obj *fence = | 139 | struct vmw_fence_obj *fence = |
@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f) | |||
155 | if (seqno - fence->base.seqno < VMW_FENCE_WRAP) | 147 | if (seqno - fence->base.seqno < VMW_FENCE_WRAP) |
156 | return false; | 148 | return false; |
157 | 149 | ||
158 | if (mutex_trylock(&dev_priv->hw_mutex)) { | 150 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); |
159 | vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC); | ||
160 | mutex_unlock(&dev_priv->hw_mutex); | ||
161 | } else | ||
162 | schedule_work(&fman->ping_work); | ||
163 | 151 | ||
164 | return true; | 152 | return true; |
165 | } | 153 | } |
@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) | |||
305 | INIT_LIST_HEAD(&fman->fence_list); | 293 | INIT_LIST_HEAD(&fman->fence_list); |
306 | INIT_LIST_HEAD(&fman->cleanup_list); | 294 | INIT_LIST_HEAD(&fman->cleanup_list); |
307 | INIT_WORK(&fman->work, &vmw_fence_work_func); | 295 | INIT_WORK(&fman->work, &vmw_fence_work_func); |
308 | INIT_WORK(&fman->ping_work, &vmw_fence_ping_func); | ||
309 | fman->fifo_down = true; | 296 | fman->fifo_down = true; |
310 | fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); | 297 | fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); |
311 | fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); | 298 | fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); |
@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman) | |||
323 | bool lists_empty; | 310 | bool lists_empty; |
324 | 311 | ||
325 | (void) cancel_work_sync(&fman->work); | 312 | (void) cancel_work_sync(&fman->work); |
326 | (void) cancel_work_sync(&fman->ping_work); | ||
327 | 313 | ||
328 | spin_lock_irqsave(&fman->lock, irq_flags); | 314 | spin_lock_irqsave(&fman->lock, irq_flags); |
329 | lists_empty = list_empty(&fman->fence_list) && | 315 | lists_empty = list_empty(&fman->fence_list) && |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 09e10aefcd8e..39f2b03888e7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) | |||
44 | if (!dev_priv->has_mob) | 44 | if (!dev_priv->has_mob) |
45 | return false; | 45 | return false; |
46 | 46 | ||
47 | mutex_lock(&dev_priv->hw_mutex); | 47 | spin_lock(&dev_priv->cap_lock); |
48 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); | 48 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); |
49 | result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | 49 | result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); |
50 | mutex_unlock(&dev_priv->hw_mutex); | 50 | spin_unlock(&dev_priv->cap_lock); |
51 | 51 | ||
52 | return (result != 0); | 52 | return (result != 0); |
53 | } | 53 | } |
@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
120 | DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); | 120 | DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); |
121 | DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); | 121 | DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); |
122 | 122 | ||
123 | mutex_lock(&dev_priv->hw_mutex); | ||
124 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); | 123 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); |
125 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); | 124 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); |
126 | dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); | 125 | dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); |
@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
143 | mb(); | 142 | mb(); |
144 | 143 | ||
145 | vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); | 144 | vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); |
146 | mutex_unlock(&dev_priv->hw_mutex); | ||
147 | 145 | ||
148 | max = ioread32(fifo_mem + SVGA_FIFO_MAX); | 146 | max = ioread32(fifo_mem + SVGA_FIFO_MAX); |
149 | min = ioread32(fifo_mem + SVGA_FIFO_MIN); | 147 | min = ioread32(fifo_mem + SVGA_FIFO_MIN); |
@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
160 | return vmw_fifo_send_fence(dev_priv, &dummy); | 158 | return vmw_fifo_send_fence(dev_priv, &dummy); |
161 | } | 159 | } |
162 | 160 | ||
163 | void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason) | 161 | void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) |
164 | { | 162 | { |
165 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | 163 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; |
164 | static DEFINE_SPINLOCK(ping_lock); | ||
165 | unsigned long irq_flags; | ||
166 | 166 | ||
167 | /* | ||
168 | * The ping_lock is needed because we don't have an atomic | ||
169 | * test-and-set of the SVGA_FIFO_BUSY register. | ||
170 | */ | ||
171 | spin_lock_irqsave(&ping_lock, irq_flags); | ||
167 | if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { | 172 | if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { |
168 | iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); | 173 | iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); |
169 | vmw_write(dev_priv, SVGA_REG_SYNC, reason); | 174 | vmw_write(dev_priv, SVGA_REG_SYNC, reason); |
170 | } | 175 | } |
171 | } | 176 | spin_unlock_irqrestore(&ping_lock, irq_flags); |
172 | |||
173 | void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) | ||
174 | { | ||
175 | mutex_lock(&dev_priv->hw_mutex); | ||
176 | |||
177 | vmw_fifo_ping_host_locked(dev_priv, reason); | ||
178 | |||
179 | mutex_unlock(&dev_priv->hw_mutex); | ||
180 | } | 177 | } |
181 | 178 | ||
182 | void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | 179 | void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) |
183 | { | 180 | { |
184 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | 181 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; |
185 | 182 | ||
186 | mutex_lock(&dev_priv->hw_mutex); | ||
187 | |||
188 | vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); | 183 | vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); |
189 | while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) | 184 | while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) |
190 | ; | 185 | ; |
@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
198 | vmw_write(dev_priv, SVGA_REG_TRACES, | 193 | vmw_write(dev_priv, SVGA_REG_TRACES, |
199 | dev_priv->traces_state); | 194 | dev_priv->traces_state); |
200 | 195 | ||
201 | mutex_unlock(&dev_priv->hw_mutex); | ||
202 | vmw_marker_queue_takedown(&fifo->marker_queue); | 196 | vmw_marker_queue_takedown(&fifo->marker_queue); |
203 | 197 | ||
204 | if (likely(fifo->static_buffer != NULL)) { | 198 | if (likely(fifo->static_buffer != NULL)) { |
@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, | |||
271 | return vmw_fifo_wait_noirq(dev_priv, bytes, | 265 | return vmw_fifo_wait_noirq(dev_priv, bytes, |
272 | interruptible, timeout); | 266 | interruptible, timeout); |
273 | 267 | ||
274 | mutex_lock(&dev_priv->hw_mutex); | 268 | spin_lock(&dev_priv->waiter_lock); |
275 | if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { | 269 | if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { |
276 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | 270 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); |
277 | outl(SVGA_IRQFLAG_FIFO_PROGRESS, | 271 | outl(SVGA_IRQFLAG_FIFO_PROGRESS, |
@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, | |||
280 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 274 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
281 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 275 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
282 | } | 276 | } |
283 | mutex_unlock(&dev_priv->hw_mutex); | 277 | spin_unlock(&dev_priv->waiter_lock); |
284 | 278 | ||
285 | if (interruptible) | 279 | if (interruptible) |
286 | ret = wait_event_interruptible_timeout | 280 | ret = wait_event_interruptible_timeout |
@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, | |||
296 | else if (likely(ret > 0)) | 290 | else if (likely(ret > 0)) |
297 | ret = 0; | 291 | ret = 0; |
298 | 292 | ||
299 | mutex_lock(&dev_priv->hw_mutex); | 293 | spin_lock(&dev_priv->waiter_lock); |
300 | if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { | 294 | if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { |
301 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | 295 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); |
302 | dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS; | 296 | dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS; |
303 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 297 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
304 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 298 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
305 | } | 299 | } |
306 | mutex_unlock(&dev_priv->hw_mutex); | 300 | spin_unlock(&dev_priv->waiter_lock); |
307 | 301 | ||
308 | return ret; | 302 | return ret; |
309 | } | 303 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 37881ecf5d7a..69c8ce23123c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce, | |||
135 | (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); | 135 | (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); |
136 | compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; | 136 | compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; |
137 | 137 | ||
138 | mutex_lock(&dev_priv->hw_mutex); | 138 | spin_lock(&dev_priv->cap_lock); |
139 | for (i = 0; i < max_size; ++i) { | 139 | for (i = 0; i < max_size; ++i) { |
140 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); | 140 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); |
141 | compat_cap->pairs[i][0] = i; | 141 | compat_cap->pairs[i][0] = i; |
142 | compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | 142 | compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); |
143 | } | 143 | } |
144 | mutex_unlock(&dev_priv->hw_mutex); | 144 | spin_unlock(&dev_priv->cap_lock); |
145 | 145 | ||
146 | return 0; | 146 | return 0; |
147 | } | 147 | } |
@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | |||
191 | if (num > SVGA3D_DEVCAP_MAX) | 191 | if (num > SVGA3D_DEVCAP_MAX) |
192 | num = SVGA3D_DEVCAP_MAX; | 192 | num = SVGA3D_DEVCAP_MAX; |
193 | 193 | ||
194 | mutex_lock(&dev_priv->hw_mutex); | 194 | spin_lock(&dev_priv->cap_lock); |
195 | for (i = 0; i < num; ++i) { | 195 | for (i = 0; i < num; ++i) { |
196 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); | 196 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); |
197 | *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | 197 | *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); |
198 | } | 198 | } |
199 | mutex_unlock(&dev_priv->hw_mutex); | 199 | spin_unlock(&dev_priv->cap_lock); |
200 | } else if (gb_objects) { | 200 | } else if (gb_objects) { |
201 | ret = vmw_fill_compat_cap(dev_priv, bounce, size); | 201 | ret = vmw_fill_compat_cap(dev_priv, bounce, size); |
202 | if (unlikely(ret != 0)) | 202 | if (unlikely(ret != 0)) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 0c423766c441..9fe9827ee499 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | |||
@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg) | |||
62 | 62 | ||
63 | static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) | 63 | static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) |
64 | { | 64 | { |
65 | uint32_t busy; | ||
66 | 65 | ||
67 | mutex_lock(&dev_priv->hw_mutex); | 66 | return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0); |
68 | busy = vmw_read(dev_priv, SVGA_REG_BUSY); | ||
69 | mutex_unlock(&dev_priv->hw_mutex); | ||
70 | |||
71 | return (busy == 0); | ||
72 | } | 67 | } |
73 | 68 | ||
74 | void vmw_update_seqno(struct vmw_private *dev_priv, | 69 | void vmw_update_seqno(struct vmw_private *dev_priv, |
@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, | |||
184 | 179 | ||
185 | void vmw_seqno_waiter_add(struct vmw_private *dev_priv) | 180 | void vmw_seqno_waiter_add(struct vmw_private *dev_priv) |
186 | { | 181 | { |
187 | mutex_lock(&dev_priv->hw_mutex); | 182 | spin_lock(&dev_priv->waiter_lock); |
188 | if (dev_priv->fence_queue_waiters++ == 0) { | 183 | if (dev_priv->fence_queue_waiters++ == 0) { |
189 | unsigned long irq_flags; | 184 | unsigned long irq_flags; |
190 | 185 | ||
@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv) | |||
195 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 190 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
196 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 191 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
197 | } | 192 | } |
198 | mutex_unlock(&dev_priv->hw_mutex); | 193 | spin_unlock(&dev_priv->waiter_lock); |
199 | } | 194 | } |
200 | 195 | ||
201 | void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) | 196 | void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) |
202 | { | 197 | { |
203 | mutex_lock(&dev_priv->hw_mutex); | 198 | spin_lock(&dev_priv->waiter_lock); |
204 | if (--dev_priv->fence_queue_waiters == 0) { | 199 | if (--dev_priv->fence_queue_waiters == 0) { |
205 | unsigned long irq_flags; | 200 | unsigned long irq_flags; |
206 | 201 | ||
@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) | |||
209 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 204 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
210 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 205 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
211 | } | 206 | } |
212 | mutex_unlock(&dev_priv->hw_mutex); | 207 | spin_unlock(&dev_priv->waiter_lock); |
213 | } | 208 | } |
214 | 209 | ||
215 | 210 | ||
216 | void vmw_goal_waiter_add(struct vmw_private *dev_priv) | 211 | void vmw_goal_waiter_add(struct vmw_private *dev_priv) |
217 | { | 212 | { |
218 | mutex_lock(&dev_priv->hw_mutex); | 213 | spin_lock(&dev_priv->waiter_lock); |
219 | if (dev_priv->goal_queue_waiters++ == 0) { | 214 | if (dev_priv->goal_queue_waiters++ == 0) { |
220 | unsigned long irq_flags; | 215 | unsigned long irq_flags; |
221 | 216 | ||
@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv) | |||
226 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 221 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
227 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 222 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
228 | } | 223 | } |
229 | mutex_unlock(&dev_priv->hw_mutex); | 224 | spin_unlock(&dev_priv->waiter_lock); |
230 | } | 225 | } |
231 | 226 | ||
232 | void vmw_goal_waiter_remove(struct vmw_private *dev_priv) | 227 | void vmw_goal_waiter_remove(struct vmw_private *dev_priv) |
233 | { | 228 | { |
234 | mutex_lock(&dev_priv->hw_mutex); | 229 | spin_lock(&dev_priv->waiter_lock); |
235 | if (--dev_priv->goal_queue_waiters == 0) { | 230 | if (--dev_priv->goal_queue_waiters == 0) { |
236 | unsigned long irq_flags; | 231 | unsigned long irq_flags; |
237 | 232 | ||
@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv) | |||
240 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 235 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
241 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 236 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
242 | } | 237 | } |
243 | mutex_unlock(&dev_priv->hw_mutex); | 238 | spin_unlock(&dev_priv->waiter_lock); |
244 | } | 239 | } |
245 | 240 | ||
246 | int vmw_wait_seqno(struct vmw_private *dev_priv, | 241 | int vmw_wait_seqno(struct vmw_private *dev_priv, |
@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev) | |||
315 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) | 310 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) |
316 | return; | 311 | return; |
317 | 312 | ||
318 | mutex_lock(&dev_priv->hw_mutex); | ||
319 | vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); | 313 | vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); |
320 | mutex_unlock(&dev_priv->hw_mutex); | ||
321 | 314 | ||
322 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | 315 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); |
323 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | 316 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); |
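
The waiter add/remove helpers above all share one shape, now serialized by waiter_lock instead of the heavyweight hw_mutex: the first waiter arms the interrupt source and the last one disarms it. A stripped-down sketch of that reference-count pattern; the interrupt stubs are illustrative:

#include <stdio.h>

static int irq_enabled;                      /* stand-in for the IRQ mask bit */
static void irq_on(void)  { irq_enabled = 1; }
static void irq_off(void) { irq_enabled = 0; }

static int fence_queue_waiters;  /* protected by waiter_lock in the driver */

static void seqno_waiter_add(void)
{
	/* spin_lock(&waiter_lock) in the real code */
	if (fence_queue_waiters++ == 0)
		irq_on();                /* first waiter arms the interrupt */
}

static void seqno_waiter_remove(void)
{
	if (--fence_queue_waiters == 0)
		irq_off();               /* last waiter disarms it */
}

int main(void)
{
	seqno_waiter_add();
	seqno_waiter_add();
	seqno_waiter_remove();
	printf("waiters=%d irq=%d\n", fence_queue_waiters, irq_enabled);
	seqno_waiter_remove();
	printf("waiters=%d irq=%d\n", fence_queue_waiters, irq_enabled);
	return 0;
}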
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 3725b521d931..8725b79e7847 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force) | |||
1828 | struct vmw_private *dev_priv = vmw_priv(dev); | 1828 | struct vmw_private *dev_priv = vmw_priv(dev); |
1829 | struct vmw_display_unit *du = vmw_connector_to_du(connector); | 1829 | struct vmw_display_unit *du = vmw_connector_to_du(connector); |
1830 | 1830 | ||
1831 | mutex_lock(&dev_priv->hw_mutex); | ||
1832 | num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); | 1831 | num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); |
1833 | mutex_unlock(&dev_priv->hw_mutex); | ||
1834 | 1832 | ||
1835 | return ((vmw_connector_to_du(connector)->unit < num_displays && | 1833 | return ((vmw_connector_to_du(connector)->unit < num_displays && |
1836 | du->pref_active) ? | 1834 | du->pref_active) ? |
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 31e8308ba899..ab838d9e28b6 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -881,6 +881,7 @@ config I2C_XLR | |||
881 | config I2C_RCAR | 881 | config I2C_RCAR |
882 | tristate "Renesas R-Car I2C Controller" | 882 | tristate "Renesas R-Car I2C Controller" |
883 | depends on ARCH_SHMOBILE || COMPILE_TEST | 883 | depends on ARCH_SHMOBILE || COMPILE_TEST |
884 | select I2C_SLAVE | ||
884 | help | 885 | help |
885 | If you say yes to this option, support will be included for the | 886 | If you say yes to this option, support will be included for the |
886 | R-Car I2C controller. | 887 | R-Car I2C controller. |
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index bff20a589621..958c8db4ec30 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c | |||
@@ -785,14 +785,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap, | |||
785 | int ret; | 785 | int ret; |
786 | 786 | ||
787 | pm_runtime_get_sync(&adap->dev); | 787 | pm_runtime_get_sync(&adap->dev); |
788 | clk_prepare_enable(i2c->clk); | 788 | ret = clk_enable(i2c->clk); |
789 | if (ret) | ||
790 | return ret; | ||
789 | 791 | ||
790 | for (retry = 0; retry < adap->retries; retry++) { | 792 | for (retry = 0; retry < adap->retries; retry++) { |
791 | 793 | ||
792 | ret = s3c24xx_i2c_doxfer(i2c, msgs, num); | 794 | ret = s3c24xx_i2c_doxfer(i2c, msgs, num); |
793 | 795 | ||
794 | if (ret != -EAGAIN) { | 796 | if (ret != -EAGAIN) { |
795 | clk_disable_unprepare(i2c->clk); | 797 | clk_disable(i2c->clk); |
796 | pm_runtime_put(&adap->dev); | 798 | pm_runtime_put(&adap->dev); |
797 | return ret; | 799 | return ret; |
798 | } | 800 | } |
@@ -802,7 +804,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap, | |||
802 | udelay(100); | 804 | udelay(100); |
803 | } | 805 | } |
804 | 806 | ||
805 | clk_disable_unprepare(i2c->clk); | 807 | clk_disable(i2c->clk); |
806 | pm_runtime_put(&adap->dev); | 808 | pm_runtime_put(&adap->dev); |
807 | return -EREMOTEIO; | 809 | return -EREMOTEIO; |
808 | } | 810 | } |
@@ -1197,7 +1199,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) | |||
1197 | 1199 | ||
1198 | clk_prepare_enable(i2c->clk); | 1200 | clk_prepare_enable(i2c->clk); |
1199 | ret = s3c24xx_i2c_init(i2c); | 1201 | ret = s3c24xx_i2c_init(i2c); |
1200 | clk_disable_unprepare(i2c->clk); | 1202 | clk_disable(i2c->clk); |
1201 | if (ret != 0) { | 1203 | if (ret != 0) { |
1202 | dev_err(&pdev->dev, "I2C controller init failed\n"); | 1204 | dev_err(&pdev->dev, "I2C controller init failed\n"); |
1203 | return ret; | 1205 | return ret; |
@@ -1210,6 +1212,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) | |||
1210 | i2c->irq = ret = platform_get_irq(pdev, 0); | 1212 | i2c->irq = ret = platform_get_irq(pdev, 0); |
1211 | if (ret <= 0) { | 1213 | if (ret <= 0) { |
1212 | dev_err(&pdev->dev, "cannot find IRQ\n"); | 1214 | dev_err(&pdev->dev, "cannot find IRQ\n"); |
1215 | clk_unprepare(i2c->clk); | ||
1213 | return ret; | 1216 | return ret; |
1214 | } | 1217 | } |
1215 | 1218 | ||
@@ -1218,6 +1221,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) | |||
1218 | 1221 | ||
1219 | if (ret != 0) { | 1222 | if (ret != 0) { |
1220 | dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq); | 1223 | dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq); |
1224 | clk_unprepare(i2c->clk); | ||
1221 | return ret; | 1225 | return ret; |
1222 | } | 1226 | } |
1223 | } | 1227 | } |
@@ -1225,6 +1229,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) | |||
1225 | ret = s3c24xx_i2c_register_cpufreq(i2c); | 1229 | ret = s3c24xx_i2c_register_cpufreq(i2c); |
1226 | if (ret < 0) { | 1230 | if (ret < 0) { |
1227 | dev_err(&pdev->dev, "failed to register cpufreq notifier\n"); | 1231 | dev_err(&pdev->dev, "failed to register cpufreq notifier\n"); |
1232 | clk_unprepare(i2c->clk); | ||
1228 | return ret; | 1233 | return ret; |
1229 | } | 1234 | } |
1230 | 1235 | ||
@@ -1241,6 +1246,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) | |||
1241 | if (ret < 0) { | 1246 | if (ret < 0) { |
1242 | dev_err(&pdev->dev, "failed to add bus to i2c core\n"); | 1247 | dev_err(&pdev->dev, "failed to add bus to i2c core\n"); |
1243 | s3c24xx_i2c_deregister_cpufreq(i2c); | 1248 | s3c24xx_i2c_deregister_cpufreq(i2c); |
1249 | clk_unprepare(i2c->clk); | ||
1244 | return ret; | 1250 | return ret; |
1245 | } | 1251 | } |
1246 | 1252 | ||
@@ -1262,6 +1268,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev) | |||
1262 | { | 1268 | { |
1263 | struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); | 1269 | struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); |
1264 | 1270 | ||
1271 | clk_unprepare(i2c->clk); | ||
1272 | |||
1265 | pm_runtime_disable(&i2c->adap.dev); | 1273 | pm_runtime_disable(&i2c->adap.dev); |
1266 | pm_runtime_disable(&pdev->dev); | 1274 | pm_runtime_disable(&pdev->dev); |
1267 | 1275 | ||
@@ -1293,13 +1301,16 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev) | |||
1293 | { | 1301 | { |
1294 | struct platform_device *pdev = to_platform_device(dev); | 1302 | struct platform_device *pdev = to_platform_device(dev); |
1295 | struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); | 1303 | struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); |
1304 | int ret; | ||
1296 | 1305 | ||
1297 | if (!IS_ERR(i2c->sysreg)) | 1306 | if (!IS_ERR(i2c->sysreg)) |
1298 | regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg); | 1307 | regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg); |
1299 | 1308 | ||
1300 | clk_prepare_enable(i2c->clk); | 1309 | ret = clk_enable(i2c->clk); |
1310 | if (ret) | ||
1311 | return ret; | ||
1301 | s3c24xx_i2c_init(i2c); | 1312 | s3c24xx_i2c_init(i2c); |
1302 | clk_disable_unprepare(i2c->clk); | 1313 | clk_disable(i2c->clk); |
1303 | i2c->suspended = 0; | 1314 | i2c->suspended = 0; |
1304 | 1315 | ||
1305 | return 0; | 1316 | return 0; |
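
The s3c2410 change moves the sleepable clk_prepare() step out of the transfer path: the clock is prepared once at probe (and unprepared on every probe error path and at remove), so s3c24xx_i2c_xfer() and the resume hook only pay for the lightweight clk_enable()/clk_disable() pair. As written, the early return on clk_enable() failure in the xfer path skips pm_runtime_put(), which looks like a leaked runtime-PM reference on that error path. A toy model of the prepare-once discipline, with stubs standing in for the common clk API:

#include <stdio.h>

/* Toy stand-ins for the common clk API, just enough to compile. */
struct clk { int prepared, enable_count; };
static int  clk_prepare(struct clk *c)   { c->prepared = 1; return 0; }
static void clk_unprepare(struct clk *c) { c->prepared = 0; }
static int  clk_enable(struct clk *c)
{ return c->prepared ? (c->enable_count++, 0) : -1; }
static void clk_disable(struct clk *c)   { c->enable_count--; }

int main(void)
{
	struct clk clk = { 0, 0 };

	/* probe(): the sleepable prepare happens exactly once. */
	clk_prepare(&clk);

	/* xfer(), possibly many times: only the cheap enable/disable. */
	for (int i = 0; i < 3; i++) {
		if (clk_enable(&clk))
			break;
		/* ... perform the I2C transfer ... */
		clk_disable(&clk);
	}

	/* remove() (and every probe error path): drop the prepare. */
	clk_unprepare(&clk);

	printf("enable_count=%d prepared=%d\n", clk.enable_count, clk.prepared);
	return 0;
}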
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 440d5dbc8b5f..007818b3e174 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c | |||
@@ -139,6 +139,7 @@ struct sh_mobile_i2c_data { | |||
139 | int pos; | 139 | int pos; |
140 | int sr; | 140 | int sr; |
141 | bool send_stop; | 141 | bool send_stop; |
142 | bool stop_after_dma; | ||
142 | 143 | ||
143 | struct resource *res; | 144 | struct resource *res; |
144 | struct dma_chan *dma_tx; | 145 | struct dma_chan *dma_tx; |
@@ -407,7 +408,7 @@ static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd) | |||
407 | 408 | ||
408 | if (pd->pos == pd->msg->len) { | 409 | if (pd->pos == pd->msg->len) { |
409 | /* Send stop if we haven't yet (DMA case) */ | 410 | /* Send stop if we haven't yet (DMA case) */ |
410 | if (pd->send_stop && (iic_rd(pd, ICCR) & ICCR_BBSY)) | 411 | if (pd->send_stop && pd->stop_after_dma) |
411 | i2c_op(pd, OP_TX_STOP, 0); | 412 | i2c_op(pd, OP_TX_STOP, 0); |
412 | return 1; | 413 | return 1; |
413 | } | 414 | } |
@@ -449,6 +450,13 @@ static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd) | |||
449 | real_pos = pd->pos - 2; | 450 | real_pos = pd->pos - 2; |
450 | 451 | ||
451 | if (pd->pos == pd->msg->len) { | 452 | if (pd->pos == pd->msg->len) { |
453 | if (pd->stop_after_dma) { | ||
454 | /* Simulate PIO end condition after DMA transfer */ | ||
455 | i2c_op(pd, OP_RX_STOP, 0); | ||
456 | pd->pos++; | ||
457 | break; | ||
458 | } | ||
459 | |||
452 | if (real_pos < 0) { | 460 | if (real_pos < 0) { |
453 | i2c_op(pd, OP_RX_STOP, 0); | 461 | i2c_op(pd, OP_RX_STOP, 0); |
454 | break; | 462 | break; |
@@ -536,6 +544,7 @@ static void sh_mobile_i2c_dma_callback(void *data) | |||
536 | 544 | ||
537 | sh_mobile_i2c_dma_unmap(pd); | 545 | sh_mobile_i2c_dma_unmap(pd); |
538 | pd->pos = pd->msg->len; | 546 | pd->pos = pd->msg->len; |
547 | pd->stop_after_dma = true; | ||
539 | 548 | ||
540 | iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); | 549 | iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); |
541 | } | 550 | } |
@@ -726,6 +735,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, | |||
726 | bool do_start = pd->send_stop || !i; | 735 | bool do_start = pd->send_stop || !i; |
727 | msg = &msgs[i]; | 736 | msg = &msgs[i]; |
728 | pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; | 737 | pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; |
738 | pd->stop_after_dma = false; | ||
729 | 739 | ||
730 | err = start_ch(pd, msg, do_start); | 740 | err = start_ch(pd, msg, do_start); |
731 | if (err) | 741 | if (err) |
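
The sh_mobile driver previously decided whether to send STOP after a DMA transfer by peeking at the bus-busy bit, which is racy; the new stop_after_dma flag records completion explicitly in the DMA callback so the next interrupt can issue the STOP deterministically. A minimal model of that handshake; the field names follow the diff, everything else is illustrative:

#include <stdbool.h>
#include <stdio.h>

struct xfer { int pos, len; bool send_stop, stop_after_dma; };

static void dma_callback(struct xfer *x)
{
	x->pos = x->len;              /* DMA moved the whole message */
	x->stop_after_dma = true;     /* record completion explicitly */
}

static void isr_tx(struct xfer *x)
{
	/* Replaces the old bus-busy probe with the recorded flag. */
	if (x->pos == x->len && x->send_stop && x->stop_after_dma)
		printf("issue STOP\n");   /* i2c_op(pd, OP_TX_STOP, 0) */
}

int main(void)
{
	struct xfer x = { .pos = 0, .len = 16, .send_stop = true,
			  .stop_after_dma = false };
	dma_callback(&x);
	isr_tx(&x);
	return 0;
}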
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 39d25a8cb1ad..e9eae57a2b50 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -2972,6 +2972,7 @@ trace: | |||
2972 | } | 2972 | } |
2973 | EXPORT_SYMBOL(i2c_smbus_xfer); | 2973 | EXPORT_SYMBOL(i2c_smbus_xfer); |
2974 | 2974 | ||
2975 | #if IS_ENABLED(CONFIG_I2C_SLAVE) | ||
2975 | int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) | 2976 | int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) |
2976 | { | 2977 | { |
2977 | int ret; | 2978 | int ret; |
@@ -3019,6 +3020,7 @@ int i2c_slave_unregister(struct i2c_client *client) | |||
3019 | return ret; | 3020 | return ret; |
3020 | } | 3021 | } |
3021 | EXPORT_SYMBOL_GPL(i2c_slave_unregister); | 3022 | EXPORT_SYMBOL_GPL(i2c_slave_unregister); |
3023 | #endif | ||
3022 | 3024 | ||
3023 | MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); | 3025 | MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); |
3024 | MODULE_DESCRIPTION("I2C-Bus main module"); | 3026 | MODULE_DESCRIPTION("I2C-Bus main module"); |
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c index 6631400b5f02..cf9b09db092f 100644 --- a/drivers/i2c/i2c-slave-eeprom.c +++ b/drivers/i2c/i2c-slave-eeprom.c | |||
@@ -74,7 +74,7 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj | |||
74 | struct eeprom_data *eeprom; | 74 | struct eeprom_data *eeprom; |
75 | unsigned long flags; | 75 | unsigned long flags; |
76 | 76 | ||
77 | if (off + count >= attr->size) | 77 | if (off + count > attr->size) |
78 | return -EFBIG; | 78 | return -EFBIG; |
79 | 79 | ||
80 | eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); | 80 | eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); |
@@ -92,7 +92,7 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob | |||
92 | struct eeprom_data *eeprom; | 92 | struct eeprom_data *eeprom; |
93 | unsigned long flags; | 93 | unsigned long flags; |
94 | 94 | ||
95 | if (off + count >= attr->size) | 95 | if (off + count > attr->size) |
96 | return -EFBIG; | 96 | return -EFBIG; |
97 | 97 | ||
98 | eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); | 98 | eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); |
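
The i2c-slave-eeprom fix is a classic inclusive-bound error: an access ending exactly at the last byte has off + count == size, which the old >= test rejected with -EFBIG even though it is valid. A standalone check showing the difference for a 256-byte device:

#include <stddef.h>
#include <stdio.h>

static int check_old(size_t off, size_t count, size_t size)
{
	return (off + count >= size) ? -1 : 0;  /* rejects the last byte */
}

static int check_new(size_t off, size_t count, size_t size)
{
	return (off + count > size) ? -1 : 0;   /* allows it */
}

int main(void)
{
	/* Read one byte at offset 255 of a 256-byte EEPROM. */
	printf("old: %d, new: %d\n",
	       check_old(255, 1, 256), check_new(255, 1, 256));
	return 0;  /* old: -1 (spurious -EFBIG), new: 0 */
}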
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 77ecf6d32237..6e22682c8255 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
@@ -1097,6 +1097,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, | |||
1097 | * Asus UX31 0x361f00 20, 15, 0e clickpad | 1097 | * Asus UX31 0x361f00 20, 15, 0e clickpad |
1098 | * Asus UX32VD 0x361f02 00, 15, 0e clickpad | 1098 | * Asus UX32VD 0x361f02 00, 15, 0e clickpad |
1099 | * Avatar AVIU-145A2 0x361f00 ? clickpad | 1099 | * Avatar AVIU-145A2 0x361f00 ? clickpad |
1100 | * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons | ||
1101 | * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons | ||
1100 | * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) | 1102 | * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) |
1101 | * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons | 1103 | * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons |
1102 | * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) | 1104 | * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) |
@@ -1475,6 +1477,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { | |||
1475 | DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"), | 1477 | DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"), |
1476 | }, | 1478 | }, |
1477 | }, | 1479 | }, |
1480 | { | ||
1481 | /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */ | ||
1482 | .matches = { | ||
1483 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), | ||
1484 | DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"), | ||
1485 | }, | ||
1486 | }, | ||
1487 | { | ||
1488 | /* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */ | ||
1489 | .matches = { | ||
1490 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), | ||
1491 | DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"), | ||
1492 | }, | ||
1493 | }, | ||
1478 | #endif | 1494 | #endif |
1479 | { } | 1495 | { } |
1480 | }; | 1496 | }; |
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index f9472920d986..23e26e0768b5 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
@@ -135,8 +135,9 @@ static const struct min_max_quirk min_max_pnpid_table[] = { | |||
135 | 1232, 5710, 1156, 4696 | 135 | 1232, 5710, 1156, 4696 |
136 | }, | 136 | }, |
137 | { | 137 | { |
138 | (const char * const []){"LEN0034", "LEN0036", "LEN0039", | 138 | (const char * const []){"LEN0034", "LEN0036", "LEN0037", |
139 | "LEN2002", "LEN2004", NULL}, | 139 | "LEN0039", "LEN2002", "LEN2004", |
140 | NULL}, | ||
140 | 1024, 5112, 2024, 4832 | 141 | 1024, 5112, 2024, 4832 |
141 | }, | 142 | }, |
142 | { | 143 | { |
@@ -165,7 +166,7 @@ static const char * const topbuttonpad_pnp_ids[] = { | |||
165 | "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */ | 166 | "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */ |
166 | "LEN0035", /* X240 */ | 167 | "LEN0035", /* X240 */ |
167 | "LEN0036", /* T440 */ | 168 | "LEN0036", /* T440 */ |
168 | "LEN0037", | 169 | "LEN0037", /* X1 Carbon 2nd */ |
169 | "LEN0038", | 170 | "LEN0038", |
170 | "LEN0039", /* T440s */ | 171 | "LEN0039", /* T440s */ |
171 | "LEN0041", | 172 | "LEN0041", |
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 764857b4e268..c11556563ef0 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
@@ -152,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | |||
152 | }, | 152 | }, |
153 | }, | 153 | }, |
154 | { | 154 | { |
155 | /* Medion Akoya E7225 */ | ||
156 | .matches = { | ||
157 | DMI_MATCH(DMI_SYS_VENDOR, "Medion"), | ||
158 | DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"), | ||
159 | DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), | ||
160 | }, | ||
161 | }, | ||
162 | { | ||
155 | /* Blue FB5601 */ | 163 | /* Blue FB5601 */ |
156 | .matches = { | 164 | .matches = { |
157 | DMI_MATCH(DMI_SYS_VENDOR, "blue"), | 165 | DMI_MATCH(DMI_SYS_VENDOR, "blue"), |
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 325188eef1c1..baa0d9786f50 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -4,6 +4,7 @@ config IOMMU_API | |||
4 | 4 | ||
5 | menuconfig IOMMU_SUPPORT | 5 | menuconfig IOMMU_SUPPORT |
6 | bool "IOMMU Hardware Support" | 6 | bool "IOMMU Hardware Support" |
7 | depends on MMU | ||
7 | default y | 8 | default y |
8 | ---help--- | 9 | ---help--- |
9 | Say Y here if you want to compile device drivers for IO Memory | 10 | Say Y here if you want to compile device drivers for IO Memory |
@@ -13,13 +14,43 @@ menuconfig IOMMU_SUPPORT | |||
13 | 14 | ||
14 | if IOMMU_SUPPORT | 15 | if IOMMU_SUPPORT |
15 | 16 | ||
17 | menu "Generic IOMMU Pagetable Support" | ||
18 | |||
19 | # Selected by the actual pagetable implementations | ||
20 | config IOMMU_IO_PGTABLE | ||
21 | bool | ||
22 | |||
23 | config IOMMU_IO_PGTABLE_LPAE | ||
24 | bool "ARMv7/v8 Long Descriptor Format" | ||
25 | select IOMMU_IO_PGTABLE | ||
26 | help | ||
27 | Enable support for the ARM long descriptor pagetable format. | ||
28 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page | ||
29 | sizes at both stage-1 and stage-2, as well as address spaces | ||
30 | up to 48-bits in size. | ||
31 | |||
32 | config IOMMU_IO_PGTABLE_LPAE_SELFTEST | ||
33 | bool "LPAE selftests" | ||
34 | depends on IOMMU_IO_PGTABLE_LPAE | ||
35 | help | ||
36 | Enable self-tests for the LPAE page table allocator. This performs ||
37 | a series of page-table consistency checks during boot. | ||
38 | |||
39 | If unsure, say N here. | ||
40 | |||
41 | endmenu | ||
42 | |||
43 | config IOMMU_IOVA | ||
44 | bool | ||
45 | |||
16 | config OF_IOMMU | 46 | config OF_IOMMU |
17 | def_bool y | 47 | def_bool y |
18 | depends on OF && IOMMU_API | 48 | depends on OF && IOMMU_API |
19 | 49 | ||
20 | config FSL_PAMU | 50 | config FSL_PAMU |
21 | bool "Freescale IOMMU support" | 51 | bool "Freescale IOMMU support" |
22 | depends on PPC_E500MC | 52 | depends on PPC32 |
53 | depends on PPC_E500MC || COMPILE_TEST | ||
23 | select IOMMU_API | 54 | select IOMMU_API |
24 | select GENERIC_ALLOCATOR | 55 | select GENERIC_ALLOCATOR |
25 | help | 56 | help |
@@ -30,7 +61,8 @@ config FSL_PAMU | |||
30 | # MSM IOMMU support | 61 | # MSM IOMMU support |
31 | config MSM_IOMMU | 62 | config MSM_IOMMU |
32 | bool "MSM IOMMU Support" | 63 | bool "MSM IOMMU Support" |
33 | depends on ARCH_MSM8X60 || ARCH_MSM8960 | 64 | depends on ARM |
65 | depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST | ||
34 | select IOMMU_API | 66 | select IOMMU_API |
35 | help | 67 | help |
36 | Support for the IOMMUs found on certain Qualcomm SOCs. | 68 | Support for the IOMMUs found on certain Qualcomm SOCs. |
@@ -91,6 +123,7 @@ config INTEL_IOMMU | |||
91 | bool "Support for Intel IOMMU using DMA Remapping Devices" | 123 | bool "Support for Intel IOMMU using DMA Remapping Devices" |
92 | depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC) | 124 | depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC) |
93 | select IOMMU_API | 125 | select IOMMU_API |
126 | select IOMMU_IOVA | ||
94 | select DMAR_TABLE | 127 | select DMAR_TABLE |
95 | help | 128 | help |
96 | DMA remapping (DMAR) devices support enables independent address | 129 | DMA remapping (DMAR) devices support enables independent address |
@@ -140,7 +173,8 @@ config IRQ_REMAP | |||
140 | # OMAP IOMMU support | 173 | # OMAP IOMMU support |
141 | config OMAP_IOMMU | 174 | config OMAP_IOMMU |
142 | bool "OMAP IOMMU Support" | 175 | bool "OMAP IOMMU Support" |
143 | depends on ARCH_OMAP2PLUS | 176 | depends on ARM && MMU |
177 | depends on ARCH_OMAP2PLUS || COMPILE_TEST | ||
144 | select IOMMU_API | 178 | select IOMMU_API |
145 | 179 | ||
146 | config OMAP_IOMMU_DEBUG | 180 | config OMAP_IOMMU_DEBUG |
@@ -187,7 +221,7 @@ config TEGRA_IOMMU_SMMU | |||
187 | 221 | ||
188 | config EXYNOS_IOMMU | 222 | config EXYNOS_IOMMU |
189 | bool "Exynos IOMMU Support" | 223 | bool "Exynos IOMMU Support" |
190 | depends on ARCH_EXYNOS && ARM | 224 | depends on ARCH_EXYNOS && ARM && MMU |
191 | select IOMMU_API | 225 | select IOMMU_API |
192 | select ARM_DMA_USE_IOMMU | 226 | select ARM_DMA_USE_IOMMU |
193 | help | 227 | help |
@@ -216,7 +250,7 @@ config SHMOBILE_IPMMU_TLB | |||
216 | config SHMOBILE_IOMMU | 250 | config SHMOBILE_IOMMU |
217 | bool "IOMMU for Renesas IPMMU/IPMMUI" | 251 | bool "IOMMU for Renesas IPMMU/IPMMUI" |
218 | default n | 252 | default n |
219 | depends on ARM | 253 | depends on ARM && MMU |
220 | depends on ARCH_SHMOBILE || COMPILE_TEST | 254 | depends on ARCH_SHMOBILE || COMPILE_TEST |
221 | select IOMMU_API | 255 | select IOMMU_API |
222 | select ARM_DMA_USE_IOMMU | 256 | select ARM_DMA_USE_IOMMU |
@@ -287,6 +321,7 @@ config IPMMU_VMSA | |||
287 | depends on ARM_LPAE | 321 | depends on ARM_LPAE |
288 | depends on ARCH_SHMOBILE || COMPILE_TEST | 322 | depends on ARCH_SHMOBILE || COMPILE_TEST |
289 | select IOMMU_API | 323 | select IOMMU_API |
324 | select IOMMU_IO_PGTABLE_LPAE | ||
290 | select ARM_DMA_USE_IOMMU | 325 | select ARM_DMA_USE_IOMMU |
291 | help | 326 | help |
292 | Support for the Renesas VMSA-compatible IPMMU found in the | 327 | Support for the Renesas VMSA-compatible IPMMU found in the |
@@ -304,13 +339,13 @@ config SPAPR_TCE_IOMMU | |||
304 | 339 | ||
305 | config ARM_SMMU | 340 | config ARM_SMMU |
306 | bool "ARM Ltd. System MMU (SMMU) Support" | 341 | bool "ARM Ltd. System MMU (SMMU) Support" |
307 | depends on ARM64 || (ARM_LPAE && OF) | 342 | depends on (ARM64 || ARM) && MMU |
308 | select IOMMU_API | 343 | select IOMMU_API |
344 | select IOMMU_IO_PGTABLE_LPAE | ||
309 | select ARM_DMA_USE_IOMMU if ARM | 345 | select ARM_DMA_USE_IOMMU if ARM |
310 | help | 346 | help |
311 | Support for implementations of the ARM System MMU architecture | 347 | Support for implementations of the ARM System MMU architecture |
312 | versions 1 and 2. The driver supports both v7l and v8l table | 348 | versions 1 and 2. |
313 | formats with 4k and 64k page sizes. | ||
314 | 349 | ||
315 | Say Y here if your SoC includes an IOMMU device implementing | 350 | Say Y here if your SoC includes an IOMMU device implementing |
316 | the ARM SMMU architecture. | 351 | the ARM SMMU architecture. |
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 7b976f294a69..080ffab4ed1c 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile | |||
@@ -1,13 +1,16 @@ | |||
1 | obj-$(CONFIG_IOMMU_API) += iommu.o | 1 | obj-$(CONFIG_IOMMU_API) += iommu.o |
2 | obj-$(CONFIG_IOMMU_API) += iommu-traces.o | 2 | obj-$(CONFIG_IOMMU_API) += iommu-traces.o |
3 | obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o | 3 | obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o |
4 | obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o | ||
5 | obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o | ||
6 | obj-$(CONFIG_IOMMU_IOVA) += iova.o | ||
4 | obj-$(CONFIG_OF_IOMMU) += of_iommu.o | 7 | obj-$(CONFIG_OF_IOMMU) += of_iommu.o |
5 | obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o | 8 | obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o |
6 | obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o | 9 | obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o |
7 | obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o | 10 | obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o |
8 | obj-$(CONFIG_ARM_SMMU) += arm-smmu.o | 11 | obj-$(CONFIG_ARM_SMMU) += arm-smmu.o |
9 | obj-$(CONFIG_DMAR_TABLE) += dmar.o | 12 | obj-$(CONFIG_DMAR_TABLE) += dmar.o |
10 | obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o | 13 | obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o |
11 | obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o | 14 | obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o |
12 | obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o | 15 | obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o |
13 | obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o | 16 | obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 98024856df07..8d1fb7f18bc5 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <jroedel@suse.de> |
4 | * Leo Duran <leo.duran@amd.com> | 4 | * Leo Duran <leo.duran@amd.com> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
@@ -843,10 +843,10 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, | |||
843 | size_t size, u16 domid, int pde) | 843 | size_t size, u16 domid, int pde) |
844 | { | 844 | { |
845 | u64 pages; | 845 | u64 pages; |
846 | int s; | 846 | bool s; |
847 | 847 | ||
848 | pages = iommu_num_pages(address, size, PAGE_SIZE); | 848 | pages = iommu_num_pages(address, size, PAGE_SIZE); |
849 | s = 0; | 849 | s = false; |
850 | 850 | ||
851 | if (pages > 1) { | 851 | if (pages > 1) { |
852 | /* | 852 | /* |
@@ -854,7 +854,7 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, | |||
854 | * TLB entries for this domain | 854 | * TLB entries for this domain |
855 | */ | 855 | */ |
856 | address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; | 856 | address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; |
857 | s = 1; | 857 | s = true; |
858 | } | 858 | } |
859 | 859 | ||
860 | address &= PAGE_MASK; | 860 | address &= PAGE_MASK; |
@@ -874,10 +874,10 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep, | |||
874 | u64 address, size_t size) | 874 | u64 address, size_t size) |
875 | { | 875 | { |
876 | u64 pages; | 876 | u64 pages; |
877 | int s; | 877 | bool s; |
878 | 878 | ||
879 | pages = iommu_num_pages(address, size, PAGE_SIZE); | 879 | pages = iommu_num_pages(address, size, PAGE_SIZE); |
880 | s = 0; | 880 | s = false; |
881 | 881 | ||
882 | if (pages > 1) { | 882 | if (pages > 1) { |
883 | /* | 883 | /* |
@@ -885,7 +885,7 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep, | |||
885 | * TLB entries for this domain | 885 | * TLB entries for this domain |
886 | */ | 886 | */ |
887 | address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; | 887 | address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; |
888 | s = 1; | 888 | s = true; |
889 | } | 889 | } |
890 | 890 | ||
891 | address &= PAGE_MASK; | 891 | address &= PAGE_MASK; |
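The int-to-bool conversion above is purely cosmetic: s is the size bit of the invalidation command, and the logic stays "one page can be named exactly, anything larger flushes the whole domain". A minimal sketch of that decision (iommu_num_pages() is the real helper from linux/iommu-helper.h; example_wants_full_flush() is invented):

    #include <linux/iommu-helper.h>
    #include <linux/mm.h>

    static bool example_wants_full_flush(u64 address, size_t size)
    {
            u64 pages = iommu_num_pages(address, size, PAGE_SIZE);

            /*
             * This path only distinguishes a single page from "everything":
             * ranges longer than one page are redirected to
             * CMD_INV_IOMMU_ALL_PAGES_ADDRESS with the size bit set.
             */
            return pages > 1;
    }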
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index b0522f15730f..e93eb8cd3df3 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <jroedel@suse.de> |
4 | * Leo Duran <leo.duran@amd.com> | 4 | * Leo Duran <leo.duran@amd.com> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h index 95ed6deae47f..b62ff5493980 100644 --- a/drivers/iommu/amd_iommu_proto.h +++ b/drivers/iommu/amd_iommu_proto.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2009-2010 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2009-2010 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <jroedel@suse.de> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 as published | 6 | * under the terms of the GNU General Public License version 2 as published |
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index cec51a8ba844..c4fffb710c58 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <jroedel@suse.de> |
4 | * Leo Duran <leo.duran@amd.com> | 4 | * Leo Duran <leo.duran@amd.com> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 90f70d0e1141..6d5a5c44453b 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2010-2012 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2010-2012 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <jroedel@suse.de> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 as published | 6 | * under the terms of the GNU General Public License version 2 as published |
@@ -31,7 +31,7 @@ | |||
31 | #include "amd_iommu_proto.h" | 31 | #include "amd_iommu_proto.h" |
32 | 32 | ||
33 | MODULE_LICENSE("GPL v2"); | 33 | MODULE_LICENSE("GPL v2"); |
34 | MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>"); | 34 | MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>"); |
35 | 35 | ||
36 | #define MAX_DEVICES 0x10000 | 36 | #define MAX_DEVICES 0x10000 |
37 | #define PRI_QUEUE_SIZE 512 | 37 | #define PRI_QUEUE_SIZE 512 |
@@ -151,18 +151,6 @@ static void put_device_state(struct device_state *dev_state) | |||
151 | wake_up(&dev_state->wq); | 151 | wake_up(&dev_state->wq); |
152 | } | 152 | } |
153 | 153 | ||
154 | static void put_device_state_wait(struct device_state *dev_state) | ||
155 | { | ||
156 | DEFINE_WAIT(wait); | ||
157 | |||
158 | prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE); | ||
159 | if (!atomic_dec_and_test(&dev_state->count)) | ||
160 | schedule(); | ||
161 | finish_wait(&dev_state->wq, &wait); | ||
162 | |||
163 | free_device_state(dev_state); | ||
164 | } | ||
165 | |||
166 | /* Must be called under dev_state->lock */ | 154 | /* Must be called under dev_state->lock */ |
167 | static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state, | 155 | static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state, |
168 | int pasid, bool alloc) | 156 | int pasid, bool alloc) |
@@ -278,14 +266,7 @@ static void put_pasid_state(struct pasid_state *pasid_state) | |||
278 | 266 | ||
279 | static void put_pasid_state_wait(struct pasid_state *pasid_state) | 267 | static void put_pasid_state_wait(struct pasid_state *pasid_state) |
280 | { | 268 | { |
281 | DEFINE_WAIT(wait); | 269 | wait_event(pasid_state->wq, !atomic_read(&pasid_state->count)); |
282 | |||
283 | prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE); | ||
284 | |||
285 | if (!atomic_dec_and_test(&pasid_state->count)) | ||
286 | schedule(); | ||
287 | |||
288 | finish_wait(&pasid_state->wq, &wait); | ||
289 | free_pasid_state(pasid_state); | 270 | free_pasid_state(pasid_state); |
290 | } | 271 | } |
291 | 272 | ||
@@ -851,7 +832,13 @@ void amd_iommu_free_device(struct pci_dev *pdev) | |||
851 | /* Get rid of any remaining pasid states */ | 832 | /* Get rid of any remaining pasid states */ |
852 | free_pasid_states(dev_state); | 833 | free_pasid_states(dev_state); |
853 | 834 | ||
854 | put_device_state_wait(dev_state); | 835 | put_device_state(dev_state); |
836 | /* | ||
837 | * Wait until the last reference is dropped before freeing | ||
838 | * the device state. | ||
839 | */ | ||
840 | wait_event(dev_state->wq, !atomic_read(&dev_state->count)); | ||
841 | free_device_state(dev_state); | ||
855 | } | 842 | } |
856 | EXPORT_SYMBOL(amd_iommu_free_device); | 843 | EXPORT_SYMBOL(amd_iommu_free_device); |
857 | 844 | ||
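The open-coded prepare_to_wait()/schedule() sequences are replaced with wait_event(), which re-tests the wake-up condition and so cannot miss a wake-up between the refcount check and the sleep; only once the count reaches zero is the state freed. A hedged sketch of the resulting idiom with invented names (wait_event(), atomic_dec_and_test() and friends are the real primitives; the wait queue is assumed to have been set up with init_waitqueue_head()):

    #include <linux/atomic.h>
    #include <linux/slab.h>
    #include <linux/wait.h>

    struct example_state {
            atomic_t count;
            wait_queue_head_t wq;   /* initialised with init_waitqueue_head() */
    };

    static void example_put(struct example_state *s)
    {
            if (atomic_dec_and_test(&s->count))
                    wake_up(&s->wq);
    }

    static void example_free(struct example_state *s)
    {
            example_put(s);         /* drop our own reference first */
            wait_event(s->wq, !atomic_read(&s->count));
            kfree(s);               /* no holder can still be using it */
    }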
@@ -921,7 +908,7 @@ static int __init amd_iommu_v2_init(void) | |||
921 | { | 908 | { |
922 | int ret; | 909 | int ret; |
923 | 910 | ||
924 | pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n"); | 911 | pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n"); |
925 | 912 | ||
926 | if (!amd_iommu_v2_supported()) { | 913 | if (!amd_iommu_v2_supported()) { |
927 | pr_info("AMD IOMMUv2 functionality not available on this system\n"); | 914 | pr_info("AMD IOMMUv2 functionality not available on this system\n"); |
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 6cd47b75286f..fc13dd56953e 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
@@ -23,8 +23,6 @@ | |||
23 | * - Stream-matching and stream-indexing | 23 | * - Stream-matching and stream-indexing |
24 | * - v7/v8 long-descriptor format | 24 | * - v7/v8 long-descriptor format |
25 | * - Non-secure access to the SMMU | 25 | * - Non-secure access to the SMMU |
26 | * - 4k and 64k pages, with contiguous pte hints. | ||
27 | * - Up to 48-bit addressing (dependent on VA_BITS) | ||
28 | * - Context fault reporting | 26 | * - Context fault reporting |
29 | */ | 27 | */ |
30 | 28 | ||
@@ -36,7 +34,7 @@ | |||
36 | #include <linux/interrupt.h> | 34 | #include <linux/interrupt.h> |
37 | #include <linux/io.h> | 35 | #include <linux/io.h> |
38 | #include <linux/iommu.h> | 36 | #include <linux/iommu.h> |
39 | #include <linux/mm.h> | 37 | #include <linux/iopoll.h> |
40 | #include <linux/module.h> | 38 | #include <linux/module.h> |
41 | #include <linux/of.h> | 39 | #include <linux/of.h> |
42 | #include <linux/pci.h> | 40 | #include <linux/pci.h> |
@@ -46,7 +44,7 @@ | |||
46 | 44 | ||
47 | #include <linux/amba/bus.h> | 45 | #include <linux/amba/bus.h> |
48 | 46 | ||
49 | #include <asm/pgalloc.h> | 47 | #include "io-pgtable.h" |
50 | 48 | ||
51 | /* Maximum number of stream IDs assigned to a single device */ | 49 | /* Maximum number of stream IDs assigned to a single device */ |
52 | #define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS | 50 | #define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS |
@@ -71,40 +69,6 @@ | |||
71 | ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \ | 69 | ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \ |
72 | ? 0x400 : 0)) | 70 | ? 0x400 : 0)) |
73 | 71 | ||
74 | /* Page table bits */ | ||
75 | #define ARM_SMMU_PTE_XN (((pteval_t)3) << 53) | ||
76 | #define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52) | ||
77 | #define ARM_SMMU_PTE_AF (((pteval_t)1) << 10) | ||
78 | #define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8) | ||
79 | #define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8) | ||
80 | #define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8) | ||
81 | #define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0) | ||
82 | |||
83 | #if PAGE_SIZE == SZ_4K | ||
84 | #define ARM_SMMU_PTE_CONT_ENTRIES 16 | ||
85 | #elif PAGE_SIZE == SZ_64K | ||
86 | #define ARM_SMMU_PTE_CONT_ENTRIES 32 | ||
87 | #else | ||
88 | #define ARM_SMMU_PTE_CONT_ENTRIES 1 | ||
89 | #endif | ||
90 | |||
91 | #define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES) | ||
92 | #define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1)) | ||
93 | |||
94 | /* Stage-1 PTE */ | ||
95 | #define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6) | ||
96 | #define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6) | ||
97 | #define ARM_SMMU_PTE_ATTRINDX_SHIFT 2 | ||
98 | #define ARM_SMMU_PTE_nG (((pteval_t)1) << 11) | ||
99 | |||
100 | /* Stage-2 PTE */ | ||
101 | #define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6) | ||
102 | #define ARM_SMMU_PTE_HAP_READ (((pteval_t)1) << 6) | ||
103 | #define ARM_SMMU_PTE_HAP_WRITE (((pteval_t)2) << 6) | ||
104 | #define ARM_SMMU_PTE_MEMATTR_OIWB (((pteval_t)0xf) << 2) | ||
105 | #define ARM_SMMU_PTE_MEMATTR_NC (((pteval_t)0x5) << 2) | ||
106 | #define ARM_SMMU_PTE_MEMATTR_DEV (((pteval_t)0x1) << 2) | ||
107 | |||
108 | /* Configuration registers */ | 72 | /* Configuration registers */ |
109 | #define ARM_SMMU_GR0_sCR0 0x0 | 73 | #define ARM_SMMU_GR0_sCR0 0x0 |
110 | #define sCR0_CLIENTPD (1 << 0) | 74 | #define sCR0_CLIENTPD (1 << 0) |
@@ -132,17 +96,12 @@ | |||
132 | #define ARM_SMMU_GR0_sGFSYNR0 0x50 | 96 | #define ARM_SMMU_GR0_sGFSYNR0 0x50 |
133 | #define ARM_SMMU_GR0_sGFSYNR1 0x54 | 97 | #define ARM_SMMU_GR0_sGFSYNR1 0x54 |
134 | #define ARM_SMMU_GR0_sGFSYNR2 0x58 | 98 | #define ARM_SMMU_GR0_sGFSYNR2 0x58 |
135 | #define ARM_SMMU_GR0_PIDR0 0xfe0 | ||
136 | #define ARM_SMMU_GR0_PIDR1 0xfe4 | ||
137 | #define ARM_SMMU_GR0_PIDR2 0xfe8 | ||
138 | 99 | ||
139 | #define ID0_S1TS (1 << 30) | 100 | #define ID0_S1TS (1 << 30) |
140 | #define ID0_S2TS (1 << 29) | 101 | #define ID0_S2TS (1 << 29) |
141 | #define ID0_NTS (1 << 28) | 102 | #define ID0_NTS (1 << 28) |
142 | #define ID0_SMS (1 << 27) | 103 | #define ID0_SMS (1 << 27) |
143 | #define ID0_PTFS_SHIFT 24 | 104 | #define ID0_ATOSNS (1 << 26) |
144 | #define ID0_PTFS_MASK 0x2 | ||
145 | #define ID0_PTFS_V8_ONLY 0x2 | ||
146 | #define ID0_CTTW (1 << 14) | 105 | #define ID0_CTTW (1 << 14) |
147 | #define ID0_NUMIRPT_SHIFT 16 | 106 | #define ID0_NUMIRPT_SHIFT 16 |
148 | #define ID0_NUMIRPT_MASK 0xff | 107 | #define ID0_NUMIRPT_MASK 0xff |
@@ -169,11 +128,7 @@ | |||
169 | #define ID2_PTFS_16K (1 << 13) | 128 | #define ID2_PTFS_16K (1 << 13) |
170 | #define ID2_PTFS_64K (1 << 14) | 129 | #define ID2_PTFS_64K (1 << 14) |
171 | 130 | ||
172 | #define PIDR2_ARCH_SHIFT 4 | ||
173 | #define PIDR2_ARCH_MASK 0xf | ||
174 | |||
175 | /* Global TLB invalidation */ | 131 | /* Global TLB invalidation */ |
176 | #define ARM_SMMU_GR0_STLBIALL 0x60 | ||
177 | #define ARM_SMMU_GR0_TLBIVMID 0x64 | 132 | #define ARM_SMMU_GR0_TLBIVMID 0x64 |
178 | #define ARM_SMMU_GR0_TLBIALLNSNH 0x68 | 133 | #define ARM_SMMU_GR0_TLBIALLNSNH 0x68 |
179 | #define ARM_SMMU_GR0_TLBIALLH 0x6c | 134 | #define ARM_SMMU_GR0_TLBIALLH 0x6c |
@@ -231,13 +186,25 @@ | |||
231 | #define ARM_SMMU_CB_TTBCR2 0x10 | 186 | #define ARM_SMMU_CB_TTBCR2 0x10 |
232 | #define ARM_SMMU_CB_TTBR0_LO 0x20 | 187 | #define ARM_SMMU_CB_TTBR0_LO 0x20 |
233 | #define ARM_SMMU_CB_TTBR0_HI 0x24 | 188 | #define ARM_SMMU_CB_TTBR0_HI 0x24 |
189 | #define ARM_SMMU_CB_TTBR1_LO 0x28 | ||
190 | #define ARM_SMMU_CB_TTBR1_HI 0x2c | ||
234 | #define ARM_SMMU_CB_TTBCR 0x30 | 191 | #define ARM_SMMU_CB_TTBCR 0x30 |
235 | #define ARM_SMMU_CB_S1_MAIR0 0x38 | 192 | #define ARM_SMMU_CB_S1_MAIR0 0x38 |
193 | #define ARM_SMMU_CB_S1_MAIR1 0x3c | ||
194 | #define ARM_SMMU_CB_PAR_LO 0x50 | ||
195 | #define ARM_SMMU_CB_PAR_HI 0x54 | ||
236 | #define ARM_SMMU_CB_FSR 0x58 | 196 | #define ARM_SMMU_CB_FSR 0x58 |
237 | #define ARM_SMMU_CB_FAR_LO 0x60 | 197 | #define ARM_SMMU_CB_FAR_LO 0x60 |
238 | #define ARM_SMMU_CB_FAR_HI 0x64 | 198 | #define ARM_SMMU_CB_FAR_HI 0x64 |
239 | #define ARM_SMMU_CB_FSYNR0 0x68 | 199 | #define ARM_SMMU_CB_FSYNR0 0x68 |
200 | #define ARM_SMMU_CB_S1_TLBIVA 0x600 | ||
240 | #define ARM_SMMU_CB_S1_TLBIASID 0x610 | 201 | #define ARM_SMMU_CB_S1_TLBIASID 0x610 |
202 | #define ARM_SMMU_CB_S1_TLBIVAL 0x620 | ||
203 | #define ARM_SMMU_CB_S2_TLBIIPAS2 0x630 | ||
204 | #define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638 | ||
205 | #define ARM_SMMU_CB_ATS1PR_LO 0x800 | ||
206 | #define ARM_SMMU_CB_ATS1PR_HI 0x804 | ||
207 | #define ARM_SMMU_CB_ATSR 0x8f0 | ||
241 | 208 | ||
242 | #define SCTLR_S1_ASIDPNE (1 << 12) | 209 | #define SCTLR_S1_ASIDPNE (1 << 12) |
243 | #define SCTLR_CFCFG (1 << 7) | 210 | #define SCTLR_CFCFG (1 << 7) |
@@ -249,47 +216,16 @@ | |||
249 | #define SCTLR_M (1 << 0) | 216 | #define SCTLR_M (1 << 0) |
250 | #define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE) | 217 | #define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE) |
251 | 218 | ||
252 | #define RESUME_RETRY (0 << 0) | 219 | #define CB_PAR_F (1 << 0) |
253 | #define RESUME_TERMINATE (1 << 0) | ||
254 | |||
255 | #define TTBCR_EAE (1 << 31) | ||
256 | 220 | ||
257 | #define TTBCR_PASIZE_SHIFT 16 | 221 | #define ATSR_ACTIVE (1 << 0) |
258 | #define TTBCR_PASIZE_MASK 0x7 | ||
259 | 222 | ||
260 | #define TTBCR_TG0_4K (0 << 14) | 223 | #define RESUME_RETRY (0 << 0) |
261 | #define TTBCR_TG0_64K (1 << 14) | 224 | #define RESUME_TERMINATE (1 << 0) |
262 | |||
263 | #define TTBCR_SH0_SHIFT 12 | ||
264 | #define TTBCR_SH0_MASK 0x3 | ||
265 | #define TTBCR_SH_NS 0 | ||
266 | #define TTBCR_SH_OS 2 | ||
267 | #define TTBCR_SH_IS 3 | ||
268 | |||
269 | #define TTBCR_ORGN0_SHIFT 10 | ||
270 | #define TTBCR_IRGN0_SHIFT 8 | ||
271 | #define TTBCR_RGN_MASK 0x3 | ||
272 | #define TTBCR_RGN_NC 0 | ||
273 | #define TTBCR_RGN_WBWA 1 | ||
274 | #define TTBCR_RGN_WT 2 | ||
275 | #define TTBCR_RGN_WB 3 | ||
276 | |||
277 | #define TTBCR_SL0_SHIFT 6 | ||
278 | #define TTBCR_SL0_MASK 0x3 | ||
279 | #define TTBCR_SL0_LVL_2 0 | ||
280 | #define TTBCR_SL0_LVL_1 1 | ||
281 | |||
282 | #define TTBCR_T1SZ_SHIFT 16 | ||
283 | #define TTBCR_T0SZ_SHIFT 0 | ||
284 | #define TTBCR_SZ_MASK 0xf | ||
285 | 225 | ||
286 | #define TTBCR2_SEP_SHIFT 15 | 226 | #define TTBCR2_SEP_SHIFT 15 |
287 | #define TTBCR2_SEP_MASK 0x7 | 227 | #define TTBCR2_SEP_MASK 0x7 |
288 | 228 | ||
289 | #define TTBCR2_PASIZE_SHIFT 0 | ||
290 | #define TTBCR2_PASIZE_MASK 0x7 | ||
291 | |||
292 | /* Common definitions for PASize and SEP fields */ | ||
293 | #define TTBCR2_ADDR_32 0 | 229 | #define TTBCR2_ADDR_32 0 |
294 | #define TTBCR2_ADDR_36 1 | 230 | #define TTBCR2_ADDR_36 1 |
295 | #define TTBCR2_ADDR_40 2 | 231 | #define TTBCR2_ADDR_40 2 |
@@ -297,16 +233,7 @@ | |||
297 | #define TTBCR2_ADDR_44 4 | 233 | #define TTBCR2_ADDR_44 4 |
298 | #define TTBCR2_ADDR_48 5 | 234 | #define TTBCR2_ADDR_48 5 |
299 | 235 | ||
300 | #define TTBRn_HI_ASID_SHIFT 16 | 236 | #define TTBRn_HI_ASID_SHIFT 16 |
301 | |||
302 | #define MAIR_ATTR_SHIFT(n) ((n) << 3) | ||
303 | #define MAIR_ATTR_MASK 0xff | ||
304 | #define MAIR_ATTR_DEVICE 0x04 | ||
305 | #define MAIR_ATTR_NC 0x44 | ||
306 | #define MAIR_ATTR_WBRWA 0xff | ||
307 | #define MAIR_ATTR_IDX_NC 0 | ||
308 | #define MAIR_ATTR_IDX_CACHE 1 | ||
309 | #define MAIR_ATTR_IDX_DEV 2 | ||
310 | 237 | ||
311 | #define FSR_MULTI (1 << 31) | 238 | #define FSR_MULTI (1 << 31) |
312 | #define FSR_SS (1 << 30) | 239 | #define FSR_SS (1 << 30) |
@@ -366,6 +293,7 @@ struct arm_smmu_device { | |||
366 | #define ARM_SMMU_FEAT_TRANS_S1 (1 << 2) | 293 | #define ARM_SMMU_FEAT_TRANS_S1 (1 << 2) |
367 | #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) | 294 | #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) |
368 | #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) | 295 | #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) |
296 | #define ARM_SMMU_FEAT_TRANS_OPS (1 << 5) | ||
369 | u32 features; | 297 | u32 features; |
370 | 298 | ||
371 | #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) | 299 | #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) |
@@ -380,10 +308,9 @@ struct arm_smmu_device { | |||
380 | u32 num_mapping_groups; | 308 | u32 num_mapping_groups; |
381 | DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS); | 309 | DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS); |
382 | 310 | ||
383 | unsigned long s1_input_size; | 311 | unsigned long va_size; |
384 | unsigned long s1_output_size; | 312 | unsigned long ipa_size; |
385 | unsigned long s2_input_size; | 313 | unsigned long pa_size; |
386 | unsigned long s2_output_size; | ||
387 | 314 | ||
388 | u32 num_global_irqs; | 315 | u32 num_global_irqs; |
389 | u32 num_context_irqs; | 316 | u32 num_context_irqs; |
@@ -397,7 +324,6 @@ struct arm_smmu_cfg { | |||
397 | u8 cbndx; | 324 | u8 cbndx; |
398 | u8 irptndx; | 325 | u8 irptndx; |
399 | u32 cbar; | 326 | u32 cbar; |
400 | pgd_t *pgd; | ||
401 | }; | 327 | }; |
402 | #define INVALID_IRPTNDX 0xff | 328 | #define INVALID_IRPTNDX 0xff |
403 | 329 | ||
@@ -412,11 +338,15 @@ enum arm_smmu_domain_stage { | |||
412 | 338 | ||
413 | struct arm_smmu_domain { | 339 | struct arm_smmu_domain { |
414 | struct arm_smmu_device *smmu; | 340 | struct arm_smmu_device *smmu; |
341 | struct io_pgtable_ops *pgtbl_ops; | ||
342 | spinlock_t pgtbl_lock; | ||
415 | struct arm_smmu_cfg cfg; | 343 | struct arm_smmu_cfg cfg; |
416 | enum arm_smmu_domain_stage stage; | 344 | enum arm_smmu_domain_stage stage; |
417 | spinlock_t lock; | 345 | struct mutex init_mutex; /* Protects smmu pointer */ |
418 | }; | 346 | }; |
419 | 347 | ||
348 | static struct iommu_ops arm_smmu_ops; | ||
349 | |||
420 | static DEFINE_SPINLOCK(arm_smmu_devices_lock); | 350 | static DEFINE_SPINLOCK(arm_smmu_devices_lock); |
421 | static LIST_HEAD(arm_smmu_devices); | 351 | static LIST_HEAD(arm_smmu_devices); |
422 | 352 | ||
@@ -597,7 +527,7 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx) | |||
597 | } | 527 | } |
598 | 528 | ||
599 | /* Wait for any pending TLB invalidations to complete */ | 529 | /* Wait for any pending TLB invalidations to complete */ |
600 | static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu) | 530 | static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu) |
601 | { | 531 | { |
602 | int count = 0; | 532 | int count = 0; |
603 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | 533 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); |
@@ -615,12 +545,19 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu) | |||
615 | } | 545 | } |
616 | } | 546 | } |
617 | 547 | ||
618 | static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain) | 548 | static void arm_smmu_tlb_sync(void *cookie) |
619 | { | 549 | { |
550 | struct arm_smmu_domain *smmu_domain = cookie; | ||
551 | __arm_smmu_tlb_sync(smmu_domain->smmu); | ||
552 | } | ||
553 | |||
554 | static void arm_smmu_tlb_inv_context(void *cookie) | ||
555 | { | ||
556 | struct arm_smmu_domain *smmu_domain = cookie; | ||
620 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | 557 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
621 | struct arm_smmu_device *smmu = smmu_domain->smmu; | 558 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
622 | void __iomem *base = ARM_SMMU_GR0(smmu); | ||
623 | bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; | 559 | bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; |
560 | void __iomem *base; | ||
624 | 561 | ||
625 | if (stage1) { | 562 | if (stage1) { |
626 | base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | 563 | base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); |
@@ -632,9 +569,76 @@ static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain) | |||
632 | base + ARM_SMMU_GR0_TLBIVMID); | 569 | base + ARM_SMMU_GR0_TLBIVMID); |
633 | } | 570 | } |
634 | 571 | ||
635 | arm_smmu_tlb_sync(smmu); | 572 | __arm_smmu_tlb_sync(smmu); |
573 | } | ||
574 | |||
575 | static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, | ||
576 | bool leaf, void *cookie) | ||
577 | { | ||
578 | struct arm_smmu_domain *smmu_domain = cookie; | ||
579 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | ||
580 | struct arm_smmu_device *smmu = smmu_domain->smmu; | ||
581 | bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; | ||
582 | void __iomem *reg; | ||
583 | |||
584 | if (stage1) { | ||
585 | reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | ||
586 | reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; | ||
587 | |||
588 | if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) { | ||
589 | iova &= ~0xfffUL; ||
590 | iova |= ARM_SMMU_CB_ASID(cfg); | ||
591 | writel_relaxed(iova, reg); | ||
592 | #ifdef CONFIG_64BIT | ||
593 | } else { | ||
594 | iova >>= 12; | ||
595 | iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48; | ||
596 | writeq_relaxed(iova, reg); | ||
597 | #endif | ||
598 | } | ||
599 | #ifdef CONFIG_64BIT | ||
600 | } else if (smmu->version == ARM_SMMU_V2) { | ||
601 | reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | ||
602 | reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : | ||
603 | ARM_SMMU_CB_S2_TLBIIPAS2; | ||
604 | writeq_relaxed(iova >> 12, reg); | ||
605 | #endif | ||
606 | } else { | ||
607 | reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID; | ||
608 | writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg); | ||
609 | } | ||
610 | } | ||
611 | |||
612 | static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie) | ||
613 | { | ||
614 | struct arm_smmu_domain *smmu_domain = cookie; | ||
615 | struct arm_smmu_device *smmu = smmu_domain->smmu; | ||
616 | unsigned long offset = (unsigned long)addr & ~PAGE_MASK; | ||
617 | |||
618 | |||
619 | /* Ensure new page tables are visible to the hardware walker */ | ||
620 | if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { | ||
621 | dsb(ishst); | ||
622 | } else { | ||
623 | /* | ||
624 | * If the SMMU can't walk tables in the CPU caches, treat them | ||
625 | * like non-coherent DMA since we need to flush the new entries | ||
626 | * all the way out to memory. There's no possibility of | ||
627 | * recursion here as the SMMU table walker will not be wired | ||
628 | * through another SMMU. | ||
629 | */ | ||
630 | dma_map_page(smmu->dev, virt_to_page(addr), offset, size, | ||
631 | DMA_TO_DEVICE); | ||
632 | } | ||
636 | } | 633 | } |
637 | 634 | ||
635 | static struct iommu_gather_ops arm_smmu_gather_ops = { | ||
636 | .tlb_flush_all = arm_smmu_tlb_inv_context, | ||
637 | .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, | ||
638 | .tlb_sync = arm_smmu_tlb_sync, | ||
639 | .flush_pgtable = arm_smmu_flush_pgtable, | ||
640 | }; | ||
641 | |||
638 | static irqreturn_t arm_smmu_context_fault(int irq, void *dev) | 642 | static irqreturn_t arm_smmu_context_fault(int irq, void *dev) |
639 | { | 643 | { |
640 | int flags, ret; | 644 | int flags, ret; |
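The struct iommu_gather_ops instance above is the heart of the conversion: TLB maintenance and page-table cache flushing now happen through callbacks that the io-pgtable code invokes with the cookie handed to alloc_io_pgtable_ops(), instead of being open-coded in the map/unmap paths. A skeletal consumer-side ops table with the same four slots (the example_* hooks are placeholders; the signatures mirror those used above):

    #include "io-pgtable.h"

    static void example_tlb_flush_all(void *cookie)
    {
            /* invalidate every TLB entry for the domain behind @cookie */
    }

    static void example_tlb_add_flush(unsigned long iova, size_t size,
                                      bool leaf, void *cookie)
    {
            /* queue invalidation of [iova, iova + size) */
    }

    static void example_tlb_sync(void *cookie)
    {
            /* wait for any queued invalidations to drain */
    }

    static void example_flush_pgtable(void *addr, size_t size, void *cookie)
    {
            /* make new PTEs visible to a non-coherent table walker */
    }

    static struct iommu_gather_ops example_gather_ops = {
            .tlb_flush_all  = example_tlb_flush_all,
            .tlb_add_flush  = example_tlb_add_flush,
            .tlb_sync       = example_tlb_sync,
            .flush_pgtable  = example_flush_pgtable,
    };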
@@ -712,29 +716,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev) | |||
712 | return IRQ_HANDLED; | 716 | return IRQ_HANDLED; |
713 | } | 717 | } |
714 | 718 | ||
715 | static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr, | 719 | static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, |
716 | size_t size) | 720 | struct io_pgtable_cfg *pgtbl_cfg) |
717 | { | ||
718 | unsigned long offset = (unsigned long)addr & ~PAGE_MASK; | ||
719 | |||
720 | |||
721 | /* Ensure new page tables are visible to the hardware walker */ | ||
722 | if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { | ||
723 | dsb(ishst); | ||
724 | } else { | ||
725 | /* | ||
726 | * If the SMMU can't walk tables in the CPU caches, treat them | ||
727 | * like non-coherent DMA since we need to flush the new entries | ||
728 | * all the way out to memory. There's no possibility of | ||
729 | * recursion here as the SMMU table walker will not be wired | ||
730 | * through another SMMU. | ||
731 | */ | ||
732 | dma_map_page(smmu->dev, virt_to_page(addr), offset, size, | ||
733 | DMA_TO_DEVICE); | ||
734 | } | ||
735 | } | ||
736 | |||
737 | static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) | ||
738 | { | 721 | { |
739 | u32 reg; | 722 | u32 reg; |
740 | bool stage1; | 723 | bool stage1; |
@@ -771,124 +754,68 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) | |||
771 | #else | 754 | #else |
772 | reg = CBA2R_RW64_32BIT; | 755 | reg = CBA2R_RW64_32BIT; |
773 | #endif | 756 | #endif |
774 | writel_relaxed(reg, | 757 | writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); |
775 | gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); | ||
776 | |||
777 | /* TTBCR2 */ | ||
778 | switch (smmu->s1_input_size) { | ||
779 | case 32: | ||
780 | reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT); | ||
781 | break; | ||
782 | case 36: | ||
783 | reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); | ||
784 | break; | ||
785 | case 39: | ||
786 | case 40: | ||
787 | reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); | ||
788 | break; | ||
789 | case 42: | ||
790 | reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT); | ||
791 | break; | ||
792 | case 44: | ||
793 | reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT); | ||
794 | break; | ||
795 | case 48: | ||
796 | reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT); | ||
797 | break; | ||
798 | } | ||
799 | |||
800 | switch (smmu->s1_output_size) { | ||
801 | case 32: | ||
802 | reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT); | ||
803 | break; | ||
804 | case 36: | ||
805 | reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT); | ||
806 | break; | ||
807 | case 39: | ||
808 | case 40: | ||
809 | reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT); | ||
810 | break; | ||
811 | case 42: | ||
812 | reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT); | ||
813 | break; | ||
814 | case 44: | ||
815 | reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT); | ||
816 | break; | ||
817 | case 48: | ||
818 | reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT); | ||
819 | break; | ||
820 | } | ||
821 | |||
822 | if (stage1) | ||
823 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); | ||
824 | } | 758 | } |
825 | 759 | ||
826 | /* TTBR0 */ | 760 | /* TTBRs */ |
827 | arm_smmu_flush_pgtable(smmu, cfg->pgd, | 761 | if (stage1) { |
828 | PTRS_PER_PGD * sizeof(pgd_t)); | 762 | reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; |
829 | reg = __pa(cfg->pgd); | 763 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); |
830 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); | 764 | reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32; |
831 | reg = (phys_addr_t)__pa(cfg->pgd) >> 32; | ||
832 | if (stage1) | ||
833 | reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; | 765 | reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; |
834 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); | 766 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); |
835 | |||
836 | /* | ||
837 | * TTBCR | ||
838 | * We use long descriptor, with inner-shareable WBWA tables in TTBR0. | ||
839 | */ | ||
840 | if (smmu->version > ARM_SMMU_V1) { | ||
841 | if (PAGE_SIZE == SZ_4K) | ||
842 | reg = TTBCR_TG0_4K; | ||
843 | else | ||
844 | reg = TTBCR_TG0_64K; | ||
845 | 767 | ||
846 | if (!stage1) { | 768 | reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; |
847 | reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT; | 769 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO); |
770 | reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32; | ||
771 | reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; | ||
772 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI); | ||
773 | } else { | ||
774 | reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; | ||
775 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); | ||
776 | reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32; | ||
777 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); | ||
778 | } | ||
848 | 779 | ||
849 | switch (smmu->s2_output_size) { | 780 | /* TTBCR */ |
781 | if (stage1) { | ||
782 | reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr; | ||
783 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); | ||
784 | if (smmu->version > ARM_SMMU_V1) { | ||
785 | reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; | ||
786 | switch (smmu->va_size) { | ||
850 | case 32: | 787 | case 32: |
851 | reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT); | 788 | reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT); |
852 | break; | 789 | break; |
853 | case 36: | 790 | case 36: |
854 | reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT); | 791 | reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); |
855 | break; | 792 | break; |
856 | case 40: | 793 | case 40: |
857 | reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT); | 794 | reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); |
858 | break; | 795 | break; |
859 | case 42: | 796 | case 42: |
860 | reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT); | 797 | reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT); |
861 | break; | 798 | break; |
862 | case 44: | 799 | case 44: |
863 | reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT); | 800 | reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT); |
864 | break; | 801 | break; |
865 | case 48: | 802 | case 48: |
866 | reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT); | 803 | reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT); |
867 | break; | 804 | break; |
868 | } | 805 | } |
869 | } else { | 806 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); |
870 | reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT; | ||
871 | } | 807 | } |
872 | } else { | 808 | } else { |
873 | reg = 0; | 809 | reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr; |
810 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); | ||
874 | } | 811 | } |
875 | 812 | ||
876 | reg |= TTBCR_EAE | | 813 | /* MAIRs (stage-1 only) */ |
877 | (TTBCR_SH_IS << TTBCR_SH0_SHIFT) | | ||
878 | (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) | | ||
879 | (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT); | ||
880 | |||
881 | if (!stage1) | ||
882 | reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); | ||
883 | |||
884 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); | ||
885 | |||
886 | /* MAIR0 (stage-1 only) */ | ||
887 | if (stage1) { | 814 | if (stage1) { |
888 | reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) | | 815 | reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; |
889 | (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) | | ||
890 | (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV)); | ||
891 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0); | 816 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0); |
817 | reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1]; | ||
818 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1); | ||
892 | } | 819 | } |
893 | 820 | ||
894 | /* SCTLR */ | 821 | /* SCTLR */ |
@@ -905,11 +832,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
905 | struct arm_smmu_device *smmu) | 832 | struct arm_smmu_device *smmu) |
906 | { | 833 | { |
907 | int irq, start, ret = 0; | 834 | int irq, start, ret = 0; |
908 | unsigned long flags; | 835 | unsigned long ias, oas; |
836 | struct io_pgtable_ops *pgtbl_ops; | ||
837 | struct io_pgtable_cfg pgtbl_cfg; | ||
838 | enum io_pgtable_fmt fmt; | ||
909 | struct arm_smmu_domain *smmu_domain = domain->priv; | 839 | struct arm_smmu_domain *smmu_domain = domain->priv; |
910 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | 840 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
911 | 841 | ||
912 | spin_lock_irqsave(&smmu_domain->lock, flags); | 842 | mutex_lock(&smmu_domain->init_mutex); |
913 | if (smmu_domain->smmu) | 843 | if (smmu_domain->smmu) |
914 | goto out_unlock; | 844 | goto out_unlock; |
915 | 845 | ||
@@ -940,6 +870,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
940 | case ARM_SMMU_DOMAIN_S1: | 870 | case ARM_SMMU_DOMAIN_S1: |
941 | cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; | 871 | cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; |
942 | start = smmu->num_s2_context_banks; | 872 | start = smmu->num_s2_context_banks; |
873 | ias = smmu->va_size; | ||
874 | oas = smmu->ipa_size; | ||
875 | if (IS_ENABLED(CONFIG_64BIT)) | ||
876 | fmt = ARM_64_LPAE_S1; | ||
877 | else | ||
878 | fmt = ARM_32_LPAE_S1; | ||
943 | break; | 879 | break; |
944 | case ARM_SMMU_DOMAIN_NESTED: | 880 | case ARM_SMMU_DOMAIN_NESTED: |
945 | /* | 881 | /* |
@@ -949,6 +885,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
949 | case ARM_SMMU_DOMAIN_S2: | 885 | case ARM_SMMU_DOMAIN_S2: |
950 | cfg->cbar = CBAR_TYPE_S2_TRANS; | 886 | cfg->cbar = CBAR_TYPE_S2_TRANS; |
951 | start = 0; | 887 | start = 0; |
888 | ias = smmu->ipa_size; | ||
889 | oas = smmu->pa_size; | ||
890 | if (IS_ENABLED(CONFIG_64BIT)) | ||
891 | fmt = ARM_64_LPAE_S2; | ||
892 | else | ||
893 | fmt = ARM_32_LPAE_S2; | ||
952 | break; | 894 | break; |
953 | default: | 895 | default: |
954 | ret = -EINVAL; | 896 | ret = -EINVAL; |
@@ -968,10 +910,30 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
968 | cfg->irptndx = cfg->cbndx; | 910 | cfg->irptndx = cfg->cbndx; |
969 | } | 911 | } |
970 | 912 | ||
971 | ACCESS_ONCE(smmu_domain->smmu) = smmu; | 913 | pgtbl_cfg = (struct io_pgtable_cfg) { |
972 | arm_smmu_init_context_bank(smmu_domain); | 914 | .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap, |
973 | spin_unlock_irqrestore(&smmu_domain->lock, flags); | 915 | .ias = ias, |
916 | .oas = oas, | ||
917 | .tlb = &arm_smmu_gather_ops, | ||
918 | }; | ||
919 | |||
920 | smmu_domain->smmu = smmu; | ||
921 | pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); | ||
922 | if (!pgtbl_ops) { | ||
923 | ret = -ENOMEM; | ||
924 | goto out_clear_smmu; | ||
925 | } | ||
926 | |||
927 | /* Update our support page sizes to reflect the page table format */ | ||
928 | arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; | ||
974 | 929 | ||
930 | /* Initialise the context bank with our page table cfg */ | ||
931 | arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg); | ||
932 | |||
933 | /* | ||
934 | * Request context fault interrupt. Do this last to avoid the | ||
935 | * handler seeing a half-initialised domain state. | ||
936 | */ | ||
975 | irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; | 937 | irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; |
976 | ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, | 938 | ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, |
977 | "arm-smmu-context-fault", domain); | 939 | "arm-smmu-context-fault", domain); |
@@ -981,10 +943,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
981 | cfg->irptndx = INVALID_IRPTNDX; | 943 | cfg->irptndx = INVALID_IRPTNDX; |
982 | } | 944 | } |
983 | 945 | ||
946 | mutex_unlock(&smmu_domain->init_mutex); | ||
947 | |||
948 | /* Publish page table ops for map/unmap */ | ||
949 | smmu_domain->pgtbl_ops = pgtbl_ops; | ||
984 | return 0; | 950 | return 0; |
985 | 951 | ||
952 | out_clear_smmu: | ||
953 | smmu_domain->smmu = NULL; | ||
986 | out_unlock: | 954 | out_unlock: |
987 | spin_unlock_irqrestore(&smmu_domain->lock, flags); | 955 | mutex_unlock(&smmu_domain->init_mutex); |
988 | return ret; | 956 | return ret; |
989 | } | 957 | } |
990 | 958 | ||
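The hunk above is the core handshake with the new allocator: build an io_pgtable_cfg, let alloc_io_pgtable_ops() instantiate the chosen format, adopt the page sizes it actually granted, program the context bank, and only then publish pgtbl_ops so the map/unmap paths never see a half-initialised domain. Condensed into one hypothetical helper (all identifiers are taken from the patch; only example_finalise_domain() itself is invented, and error unwinding is trimmed):

    static int example_finalise_domain(struct arm_smmu_domain *smmu_domain,
                                       enum io_pgtable_fmt fmt,
                                       unsigned long ias, unsigned long oas)
    {
            struct io_pgtable_cfg cfg = {
                    .pgsize_bitmap  = arm_smmu_ops.pgsize_bitmap,
                    .ias            = ias,
                    .oas            = oas,
                    .tlb            = &arm_smmu_gather_ops,
            };
            struct io_pgtable_ops *ops;

            ops = alloc_io_pgtable_ops(fmt, &cfg, smmu_domain);
            if (!ops)
                    return -ENOMEM;

            /* the allocator may grant fewer page sizes than requested */
            arm_smmu_ops.pgsize_bitmap = cfg.pgsize_bitmap;

            /* program the hardware, then publish the ops last */
            arm_smmu_init_context_bank(smmu_domain, &cfg);
            smmu_domain->pgtbl_ops = ops;
            return 0;
    }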
@@ -999,23 +967,27 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) | |||
999 | if (!smmu) | 967 | if (!smmu) |
1000 | return; | 968 | return; |
1001 | 969 | ||
1002 | /* Disable the context bank and nuke the TLB before freeing it. */ | 970 | /* |
971 | * Disable the context bank and free the page tables before freeing | ||
972 | * it. | ||
973 | */ | ||
1003 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); | 974 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); |
1004 | writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); | 975 | writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); |
1005 | arm_smmu_tlb_inv_context(smmu_domain); | ||
1006 | 976 | ||
1007 | if (cfg->irptndx != INVALID_IRPTNDX) { | 977 | if (cfg->irptndx != INVALID_IRPTNDX) { |
1008 | irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; | 978 | irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; |
1009 | free_irq(irq, domain); | 979 | free_irq(irq, domain); |
1010 | } | 980 | } |
1011 | 981 | ||
982 | if (smmu_domain->pgtbl_ops) | ||
983 | free_io_pgtable_ops(smmu_domain->pgtbl_ops); | ||
984 | |||
1012 | __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); | 985 | __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); |
1013 | } | 986 | } |
1014 | 987 | ||
1015 | static int arm_smmu_domain_init(struct iommu_domain *domain) | 988 | static int arm_smmu_domain_init(struct iommu_domain *domain) |
1016 | { | 989 | { |
1017 | struct arm_smmu_domain *smmu_domain; | 990 | struct arm_smmu_domain *smmu_domain; |
1018 | pgd_t *pgd; | ||
1019 | 991 | ||
1020 | /* | 992 | /* |
1021 | * Allocate the domain and initialise some of its data structures. | 993 | * Allocate the domain and initialise some of its data structures. |
@@ -1026,81 +998,10 @@ static int arm_smmu_domain_init(struct iommu_domain *domain) | |||
1026 | if (!smmu_domain) | 998 | if (!smmu_domain) |
1027 | return -ENOMEM; | 999 | return -ENOMEM; |
1028 | 1000 | ||
1029 | pgd = kcalloc(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL); | 1001 | mutex_init(&smmu_domain->init_mutex); |
1030 | if (!pgd) | 1002 | spin_lock_init(&smmu_domain->pgtbl_lock); |
1031 | goto out_free_domain; | ||
1032 | smmu_domain->cfg.pgd = pgd; | ||
1033 | |||
1034 | spin_lock_init(&smmu_domain->lock); | ||
1035 | domain->priv = smmu_domain; | 1003 | domain->priv = smmu_domain; |
1036 | return 0; | 1004 | return 0; |
1037 | |||
1038 | out_free_domain: | ||
1039 | kfree(smmu_domain); | ||
1040 | return -ENOMEM; | ||
1041 | } | ||
1042 | |||
1043 | static void arm_smmu_free_ptes(pmd_t *pmd) | ||
1044 | { | ||
1045 | pgtable_t table = pmd_pgtable(*pmd); | ||
1046 | |||
1047 | __free_page(table); | ||
1048 | } | ||
1049 | |||
1050 | static void arm_smmu_free_pmds(pud_t *pud) | ||
1051 | { | ||
1052 | int i; | ||
1053 | pmd_t *pmd, *pmd_base = pmd_offset(pud, 0); | ||
1054 | |||
1055 | pmd = pmd_base; | ||
1056 | for (i = 0; i < PTRS_PER_PMD; ++i) { | ||
1057 | if (pmd_none(*pmd)) | ||
1058 | continue; | ||
1059 | |||
1060 | arm_smmu_free_ptes(pmd); | ||
1061 | pmd++; | ||
1062 | } | ||
1063 | |||
1064 | pmd_free(NULL, pmd_base); | ||
1065 | } | ||
1066 | |||
1067 | static void arm_smmu_free_puds(pgd_t *pgd) | ||
1068 | { | ||
1069 | int i; | ||
1070 | pud_t *pud, *pud_base = pud_offset(pgd, 0); | ||
1071 | |||
1072 | pud = pud_base; | ||
1073 | for (i = 0; i < PTRS_PER_PUD; ++i) { | ||
1074 | if (pud_none(*pud)) | ||
1075 | continue; | ||
1076 | |||
1077 | arm_smmu_free_pmds(pud); | ||
1078 | pud++; | ||
1079 | } | ||
1080 | |||
1081 | pud_free(NULL, pud_base); | ||
1082 | } | ||
1083 | |||
1084 | static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain) | ||
1085 | { | ||
1086 | int i; | ||
1087 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | ||
1088 | pgd_t *pgd, *pgd_base = cfg->pgd; | ||
1089 | |||
1090 | /* | ||
1091 | * Recursively free the page tables for this domain. We don't | ||
1092 | * care about speculative TLB filling because the tables should | ||
1093 | * not be active in any context bank at this point (SCTLR.M is 0). | ||
1094 | */ | ||
1095 | pgd = pgd_base; | ||
1096 | for (i = 0; i < PTRS_PER_PGD; ++i) { | ||
1097 | if (pgd_none(*pgd)) | ||
1098 | continue; | ||
1099 | arm_smmu_free_puds(pgd); | ||
1100 | pgd++; | ||
1101 | } | ||
1102 | |||
1103 | kfree(pgd_base); | ||
1104 | } | 1005 | } |
1105 | 1006 | ||
1106 | static void arm_smmu_domain_destroy(struct iommu_domain *domain) | 1007 | static void arm_smmu_domain_destroy(struct iommu_domain *domain) |
@@ -1112,7 +1013,6 @@ static void arm_smmu_domain_destroy(struct iommu_domain *domain) | |||
1112 | * already been detached. | 1013 | * already been detached. |
1113 | */ | 1014 | */ |
1114 | arm_smmu_destroy_domain_context(domain); | 1015 | arm_smmu_destroy_domain_context(domain); |
1115 | arm_smmu_free_pgtables(smmu_domain); | ||
1116 | kfree(smmu_domain); | 1016 | kfree(smmu_domain); |
1117 | } | 1017 | } |
1118 | 1018 | ||
@@ -1244,7 +1144,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
1244 | { | 1144 | { |
1245 | int ret; | 1145 | int ret; |
1246 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1146 | struct arm_smmu_domain *smmu_domain = domain->priv; |
1247 | struct arm_smmu_device *smmu, *dom_smmu; | 1147 | struct arm_smmu_device *smmu; |
1248 | struct arm_smmu_master_cfg *cfg; | 1148 | struct arm_smmu_master_cfg *cfg; |
1249 | 1149 | ||
1250 | smmu = find_smmu_for_device(dev); | 1150 | smmu = find_smmu_for_device(dev); |
@@ -1258,21 +1158,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
1258 | return -EEXIST; | 1158 | return -EEXIST; |
1259 | } | 1159 | } |
1260 | 1160 | ||
1161 | /* Ensure that the domain is finalised */ | ||
1162 | ret = arm_smmu_init_domain_context(domain, smmu); | ||
1163 | if (IS_ERR_VALUE(ret)) | ||
1164 | return ret; | ||
1165 | |||
1261 | /* | 1166 | /* |
1262 | * Sanity check the domain. We don't support domains across | 1167 | * Sanity check the domain. We don't support domains across |
1263 | * different SMMUs. | 1168 | * different SMMUs. |
1264 | */ | 1169 | */ |
1265 | dom_smmu = ACCESS_ONCE(smmu_domain->smmu); | 1170 | if (smmu_domain->smmu != smmu) { |
1266 | if (!dom_smmu) { | ||
1267 | /* Now that we have a master, we can finalise the domain */ | ||
1268 | ret = arm_smmu_init_domain_context(domain, smmu); | ||
1269 | if (IS_ERR_VALUE(ret)) | ||
1270 | return ret; | ||
1271 | |||
1272 | dom_smmu = smmu_domain->smmu; | ||
1273 | } | ||
1274 | |||
1275 | if (dom_smmu != smmu) { | ||
1276 | dev_err(dev, | 1171 | dev_err(dev, |
1277 | "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", | 1172 | "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", |
1278 | dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); | 1173 | dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); |
@@ -1303,293 +1198,103 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) | |||
1303 | arm_smmu_domain_remove_master(smmu_domain, cfg); | 1198 | arm_smmu_domain_remove_master(smmu_domain, cfg); |
1304 | } | 1199 | } |
1305 | 1200 | ||
1306 | static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, | 1201 | static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, |
1307 | unsigned long end) | 1202 | phys_addr_t paddr, size_t size, int prot) |
1308 | { | ||
1309 | return !(addr & ~ARM_SMMU_PTE_CONT_MASK) && | ||
1310 | (addr + ARM_SMMU_PTE_CONT_SIZE <= end); | ||
1311 | } | ||
1312 | |||
1313 | static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, | ||
1314 | unsigned long addr, unsigned long end, | ||
1315 | unsigned long pfn, int prot, int stage) | ||
1316 | { | ||
1317 | pte_t *pte, *start; | ||
1318 | pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF; | ||
1319 | |||
1320 | if (pmd_none(*pmd)) { | ||
1321 | /* Allocate a new set of tables */ | ||
1322 | pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO); | ||
1323 | |||
1324 | if (!table) | ||
1325 | return -ENOMEM; | ||
1326 | |||
1327 | arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE); | ||
1328 | pmd_populate(NULL, pmd, table); | ||
1329 | arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd)); | ||
1330 | } | ||
1331 | |||
1332 | if (stage == 1) { | ||
1333 | pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG; | ||
1334 | if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) | ||
1335 | pteval |= ARM_SMMU_PTE_AP_RDONLY; | ||
1336 | |||
1337 | if (prot & IOMMU_CACHE) | ||
1338 | pteval |= (MAIR_ATTR_IDX_CACHE << | ||
1339 | ARM_SMMU_PTE_ATTRINDX_SHIFT); | ||
1340 | } else { | ||
1341 | pteval |= ARM_SMMU_PTE_HAP_FAULT; | ||
1342 | if (prot & IOMMU_READ) | ||
1343 | pteval |= ARM_SMMU_PTE_HAP_READ; | ||
1344 | if (prot & IOMMU_WRITE) | ||
1345 | pteval |= ARM_SMMU_PTE_HAP_WRITE; | ||
1346 | if (prot & IOMMU_CACHE) | ||
1347 | pteval |= ARM_SMMU_PTE_MEMATTR_OIWB; | ||
1348 | else | ||
1349 | pteval |= ARM_SMMU_PTE_MEMATTR_NC; | ||
1350 | } | ||
1351 | |||
1352 | if (prot & IOMMU_NOEXEC) | ||
1353 | pteval |= ARM_SMMU_PTE_XN; | ||
1354 | |||
1355 | /* If no access, create a faulting entry to avoid TLB fills */ | ||
1356 | if (!(prot & (IOMMU_READ | IOMMU_WRITE))) | ||
1357 | pteval &= ~ARM_SMMU_PTE_PAGE; | ||
1358 | |||
1359 | pteval |= ARM_SMMU_PTE_SH_IS; | ||
1360 | start = pmd_page_vaddr(*pmd) + pte_index(addr); | ||
1361 | pte = start; | ||
1362 | |||
1363 | /* | ||
1364 | * Install the page table entries. This is fairly complicated | ||
1365 | * since we attempt to make use of the contiguous hint in the | ||
1366 | * ptes where possible. The contiguous hint indicates a series | ||
1367 | * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically | ||
1368 | * contiguous region with the following constraints: | ||
1369 | * | ||
1370 | * - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE | ||
1371 | * - Each pte in the region has the contiguous hint bit set | ||
1372 | * | ||
1373 | * This complicates unmapping (also handled by this code, when | ||
1374 | * neither IOMMU_READ or IOMMU_WRITE are set) because it is | ||
1375 | * possible, yet highly unlikely, that a client may unmap only | ||
1376 | * part of a contiguous range. This requires clearing of the | ||
1377 | * contiguous hint bits in the range before installing the new | ||
1378 | * faulting entries. | ||
1379 | * | ||
1380 | * Note that re-mapping an address range without first unmapping | ||
1381 | * it is not supported, so TLB invalidation is not required here | ||
1382 | * and is instead performed at unmap and domain-init time. | ||
1383 | */ | ||
1384 | do { | ||
1385 | int i = 1; | ||
1386 | |||
1387 | pteval &= ~ARM_SMMU_PTE_CONT; | ||
1388 | |||
1389 | if (arm_smmu_pte_is_contiguous_range(addr, end)) { | ||
1390 | i = ARM_SMMU_PTE_CONT_ENTRIES; | ||
1391 | pteval |= ARM_SMMU_PTE_CONT; | ||
1392 | } else if (pte_val(*pte) & | ||
1393 | (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) { | ||
1394 | int j; | ||
1395 | pte_t *cont_start; | ||
1396 | unsigned long idx = pte_index(addr); | ||
1397 | |||
1398 | idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1); | ||
1399 | cont_start = pmd_page_vaddr(*pmd) + idx; | ||
1400 | for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j) | ||
1401 | pte_val(*(cont_start + j)) &= | ||
1402 | ~ARM_SMMU_PTE_CONT; | ||
1403 | |||
1404 | arm_smmu_flush_pgtable(smmu, cont_start, | ||
1405 | sizeof(*pte) * | ||
1406 | ARM_SMMU_PTE_CONT_ENTRIES); | ||
1407 | } | ||
1408 | |||
1409 | do { | ||
1410 | *pte = pfn_pte(pfn, __pgprot(pteval)); | ||
1411 | } while (pte++, pfn++, addr += PAGE_SIZE, --i); | ||
1412 | } while (addr != end); | ||
1413 | |||
1414 | arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start)); | ||
1415 | return 0; | ||
1416 | } | ||
1417 | |||
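The partial-unmap case that the long comment warns about can be modelled on its own: before one entry inside a contiguous block is rewritten, the hint bit must be dropped from the whole 16-entry block, which is exactly what the loop over cont_start does above. A user-space sketch under those assumptions, with a plain array standing in for the PTE page and PTE_CONT as a made-up stand-in for the hardware hint bit:

    #include <stdint.h>
    #include <stdio.h>

    #define CONT_ENTRIES 16u
    #define PTE_CONT     0x100u  /* stand-in for the contiguous-hint bit */

    static void splinter(uint32_t *ptes, unsigned int idx)
    {
            unsigned int start = idx & ~(CONT_ENTRIES - 1); /* block start */
            unsigned int j;

            for (j = 0; j < CONT_ENTRIES; j++)
                    ptes[start + j] &= ~PTE_CONT;
            /* a real driver would flush these PTEs to memory here */
    }

    int main(void)
    {
            uint32_t ptes[64];
            unsigned int i;

            for (i = 0; i < 64; i++)
                    ptes[i] = PTE_CONT | 1;
            splinter(ptes, 21);        /* entry 21 lives in block 16..31 */
            printf("ptes[16]=0x%x ptes[21]=0x%x ptes[32]=0x%x\n",
                   ptes[16], ptes[21], ptes[32]); /* 0x1 0x1 0x101 */
            return 0;
    }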
1418 | static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud, | ||
1419 | unsigned long addr, unsigned long end, | ||
1420 | phys_addr_t phys, int prot, int stage) | ||
1421 | { | 1203 | { |
1422 | int ret; | 1204 | int ret; |
1423 | pmd_t *pmd; | 1205 | unsigned long flags; |
1424 | unsigned long next, pfn = __phys_to_pfn(phys); | 1206 | struct arm_smmu_domain *smmu_domain = domain->priv; |
1425 | 1207 | struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; | |
1426 | #ifndef __PAGETABLE_PMD_FOLDED | ||
1427 | if (pud_none(*pud)) { | ||
1428 | pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); | ||
1429 | if (!pmd) | ||
1430 | return -ENOMEM; | ||
1431 | |||
1432 | arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE); | ||
1433 | pud_populate(NULL, pud, pmd); | ||
1434 | arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud)); | ||
1435 | |||
1436 | pmd += pmd_index(addr); | ||
1437 | } else | ||
1438 | #endif | ||
1439 | pmd = pmd_offset(pud, addr); | ||
1440 | 1208 | ||
1441 | do { | 1209 | if (!ops) |
1442 | next = pmd_addr_end(addr, end); | 1210 | return -ENODEV; |
1443 | ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn, | ||
1444 | prot, stage); | ||
1445 | phys += next - addr; | ||
1446 | pfn = __phys_to_pfn(phys); | ||
1447 | } while (pmd++, addr = next, addr < end); | ||
1448 | 1211 | ||
1212 | spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); | ||
1213 | ret = ops->map(ops, iova, paddr, size, prot); | ||
1214 | spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); | ||
1449 | return ret; | 1215 | return ret; |
1450 | } | 1216 | } |
1451 | 1217 | ||
1452 | static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd, | 1218 | static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, |
1453 | unsigned long addr, unsigned long end, | 1219 | size_t size) |
1454 | phys_addr_t phys, int prot, int stage) | ||
1455 | { | 1220 | { |
1456 | int ret = 0; | 1221 | size_t ret; |
1457 | pud_t *pud; | 1222 | unsigned long flags; |
1458 | unsigned long next; | 1223 | struct arm_smmu_domain *smmu_domain = domain->priv; |
1459 | 1224 | struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; | |
1460 | #ifndef __PAGETABLE_PUD_FOLDED | ||
1461 | if (pgd_none(*pgd)) { | ||
1462 | pud = (pud_t *)get_zeroed_page(GFP_ATOMIC); | ||
1463 | if (!pud) | ||
1464 | return -ENOMEM; | ||
1465 | |||
1466 | arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE); | ||
1467 | pgd_populate(NULL, pgd, pud); | ||
1468 | arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd)); | ||
1469 | |||
1470 | pud += pud_index(addr); | ||
1471 | } else | ||
1472 | #endif | ||
1473 | pud = pud_offset(pgd, addr); | ||
1474 | 1225 | ||
1475 | do { | 1226 | if (!ops) |
1476 | next = pud_addr_end(addr, end); | 1227 | return 0; |
1477 | ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys, | ||
1478 | prot, stage); | ||
1479 | phys += next - addr; | ||
1480 | } while (pud++, addr = next, addr < end); | ||
1481 | 1228 | ||
1229 | spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); | ||
1230 | ret = ops->unmap(ops, iova, size); | ||
1231 | spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); | ||
1482 | return ret; | 1232 | return ret; |
1483 | } | 1233 | } |
1484 | 1234 | ||
1485 | static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, | 1235 | static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, |
1486 | unsigned long iova, phys_addr_t paddr, | 1236 | dma_addr_t iova) |
1487 | size_t size, int prot) | ||
1488 | { | 1237 | { |
1489 | int ret, stage; | 1238 | struct arm_smmu_domain *smmu_domain = domain->priv; |
1490 | unsigned long end; | ||
1491 | phys_addr_t input_mask, output_mask; | ||
1492 | struct arm_smmu_device *smmu = smmu_domain->smmu; | 1239 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
1493 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | 1240 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
1494 | pgd_t *pgd = cfg->pgd; | 1241 | struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; |
1495 | unsigned long flags; | 1242 | struct device *dev = smmu->dev; |
1243 | void __iomem *cb_base; | ||
1244 | u32 tmp; | ||
1245 | u64 phys; | ||
1496 | 1246 | ||
1497 | if (cfg->cbar == CBAR_TYPE_S2_TRANS) { | 1247 | cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); |
1498 | stage = 2; | 1248 | |
1499 | input_mask = (1ULL << smmu->s2_input_size) - 1; | 1249 | if (smmu->version == 1) { |
1500 | output_mask = (1ULL << smmu->s2_output_size) - 1; | 1250 | u32 reg = iova & ~0xfff; |
1251 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO); | ||
1501 | } else { | 1252 | } else { |
1502 | stage = 1; | 1253 | u32 reg = iova & ~0xfff; |
1503 | input_mask = (1ULL << smmu->s1_input_size) - 1; | 1254 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO); |
1504 | output_mask = (1ULL << smmu->s1_output_size) - 1; | 1255 | reg = ((u64)iova & ~0xfff) >> 32; |
1256 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_HI); | ||
1505 | } | 1257 | } |
1506 | 1258 | ||
1507 | if (!pgd) | 1259 | if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp, |
1508 | return -EINVAL; | 1260 | !(tmp & ATSR_ACTIVE), 5, 50)) { |
1509 | 1261 | dev_err(dev, | |
1510 | if (size & ~PAGE_MASK) | 1262 | "iova to phys timed out on 0x%pad. Falling back to software table walk.\n", |
1511 | return -EINVAL; | 1263 | &iova); |
1512 | 1264 | return ops->iova_to_phys(ops, iova); | |
1513 | if ((phys_addr_t)iova & ~input_mask) | 1265 | } |
1514 | return -ERANGE; | ||
1515 | |||
1516 | if (paddr & ~output_mask) | ||
1517 | return -ERANGE; | ||
1518 | |||
1519 | spin_lock_irqsave(&smmu_domain->lock, flags); | ||
1520 | pgd += pgd_index(iova); | ||
1521 | end = iova + size; | ||
1522 | do { | ||
1523 | unsigned long next = pgd_addr_end(iova, end); | ||
1524 | |||
1525 | ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr, | ||
1526 | prot, stage); | ||
1527 | if (ret) | ||
1528 | goto out_unlock; | ||
1529 | |||
1530 | paddr += next - iova; | ||
1531 | iova = next; | ||
1532 | } while (pgd++, iova != end); | ||
1533 | |||
1534 | out_unlock: | ||
1535 | spin_unlock_irqrestore(&smmu_domain->lock, flags); | ||
1536 | |||
1537 | return ret; | ||
1538 | } | ||
1539 | |||
1540 | static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, | ||
1541 | phys_addr_t paddr, size_t size, int prot) | ||
1542 | { | ||
1543 | struct arm_smmu_domain *smmu_domain = domain->priv; | ||
1544 | |||
1545 | if (!smmu_domain) | ||
1546 | return -ENODEV; | ||
1547 | 1266 | ||
1548 | return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot); | 1267 | phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO); |
1549 | } | 1268 | phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32; |
1550 | 1269 | ||
1551 | static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, | 1270 | if (phys & CB_PAR_F) { |
1552 | size_t size) | 1271 | dev_err(dev, "translation fault!\n"); |
1553 | { | 1272 | dev_err(dev, "PAR = 0x%llx\n", phys); |
1554 | int ret; | 1273 | return 0; |
1555 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1274 | } |
1556 | 1275 | ||
1557 | ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0); | 1276 | return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff); |
1558 | arm_smmu_tlb_inv_context(smmu_domain); | ||
1559 | return ret ? 0 : size; | ||
1560 | } | 1277 | } |
1561 | 1278 | ||
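Both replacement paths on the right-hand side share one shape: bail out when no page-table ops have been attached yet, then forward to the io-pgtable layer under the domain's spinlock. A schematic kernel-style sketch of that shape; demo_domain and demo_pgtbl_ops are illustrative names, not the driver's actual structures:

    #include <linux/errno.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_pgtbl_ops {
            int (*map)(struct demo_pgtbl_ops *ops, unsigned long iova,
                       phys_addr_t paddr, size_t size, int prot);
    };

    struct demo_domain {
            spinlock_t pgtbl_lock;
            struct demo_pgtbl_ops *pgtbl_ops;  /* NULL until attach */
    };

    static int demo_map(struct demo_domain *dom, unsigned long iova,
                        phys_addr_t paddr, size_t size, int prot)
    {
            unsigned long flags;
            int ret;

            if (!dom->pgtbl_ops)  /* no page table installed yet */
                    return -ENODEV;

            spin_lock_irqsave(&dom->pgtbl_lock, flags);
            ret = dom->pgtbl_ops->map(dom->pgtbl_ops, iova, paddr, size, prot);
            spin_unlock_irqrestore(&dom->pgtbl_lock, flags);
            return ret;
    }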
1562 | static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, | 1279 | static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, |
1563 | dma_addr_t iova) | 1280 | dma_addr_t iova) |
1564 | { | 1281 | { |
1565 | pgd_t *pgdp, pgd; | 1282 | phys_addr_t ret; |
1566 | pud_t pud; | 1283 | unsigned long flags; |
1567 | pmd_t pmd; | ||
1568 | pte_t pte; | ||
1569 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1284 | struct arm_smmu_domain *smmu_domain = domain->priv; |
1570 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | 1285 | struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; |
1571 | 1286 | ||
1572 | pgdp = cfg->pgd; | 1287 | if (!ops) |
1573 | if (!pgdp) | ||
1574 | return 0; | 1288 | return 0; |
1575 | 1289 | ||
1576 | pgd = *(pgdp + pgd_index(iova)); | 1290 | spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); |
1577 | if (pgd_none(pgd)) | 1291 | if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS) |
1578 | return 0; | 1292 | ret = arm_smmu_iova_to_phys_hard(domain, iova); |
1579 | 1293 | else | |
1580 | pud = *pud_offset(&pgd, iova); | 1294 | ret = ops->iova_to_phys(ops, iova); |
1581 | if (pud_none(pud)) | 1295 | spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); |
1582 | return 0; | ||
1583 | |||
1584 | pmd = *pmd_offset(&pud, iova); | ||
1585 | if (pmd_none(pmd)) | ||
1586 | return 0; | ||
1587 | 1296 | ||
1588 | pte = *(pmd_page_vaddr(pmd) + pte_index(iova)); | 1297 | return ret; |
1589 | if (pte_none(pte)) | ||
1590 | return 0; | ||
1591 | |||
1592 | return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK); | ||
1593 | } | 1298 | } |
1594 | 1299 | ||
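The hardware path above writes the page-aligned IOVA to ATS1PR, polls ATSR until the walk finishes (falling back to the software table walk on timeout), and then decodes PAR. The PAR decode is easy to verify in isolation; a user-space sketch whose mask mirrors GENMASK_ULL(39, 12):

    #include <stdint.h>
    #include <stdio.h>

    #define CB_PAR_F    (1ULL << 0)                       /* fault flag */
    #define PAR_PA_MASK (((1ULL << 40) - 1) & ~0xfffULL)  /* bits 39:12 */

    static uint64_t decode_par(uint64_t par, uint64_t iova)
    {
            if (par & CB_PAR_F)  /* translation fault */
                    return 0;
            return (par & PAR_PA_MASK) | (iova & 0xfff);
    }

    int main(void)
    {
            /* frame 0x80000000 plus the page offset of the IOVA */
            printf("0x%llx\n", (unsigned long long)
                   decode_par(0x80000000ULL, 0x1234));  /* 0x80000234 */
            return 0;
    }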
1595 | static bool arm_smmu_capable(enum iommu_cap cap) | 1300 | static bool arm_smmu_capable(enum iommu_cap cap) |
@@ -1698,24 +1403,34 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, | |||
1698 | static int arm_smmu_domain_set_attr(struct iommu_domain *domain, | 1403 | static int arm_smmu_domain_set_attr(struct iommu_domain *domain, |
1699 | enum iommu_attr attr, void *data) | 1404 | enum iommu_attr attr, void *data) |
1700 | { | 1405 | { |
1406 | int ret = 0; | ||
1701 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1407 | struct arm_smmu_domain *smmu_domain = domain->priv; |
1702 | 1408 | ||
1409 | mutex_lock(&smmu_domain->init_mutex); | ||
1410 | |||
1703 | switch (attr) { | 1411 | switch (attr) { |
1704 | case DOMAIN_ATTR_NESTING: | 1412 | case DOMAIN_ATTR_NESTING: |
1705 | if (smmu_domain->smmu) | 1413 | if (smmu_domain->smmu) { |
1706 | return -EPERM; | 1414 | ret = -EPERM; |
1415 | goto out_unlock; | ||
1416 | } | ||
1417 | |||
1707 | if (*(int *)data) | 1418 | if (*(int *)data) |
1708 | smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; | 1419 | smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; |
1709 | else | 1420 | else |
1710 | smmu_domain->stage = ARM_SMMU_DOMAIN_S1; | 1421 | smmu_domain->stage = ARM_SMMU_DOMAIN_S1; |
1711 | 1422 | ||
1712 | return 0; | 1423 | break; |
1713 | default: | 1424 | default: |
1714 | return -ENODEV; | 1425 | ret = -ENODEV; |
1715 | } | 1426 | } |
1427 | |||
1428 | out_unlock: | ||
1429 | mutex_unlock(&smmu_domain->init_mutex); | ||
1430 | return ret; | ||
1716 | } | 1431 | } |
1717 | 1432 | ||
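The conversion above follows from adding init_mutex: once the switch runs under a lock, every early return has to become a goto so the unlock is never skipped. A schematic kernel-style sketch of the resulting error-path shape (demo_domain and its fields are illustrative):

    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    struct demo_domain {
            struct mutex init_mutex;
            bool attached;
            bool nested;
    };

    static int demo_set_attr(struct demo_domain *dom, int attr, void *data)
    {
            int ret = 0;

            mutex_lock(&dom->init_mutex);
            switch (attr) {
            case 0:  /* think DOMAIN_ATTR_NESTING */
                    if (dom->attached) {
                            ret = -EPERM;  /* was a bare "return -EPERM" */
                            goto out_unlock;
                    }
                    dom->nested = *(int *)data;
                    break;
            default:
                    ret = -ENODEV;
            }
    out_unlock:
            mutex_unlock(&dom->init_mutex);
            return ret;
    }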
1718 | static const struct iommu_ops arm_smmu_ops = { | 1433 | static struct iommu_ops arm_smmu_ops = { |
1719 | .capable = arm_smmu_capable, | 1434 | .capable = arm_smmu_capable, |
1720 | .domain_init = arm_smmu_domain_init, | 1435 | .domain_init = arm_smmu_domain_init, |
1721 | .domain_destroy = arm_smmu_domain_destroy, | 1436 | .domain_destroy = arm_smmu_domain_destroy, |
@@ -1729,9 +1444,7 @@ static const struct iommu_ops arm_smmu_ops = { | |||
1729 | .remove_device = arm_smmu_remove_device, | 1444 | .remove_device = arm_smmu_remove_device, |
1730 | .domain_get_attr = arm_smmu_domain_get_attr, | 1445 | .domain_get_attr = arm_smmu_domain_get_attr, |
1731 | .domain_set_attr = arm_smmu_domain_set_attr, | 1446 | .domain_set_attr = arm_smmu_domain_set_attr, |
1732 | .pgsize_bitmap = (SECTION_SIZE | | 1447 | .pgsize_bitmap = -1UL, /* Restricted during device attach */ |
1733 | ARM_SMMU_PTE_CONT_SIZE | | ||
1734 | PAGE_SIZE), | ||
1735 | }; | 1448 | }; |
1736 | 1449 | ||
1737 | static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | 1450 | static void arm_smmu_device_reset(struct arm_smmu_device *smmu) |
@@ -1760,7 +1473,6 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | |||
1760 | } | 1473 | } |
1761 | 1474 | ||
1762 | /* Invalidate the TLB, just in case */ | 1475 | /* Invalidate the TLB, just in case */ |
1763 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL); | ||
1764 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); | 1476 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); |
1765 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); | 1477 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); |
1766 | 1478 | ||
@@ -1782,7 +1494,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | |||
1782 | reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); | 1494 | reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); |
1783 | 1495 | ||
1784 | /* Push the button */ | 1496 | /* Push the button */ |
1785 | arm_smmu_tlb_sync(smmu); | 1497 | __arm_smmu_tlb_sync(smmu); |
1786 | writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); | 1498 | writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); |
1787 | } | 1499 | } |
1788 | 1500 | ||
@@ -1816,12 +1528,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1816 | 1528 | ||
1817 | /* ID0 */ | 1529 | /* ID0 */ |
1818 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0); | 1530 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0); |
1819 | #ifndef CONFIG_64BIT | ||
1820 | if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) { | ||
1821 | dev_err(smmu->dev, "\tno v7 descriptor support!\n"); | ||
1822 | return -ENODEV; | ||
1823 | } | ||
1824 | #endif | ||
1825 | 1531 | ||
1826 | /* Restrict available stages based on module parameter */ | 1532 | /* Restrict available stages based on module parameter */ |
1827 | if (force_stage == 1) | 1533 | if (force_stage == 1) |
@@ -1850,6 +1556,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1850 | return -ENODEV; | 1556 | return -ENODEV; |
1851 | } | 1557 | } |
1852 | 1558 | ||
1559 | if (smmu->version == 1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) { | ||
1560 | smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; | ||
1561 | dev_notice(smmu->dev, "\taddress translation ops\n"); | ||
1562 | } | ||
1563 | |||
1853 | if (id & ID0_CTTW) { | 1564 | if (id & ID0_CTTW) { |
1854 | smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; | 1565 | smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; |
1855 | dev_notice(smmu->dev, "\tcoherent table walk\n"); | 1566 | dev_notice(smmu->dev, "\tcoherent table walk\n"); |
@@ -1894,16 +1605,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1894 | smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12; | 1605 | smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12; |
1895 | 1606 | ||
1896 | /* Check for size mismatch of SMMU address space from mapped region */ | 1607 | /* Check for size mismatch of SMMU address space from mapped region */ |
1897 | size = 1 << | 1608 | size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); |
1898 | (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); | ||
1899 | size *= 2 << smmu->pgshift; | 1609 | size *= 2 << smmu->pgshift; |
1900 | if (smmu->size != size) | 1610 | if (smmu->size != size) |
1901 | dev_warn(smmu->dev, | 1611 | dev_warn(smmu->dev, |
1902 | "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n", | 1612 | "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n", |
1903 | size, smmu->size); | 1613 | size, smmu->size); |
1904 | 1614 | ||
1905 | smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & | 1615 | smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK; |
1906 | ID1_NUMS2CB_MASK; | ||
1907 | smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; | 1616 | smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; |
1908 | if (smmu->num_s2_context_banks > smmu->num_context_banks) { | 1617 | if (smmu->num_s2_context_banks > smmu->num_context_banks) { |
1909 | dev_err(smmu->dev, "impossible number of S2 context banks!\n"); | 1618 | dev_err(smmu->dev, "impossible number of S2 context banks!\n"); |
@@ -1915,46 +1624,40 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1915 | /* ID2 */ | 1624 | /* ID2 */ |
1916 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); | 1625 | id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); |
1917 | size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK); | 1626 | size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK); |
1918 | smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); | 1627 | smmu->ipa_size = size; |
1919 | |||
1920 | /* Stage-2 input size limited due to pgd allocation (PTRS_PER_PGD) */ | ||
1921 | #ifdef CONFIG_64BIT | ||
1922 | smmu->s2_input_size = min_t(unsigned long, VA_BITS, size); | ||
1923 | #else | ||
1924 | smmu->s2_input_size = min(32UL, size); | ||
1925 | #endif | ||
1926 | 1628 | ||
1927 | /* The stage-2 output mask is also applied for bypass */ | 1629 | /* The output mask is also applied for bypass */ |
1928 | size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); | 1630 | size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); |
1929 | smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); | 1631 | smmu->pa_size = size; |
1930 | 1632 | ||
1931 | if (smmu->version == ARM_SMMU_V1) { | 1633 | if (smmu->version == ARM_SMMU_V1) { |
1932 | smmu->s1_input_size = 32; | 1634 | smmu->va_size = smmu->ipa_size; |
1635 | size = SZ_4K | SZ_2M | SZ_1G; | ||
1933 | } else { | 1636 | } else { |
1934 | #ifdef CONFIG_64BIT | ||
1935 | size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; | 1637 | size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; |
1936 | size = min(VA_BITS, arm_smmu_id_size_to_bits(size)); | 1638 | smmu->va_size = arm_smmu_id_size_to_bits(size); |
1937 | #else | 1639 | #ifndef CONFIG_64BIT |
1938 | size = 32; | 1640 | smmu->va_size = min(32UL, smmu->va_size); |
1939 | #endif | 1641 | #endif |
1940 | smmu->s1_input_size = size; | 1642 | size = 0; |
1941 | 1643 | if (id & ID2_PTFS_4K) | |
1942 | if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) || | 1644 | size |= SZ_4K | SZ_2M | SZ_1G; |
1943 | (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) || | 1645 | if (id & ID2_PTFS_16K) |
1944 | (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) { | 1646 | size |= SZ_16K | SZ_32M; |
1945 | dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n", | 1647 | if (id & ID2_PTFS_64K) |
1946 | PAGE_SIZE); | 1648 | size |= SZ_64K | SZ_512M; |
1947 | return -ENODEV; | ||
1948 | } | ||
1949 | } | 1649 | } |
1950 | 1650 | ||
1651 | arm_smmu_ops.pgsize_bitmap &= size; | ||
1652 | dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size); | ||
1653 | |||
1951 | if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) | 1654 | if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) |
1952 | dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", | 1655 | dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", |
1953 | smmu->s1_input_size, smmu->s1_output_size); | 1656 | smmu->va_size, smmu->ipa_size); |
1954 | 1657 | ||
1955 | if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) | 1658 | if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) |
1956 | dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", | 1659 | dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", |
1957 | smmu->s2_input_size, smmu->s2_output_size); | 1660 | smmu->ipa_size, smmu->pa_size); |
1958 | 1661 | ||
1959 | return 0; | 1662 | return 0; |
1960 | } | 1663 | } |
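Two details of this probe hunk fit together: arm_smmu_ops.pgsize_bitmap starts out as -1UL, and the ID2 decode ANDs it down to the page sizes the hardware actually reports. A user-space sketch of that narrowing, assuming a hypothetical SMMUv2 that advertises only the 4K page-table format:

    #include <stdio.h>

    #define SZ_4K 0x1000UL
    #define SZ_2M 0x200000UL
    #define SZ_1G 0x40000000UL

    int main(void)
    {
            unsigned long pgsize_bitmap = -1UL;        /* unrestricted */
            unsigned long hw = SZ_4K | SZ_2M | SZ_1G;  /* ID2_PTFS_4K set */

            pgsize_bitmap &= hw;                       /* probe-time restriction */
            printf("0x%lx\n", pgsize_bitmap);          /* 0x40201000 */
            return 0;
    }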
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index 80ac68d884c5..abeedc9a78c2 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c | |||
@@ -18,22 +18,13 @@ | |||
18 | 18 | ||
19 | #define pr_fmt(fmt) "fsl-pamu: %s: " fmt, __func__ | 19 | #define pr_fmt(fmt) "fsl-pamu: %s: " fmt, __func__ |
20 | 20 | ||
21 | #include <linux/init.h> | 21 | #include "fsl_pamu.h" |
22 | #include <linux/iommu.h> | 22 | |
23 | #include <linux/slab.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/mm.h> | ||
27 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
28 | #include <linux/device.h> | ||
29 | #include <linux/of_platform.h> | ||
30 | #include <linux/bootmem.h> | ||
31 | #include <linux/genalloc.h> | 24 | #include <linux/genalloc.h> |
32 | #include <asm/io.h> | ||
33 | #include <asm/bitops.h> | ||
34 | #include <asm/fsl_guts.h> | ||
35 | 25 | ||
36 | #include "fsl_pamu.h" | 26 | #include <asm/mpc85xx.h> |
27 | #include <asm/fsl_guts.h> | ||
37 | 28 | ||
38 | /* define indexes for each operation mapping scenario */ | 29 | /* define indexes for each operation mapping scenario */ |
39 | #define OMI_QMAN 0x00 | 30 | #define OMI_QMAN 0x00 |
@@ -44,13 +35,13 @@ | |||
44 | #define make64(high, low) (((u64)(high) << 32) | (low)) | 35 | #define make64(high, low) (((u64)(high) << 32) | (low)) |
45 | 36 | ||
46 | struct pamu_isr_data { | 37 | struct pamu_isr_data { |
47 | void __iomem *pamu_reg_base; /* Base address of PAMU regs*/ | 38 | void __iomem *pamu_reg_base; /* Base address of PAMU regs */ |
48 | unsigned int count; /* The number of PAMUs */ | 39 | unsigned int count; /* The number of PAMUs */ |
49 | }; | 40 | }; |
50 | 41 | ||
51 | static struct paace *ppaact; | 42 | static struct paace *ppaact; |
52 | static struct paace *spaact; | 43 | static struct paace *spaact; |
53 | static struct ome *omt; | 44 | static struct ome *omt __initdata; |
54 | 45 | ||
55 | /* | 46 | /* |
56 | * Table for matching compatible strings, for device tree | 47 | * Table for matching compatible strings, for device tree |
@@ -58,14 +49,13 @@ static struct ome *omt; | |||
58 | * "fsl,qoriq-device-config-2.0" corresponds to T4 & B4 | 49 | * "fsl,qoriq-device-config-2.0" corresponds to T4 & B4 |
59 | * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0" | 50 | * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0" |
60 | * string would be used. | 51 | * string would be used. |
61 | */ | 52 | */ |
62 | static const struct of_device_id guts_device_ids[] = { | 53 | static const struct of_device_id guts_device_ids[] __initconst = { |
63 | { .compatible = "fsl,qoriq-device-config-1.0", }, | 54 | { .compatible = "fsl,qoriq-device-config-1.0", }, |
64 | { .compatible = "fsl,qoriq-device-config-2.0", }, | 55 | { .compatible = "fsl,qoriq-device-config-2.0", }, |
65 | {} | 56 | {} |
66 | }; | 57 | }; |
67 | 58 | ||
68 | |||
69 | /* | 59 | /* |
70 | * Table for matching compatible strings, for device tree | 60 | * Table for matching compatible strings, for device tree |
71 | * L3 cache controller node. | 61 | * L3 cache controller node. |
@@ -73,7 +63,7 @@ static const struct of_device_id guts_device_ids[] = { | |||
73 | * "fsl,b4860-l3-cache-controller" corresponds to B4 & | 63 | * "fsl,b4860-l3-cache-controller" corresponds to B4 & |
74 | * "fsl,p4080-l3-cache-controller" corresponds to other, | 64 | * "fsl,p4080-l3-cache-controller" corresponds to other, |
75 | * SOCs. | 65 | * SOCs. |
76 | */ | 66 | */ |
77 | static const struct of_device_id l3_device_ids[] = { | 67 | static const struct of_device_id l3_device_ids[] = { |
78 | { .compatible = "fsl,t4240-l3-cache-controller", }, | 68 | { .compatible = "fsl,t4240-l3-cache-controller", }, |
79 | { .compatible = "fsl,b4860-l3-cache-controller", }, | 69 | { .compatible = "fsl,b4860-l3-cache-controller", }, |
@@ -85,7 +75,7 @@ static const struct of_device_id l3_device_ids[] = { | |||
85 | static u32 max_subwindow_count; | 75 | static u32 max_subwindow_count; |
86 | 76 | ||
87 | /* Pool for fspi allocation */ | 77 | /* Pool for fspi allocation */ |
88 | struct gen_pool *spaace_pool; | 78 | static struct gen_pool *spaace_pool; |
89 | 79 | ||
90 | /** | 80 | /** |
91 | * pamu_get_max_subwin_cnt() - Return the maximum supported | 81 | * pamu_get_max_subwin_cnt() - Return the maximum supported |
@@ -170,7 +160,7 @@ int pamu_disable_liodn(int liodn) | |||
170 | static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size) | 160 | static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size) |
171 | { | 161 | { |
172 | /* Bug if not a power of 2 */ | 162 | /* Bug if not a power of 2 */ |
173 | BUG_ON((addrspace_size & (addrspace_size - 1))); | 163 | BUG_ON(addrspace_size & (addrspace_size - 1)); |
174 | 164 | ||
175 | /* window size is 2^(WSE+1) bytes */ | 165 | /* window size is 2^(WSE+1) bytes */ |
176 | return fls64(addrspace_size) - 2; | 166 | return fls64(addrspace_size) - 2; |
@@ -179,8 +169,8 @@ static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size) | |||
179 | /* Derive the PAACE window count encoding for the subwindow count */ | 169 | /* Derive the PAACE window count encoding for the subwindow count */ |
180 | static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt) | 170 | static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt) |
181 | { | 171 | { |
182 | /* window count is 2^(WCE+1) */ | 172 | /* window count is 2^(WCE+1) */ |
183 | return __ffs(subwindow_cnt) - 1; | 173 | return __ffs(subwindow_cnt) - 1; |
184 | } | 174 | } |
185 | 175 | ||
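Both encodings invert with simple bit arithmetic; a user-space sketch using compiler builtins in place of the kernel's fls64() and __ffs(), valid for the power-of-two inputs the BUG_ON enforces:

    #include <stdio.h>

    /* window size is 2^(WSE+1) bytes, so WSE = fls64(size) - 2 */
    static unsigned int wse(unsigned long long size)
    {
            return 63 - __builtin_clzll(size) - 1;  /* 1 MiB -> 19 */
    }

    /* subwindow count is 2^(WCE+1), so WCE = __ffs(count) - 1 */
    static unsigned int wce(unsigned int subwin_cnt)
    {
            return __builtin_ctz(subwin_cnt) - 1;   /* 256 -> 7 */
    }

    int main(void)
    {
            printf("WSE(1MiB)=%u, WCE(256)=%u\n", wse(1ULL << 20), wce(256));
            return 0;
    }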
186 | /* | 176 | /* |
@@ -241,7 +231,7 @@ static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum) | |||
241 | * If no SPAACE entry is available or the allocator cannot reserve the required | 231 | * If no SPAACE entry is available or the allocator cannot reserve the required |
242 | * number of contiguous entries, the function returns ULONG_MAX, indicating a failure. | 232 | * number of contiguous entries, the function returns ULONG_MAX, indicating a failure. |
243 | * | 233 | * |
244 | */ | 234 | */ |
245 | static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt) | 235 | static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt) |
246 | { | 236 | { |
247 | unsigned long spaace_addr; | 237 | unsigned long spaace_addr; |
@@ -288,9 +278,8 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value) | |||
288 | } | 278 | } |
289 | if (subwin) { | 279 | if (subwin) { |
290 | paace = pamu_get_spaace(paace, subwin - 1); | 280 | paace = pamu_get_spaace(paace, subwin - 1); |
291 | if (!paace) { | 281 | if (!paace) |
292 | return -ENOENT; | 282 | return -ENOENT; |
293 | } | ||
294 | } | 283 | } |
295 | set_bf(paace->impl_attr, PAACE_IA_CID, value); | 284 | set_bf(paace->impl_attr, PAACE_IA_CID, value); |
296 | 285 | ||
@@ -311,14 +300,12 @@ int pamu_disable_spaace(int liodn, u32 subwin) | |||
311 | } | 300 | } |
312 | if (subwin) { | 301 | if (subwin) { |
313 | paace = pamu_get_spaace(paace, subwin - 1); | 302 | paace = pamu_get_spaace(paace, subwin - 1); |
314 | if (!paace) { | 303 | if (!paace) |
315 | return -ENOENT; | 304 | return -ENOENT; |
316 | } | 305 | set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID); |
317 | set_bf(paace->addr_bitfields, PAACE_AF_V, | ||
318 | PAACE_V_INVALID); | ||
319 | } else { | 306 | } else { |
320 | set_bf(paace->addr_bitfields, PAACE_AF_AP, | 307 | set_bf(paace->addr_bitfields, PAACE_AF_AP, |
321 | PAACE_AP_PERMS_DENIED); | 308 | PAACE_AP_PERMS_DENIED); |
322 | } | 309 | } |
323 | 310 | ||
324 | mb(); | 311 | mb(); |
@@ -326,7 +313,6 @@ int pamu_disable_spaace(int liodn, u32 subwin) | |||
326 | return 0; | 313 | return 0; |
327 | } | 314 | } |
328 | 315 | ||
329 | |||
330 | /** | 316 | /** |
331 | * pamu_config_paace() - Sets up PPAACE entry for specified liodn | 317 | * pamu_config_paace() - Sets up PPAACE entry for specified liodn |
332 | * | 318 | * |
@@ -352,7 +338,8 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size, | |||
352 | unsigned long fspi; | 338 | unsigned long fspi; |
353 | 339 | ||
354 | if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) { | 340 | if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) { |
355 | pr_debug("window size too small or not a power of two %llx\n", win_size); | 341 | pr_debug("window size too small or not a power of two %pa\n", |
342 | &win_size); | ||
356 | return -EINVAL; | 343 | return -EINVAL; |
357 | } | 344 | } |
358 | 345 | ||
@@ -362,13 +349,12 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size, | |||
362 | } | 349 | } |
363 | 350 | ||
364 | ppaace = pamu_get_ppaace(liodn); | 351 | ppaace = pamu_get_ppaace(liodn); |
365 | if (!ppaace) { | 352 | if (!ppaace) |
366 | return -ENOENT; | 353 | return -ENOENT; |
367 | } | ||
368 | 354 | ||
369 | /* window size is 2^(WSE+1) bytes */ | 355 | /* window size is 2^(WSE+1) bytes */ |
370 | set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, | 356 | set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, |
371 | map_addrspace_size_to_wse(win_size)); | 357 | map_addrspace_size_to_wse(win_size)); |
372 | 358 | ||
373 | pamu_init_ppaace(ppaace); | 359 | pamu_init_ppaace(ppaace); |
374 | 360 | ||
@@ -442,7 +428,6 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin, | |||
442 | { | 428 | { |
443 | struct paace *paace; | 429 | struct paace *paace; |
444 | 430 | ||
445 | |||
446 | /* setup sub-windows */ | 431 | /* setup sub-windows */ |
447 | if (!subwin_cnt) { | 432 | if (!subwin_cnt) { |
448 | pr_debug("Invalid subwindow count\n"); | 433 | pr_debug("Invalid subwindow count\n"); |
@@ -510,11 +495,11 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin, | |||
510 | } | 495 | } |
511 | 496 | ||
512 | /** | 497 | /** |
513 | * get_ome_index() - Returns the index in the operation mapping table | 498 | * get_ome_index() - Returns the index in the operation mapping table |
514 | * for device. | 499 | * for device. |
515 | * @*omi_index: pointer for storing the index value | 500 | * @*omi_index: pointer for storing the index value |
516 | * | 501 | * |
517 | */ | 502 | */ |
518 | void get_ome_index(u32 *omi_index, struct device *dev) | 503 | void get_ome_index(u32 *omi_index, struct device *dev) |
519 | { | 504 | { |
520 | if (of_device_is_compatible(dev->of_node, "fsl,qman-portal")) | 505 | if (of_device_is_compatible(dev->of_node, "fsl,qman-portal")) |
@@ -544,9 +529,10 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu) | |||
544 | if (stash_dest_hint == PAMU_ATTR_CACHE_L3) { | 529 | if (stash_dest_hint == PAMU_ATTR_CACHE_L3) { |
545 | node = of_find_matching_node(NULL, l3_device_ids); | 530 | node = of_find_matching_node(NULL, l3_device_ids); |
546 | if (node) { | 531 | if (node) { |
547 | prop = of_get_property(node, "cache-stash-id", 0); | 532 | prop = of_get_property(node, "cache-stash-id", NULL); |
548 | if (!prop) { | 533 | if (!prop) { |
549 | pr_debug("missing cache-stash-id at %s\n", node->full_name); | 534 | pr_debug("missing cache-stash-id at %s\n", |
535 | node->full_name); | ||
550 | of_node_put(node); | 536 | of_node_put(node); |
551 | return ~(u32)0; | 537 | return ~(u32)0; |
552 | } | 538 | } |
@@ -570,9 +556,10 @@ found_cpu_node: | |||
570 | /* find the hwnode that represents the cache */ | 556 | /* find the hwnode that represents the cache */ |
571 | for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) { | 557 | for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) { |
572 | if (stash_dest_hint == cache_level) { | 558 | if (stash_dest_hint == cache_level) { |
573 | prop = of_get_property(node, "cache-stash-id", 0); | 559 | prop = of_get_property(node, "cache-stash-id", NULL); |
574 | if (!prop) { | 560 | if (!prop) { |
575 | pr_debug("missing cache-stash-id at %s\n", node->full_name); | 561 | pr_debug("missing cache-stash-id at %s\n", |
562 | node->full_name); | ||
576 | of_node_put(node); | 563 | of_node_put(node); |
577 | return ~(u32)0; | 564 | return ~(u32)0; |
578 | } | 565 | } |
@@ -580,10 +567,10 @@ found_cpu_node: | |||
580 | return be32_to_cpup(prop); | 567 | return be32_to_cpup(prop); |
581 | } | 568 | } |
582 | 569 | ||
583 | prop = of_get_property(node, "next-level-cache", 0); | 570 | prop = of_get_property(node, "next-level-cache", NULL); |
584 | if (!prop) { | 571 | if (!prop) { |
585 | pr_debug("can't find next-level-cache at %s\n", | 572 | pr_debug("can't find next-level-cache at %s\n", |
586 | node->full_name); | 573 | node->full_name); |
587 | of_node_put(node); | 574 | of_node_put(node); |
588 | return ~(u32)0; /* can't traverse any further */ | 575 | return ~(u32)0; /* can't traverse any further */ |
589 | } | 576 | } |
@@ -598,7 +585,7 @@ found_cpu_node: | |||
598 | } | 585 | } |
599 | 586 | ||
600 | pr_debug("stash dest not found for %d on vcpu %d\n", | 587 | pr_debug("stash dest not found for %d on vcpu %d\n", |
601 | stash_dest_hint, vcpu); | 588 | stash_dest_hint, vcpu); |
602 | return ~(u32)0; | 589 | return ~(u32)0; |
603 | } | 590 | } |
604 | 591 | ||
@@ -612,7 +599,7 @@ found_cpu_node: | |||
612 | * Memory accesses to QMAN and BMAN private memory need not be coherent, so | 599 | * Memory accesses to QMAN and BMAN private memory need not be coherent, so |
613 | * clear the PAACE entry coherency attribute for them. | 600 | * clear the PAACE entry coherency attribute for them. |
614 | */ | 601 | */ |
615 | static void setup_qbman_paace(struct paace *ppaace, int paace_type) | 602 | static void __init setup_qbman_paace(struct paace *ppaace, int paace_type) |
616 | { | 603 | { |
617 | switch (paace_type) { | 604 | switch (paace_type) { |
618 | case QMAN_PAACE: | 605 | case QMAN_PAACE: |
@@ -626,7 +613,7 @@ static void setup_qbman_paace(struct paace *ppaace, int paace_type) | |||
626 | case QMAN_PORTAL_PAACE: | 613 | case QMAN_PORTAL_PAACE: |
627 | set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED); | 614 | set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED); |
628 | ppaace->op_encode.index_ot.omi = OMI_QMAN; | 615 | ppaace->op_encode.index_ot.omi = OMI_QMAN; |
629 | /*Set DQRR and Frame stashing for the L3 cache */ | 616 | /* Set DQRR and Frame stashing for the L3 cache */ |
630 | set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0)); | 617 | set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0)); |
631 | break; | 618 | break; |
632 | case BMAN_PAACE: | 619 | case BMAN_PAACE: |
@@ -679,7 +666,7 @@ static void __init setup_omt(struct ome *omt) | |||
679 | * Get the maximum number of PAACT table entries | 666 | * Get the maximum number of PAACT table entries |
680 | * and subwindows supported by PAMU | 667 | * and subwindows supported by PAMU |
681 | */ | 668 | */ |
682 | static void get_pamu_cap_values(unsigned long pamu_reg_base) | 669 | static void __init get_pamu_cap_values(unsigned long pamu_reg_base) |
683 | { | 670 | { |
684 | u32 pc_val; | 671 | u32 pc_val; |
685 | 672 | ||
@@ -689,9 +676,9 @@ static void get_pamu_cap_values(unsigned long pamu_reg_base) | |||
689 | } | 676 | } |
690 | 677 | ||
691 | /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */ | 678 | /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */ |
692 | int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size, | 679 | static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size, |
693 | phys_addr_t ppaact_phys, phys_addr_t spaact_phys, | 680 | phys_addr_t ppaact_phys, phys_addr_t spaact_phys, |
694 | phys_addr_t omt_phys) | 681 | phys_addr_t omt_phys) |
695 | { | 682 | { |
696 | u32 *pc; | 683 | u32 *pc; |
697 | struct pamu_mmap_regs *pamu_regs; | 684 | struct pamu_mmap_regs *pamu_regs; |
@@ -727,7 +714,7 @@ int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size, | |||
727 | */ | 714 | */ |
728 | 715 | ||
729 | out_be32((u32 *)(pamu_reg_base + PAMU_PICS), | 716 | out_be32((u32 *)(pamu_reg_base + PAMU_PICS), |
730 | PAMU_ACCESS_VIOLATION_ENABLE); | 717 | PAMU_ACCESS_VIOLATION_ENABLE); |
731 | out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC); | 718 | out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC); |
732 | return 0; | 719 | return 0; |
733 | } | 720 | } |
@@ -757,9 +744,9 @@ static void __init setup_liodns(void) | |||
757 | ppaace->wbah = 0; | 744 | ppaace->wbah = 0; |
758 | set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0); | 745 | set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0); |
759 | set_bf(ppaace->impl_attr, PAACE_IA_ATM, | 746 | set_bf(ppaace->impl_attr, PAACE_IA_ATM, |
760 | PAACE_ATM_NO_XLATE); | 747 | PAACE_ATM_NO_XLATE); |
761 | set_bf(ppaace->addr_bitfields, PAACE_AF_AP, | 748 | set_bf(ppaace->addr_bitfields, PAACE_AF_AP, |
762 | PAACE_AP_PERMS_ALL); | 749 | PAACE_AP_PERMS_ALL); |
763 | if (of_device_is_compatible(node, "fsl,qman-portal")) | 750 | if (of_device_is_compatible(node, "fsl,qman-portal")) |
764 | setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE); | 751 | setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE); |
765 | if (of_device_is_compatible(node, "fsl,qman")) | 752 | if (of_device_is_compatible(node, "fsl,qman")) |
@@ -772,7 +759,7 @@ static void __init setup_liodns(void) | |||
772 | } | 759 | } |
773 | } | 760 | } |
774 | 761 | ||
775 | irqreturn_t pamu_av_isr(int irq, void *arg) | 762 | static irqreturn_t pamu_av_isr(int irq, void *arg) |
776 | { | 763 | { |
777 | struct pamu_isr_data *data = arg; | 764 | struct pamu_isr_data *data = arg; |
778 | phys_addr_t phys; | 765 | phys_addr_t phys; |
@@ -792,14 +779,16 @@ irqreturn_t pamu_av_isr(int irq, void *arg) | |||
792 | pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2)); | 779 | pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2)); |
793 | pr_emerg("AVS1=%08x\n", avs1); | 780 | pr_emerg("AVS1=%08x\n", avs1); |
794 | pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2)); | 781 | pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2)); |
795 | pr_emerg("AVA=%016llx\n", make64(in_be32(p + PAMU_AVAH), | 782 | pr_emerg("AVA=%016llx\n", |
796 | in_be32(p + PAMU_AVAL))); | 783 | make64(in_be32(p + PAMU_AVAH), |
784 | in_be32(p + PAMU_AVAL))); | ||
797 | pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD)); | 785 | pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD)); |
798 | pr_emerg("POEA=%016llx\n", make64(in_be32(p + PAMU_POEAH), | 786 | pr_emerg("POEA=%016llx\n", |
799 | in_be32(p + PAMU_POEAL))); | 787 | make64(in_be32(p + PAMU_POEAH), |
788 | in_be32(p + PAMU_POEAL))); | ||
800 | 789 | ||
801 | phys = make64(in_be32(p + PAMU_POEAH), | 790 | phys = make64(in_be32(p + PAMU_POEAH), |
802 | in_be32(p + PAMU_POEAL)); | 791 | in_be32(p + PAMU_POEAL)); |
803 | 792 | ||
804 | /* Assume that POEA points to a PAACE */ | 793 | /* Assume that POEA points to a PAACE */ |
805 | if (phys) { | 794 | if (phys) { |
@@ -807,11 +796,12 @@ irqreturn_t pamu_av_isr(int irq, void *arg) | |||
807 | 796 | ||
808 | /* Only the first four words are relevant */ | 797 | /* Only the first four words are relevant */ |
809 | for (j = 0; j < 4; j++) | 798 | for (j = 0; j < 4; j++) |
810 | pr_emerg("PAACE[%u]=%08x\n", j, in_be32(paace + j)); | 799 | pr_emerg("PAACE[%u]=%08x\n", |
800 | j, in_be32(paace + j)); | ||
811 | } | 801 | } |
812 | 802 | ||
813 | /* clear access violation condition */ | 803 | /* clear access violation condition */ |
814 | out_be32((p + PAMU_AVS1), avs1 & PAMU_AV_MASK); | 804 | out_be32(p + PAMU_AVS1, avs1 & PAMU_AV_MASK); |
815 | paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT); | 805 | paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT); |
816 | BUG_ON(!paace); | 806 | BUG_ON(!paace); |
817 | /* check if we got a violation for a disabled LIODN */ | 807 | /* check if we got a violation for a disabled LIODN */ |
@@ -827,13 +817,13 @@ irqreturn_t pamu_av_isr(int irq, void *arg) | |||
827 | /* Disable the LIODN */ | 817 | /* Disable the LIODN */ |
828 | ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT); | 818 | ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT); |
829 | BUG_ON(ret); | 819 | BUG_ON(ret); |
830 | pr_emerg("Disabling liodn %x\n", avs1 >> PAMU_AVS1_LIODN_SHIFT); | 820 | pr_emerg("Disabling liodn %x\n", |
821 | avs1 >> PAMU_AVS1_LIODN_SHIFT); | ||
831 | } | 822 | } |
832 | out_be32((p + PAMU_PICS), pics); | 823 | out_be32((p + PAMU_PICS), pics); |
833 | } | 824 | } |
834 | } | 825 | } |
835 | 826 | ||
836 | |||
837 | return IRQ_HANDLED; | 827 | return IRQ_HANDLED; |
838 | } | 828 | } |
839 | 829 | ||
@@ -952,7 +942,7 @@ static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id) | |||
952 | } | 942 | } |
953 | 943 | ||
954 | if (i == 0 || i == num_laws) { | 944 | if (i == 0 || i == num_laws) { |
955 | /* This should never happen*/ | 945 | /* This should never happen */ |
956 | ret = -ENOENT; | 946 | ret = -ENOENT; |
957 | goto error; | 947 | goto error; |
958 | } | 948 | } |
@@ -998,26 +988,27 @@ error: | |||
998 | static const struct { | 988 | static const struct { |
999 | u32 svr; | 989 | u32 svr; |
1000 | u32 port_id; | 990 | u32 port_id; |
1001 | } port_id_map[] = { | 991 | } port_id_map[] __initconst = { |
1002 | {0x82100010, 0xFF000000}, /* P2040 1.0 */ | 992 | {(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */ |
1003 | {0x82100011, 0xFF000000}, /* P2040 1.1 */ | 993 | {(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */ |
1004 | {0x82100110, 0xFF000000}, /* P2041 1.0 */ | 994 | {(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */ |
1005 | {0x82100111, 0xFF000000}, /* P2041 1.1 */ | 995 | {(SVR_P2041 << 8) | 0x11, 0xFF000000}, /* P2041 1.1 */ |
1006 | {0x82110310, 0xFF000000}, /* P3041 1.0 */ | 996 | {(SVR_P3041 << 8) | 0x10, 0xFF000000}, /* P3041 1.0 */ |
1007 | {0x82110311, 0xFF000000}, /* P3041 1.1 */ | 997 | {(SVR_P3041 << 8) | 0x11, 0xFF000000}, /* P3041 1.1 */ |
1008 | {0x82010020, 0xFFF80000}, /* P4040 2.0 */ | 998 | {(SVR_P4040 << 8) | 0x20, 0xFFF80000}, /* P4040 2.0 */ |
1009 | {0x82000020, 0xFFF80000}, /* P4080 2.0 */ | 999 | {(SVR_P4080 << 8) | 0x20, 0xFFF80000}, /* P4080 2.0 */ |
1010 | {0x82210010, 0xFC000000}, /* P5010 1.0 */ | 1000 | {(SVR_P5010 << 8) | 0x10, 0xFC000000}, /* P5010 1.0 */ |
1011 | {0x82210020, 0xFC000000}, /* P5010 2.0 */ | 1001 | {(SVR_P5010 << 8) | 0x20, 0xFC000000}, /* P5010 2.0 */ |
1012 | {0x82200010, 0xFC000000}, /* P5020 1.0 */ | 1002 | {(SVR_P5020 << 8) | 0x10, 0xFC000000}, /* P5020 1.0 */ |
1013 | {0x82050010, 0xFF800000}, /* P5021 1.0 */ | 1003 | {(SVR_P5021 << 8) | 0x10, 0xFF800000}, /* P5021 1.0 */ |
1014 | {0x82040010, 0xFF800000}, /* P5040 1.0 */ | 1004 | {(SVR_P5040 << 8) | 0x10, 0xFF800000}, /* P5040 1.0 */ |
1015 | }; | 1005 | }; |
1016 | 1006 | ||
1017 | #define SVR_SECURITY 0x80000 /* The Security (E) bit */ | 1007 | #define SVR_SECURITY 0x80000 /* The Security (E) bit */ |
1018 | 1008 | ||
1019 | static int __init fsl_pamu_probe(struct platform_device *pdev) | 1009 | static int __init fsl_pamu_probe(struct platform_device *pdev) |
1020 | { | 1010 | { |
1011 | struct device *dev = &pdev->dev; | ||
1021 | void __iomem *pamu_regs = NULL; | 1012 | void __iomem *pamu_regs = NULL; |
1022 | struct ccsr_guts __iomem *guts_regs = NULL; | 1013 | struct ccsr_guts __iomem *guts_regs = NULL; |
1023 | u32 pamubypenr, pamu_counter; | 1014 | u32 pamubypenr, pamu_counter; |
@@ -1042,22 +1033,21 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) | |||
1042 | * NOTE : All PAMUs share the same LIODN tables. | 1033 | * NOTE : All PAMUs share the same LIODN tables. |
1043 | */ | 1034 | */ |
1044 | 1035 | ||
1045 | pamu_regs = of_iomap(pdev->dev.of_node, 0); | 1036 | pamu_regs = of_iomap(dev->of_node, 0); |
1046 | if (!pamu_regs) { | 1037 | if (!pamu_regs) { |
1047 | dev_err(&pdev->dev, "ioremap of PAMU node failed\n"); | 1038 | dev_err(dev, "ioremap of PAMU node failed\n"); |
1048 | return -ENOMEM; | 1039 | return -ENOMEM; |
1049 | } | 1040 | } |
1050 | of_get_address(pdev->dev.of_node, 0, &size, NULL); | 1041 | of_get_address(dev->of_node, 0, &size, NULL); |
1051 | 1042 | ||
1052 | irq = irq_of_parse_and_map(pdev->dev.of_node, 0); | 1043 | irq = irq_of_parse_and_map(dev->of_node, 0); |
1053 | if (irq == NO_IRQ) { | 1044 | if (irq == NO_IRQ) { |
1054 | dev_warn(&pdev->dev, "no interrupts listed in PAMU node\n"); | 1045 | dev_warn(dev, "no interrupts listed in PAMU node\n"); |
1055 | goto error; | 1046 | goto error; |
1056 | } | 1047 | } |
1057 | 1048 | ||
1058 | data = kzalloc(sizeof(struct pamu_isr_data), GFP_KERNEL); | 1049 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
1059 | if (!data) { | 1050 | if (!data) { |
1060 | dev_err(&pdev->dev, "PAMU isr data memory allocation failed\n"); | ||
1061 | ret = -ENOMEM; | 1051 | ret = -ENOMEM; |
1062 | goto error; | 1052 | goto error; |
1063 | } | 1053 | } |
@@ -1067,15 +1057,14 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) | |||
1067 | /* The ISR needs access to the regs, so we won't iounmap them */ | 1057 | /* The ISR needs access to the regs, so we won't iounmap them */ |
1068 | ret = request_irq(irq, pamu_av_isr, 0, "pamu", data); | 1058 | ret = request_irq(irq, pamu_av_isr, 0, "pamu", data); |
1069 | if (ret < 0) { | 1059 | if (ret < 0) { |
1070 | dev_err(&pdev->dev, "error %i installing ISR for irq %i\n", | 1060 | dev_err(dev, "error %i installing ISR for irq %i\n", ret, irq); |
1071 | ret, irq); | ||
1072 | goto error; | 1061 | goto error; |
1073 | } | 1062 | } |
1074 | 1063 | ||
1075 | guts_node = of_find_matching_node(NULL, guts_device_ids); | 1064 | guts_node = of_find_matching_node(NULL, guts_device_ids); |
1076 | if (!guts_node) { | 1065 | if (!guts_node) { |
1077 | dev_err(&pdev->dev, "could not find GUTS node %s\n", | 1066 | dev_err(dev, "could not find GUTS node %s\n", |
1078 | pdev->dev.of_node->full_name); | 1067 | dev->of_node->full_name); |
1079 | ret = -ENODEV; | 1068 | ret = -ENODEV; |
1080 | goto error; | 1069 | goto error; |
1081 | } | 1070 | } |
@@ -1083,7 +1072,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) | |||
1083 | guts_regs = of_iomap(guts_node, 0); | 1072 | guts_regs = of_iomap(guts_node, 0); |
1084 | of_node_put(guts_node); | 1073 | of_node_put(guts_node); |
1085 | if (!guts_regs) { | 1074 | if (!guts_regs) { |
1086 | dev_err(&pdev->dev, "ioremap of GUTS node failed\n"); | 1075 | dev_err(dev, "ioremap of GUTS node failed\n"); |
1087 | ret = -ENODEV; | 1076 | ret = -ENODEV; |
1088 | goto error; | 1077 | goto error; |
1089 | } | 1078 | } |
@@ -1103,7 +1092,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) | |||
1103 | 1092 | ||
1104 | p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); | 1093 | p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); |
1105 | if (!p) { | 1094 | if (!p) { |
1106 | dev_err(&pdev->dev, "unable to allocate PAACT/SPAACT/OMT block\n"); | 1095 | dev_err(dev, "unable to allocate PAACT/SPAACT/OMT block\n"); |
1107 | ret = -ENOMEM; | 1096 | ret = -ENOMEM; |
1108 | goto error; | 1097 | goto error; |
1109 | } | 1098 | } |
@@ -1113,7 +1102,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) | |||
1113 | 1102 | ||
1114 | /* Make sure the memory is naturally aligned */ | 1103 | /* Make sure the memory is naturally aligned */ |
1115 | if (ppaact_phys & ((PAGE_SIZE << order) - 1)) { | 1104 | if (ppaact_phys & ((PAGE_SIZE << order) - 1)) { |
1116 | dev_err(&pdev->dev, "PAACT/OMT block is unaligned\n"); | 1105 | dev_err(dev, "PAACT/OMT block is unaligned\n"); |
1117 | ret = -ENOMEM; | 1106 | ret = -ENOMEM; |
1118 | goto error; | 1107 | goto error; |
1119 | } | 1108 | } |
@@ -1121,8 +1110,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) | |||
1121 | spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE)); | 1110 | spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE)); |
1122 | omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE)); | 1111 | omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE)); |
1123 | 1112 | ||
1124 | dev_dbg(&pdev->dev, "ppaact virt=%p phys=0x%llx\n", ppaact, | 1113 | dev_dbg(dev, "ppaact virt=%p phys=%pa\n", ppaact, &ppaact_phys); |
1125 | (unsigned long long) ppaact_phys); | ||
1126 | 1114 | ||
1127 | /* Check to see if we need to implement the work-around on this SOC */ | 1115 | /* Check to see if we need to implement the work-around on this SOC */ |
1128 | 1116 | ||
@@ -1130,21 +1118,19 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) | |||
1130 | for (i = 0; i < ARRAY_SIZE(port_id_map); i++) { | 1118 | for (i = 0; i < ARRAY_SIZE(port_id_map); i++) { |
1131 | if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) { | 1119 | if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) { |
1132 | csd_port_id = port_id_map[i].port_id; | 1120 | csd_port_id = port_id_map[i].port_id; |
1133 | dev_dbg(&pdev->dev, "found matching SVR %08x\n", | 1121 | dev_dbg(dev, "found matching SVR %08x\n", |
1134 | port_id_map[i].svr); | 1122 | port_id_map[i].svr); |
1135 | break; | 1123 | break; |
1136 | } | 1124 | } |
1137 | } | 1125 | } |
1138 | 1126 | ||
1139 | if (csd_port_id) { | 1127 | if (csd_port_id) { |
1140 | dev_dbg(&pdev->dev, "creating coherency subdomain at address " | 1128 | dev_dbg(dev, "creating coherency subdomain at address %pa, size %zu, port id 0x%08x", |
1141 | "0x%llx, size %zu, port id 0x%08x", ppaact_phys, | 1129 | &ppaact_phys, mem_size, csd_port_id); |
1142 | mem_size, csd_port_id); | ||
1143 | 1130 | ||
1144 | ret = create_csd(ppaact_phys, mem_size, csd_port_id); | 1131 | ret = create_csd(ppaact_phys, mem_size, csd_port_id); |
1145 | if (ret) { | 1132 | if (ret) { |
1146 | dev_err(&pdev->dev, "could not create coherence " | 1133 | dev_err(dev, "could not create coherence subdomain\n"); |
1147 | "subdomain\n"); | ||
1148 | return ret; | 1134 | return ret; |
1149 | } | 1135 | } |
1150 | } | 1136 | } |
@@ -1155,7 +1141,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) | |||
1155 | spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1); | 1141 | spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1); |
1156 | if (!spaace_pool) { | 1142 | if (!spaace_pool) { |
1157 | ret = -ENOMEM; | 1143 | ret = -ENOMEM; |
1158 | dev_err(&pdev->dev, "PAMU : failed to allocate spaace gen pool\n"); | 1144 | dev_err(dev, "Failed to allocate spaace gen pool\n"); |
1159 | goto error; | 1145 | goto error; |
1160 | } | 1146 | } |
1161 | 1147 | ||
@@ -1168,9 +1154,9 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) | |||
1168 | for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size; | 1154 | for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size; |
1169 | pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) { | 1155 | pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) { |
1170 | 1156 | ||
1171 | pamu_reg_base = (unsigned long) pamu_regs + pamu_reg_off; | 1157 | pamu_reg_base = (unsigned long)pamu_regs + pamu_reg_off; |
1172 | setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys, | 1158 | setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys, |
1173 | spaact_phys, omt_phys); | 1159 | spaact_phys, omt_phys); |
1174 | /* Disable PAMU bypass for this PAMU */ | 1160 | /* Disable PAMU bypass for this PAMU */ |
1175 | pamubypenr &= ~pamu_counter; | 1161 | pamubypenr &= ~pamu_counter; |
1176 | } | 1162 | } |
@@ -1182,7 +1168,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) | |||
1182 | 1168 | ||
1183 | iounmap(guts_regs); | 1169 | iounmap(guts_regs); |
1184 | 1170 | ||
1185 | /* Enable DMA for the LIODNs in the device tree*/ | 1171 | /* Enable DMA for the LIODNs in the device tree */ |
1186 | 1172 | ||
1187 | setup_liodns(); | 1173 | setup_liodns(); |
1188 | 1174 | ||
@@ -1214,17 +1200,7 @@ error: | |||
1214 | return ret; | 1200 | return ret; |
1215 | } | 1201 | } |
1216 | 1202 | ||
1217 | static const struct of_device_id fsl_of_pamu_ids[] = { | 1203 | static struct platform_driver fsl_of_pamu_driver __initdata = { |
1218 | { | ||
1219 | .compatible = "fsl,p4080-pamu", | ||
1220 | }, | ||
1221 | { | ||
1222 | .compatible = "fsl,pamu", | ||
1223 | }, | ||
1224 | {}, | ||
1225 | }; | ||
1226 | |||
1227 | static struct platform_driver fsl_of_pamu_driver = { | ||
1228 | .driver = { | 1204 | .driver = { |
1229 | .name = "fsl-of-pamu", | 1205 | .name = "fsl-of-pamu", |
1230 | }, | 1206 | }, |
diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h index 8fc1a125b16e..aab723f91f12 100644 --- a/drivers/iommu/fsl_pamu.h +++ b/drivers/iommu/fsl_pamu.h | |||
@@ -19,13 +19,15 @@ | |||
19 | #ifndef __FSL_PAMU_H | 19 | #ifndef __FSL_PAMU_H |
20 | #define __FSL_PAMU_H | 20 | #define __FSL_PAMU_H |
21 | 21 | ||
22 | #include <linux/iommu.h> | ||
23 | |||
22 | #include <asm/fsl_pamu_stash.h> | 24 | #include <asm/fsl_pamu_stash.h> |
23 | 25 | ||
24 | /* Bit Field macros | 26 | /* Bit Field macros |
25 | * v = bit field variable; m = mask, m##_SHIFT = shift, x = value to load | 27 | * v = bit field variable; m = mask, m##_SHIFT = shift, x = value to load |
26 | */ | 28 | */ |
27 | #define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << (m##_SHIFT)) & (m))) | 29 | #define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << m##_SHIFT) & (m))) |
28 | #define get_bf(v, m) (((v) & (m)) >> (m##_SHIFT)) | 30 | #define get_bf(v, m) (((v) & (m)) >> m##_SHIFT) |
29 | 31 | ||
30 | /* PAMU CCSR space */ | 32 | /* PAMU CCSR space */ |
31 | #define PAMU_PGC 0x00000000 /* Allows all peripheral accesses */ | 33 | #define PAMU_PGC 0x00000000 /* Allows all peripheral accesses */ |
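The tidied set_bf()/get_bf() macros a few lines up behave the same as before and can be sanity-checked in user space; a sketch with a made-up 4-bit field at bits 7:4 (DEMO_FIELD and its shift are inventions for the example, not PAMU registers):

    #include <stdint.h>
    #include <stdio.h>

    #define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << m##_SHIFT) & (m)))
    #define get_bf(v, m)    (((v) & (m)) >> m##_SHIFT)

    #define DEMO_FIELD       0xf0
    #define DEMO_FIELD_SHIFT 4

    int main(void)
    {
            uint32_t reg = 0xdeadbe0f;

            set_bf(reg, DEMO_FIELD, 0xa);  /* reg becomes 0xdeadbeaf */
            printf("reg=0x%08x field=0x%x\n", reg, get_bf(reg, DEMO_FIELD));
            return 0;
    }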
@@ -65,7 +67,7 @@ struct pamu_mmap_regs { | |||
65 | #define PAMU_AVS1_GCV 0x2000 | 67 | #define PAMU_AVS1_GCV 0x2000 |
66 | #define PAMU_AVS1_PDV 0x4000 | 68 | #define PAMU_AVS1_PDV 0x4000 |
67 | #define PAMU_AV_MASK (PAMU_AVS1_AV | PAMU_AVS1_OTV | PAMU_AVS1_APV | PAMU_AVS1_WAV \ | 69 | #define PAMU_AV_MASK (PAMU_AVS1_AV | PAMU_AVS1_OTV | PAMU_AVS1_APV | PAMU_AVS1_WAV \ |
68 | | PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV) | 70 | | PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV) |
69 | #define PAMU_AVS1_LIODN_SHIFT 16 | 71 | #define PAMU_AVS1_LIODN_SHIFT 16 |
70 | #define PAMU_LAV_LIODN_NOT_IN_PPAACT 0x400 | 72 | #define PAMU_LAV_LIODN_NOT_IN_PPAACT 0x400 |
71 | 73 | ||
@@ -198,8 +200,7 @@ struct pamu_mmap_regs { | |||
198 | #define PAACE_ATM_NO_XLATE 0x00 | 200 | #define PAACE_ATM_NO_XLATE 0x00 |
199 | #define PAACE_ATM_WINDOW_XLATE 0x01 | 201 | #define PAACE_ATM_WINDOW_XLATE 0x01 |
200 | #define PAACE_ATM_PAGE_XLATE 0x02 | 202 | #define PAACE_ATM_PAGE_XLATE 0x02 |
201 | #define PAACE_ATM_WIN_PG_XLATE \ | 203 | #define PAACE_ATM_WIN_PG_XLATE (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE) |
202 | (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE) | ||
203 | #define PAACE_OTM_NO_XLATE 0x00 | 204 | #define PAACE_OTM_NO_XLATE 0x00 |
204 | #define PAACE_OTM_IMMEDIATE 0x01 | 205 | #define PAACE_OTM_IMMEDIATE 0x01 |
205 | #define PAACE_OTM_INDEXED 0x02 | 206 | #define PAACE_OTM_INDEXED 0x02 |
@@ -219,7 +220,7 @@ struct pamu_mmap_regs { | |||
219 | #define PAACE_TCEF_FORMAT0_8B 0x00 | 220 | #define PAACE_TCEF_FORMAT0_8B 0x00 |
220 | #define PAACE_TCEF_FORMAT1_RSVD 0x01 | 221 | #define PAACE_TCEF_FORMAT1_RSVD 0x01 |
221 | /* | 222 | /* |
222 | * Hard coded value for the PAACT size to accomodate | 223 | * Hard coded value for the PAACT size to accommodate |
223 | * maximum LIODN value generated by u-boot. | 224 | * maximum LIODN value generated by u-boot. |
224 | */ | 225 | */ |
225 | #define PAACE_NUMBER_ENTRIES 0x500 | 226 | #define PAACE_NUMBER_ENTRIES 0x500 |
@@ -332,7 +333,7 @@ struct paace { | |||
332 | #define NUM_MOE 128 | 333 | #define NUM_MOE 128 |
333 | struct ome { | 334 | struct ome { |
334 | u8 moe[NUM_MOE]; | 335 | u8 moe[NUM_MOE]; |
335 | } __attribute__((packed)); | 336 | } __packed; |
336 | 337 | ||
337 | #define PAACT_SIZE (sizeof(struct paace) * PAACE_NUMBER_ENTRIES) | 338 | #define PAACT_SIZE (sizeof(struct paace) * PAACE_NUMBER_ENTRIES) |
338 | #define SPAACT_SIZE (sizeof(struct paace) * SPAACE_NUMBER_ENTRIES) | 339 | #define SPAACT_SIZE (sizeof(struct paace) * SPAACE_NUMBER_ENTRIES) |
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index c828f80d48b0..ceebd287b660 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c | |||
@@ -19,26 +19,10 @@ | |||
19 | 19 | ||
20 | #define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__ | 20 | #define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__ |
21 | 21 | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/iommu.h> | ||
24 | #include <linux/notifier.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/interrupt.h> | ||
30 | #include <linux/device.h> | ||
31 | #include <linux/of_platform.h> | ||
32 | #include <linux/bootmem.h> | ||
33 | #include <linux/err.h> | ||
34 | #include <asm/io.h> | ||
35 | #include <asm/bitops.h> | ||
36 | |||
37 | #include <asm/pci-bridge.h> | ||
38 | #include <sysdev/fsl_pci.h> | ||
39 | |||
40 | #include "fsl_pamu_domain.h" | 22 | #include "fsl_pamu_domain.h" |
41 | 23 | ||
24 | #include <sysdev/fsl_pci.h> | ||
25 | |||
42 | /* | 26 | /* |
43 | * Global spinlock that needs to be held while | 27 | * Global spinlock that needs to be held while |
44 | * configuring PAMU. | 28 | * configuring PAMU. |
@@ -51,23 +35,21 @@ static DEFINE_SPINLOCK(device_domain_lock); | |||
51 | 35 | ||
52 | static int __init iommu_init_mempool(void) | 36 | static int __init iommu_init_mempool(void) |
53 | { | 37 | { |
54 | |||
55 | fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain", | 38 | fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain", |
56 | sizeof(struct fsl_dma_domain), | 39 | sizeof(struct fsl_dma_domain), |
57 | 0, | 40 | 0, |
58 | SLAB_HWCACHE_ALIGN, | 41 | SLAB_HWCACHE_ALIGN, |
59 | 42 | NULL); | |
60 | NULL); | ||
61 | if (!fsl_pamu_domain_cache) { | 43 | if (!fsl_pamu_domain_cache) { |
62 | pr_debug("Couldn't create fsl iommu_domain cache\n"); | 44 | pr_debug("Couldn't create fsl iommu_domain cache\n"); |
63 | return -ENOMEM; | 45 | return -ENOMEM; |
64 | } | 46 | } |
65 | 47 | ||
66 | iommu_devinfo_cache = kmem_cache_create("iommu_devinfo", | 48 | iommu_devinfo_cache = kmem_cache_create("iommu_devinfo", |
67 | sizeof(struct device_domain_info), | 49 | sizeof(struct device_domain_info), |
68 | 0, | 50 | 0, |
69 | SLAB_HWCACHE_ALIGN, | 51 | SLAB_HWCACHE_ALIGN, |
70 | NULL); | 52 | NULL); |
71 | if (!iommu_devinfo_cache) { | 53 | if (!iommu_devinfo_cache) { |
72 | pr_debug("Couldn't create devinfo cache\n"); | 54 | pr_debug("Couldn't create devinfo cache\n"); |
73 | kmem_cache_destroy(fsl_pamu_domain_cache); | 55 | kmem_cache_destroy(fsl_pamu_domain_cache); |
@@ -80,8 +62,7 @@ static int __init iommu_init_mempool(void) | |||
80 | static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova) | 62 | static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova) |
81 | { | 63 | { |
82 | u32 win_cnt = dma_domain->win_cnt; | 64 | u32 win_cnt = dma_domain->win_cnt; |
83 | struct dma_window *win_ptr = | 65 | struct dma_window *win_ptr = &dma_domain->win_arr[0]; |
84 | &dma_domain->win_arr[0]; | ||
85 | struct iommu_domain_geometry *geom; | 66 | struct iommu_domain_geometry *geom; |
86 | 67 | ||
87 | geom = &dma_domain->iommu_domain->geometry; | 68 | geom = &dma_domain->iommu_domain->geometry; |
@@ -103,22 +84,20 @@ static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t i | |||
103 | } | 84 | } |
104 | 85 | ||
105 | if (win_ptr->valid) | 86 | if (win_ptr->valid) |
106 | return (win_ptr->paddr + (iova & (win_ptr->size - 1))); | 87 | return win_ptr->paddr + (iova & (win_ptr->size - 1)); |
107 | 88 | ||
108 | return 0; | 89 | return 0; |
109 | } | 90 | } |
110 | 91 | ||
111 | static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain) | 92 | static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain) |
112 | { | 93 | { |
113 | struct dma_window *sub_win_ptr = | 94 | struct dma_window *sub_win_ptr = &dma_domain->win_arr[0]; |
114 | &dma_domain->win_arr[0]; | ||
115 | int i, ret; | 95 | int i, ret; |
116 | unsigned long rpn, flags; | 96 | unsigned long rpn, flags; |
117 | 97 | ||
118 | for (i = 0; i < dma_domain->win_cnt; i++) { | 98 | for (i = 0; i < dma_domain->win_cnt; i++) { |
119 | if (sub_win_ptr[i].valid) { | 99 | if (sub_win_ptr[i].valid) { |
120 | rpn = sub_win_ptr[i].paddr >> | 100 | rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT; |
121 | PAMU_PAGE_SHIFT; | ||
122 | spin_lock_irqsave(&iommu_lock, flags); | 101 | spin_lock_irqsave(&iommu_lock, flags); |
123 | ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i, | 102 | ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i, |
124 | sub_win_ptr[i].size, | 103 | sub_win_ptr[i].size, |
@@ -130,7 +109,7 @@ static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain) | |||
130 | sub_win_ptr[i].prot); | 109 | sub_win_ptr[i].prot); |
131 | spin_unlock_irqrestore(&iommu_lock, flags); | 110 | spin_unlock_irqrestore(&iommu_lock, flags); |
132 | if (ret) { | 111 | if (ret) { |
133 | pr_debug("PAMU SPAACE configuration failed for liodn %d\n", | 112 | pr_debug("SPAACE configuration failed for liodn %d\n", |
134 | liodn); | 113 | liodn); |
135 | return ret; | 114 | return ret; |
136 | } | 115 | } |
@@ -156,8 +135,7 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain) | |||
156 | 0, wnd->prot); | 135 | 0, wnd->prot); |
157 | spin_unlock_irqrestore(&iommu_lock, flags); | 136 | spin_unlock_irqrestore(&iommu_lock, flags); |
158 | if (ret) | 137 | if (ret) |
159 | pr_debug("PAMU PAACE configuration failed for liodn %d\n", | 138 | pr_debug("PAACE configuration failed for liodn %d\n", liodn); |
160 | liodn); | ||
161 | 139 | ||
162 | return ret; | 140 | return ret; |
163 | } | 141 | } |
@@ -169,7 +147,6 @@ static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain) | |||
169 | return map_subwins(liodn, dma_domain); | 147 | return map_subwins(liodn, dma_domain); |
170 | else | 148 | else |
171 | return map_win(liodn, dma_domain); | 149 | return map_win(liodn, dma_domain); |
172 | |||
173 | } | 150 | } |
174 | 151 | ||
175 | /* Update window/subwindow mapping for the LIODN */ | 152 | /* Update window/subwindow mapping for the LIODN */ |
@@ -190,7 +167,8 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr | |||
190 | (wnd_nr > 0) ? 1 : 0, | 167 | (wnd_nr > 0) ? 1 : 0, |
191 | wnd->prot); | 168 | wnd->prot); |
192 | if (ret) | 169 | if (ret) |
193 | pr_debug("Subwindow reconfiguration failed for liodn %d\n", liodn); | 170 | pr_debug("Subwindow reconfiguration failed for liodn %d\n", |
171 | liodn); | ||
194 | } else { | 172 | } else { |
195 | phys_addr_t wnd_addr; | 173 | phys_addr_t wnd_addr; |
196 | 174 | ||
@@ -200,10 +178,11 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr | |||
200 | wnd->size, | 178 | wnd->size, |
201 | ~(u32)0, | 179 | ~(u32)0, |
202 | wnd->paddr >> PAMU_PAGE_SHIFT, | 180 | wnd->paddr >> PAMU_PAGE_SHIFT, |
203 | dma_domain->snoop_id, dma_domain->stash_id, | 181 | dma_domain->snoop_id, dma_domain->stash_id, |
204 | 0, wnd->prot); | 182 | 0, wnd->prot); |
205 | if (ret) | 183 | if (ret) |
206 | pr_debug("Window reconfiguration failed for liodn %d\n", liodn); | 184 | pr_debug("Window reconfiguration failed for liodn %d\n", |
185 | liodn); | ||
207 | } | 186 | } |
208 | 187 | ||
209 | spin_unlock_irqrestore(&iommu_lock, flags); | 188 | spin_unlock_irqrestore(&iommu_lock, flags); |
@@ -212,14 +191,15 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr | |||
212 | } | 191 | } |
213 | 192 | ||
214 | static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, | 193 | static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, |
215 | u32 val) | 194 | u32 val) |
216 | { | 195 | { |
217 | int ret = 0, i; | 196 | int ret = 0, i; |
218 | unsigned long flags; | 197 | unsigned long flags; |
219 | 198 | ||
220 | spin_lock_irqsave(&iommu_lock, flags); | 199 | spin_lock_irqsave(&iommu_lock, flags); |
221 | if (!dma_domain->win_arr) { | 200 | if (!dma_domain->win_arr) { |
222 | pr_debug("Windows not configured, stash destination update failed for liodn %d\n", liodn); | 201 | pr_debug("Windows not configured, stash destination update failed for liodn %d\n", |
202 | liodn); | ||
223 | spin_unlock_irqrestore(&iommu_lock, flags); | 203 | spin_unlock_irqrestore(&iommu_lock, flags); |
224 | return -EINVAL; | 204 | return -EINVAL; |
225 | } | 205 | } |
@@ -227,7 +207,8 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, | |||
227 | for (i = 0; i < dma_domain->win_cnt; i++) { | 207 | for (i = 0; i < dma_domain->win_cnt; i++) { |
228 | ret = pamu_update_paace_stash(liodn, i, val); | 208 | ret = pamu_update_paace_stash(liodn, i, val); |
229 | if (ret) { | 209 | if (ret) { |
230 | pr_debug("Failed to update SPAACE %d field for liodn %d\n ", i, liodn); | 210 | pr_debug("Failed to update SPAACE %d field for liodn %d\n ", |
211 | i, liodn); | ||
231 | spin_unlock_irqrestore(&iommu_lock, flags); | 212 | spin_unlock_irqrestore(&iommu_lock, flags); |
232 | return ret; | 213 | return ret; |
233 | } | 214 | } |
@@ -240,9 +221,9 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, | |||
240 | 221 | ||
241 | /* Set the geometry parameters for a LIODN */ | 222 | /* Set the geometry parameters for a LIODN */ |
242 | static int pamu_set_liodn(int liodn, struct device *dev, | 223 | static int pamu_set_liodn(int liodn, struct device *dev, |
243 | struct fsl_dma_domain *dma_domain, | 224 | struct fsl_dma_domain *dma_domain, |
244 | struct iommu_domain_geometry *geom_attr, | 225 | struct iommu_domain_geometry *geom_attr, |
245 | u32 win_cnt) | 226 | u32 win_cnt) |
246 | { | 227 | { |
247 | phys_addr_t window_addr, window_size; | 228 | phys_addr_t window_addr, window_size; |
248 | phys_addr_t subwin_size; | 229 | phys_addr_t subwin_size; |
@@ -268,7 +249,8 @@ static int pamu_set_liodn(int liodn, struct device *dev, | |||
268 | dma_domain->stash_id, win_cnt, 0); | 249 | dma_domain->stash_id, win_cnt, 0); |
269 | spin_unlock_irqrestore(&iommu_lock, flags); | 250 | spin_unlock_irqrestore(&iommu_lock, flags); |
270 | if (ret) { | 251 | if (ret) { |
271 | pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt =%d\n", liodn, win_cnt); | 252 | pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n", |
253 | liodn, win_cnt); | ||
272 | return ret; | 254 | return ret; |
273 | } | 255 | } |
274 | 256 | ||
@@ -285,7 +267,8 @@ static int pamu_set_liodn(int liodn, struct device *dev, | |||
285 | 0, 0); | 267 | 0, 0); |
286 | spin_unlock_irqrestore(&iommu_lock, flags); | 268 | spin_unlock_irqrestore(&iommu_lock, flags); |
287 | if (ret) { | 269 | if (ret) { |
288 | pr_debug("PAMU SPAACE configuration failed for liodn %d\n", liodn); | 270 | pr_debug("SPAACE configuration failed for liodn %d\n", |
271 | liodn); | ||
289 | return ret; | 272 | return ret; |
290 | } | 273 | } |
291 | } | 274 | } |
@@ -301,13 +284,13 @@ static int check_size(u64 size, dma_addr_t iova) | |||
301 | * to PAMU page size. | 284 | * to PAMU page size. |
302 | */ | 285 | */ |
303 | if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) { | 286 | if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) { |
304 | pr_debug("%s: size too small or not a power of two\n", __func__); | 287 | pr_debug("Size too small or not a power of two\n"); |
305 | return -EINVAL; | 288 | return -EINVAL; |
306 | } | 289 | } |
307 | 290 | ||
308 | /* iova must be page size aligned*/ | 291 | /* iova must be page size aligned */ |
309 | if (iova & (size - 1)) { | 292 | if (iova & (size - 1)) { |
310 | pr_debug("%s: address is not aligned with window size\n", __func__); | 293 | pr_debug("Address is not aligned with window size\n"); |
311 | return -EINVAL; | 294 | return -EINVAL; |
312 | } | 295 | } |
313 | 296 | ||
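check_size() above packs both validity tests into two masks: size & (size - 1) is non-zero exactly when size is not a power of two, and iova & (size - 1) is non-zero exactly when iova is not size-aligned. A standalone rework of the same checks (PAMU_PAGE_SIZE is assumed to be 4 KiB here; the real constant lives in fsl_pamu.h):

#include <stdio.h>
#include <stdint.h>

#define PAMU_PAGE_SIZE 4096ULL   /* assumed value for the demo */

static int check_size(uint64_t size, uint64_t iova)
{
    /* Power-of-two sizes have exactly one bit set, so size & (size - 1) == 0. */
    if ((size & (size - 1)) || size < PAMU_PAGE_SIZE)
        return -1;
    /* For power-of-two sizes, size - 1 is the alignment mask. */
    if (iova & (size - 1))
        return -1;
    return 0;
}

int main(void)
{
    printf("%d\n", check_size(1ULL << 20, 0x100000)); /*  0: 1 MiB window, aligned */
    printf("%d\n", check_size(3ULL << 12, 0));        /* -1: 12 KiB is not 2^n */
    printf("%d\n", check_size(1ULL << 20, 0x80000));  /* -1: iova not 1 MiB aligned */
    return 0;
}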
@@ -396,16 +379,15 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d | |||
396 | if (!dev->archdata.iommu_domain) | 379 | if (!dev->archdata.iommu_domain) |
397 | dev->archdata.iommu_domain = info; | 380 | dev->archdata.iommu_domain = info; |
398 | spin_unlock_irqrestore(&device_domain_lock, flags); | 381 | spin_unlock_irqrestore(&device_domain_lock, flags); |
399 | |||
400 | } | 382 | } |
401 | 383 | ||
402 | static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, | 384 | static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, |
403 | dma_addr_t iova) | 385 | dma_addr_t iova) |
404 | { | 386 | { |
405 | struct fsl_dma_domain *dma_domain = domain->priv; | 387 | struct fsl_dma_domain *dma_domain = domain->priv; |
406 | 388 | ||
407 | if ((iova < domain->geometry.aperture_start) || | 389 | if (iova < domain->geometry.aperture_start || |
408 | iova > (domain->geometry.aperture_end)) | 390 | iova > domain->geometry.aperture_end) |
409 | return 0; | 391 | return 0; |
410 | 392 | ||
411 | return get_phys_addr(dma_domain, iova); | 393 | return get_phys_addr(dma_domain, iova); |
@@ -460,7 +442,7 @@ static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain, | |||
460 | 442 | ||
461 | list_for_each_entry(info, &dma_domain->devices, link) { | 443 | list_for_each_entry(info, &dma_domain->devices, link) { |
462 | ret = pamu_set_liodn(info->liodn, info->dev, dma_domain, | 444 | ret = pamu_set_liodn(info->liodn, info->dev, dma_domain, |
463 | geom_attr, win_cnt); | 445 | geom_attr, win_cnt); |
464 | if (ret) | 446 | if (ret) |
465 | break; | 447 | break; |
466 | } | 448 | } |
@@ -543,7 +525,6 @@ static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr) | |||
543 | } | 525 | } |
544 | 526 | ||
545 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | 527 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); |
546 | |||
547 | } | 528 | } |
548 | 529 | ||
549 | static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, | 530 | static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, |
@@ -576,7 +557,7 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, | |||
576 | 557 | ||
577 | win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt); | 558 | win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt); |
578 | if (size > win_size) { | 559 | if (size > win_size) { |
579 | pr_debug("Invalid window size \n"); | 560 | pr_debug("Invalid window size\n"); |
580 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | 561 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); |
581 | return -EINVAL; | 562 | return -EINVAL; |
582 | } | 563 | } |
@@ -622,8 +603,8 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, | |||
622 | * and window mappings. | 603 | * and window mappings. |
623 | */ | 604 | */ |
624 | static int handle_attach_device(struct fsl_dma_domain *dma_domain, | 605 | static int handle_attach_device(struct fsl_dma_domain *dma_domain, |
625 | struct device *dev, const u32 *liodn, | 606 | struct device *dev, const u32 *liodn, |
626 | int num) | 607 | int num) |
627 | { | 608 | { |
628 | unsigned long flags; | 609 | unsigned long flags; |
629 | struct iommu_domain *domain = dma_domain->iommu_domain; | 610 | struct iommu_domain *domain = dma_domain->iommu_domain; |
@@ -632,11 +613,10 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain, | |||
632 | 613 | ||
633 | spin_lock_irqsave(&dma_domain->domain_lock, flags); | 614 | spin_lock_irqsave(&dma_domain->domain_lock, flags); |
634 | for (i = 0; i < num; i++) { | 615 | for (i = 0; i < num; i++) { |
635 | |||
636 | /* Ensure that LIODN value is valid */ | 616 | /* Ensure that LIODN value is valid */ |
637 | if (liodn[i] >= PAACE_NUMBER_ENTRIES) { | 617 | if (liodn[i] >= PAACE_NUMBER_ENTRIES) { |
638 | pr_debug("Invalid liodn %d, attach device failed for %s\n", | 618 | pr_debug("Invalid liodn %d, attach device failed for %s\n", |
639 | liodn[i], dev->of_node->full_name); | 619 | liodn[i], dev->of_node->full_name); |
640 | ret = -EINVAL; | 620 | ret = -EINVAL; |
641 | break; | 621 | break; |
642 | } | 622 | } |
@@ -649,9 +629,9 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain, | |||
649 | */ | 629 | */ |
650 | if (dma_domain->win_arr) { | 630 | if (dma_domain->win_arr) { |
651 | u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0; | 631 | u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0; |
632 | |||
652 | ret = pamu_set_liodn(liodn[i], dev, dma_domain, | 633 | ret = pamu_set_liodn(liodn[i], dev, dma_domain, |
653 | &domain->geometry, | 634 | &domain->geometry, win_cnt); |
654 | win_cnt); | ||
655 | if (ret) | 635 | if (ret) |
656 | break; | 636 | break; |
657 | if (dma_domain->mapped) { | 637 | if (dma_domain->mapped) { |
@@ -698,19 +678,18 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain, | |||
698 | liodn = of_get_property(dev->of_node, "fsl,liodn", &len); | 678 | liodn = of_get_property(dev->of_node, "fsl,liodn", &len); |
699 | if (liodn) { | 679 | if (liodn) { |
700 | liodn_cnt = len / sizeof(u32); | 680 | liodn_cnt = len / sizeof(u32); |
701 | ret = handle_attach_device(dma_domain, dev, | 681 | ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt); |
702 | liodn, liodn_cnt); | ||
703 | } else { | 682 | } else { |
704 | pr_debug("missing fsl,liodn property at %s\n", | 683 | pr_debug("missing fsl,liodn property at %s\n", |
705 | dev->of_node->full_name); | 684 | dev->of_node->full_name); |
706 | ret = -EINVAL; | 685 | ret = -EINVAL; |
707 | } | 686 | } |
708 | 687 | ||
709 | return ret; | 688 | return ret; |
710 | } | 689 | } |
711 | 690 | ||
712 | static void fsl_pamu_detach_device(struct iommu_domain *domain, | 691 | static void fsl_pamu_detach_device(struct iommu_domain *domain, |
713 | struct device *dev) | 692 | struct device *dev) |
714 | { | 693 | { |
715 | struct fsl_dma_domain *dma_domain = domain->priv; | 694 | struct fsl_dma_domain *dma_domain = domain->priv; |
716 | const u32 *prop; | 695 | const u32 *prop; |
@@ -738,7 +717,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain, | |||
738 | detach_device(dev, dma_domain); | 717 | detach_device(dev, dma_domain); |
739 | else | 718 | else |
740 | pr_debug("missing fsl,liodn property at %s\n", | 719 | pr_debug("missing fsl,liodn property at %s\n", |
741 | dev->of_node->full_name); | 720 | dev->of_node->full_name); |
742 | } | 721 | } |
743 | 722 | ||
744 | static int configure_domain_geometry(struct iommu_domain *domain, void *data) | 723 | static int configure_domain_geometry(struct iommu_domain *domain, void *data) |
@@ -754,10 +733,10 @@ static int configure_domain_geometry(struct iommu_domain *domain, void *data) | |||
754 | * DMA outside of the geometry. | 733 | * DMA outside of the geometry. |
755 | */ | 734 | */ |
756 | if (check_size(geom_size, geom_attr->aperture_start) || | 735 | if (check_size(geom_size, geom_attr->aperture_start) || |
757 | !geom_attr->force_aperture) { | 736 | !geom_attr->force_aperture) { |
758 | pr_debug("Invalid PAMU geometry attributes\n"); | 737 | pr_debug("Invalid PAMU geometry attributes\n"); |
759 | return -EINVAL; | 738 | return -EINVAL; |
760 | } | 739 | } |
761 | 740 | ||
762 | spin_lock_irqsave(&dma_domain->domain_lock, flags); | 741 | spin_lock_irqsave(&dma_domain->domain_lock, flags); |
763 | if (dma_domain->enabled) { | 742 | if (dma_domain->enabled) { |
@@ -786,7 +765,7 @@ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data) | |||
786 | spin_lock_irqsave(&dma_domain->domain_lock, flags); | 765 | spin_lock_irqsave(&dma_domain->domain_lock, flags); |
787 | 766 | ||
788 | memcpy(&dma_domain->dma_stash, stash_attr, | 767 | memcpy(&dma_domain->dma_stash, stash_attr, |
789 | sizeof(struct pamu_stash_attribute)); | 768 | sizeof(struct pamu_stash_attribute)); |
790 | 769 | ||
791 | dma_domain->stash_id = get_stash_id(stash_attr->cache, | 770 | dma_domain->stash_id = get_stash_id(stash_attr->cache, |
792 | stash_attr->cpu); | 771 | stash_attr->cpu); |
@@ -803,7 +782,7 @@ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data) | |||
803 | return ret; | 782 | return ret; |
804 | } | 783 | } |
805 | 784 | ||
806 | /* Configure domain dma state i.e. enable/disable DMA*/ | 785 | /* Configure domain dma state i.e. enable/disable DMA */ |
807 | static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable) | 786 | static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable) |
808 | { | 787 | { |
809 | struct device_domain_info *info; | 788 | struct device_domain_info *info; |
@@ -819,8 +798,7 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en | |||
819 | } | 798 | } |
820 | 799 | ||
821 | dma_domain->enabled = enable; | 800 | dma_domain->enabled = enable; |
822 | list_for_each_entry(info, &dma_domain->devices, | 801 | list_for_each_entry(info, &dma_domain->devices, link) { |
823 | link) { | ||
824 | ret = (enable) ? pamu_enable_liodn(info->liodn) : | 802 | ret = (enable) ? pamu_enable_liodn(info->liodn) : |
825 | pamu_disable_liodn(info->liodn); | 803 | pamu_disable_liodn(info->liodn); |
826 | if (ret) | 804 | if (ret) |
@@ -833,12 +811,11 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en | |||
833 | } | 811 | } |
834 | 812 | ||
835 | static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, | 813 | static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, |
836 | enum iommu_attr attr_type, void *data) | 814 | enum iommu_attr attr_type, void *data) |
837 | { | 815 | { |
838 | struct fsl_dma_domain *dma_domain = domain->priv; | 816 | struct fsl_dma_domain *dma_domain = domain->priv; |
839 | int ret = 0; | 817 | int ret = 0; |
840 | 818 | ||
841 | |||
842 | switch (attr_type) { | 819 | switch (attr_type) { |
843 | case DOMAIN_ATTR_GEOMETRY: | 820 | case DOMAIN_ATTR_GEOMETRY: |
844 | ret = configure_domain_geometry(domain, data); | 821 | ret = configure_domain_geometry(domain, data); |
@@ -853,22 +830,21 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, | |||
853 | pr_debug("Unsupported attribute type\n"); | 830 | pr_debug("Unsupported attribute type\n"); |
854 | ret = -EINVAL; | 831 | ret = -EINVAL; |
855 | break; | 832 | break; |
856 | }; | 833 | } |
857 | 834 | ||
858 | return ret; | 835 | return ret; |
859 | } | 836 | } |
860 | 837 | ||
861 | static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, | 838 | static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, |
862 | enum iommu_attr attr_type, void *data) | 839 | enum iommu_attr attr_type, void *data) |
863 | { | 840 | { |
864 | struct fsl_dma_domain *dma_domain = domain->priv; | 841 | struct fsl_dma_domain *dma_domain = domain->priv; |
865 | int ret = 0; | 842 | int ret = 0; |
866 | 843 | ||
867 | |||
868 | switch (attr_type) { | 844 | switch (attr_type) { |
869 | case DOMAIN_ATTR_FSL_PAMU_STASH: | 845 | case DOMAIN_ATTR_FSL_PAMU_STASH: |
870 | memcpy((struct pamu_stash_attribute *) data, &dma_domain->dma_stash, | 846 | memcpy(data, &dma_domain->dma_stash, |
871 | sizeof(struct pamu_stash_attribute)); | 847 | sizeof(struct pamu_stash_attribute)); |
872 | break; | 848 | break; |
873 | case DOMAIN_ATTR_FSL_PAMU_ENABLE: | 849 | case DOMAIN_ATTR_FSL_PAMU_ENABLE: |
874 | *(int *)data = dma_domain->enabled; | 850 | *(int *)data = dma_domain->enabled; |
@@ -880,7 +856,7 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, | |||
880 | pr_debug("Unsupported attribute type\n"); | 856 | pr_debug("Unsupported attribute type\n"); |
881 | ret = -EINVAL; | 857 | ret = -EINVAL; |
882 | break; | 858 | break; |
883 | }; | 859 | } |
884 | 860 | ||
885 | return ret; | 861 | return ret; |
886 | } | 862 | } |
@@ -903,11 +879,8 @@ static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl) | |||
903 | /* Check the PCI controller version number by reading the BRR1 register */ | 879 | /* Check the PCI controller version number by reading the BRR1 register */ |
904 | version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2)); | 880 | version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2)); |
905 | version &= PCI_FSL_BRR1_VER; | 881 | version &= PCI_FSL_BRR1_VER; |
906 | /* If PCI controller version is >= 0x204 we can partition endpoints*/ | 882 | /* If PCI controller version is >= 0x204 we can partition endpoints */ |
907 | if (version >= 0x204) | 883 | return version >= 0x204; |
908 | return 1; | ||
909 | |||
910 | return 0; | ||
911 | } | 884 | } |
912 | 885 | ||
913 | /* Get iommu group information from peer devices or devices on the parent bus */ | 886 | /* Get iommu group information from peer devices or devices on the parent bus */ |
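check_pci_ctl_endpt_part() above collapses the old if/return 1/return 0 tail into returning the comparison itself, which already carries the right truth value. The same idiom in isolation, with the 0x204 cutoff borrowed from the driver:

#include <stdbool.h>
#include <stdio.h>

/* A >= comparison evaluates to 0 or 1, so it can be returned directly. */
static bool can_partition_endpoints(unsigned int version)
{
    return version >= 0x204;
}

int main(void)
{
    printf("%d %d\n", can_partition_endpoints(0x203),
           can_partition_endpoints(0x204));   /* prints: 0 1 */
    return 0;
}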
@@ -968,8 +941,9 @@ static struct iommu_group *get_pci_device_group(struct pci_dev *pdev) | |||
968 | if (pci_ctl->parent->iommu_group) { | 941 | if (pci_ctl->parent->iommu_group) { |
969 | group = get_device_iommu_group(pci_ctl->parent); | 942 | group = get_device_iommu_group(pci_ctl->parent); |
970 | iommu_group_remove_device(pci_ctl->parent); | 943 | iommu_group_remove_device(pci_ctl->parent); |
971 | } else | 944 | } else { |
972 | group = get_shared_pci_device_group(pdev); | 945 | group = get_shared_pci_device_group(pdev); |
946 | } | ||
973 | } | 947 | } |
974 | 948 | ||
975 | if (!group) | 949 | if (!group) |
@@ -1055,11 +1029,12 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count) | |||
1055 | } | 1029 | } |
1056 | 1030 | ||
1057 | ret = pamu_set_domain_geometry(dma_domain, &domain->geometry, | 1031 | ret = pamu_set_domain_geometry(dma_domain, &domain->geometry, |
1058 | ((w_count > 1) ? w_count : 0)); | 1032 | w_count > 1 ? w_count : 0); |
1059 | if (!ret) { | 1033 | if (!ret) { |
1060 | kfree(dma_domain->win_arr); | 1034 | kfree(dma_domain->win_arr); |
1061 | dma_domain->win_arr = kzalloc(sizeof(struct dma_window) * | 1035 | dma_domain->win_arr = kcalloc(w_count, |
1062 | w_count, GFP_ATOMIC); | 1036 | sizeof(*dma_domain->win_arr), |
1037 | GFP_ATOMIC); | ||
1063 | if (!dma_domain->win_arr) { | 1038 | if (!dma_domain->win_arr) { |
1064 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); | 1039 | spin_unlock_irqrestore(&dma_domain->domain_lock, flags); |
1065 | return -ENOMEM; | 1040 | return -ENOMEM; |
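The kzalloc(sizeof(...) * w_count) to kcalloc(w_count, sizeof(...)) conversion above is not purely cosmetic: kcalloc() checks that the count-times-size product does not overflow before allocating. Userspace calloc() gives the same guarantee, which this sketch leans on (the struct fields are invented stand-ins):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct dma_window { uint64_t paddr, size; int valid, prot; };  /* illustrative only */

int main(void)
{
    size_t w_count = SIZE_MAX / 2;   /* absurd count: count * size must overflow */

    /* calloc(), like kcalloc(), rejects an overflowing product instead of
     * silently wrapping and handing back a too-small buffer. */
    struct dma_window *arr = calloc(w_count, sizeof(*arr));
    printf("calloc: %s\n", arr ? "allocated" : "rejected");
    free(arr);
    return 0;
}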
@@ -1095,7 +1070,7 @@ static const struct iommu_ops fsl_pamu_ops = { | |||
1095 | .remove_device = fsl_pamu_remove_device, | 1070 | .remove_device = fsl_pamu_remove_device, |
1096 | }; | 1071 | }; |
1097 | 1072 | ||
1098 | int pamu_domain_init(void) | 1073 | int __init pamu_domain_init(void) |
1099 | { | 1074 | { |
1100 | int ret = 0; | 1075 | int ret = 0; |
1101 | 1076 | ||
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 40dfbc0444c0..ae4c1a854e57 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -71,6 +71,9 @@ | |||
71 | __DOMAIN_MAX_PFN(gaw), (unsigned long)-1)) | 71 | __DOMAIN_MAX_PFN(gaw), (unsigned long)-1)) |
72 | #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT) | 72 | #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT) |
73 | 73 | ||
74 | /* IO virtual address start page frame number */ | ||
75 | #define IOVA_START_PFN (1) | ||
76 | |||
74 | #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) | 77 | #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) |
75 | #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) | 78 | #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) |
76 | #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) | 79 | #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) |
@@ -485,7 +488,6 @@ __setup("intel_iommu=", intel_iommu_setup); | |||
485 | 488 | ||
486 | static struct kmem_cache *iommu_domain_cache; | 489 | static struct kmem_cache *iommu_domain_cache; |
487 | static struct kmem_cache *iommu_devinfo_cache; | 490 | static struct kmem_cache *iommu_devinfo_cache; |
488 | static struct kmem_cache *iommu_iova_cache; | ||
489 | 491 | ||
490 | static inline void *alloc_pgtable_page(int node) | 492 | static inline void *alloc_pgtable_page(int node) |
491 | { | 493 | { |
@@ -523,16 +525,6 @@ static inline void free_devinfo_mem(void *vaddr) | |||
523 | kmem_cache_free(iommu_devinfo_cache, vaddr); | 525 | kmem_cache_free(iommu_devinfo_cache, vaddr); |
524 | } | 526 | } |
525 | 527 | ||
526 | struct iova *alloc_iova_mem(void) | ||
527 | { | ||
528 | return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC); | ||
529 | } | ||
530 | |||
531 | void free_iova_mem(struct iova *iova) | ||
532 | { | ||
533 | kmem_cache_free(iommu_iova_cache, iova); | ||
534 | } | ||
535 | |||
536 | static inline int domain_type_is_vm(struct dmar_domain *domain) | 528 | static inline int domain_type_is_vm(struct dmar_domain *domain) |
537 | { | 529 | { |
538 | return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE; | 530 | return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE; |
@@ -1643,7 +1635,8 @@ static int dmar_init_reserved_ranges(void) | |||
1643 | struct iova *iova; | 1635 | struct iova *iova; |
1644 | int i; | 1636 | int i; |
1645 | 1637 | ||
1646 | init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN); | 1638 | init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN, |
1639 | DMA_32BIT_PFN); | ||
1647 | 1640 | ||
1648 | lockdep_set_class(&reserved_iova_list.iova_rbtree_lock, | 1641 | lockdep_set_class(&reserved_iova_list.iova_rbtree_lock, |
1649 | &reserved_rbtree_key); | 1642 | &reserved_rbtree_key); |
@@ -1701,7 +1694,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
1701 | int adjust_width, agaw; | 1694 | int adjust_width, agaw; |
1702 | unsigned long sagaw; | 1695 | unsigned long sagaw; |
1703 | 1696 | ||
1704 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); | 1697 | init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN, |
1698 | DMA_32BIT_PFN); | ||
1705 | domain_reserve_special_ranges(domain); | 1699 | domain_reserve_special_ranges(domain); |
1706 | 1700 | ||
1707 | /* calculate AGAW */ | 1701 | /* calculate AGAW */ |
@@ -3427,23 +3421,6 @@ static inline int iommu_devinfo_cache_init(void) | |||
3427 | return ret; | 3421 | return ret; |
3428 | } | 3422 | } |
3429 | 3423 | ||
3430 | static inline int iommu_iova_cache_init(void) | ||
3431 | { | ||
3432 | int ret = 0; | ||
3433 | |||
3434 | iommu_iova_cache = kmem_cache_create("iommu_iova", | ||
3435 | sizeof(struct iova), | ||
3436 | 0, | ||
3437 | SLAB_HWCACHE_ALIGN, | ||
3438 | NULL); | ||
3439 | if (!iommu_iova_cache) { | ||
3440 | printk(KERN_ERR "Couldn't create iova cache\n"); | ||
3441 | ret = -ENOMEM; | ||
3442 | } | ||
3443 | |||
3444 | return ret; | ||
3445 | } | ||
3446 | |||
3447 | static int __init iommu_init_mempool(void) | 3424 | static int __init iommu_init_mempool(void) |
3448 | { | 3425 | { |
3449 | int ret; | 3426 | int ret; |
@@ -3461,7 +3438,7 @@ static int __init iommu_init_mempool(void) | |||
3461 | 3438 | ||
3462 | kmem_cache_destroy(iommu_domain_cache); | 3439 | kmem_cache_destroy(iommu_domain_cache); |
3463 | domain_error: | 3440 | domain_error: |
3464 | kmem_cache_destroy(iommu_iova_cache); | 3441 | iommu_iova_cache_destroy(); |
3465 | 3442 | ||
3466 | return -ENOMEM; | 3443 | return -ENOMEM; |
3467 | } | 3444 | } |
@@ -3470,8 +3447,7 @@ static void __init iommu_exit_mempool(void) | |||
3470 | { | 3447 | { |
3471 | kmem_cache_destroy(iommu_devinfo_cache); | 3448 | kmem_cache_destroy(iommu_devinfo_cache); |
3472 | kmem_cache_destroy(iommu_domain_cache); | 3449 | kmem_cache_destroy(iommu_domain_cache); |
3473 | kmem_cache_destroy(iommu_iova_cache); | 3450 | iommu_iova_cache_destroy(); |
3474 | |||
3475 | } | 3451 | } |
3476 | 3452 | ||
3477 | static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) | 3453 | static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) |
@@ -4342,7 +4318,8 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) | |||
4342 | { | 4318 | { |
4343 | int adjust_width; | 4319 | int adjust_width; |
4344 | 4320 | ||
4345 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); | 4321 | init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN, |
4322 | DMA_32BIT_PFN); | ||
4346 | domain_reserve_special_ranges(domain); | 4323 | domain_reserve_special_ranges(domain); |
4347 | 4324 | ||
4348 | /* calculate AGAW */ | 4325 | /* calculate AGAW */ |
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c new file mode 100644 index 000000000000..5a500edf00cc --- /dev/null +++ b/drivers/iommu/io-pgtable-arm.c | |||
@@ -0,0 +1,986 @@ | |||
1 | /* | ||
2 | * CPU-agnostic ARM page table allocator. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
15 | * | ||
16 | * Copyright (C) 2014 ARM Limited | ||
17 | * | ||
18 | * Author: Will Deacon <will.deacon@arm.com> | ||
19 | */ | ||
20 | |||
21 | #define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt | ||
22 | |||
23 | #include <linux/iommu.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/sizes.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/types.h> | ||
28 | |||
29 | #include "io-pgtable.h" | ||
30 | |||
31 | #define ARM_LPAE_MAX_ADDR_BITS 48 | ||
32 | #define ARM_LPAE_S2_MAX_CONCAT_PAGES 16 | ||
33 | #define ARM_LPAE_MAX_LEVELS 4 | ||
34 | |||
35 | /* Struct accessors */ | ||
36 | #define io_pgtable_to_data(x) \ | ||
37 | container_of((x), struct arm_lpae_io_pgtable, iop) | ||
38 | |||
39 | #define io_pgtable_ops_to_pgtable(x) \ | ||
40 | container_of((x), struct io_pgtable, ops) | ||
41 | |||
42 | #define io_pgtable_ops_to_data(x) \ | ||
43 | io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) | ||
44 | |||
45 | /* | ||
46 | * For consistency with the architecture, we always consider | ||
47 | * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0 | ||
48 | */ | ||
49 | #define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels) | ||
50 | |||
51 | /* | ||
52 | * Calculate the right shift amount to get to the portion describing level l | ||
53 | * in a virtual address mapped by the pagetable in d. | ||
54 | */ | ||
55 | #define ARM_LPAE_LVL_SHIFT(l,d) \ | ||
56 | ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ | ||
57 | * (d)->bits_per_level) + (d)->pg_shift) | ||
58 | |||
59 | #define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) | ||
60 | |||
61 | /* | ||
62 | * Calculate the index at level l used to map virtual address a using the | ||
63 | * pagetable in d. | ||
64 | */ | ||
65 | #define ARM_LPAE_PGD_IDX(l,d) \ | ||
66 | ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) | ||
67 | |||
68 | #define ARM_LPAE_LVL_IDX(a,l,d) \ | ||
69 | (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ | ||
70 | ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) | ||
71 | |||
72 | /* Calculate the block/page mapping size at level l for pagetable in d. */ | ||
73 | #define ARM_LPAE_BLOCK_SIZE(l,d) \ | ||
74 | (1 << (ilog2(sizeof(arm_lpae_iopte)) + \ | ||
75 | ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level))) | ||
76 | |||
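To make the geometry macros above concrete, here is a standalone rework for one configuration: 4 KiB granule (pg_shift = 12), 8-byte PTEs, all four levels in use, so the start level is 0. The macro names are local simplifications of ARM_LPAE_LVL_SHIFT and ARM_LPAE_BLOCK_SIZE:

#include <stdio.h>

#define MAX_LEVELS     4
#define PG_SHIFT       12
#define BITS_PER_LEVEL (PG_SHIFT - 3)   /* 512 eight-byte PTEs per 4 KiB table */

/* Shift that isolates the level-l index bits of an IOVA (start level 0). */
#define LVL_SHIFT(l)  (((MAX_LEVELS - ((l) + 1)) * BITS_PER_LEVEL) + PG_SHIFT)
/* Bytes mapped by one entry at level l. */
#define BLOCK_SIZE(l) (1ULL << (3 + (MAX_LEVELS - (l)) * BITS_PER_LEVEL))

int main(void)
{
    for (int l = 0; l < MAX_LEVELS; l++)
        printf("level %d: shift %2d, block 0x%llx\n",
               l, LVL_SHIFT(l), (unsigned long long)BLOCK_SIZE(l));
    /* shifts 39/30/21/12; blocks 512G/1G/2M/4K, the familiar LPAE sizes */
    return 0;
}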
77 | /* Page table bits */ | ||
78 | #define ARM_LPAE_PTE_TYPE_SHIFT 0 | ||
79 | #define ARM_LPAE_PTE_TYPE_MASK 0x3 | ||
80 | |||
81 | #define ARM_LPAE_PTE_TYPE_BLOCK 1 | ||
82 | #define ARM_LPAE_PTE_TYPE_TABLE 3 | ||
83 | #define ARM_LPAE_PTE_TYPE_PAGE 3 | ||
84 | |||
85 | #define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63) | ||
86 | #define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53) | ||
87 | #define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10) | ||
88 | #define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8) | ||
89 | #define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8) | ||
90 | #define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8) | ||
91 | #define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5) | ||
92 | #define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0) | ||
93 | |||
94 | #define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2) | ||
95 | /* Ignore the contiguous bit for block splitting */ | ||
96 | #define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52) | ||
97 | #define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \ | ||
98 | ARM_LPAE_PTE_ATTR_HI_MASK) | ||
99 | |||
100 | /* Stage-1 PTE */ | ||
101 | #define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6) | ||
102 | #define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6) | ||
103 | #define ARM_LPAE_PTE_ATTRINDX_SHIFT 2 | ||
104 | #define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11) | ||
105 | |||
106 | /* Stage-2 PTE */ | ||
107 | #define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6) | ||
108 | #define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6) | ||
109 | #define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6) | ||
110 | #define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2) | ||
111 | #define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2) | ||
112 | #define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2) | ||
113 | |||
114 | /* Register bits */ | ||
115 | #define ARM_32_LPAE_TCR_EAE (1 << 31) | ||
116 | #define ARM_64_LPAE_S2_TCR_RES1 (1 << 31) | ||
117 | |||
118 | #define ARM_LPAE_TCR_TG0_4K (0 << 14) | ||
119 | #define ARM_LPAE_TCR_TG0_64K (1 << 14) | ||
120 | #define ARM_LPAE_TCR_TG0_16K (2 << 14) | ||
121 | |||
122 | #define ARM_LPAE_TCR_SH0_SHIFT 12 | ||
123 | #define ARM_LPAE_TCR_SH0_MASK 0x3 | ||
124 | #define ARM_LPAE_TCR_SH_NS 0 | ||
125 | #define ARM_LPAE_TCR_SH_OS 2 | ||
126 | #define ARM_LPAE_TCR_SH_IS 3 | ||
127 | |||
128 | #define ARM_LPAE_TCR_ORGN0_SHIFT 10 | ||
129 | #define ARM_LPAE_TCR_IRGN0_SHIFT 8 | ||
130 | #define ARM_LPAE_TCR_RGN_MASK 0x3 | ||
131 | #define ARM_LPAE_TCR_RGN_NC 0 | ||
132 | #define ARM_LPAE_TCR_RGN_WBWA 1 | ||
133 | #define ARM_LPAE_TCR_RGN_WT 2 | ||
134 | #define ARM_LPAE_TCR_RGN_WB 3 | ||
135 | |||
136 | #define ARM_LPAE_TCR_SL0_SHIFT 6 | ||
137 | #define ARM_LPAE_TCR_SL0_MASK 0x3 | ||
138 | |||
139 | #define ARM_LPAE_TCR_T0SZ_SHIFT 0 | ||
140 | #define ARM_LPAE_TCR_SZ_MASK 0xf | ||
141 | |||
142 | #define ARM_LPAE_TCR_PS_SHIFT 16 | ||
143 | #define ARM_LPAE_TCR_PS_MASK 0x7 | ||
144 | |||
145 | #define ARM_LPAE_TCR_IPS_SHIFT 32 | ||
146 | #define ARM_LPAE_TCR_IPS_MASK 0x7 | ||
147 | |||
148 | #define ARM_LPAE_TCR_PS_32_BIT 0x0ULL | ||
149 | #define ARM_LPAE_TCR_PS_36_BIT 0x1ULL | ||
150 | #define ARM_LPAE_TCR_PS_40_BIT 0x2ULL | ||
151 | #define ARM_LPAE_TCR_PS_42_BIT 0x3ULL | ||
152 | #define ARM_LPAE_TCR_PS_44_BIT 0x4ULL | ||
153 | #define ARM_LPAE_TCR_PS_48_BIT 0x5ULL | ||
154 | |||
155 | #define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3) | ||
156 | #define ARM_LPAE_MAIR_ATTR_MASK 0xff | ||
157 | #define ARM_LPAE_MAIR_ATTR_DEVICE 0x04 | ||
158 | #define ARM_LPAE_MAIR_ATTR_NC 0x44 | ||
159 | #define ARM_LPAE_MAIR_ATTR_WBRWA 0xff | ||
160 | #define ARM_LPAE_MAIR_ATTR_IDX_NC 0 | ||
161 | #define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1 | ||
162 | #define ARM_LPAE_MAIR_ATTR_IDX_DEV 2 | ||
163 | |||
164 | /* IOPTE accessors */ | ||
165 | #define iopte_deref(pte,d) \ | ||
166 | (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \ | ||
167 | & ~((1ULL << (d)->pg_shift) - 1))) | ||
168 | |||
169 | #define iopte_type(pte,l) \ | ||
170 | (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK) | ||
171 | |||
172 | #define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK) | ||
173 | |||
174 | #define iopte_leaf(pte,l) \ | ||
175 | (l == (ARM_LPAE_MAX_LEVELS - 1) ? \ | ||
176 | (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \ | ||
177 | (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK)) | ||
178 | |||
179 | #define iopte_to_pfn(pte,d) \ | ||
180 | (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift) | ||
181 | |||
182 | #define pfn_to_iopte(pfn,d) \ | ||
183 | (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) | ||
184 | |||
185 | struct arm_lpae_io_pgtable { | ||
186 | struct io_pgtable iop; | ||
187 | |||
188 | int levels; | ||
189 | size_t pgd_size; | ||
190 | unsigned long pg_shift; | ||
191 | unsigned long bits_per_level; | ||
192 | |||
193 | void *pgd; | ||
194 | }; | ||
195 | |||
196 | typedef u64 arm_lpae_iopte; | ||
197 | |||
198 | static bool selftest_running = false; | ||
199 | |||
200 | static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, | ||
201 | unsigned long iova, phys_addr_t paddr, | ||
202 | arm_lpae_iopte prot, int lvl, | ||
203 | arm_lpae_iopte *ptep) | ||
204 | { | ||
205 | arm_lpae_iopte pte = prot; | ||
206 | |||
207 | /* We require an unmap first */ | ||
208 | if (iopte_leaf(*ptep, lvl)) { | ||
209 | WARN_ON(!selftest_running); | ||
210 | return -EEXIST; | ||
211 | } | ||
212 | |||
213 | if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) | ||
214 | pte |= ARM_LPAE_PTE_NS; | ||
215 | |||
216 | if (lvl == ARM_LPAE_MAX_LEVELS - 1) | ||
217 | pte |= ARM_LPAE_PTE_TYPE_PAGE; | ||
218 | else | ||
219 | pte |= ARM_LPAE_PTE_TYPE_BLOCK; | ||
220 | |||
221 | pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; | ||
222 | pte |= pfn_to_iopte(paddr >> data->pg_shift, data); | ||
223 | |||
224 | *ptep = pte; | ||
225 | data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie); | ||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, | ||
230 | phys_addr_t paddr, size_t size, arm_lpae_iopte prot, | ||
231 | int lvl, arm_lpae_iopte *ptep) | ||
232 | { | ||
233 | arm_lpae_iopte *cptep, pte; | ||
234 | void *cookie = data->iop.cookie; | ||
235 | size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data); | ||
236 | |||
237 | /* Find our entry at the current level */ | ||
238 | ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); | ||
239 | |||
240 | /* If we can install a leaf entry at this level, then do so */ | ||
241 | if (size == block_size && (size & data->iop.cfg.pgsize_bitmap)) | ||
242 | return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); | ||
243 | |||
244 | /* We can't allocate tables at the final level */ | ||
245 | if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1)) | ||
246 | return -EINVAL; | ||
247 | |||
248 | /* Grab a pointer to the next level */ | ||
249 | pte = *ptep; | ||
250 | if (!pte) { | ||
251 | cptep = alloc_pages_exact(1UL << data->pg_shift, | ||
252 | GFP_ATOMIC | __GFP_ZERO); | ||
253 | if (!cptep) | ||
254 | return -ENOMEM; | ||
255 | |||
256 | data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift, | ||
257 | cookie); | ||
258 | pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE; | ||
259 | if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) | ||
260 | pte |= ARM_LPAE_PTE_NSTABLE; | ||
261 | *ptep = pte; | ||
262 | data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie); | ||
263 | } else { | ||
264 | cptep = iopte_deref(pte, data); | ||
265 | } | ||
266 | |||
267 | /* Rinse, repeat */ | ||
268 | return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep); | ||
269 | } | ||
270 | |||
271 | static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, | ||
272 | int prot) | ||
273 | { | ||
274 | arm_lpae_iopte pte; | ||
275 | |||
276 | if (data->iop.fmt == ARM_64_LPAE_S1 || | ||
277 | data->iop.fmt == ARM_32_LPAE_S1) { | ||
278 | pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG; | ||
279 | |||
280 | if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) | ||
281 | pte |= ARM_LPAE_PTE_AP_RDONLY; | ||
282 | |||
283 | if (prot & IOMMU_CACHE) | ||
284 | pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE | ||
285 | << ARM_LPAE_PTE_ATTRINDX_SHIFT); | ||
286 | } else { | ||
287 | pte = ARM_LPAE_PTE_HAP_FAULT; | ||
288 | if (prot & IOMMU_READ) | ||
289 | pte |= ARM_LPAE_PTE_HAP_READ; | ||
290 | if (prot & IOMMU_WRITE) | ||
291 | pte |= ARM_LPAE_PTE_HAP_WRITE; | ||
292 | if (prot & IOMMU_CACHE) | ||
293 | pte |= ARM_LPAE_PTE_MEMATTR_OIWB; | ||
294 | else | ||
295 | pte |= ARM_LPAE_PTE_MEMATTR_NC; | ||
296 | } | ||
297 | |||
298 | if (prot & IOMMU_NOEXEC) | ||
299 | pte |= ARM_LPAE_PTE_XN; | ||
300 | |||
301 | return pte; | ||
302 | } | ||
303 | |||
304 | static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, | ||
305 | phys_addr_t paddr, size_t size, int iommu_prot) | ||
306 | { | ||
307 | struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); | ||
308 | arm_lpae_iopte *ptep = data->pgd; | ||
309 | int lvl = ARM_LPAE_START_LVL(data); | ||
310 | arm_lpae_iopte prot; | ||
311 | |||
312 | /* If no access, then nothing to do */ | ||
313 | if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) | ||
314 | return 0; | ||
315 | |||
316 | prot = arm_lpae_prot_to_pte(data, iommu_prot); | ||
317 | return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); | ||
318 | } | ||
319 | |||
320 | static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, | ||
321 | arm_lpae_iopte *ptep) | ||
322 | { | ||
323 | arm_lpae_iopte *start, *end; | ||
324 | unsigned long table_size; | ||
325 | |||
326 | /* Only leaf entries at the last level */ | ||
327 | if (lvl == ARM_LPAE_MAX_LEVELS - 1) | ||
328 | return; | ||
329 | |||
330 | if (lvl == ARM_LPAE_START_LVL(data)) | ||
331 | table_size = data->pgd_size; | ||
332 | else | ||
333 | table_size = 1UL << data->pg_shift; | ||
334 | |||
335 | start = ptep; | ||
336 | end = (void *)ptep + table_size; | ||
337 | |||
338 | while (ptep != end) { | ||
339 | arm_lpae_iopte pte = *ptep++; | ||
340 | |||
341 | if (!pte || iopte_leaf(pte, lvl)) | ||
342 | continue; | ||
343 | |||
344 | __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data)); | ||
345 | } | ||
346 | |||
347 | free_pages_exact(start, table_size); | ||
348 | } | ||
349 | |||
350 | static void arm_lpae_free_pgtable(struct io_pgtable *iop) | ||
351 | { | ||
352 | struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); | ||
353 | |||
354 | __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd); | ||
355 | kfree(data); | ||
356 | } | ||
357 | |||
358 | static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, | ||
359 | unsigned long iova, size_t size, | ||
360 | arm_lpae_iopte prot, int lvl, | ||
361 | arm_lpae_iopte *ptep, size_t blk_size) | ||
362 | { | ||
363 | unsigned long blk_start, blk_end; | ||
364 | phys_addr_t blk_paddr; | ||
365 | arm_lpae_iopte table = 0; | ||
366 | void *cookie = data->iop.cookie; | ||
367 | const struct iommu_gather_ops *tlb = data->iop.cfg.tlb; | ||
368 | |||
369 | blk_start = iova & ~(blk_size - 1); | ||
370 | blk_end = blk_start + blk_size; | ||
371 | blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift; | ||
372 | |||
373 | for (; blk_start < blk_end; blk_start += size, blk_paddr += size) { | ||
374 | arm_lpae_iopte *tablep; | ||
375 | |||
376 | /* Unmap! */ | ||
377 | if (blk_start == iova) | ||
378 | continue; | ||
379 | |||
380 | /* __arm_lpae_map expects a pointer to the start of the table */ | ||
381 | tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data); | ||
382 | if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl, | ||
383 | tablep) < 0) { | ||
384 | if (table) { | ||
385 | /* Free the table we allocated */ | ||
386 | tablep = iopte_deref(table, data); | ||
387 | __arm_lpae_free_pgtable(data, lvl + 1, tablep); | ||
388 | } | ||
389 | return 0; /* Bytes unmapped */ | ||
390 | } | ||
391 | } | ||
392 | |||
393 | *ptep = table; | ||
394 | tlb->flush_pgtable(ptep, sizeof(*ptep), cookie); | ||
395 | iova &= ~(blk_size - 1); | ||
396 | tlb->tlb_add_flush(iova, blk_size, true, cookie); | ||
397 | return size; | ||
398 | } | ||
399 | |||
400 | static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, | ||
401 | unsigned long iova, size_t size, int lvl, | ||
402 | arm_lpae_iopte *ptep) | ||
403 | { | ||
404 | arm_lpae_iopte pte; | ||
405 | const struct iommu_gather_ops *tlb = data->iop.cfg.tlb; | ||
406 | void *cookie = data->iop.cookie; | ||
407 | size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data); | ||
408 | |||
409 | ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); | ||
410 | pte = *ptep; | ||
411 | |||
412 | /* Something went horribly wrong and we ran out of page table */ | ||
413 | if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS))) | ||
414 | return 0; | ||
415 | |||
416 | /* If the size matches this level, we're in the right place */ | ||
417 | if (size == blk_size) { | ||
418 | *ptep = 0; | ||
419 | tlb->flush_pgtable(ptep, sizeof(*ptep), cookie); | ||
420 | |||
421 | if (!iopte_leaf(pte, lvl)) { | ||
422 | /* Also flush any partial walks */ | ||
423 | tlb->tlb_add_flush(iova, size, false, cookie); | ||
424 | tlb->tlb_sync(data->iop.cookie); | ||
425 | ptep = iopte_deref(pte, data); | ||
426 | __arm_lpae_free_pgtable(data, lvl + 1, ptep); | ||
427 | } else { | ||
428 | tlb->tlb_add_flush(iova, size, true, cookie); | ||
429 | } | ||
430 | |||
431 | return size; | ||
432 | } else if (iopte_leaf(pte, lvl)) { | ||
433 | /* | ||
434 | * Insert a table at the next level to map the old region, | ||
435 | * minus the part we want to unmap | ||
436 | */ | ||
437 | return arm_lpae_split_blk_unmap(data, iova, size, | ||
438 | iopte_prot(pte), lvl, ptep, | ||
439 | blk_size); | ||
440 | } | ||
441 | |||
442 | /* Keep on walkin' */ | ||
443 | ptep = iopte_deref(pte, data); | ||
444 | return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep); | ||
445 | } | ||
446 | |||
447 | static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, | ||
448 | size_t size) | ||
449 | { | ||
450 | size_t unmapped; | ||
451 | struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); | ||
452 | struct io_pgtable *iop = &data->iop; | ||
453 | arm_lpae_iopte *ptep = data->pgd; | ||
454 | int lvl = ARM_LPAE_START_LVL(data); | ||
455 | |||
456 | unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep); | ||
457 | if (unmapped) | ||
458 | iop->cfg.tlb->tlb_sync(iop->cookie); | ||
459 | |||
460 | return unmapped; | ||
461 | } | ||
462 | |||
463 | static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, | ||
464 | unsigned long iova) | ||
465 | { | ||
466 | struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); | ||
467 | arm_lpae_iopte pte, *ptep = data->pgd; | ||
468 | int lvl = ARM_LPAE_START_LVL(data); | ||
469 | |||
470 | do { | ||
471 | /* Valid IOPTE pointer? */ | ||
472 | if (!ptep) | ||
473 | return 0; | ||
474 | |||
475 | /* Grab the IOPTE we're interested in */ | ||
476 | pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data)); | ||
477 | |||
478 | /* Valid entry? */ | ||
479 | if (!pte) | ||
480 | return 0; | ||
481 | |||
482 | /* Leaf entry? */ | ||
483 | if (iopte_leaf(pte,lvl)) | ||
484 | goto found_translation; | ||
485 | |||
486 | /* Take it to the next level */ | ||
487 | ptep = iopte_deref(pte, data); | ||
488 | } while (++lvl < ARM_LPAE_MAX_LEVELS); | ||
489 | |||
490 | /* Ran out of page tables to walk */ | ||
491 | return 0; | ||
492 | |||
493 | found_translation: | ||
494 | iova &= ((1 << data->pg_shift) - 1); | ||
495 | return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova; | ||
496 | } | ||
497 | |||
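arm_lpae_iova_to_phys() above is a plain table walk: index with the level's IOVA bits, stop at a leaf, then splice the page offset back onto the physical frame. A toy two-level walker showing the same shape (the table layout, LEAF_BIT encoding, and addresses are all invented for the demo):

#include <stdio.h>
#include <stdint.h>

#define PG_SHIFT 12
#define IDX_BITS 9
#define ENTRIES  (1 << IDX_BITS)
#define LEAF_BIT 1ULL               /* low bit marks a leaf, loosely like LPAE */

static uint64_t l0[ENTRIES], l1[ENTRIES];

static uint64_t lookup(uint64_t iova)
{
    uint64_t *tbl = l0;

    for (int lvl = 0; lvl < 2; lvl++) {
        uint64_t pte = tbl[(iova >> (PG_SHIFT + (1 - lvl) * IDX_BITS)) & (ENTRIES - 1)];

        if (!pte)
            return 0;                                   /* no translation */
        if (pte & LEAF_BIT)                             /* leaf: add page offset */
            return (pte & ~LEAF_BIT) | (iova & ((1ULL << PG_SHIFT) - 1));
        tbl = (uint64_t *)(uintptr_t)pte;               /* table pointer: descend */
    }
    return 0;
}

int main(void)
{
    l0[1] = (uint64_t)(uintptr_t)l1;    /* l0 slot 1 covers iovas 0x200000.. */
    l1[1] = 0xabc000 | LEAF_BIT;        /* map the second page in that range */
    printf("0x%llx\n", (unsigned long long)lookup(0x201234)); /* 0xabc234 */
    return 0;
}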
498 | static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) | ||
499 | { | ||
500 | unsigned long granule; | ||
501 | |||
502 | /* | ||
503 | * We need to restrict the supported page sizes to match the | ||
504 | * translation regime for a particular granule. Aim to match | ||
505 | * the CPU page size if possible, otherwise prefer smaller sizes. | ||
506 | * While we're at it, restrict the block sizes to match the | ||
507 | * chosen granule. | ||
508 | */ | ||
509 | if (cfg->pgsize_bitmap & PAGE_SIZE) | ||
510 | granule = PAGE_SIZE; | ||
511 | else if (cfg->pgsize_bitmap & ~PAGE_MASK) | ||
512 | granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK); | ||
513 | else if (cfg->pgsize_bitmap & PAGE_MASK) | ||
514 | granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK); | ||
515 | else | ||
516 | granule = 0; | ||
517 | |||
518 | switch (granule) { | ||
519 | case SZ_4K: | ||
520 | cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); | ||
521 | break; | ||
522 | case SZ_16K: | ||
523 | cfg->pgsize_bitmap &= (SZ_16K | SZ_32M); | ||
524 | break; | ||
525 | case SZ_64K: | ||
526 | cfg->pgsize_bitmap &= (SZ_64K | SZ_512M); | ||
527 | break; | ||
528 | default: | ||
529 | cfg->pgsize_bitmap = 0; | ||
530 | } | ||
531 | } | ||
532 | |||
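arm_lpae_restrict_pgsizes() above picks the translation granule with three bitmap tests: pgsize_bitmap & PAGE_SIZE (the CPU page size itself is supported), & ~PAGE_MASK (supported sizes below the CPU page size), and & PAGE_MASK (sizes at or above it). A sketch of that selection, assuming a 4 KiB CPU page, with __builtin_clzl/ctzl standing in for the kernel's __fls/__ffs:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define SZ_4K   0x1000UL
#define SZ_64K  0x10000UL
#define SZ_2M   0x200000UL
#define SZ_512M 0x20000000UL

/* Same preference order as the function above: the CPU page size if
 * supported, else the largest size below it, else the smallest above. */
static unsigned long pick_granule(unsigned long bitmap)
{
    if (bitmap & PAGE_SIZE)
        return PAGE_SIZE;
    if (bitmap & ~PAGE_MASK)    /* highest set bit below PAGE_SIZE */
        return 1UL << (8 * sizeof(long) - 1 - __builtin_clzl(bitmap & ~PAGE_MASK));
    if (bitmap & PAGE_MASK)     /* lowest set bit at or above PAGE_SIZE */
        return 1UL << __builtin_ctzl(bitmap & PAGE_MASK);
    return 0;
}

int main(void)
{
    printf("0x%lx\n", pick_granule(SZ_4K | SZ_2M));     /* 0x1000: CPU page size */
    printf("0x%lx\n", pick_granule(SZ_64K | SZ_512M));  /* 0x10000: smallest above */
    return 0;
}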
533 | static struct arm_lpae_io_pgtable * | ||
534 | arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) | ||
535 | { | ||
536 | unsigned long va_bits, pgd_bits; | ||
537 | struct arm_lpae_io_pgtable *data; | ||
538 | |||
539 | arm_lpae_restrict_pgsizes(cfg); | ||
540 | |||
541 | if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K))) | ||
542 | return NULL; | ||
543 | |||
544 | if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS) | ||
545 | return NULL; | ||
546 | |||
547 | if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) | ||
548 | return NULL; | ||
549 | |||
550 | data = kmalloc(sizeof(*data), GFP_KERNEL); | ||
551 | if (!data) | ||
552 | return NULL; | ||
553 | |||
554 | data->pg_shift = __ffs(cfg->pgsize_bitmap); | ||
555 | data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte)); | ||
556 | |||
557 | va_bits = cfg->ias - data->pg_shift; | ||
558 | data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level); | ||
559 | |||
560 | /* Calculate the actual size of our pgd (without concatenation) */ | ||
561 | pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1)); | ||
562 | data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte))); | ||
563 | |||
564 | data->iop.ops = (struct io_pgtable_ops) { | ||
565 | .map = arm_lpae_map, | ||
566 | .unmap = arm_lpae_unmap, | ||
567 | .iova_to_phys = arm_lpae_iova_to_phys, | ||
568 | }; | ||
569 | |||
570 | return data; | ||
571 | } | ||
572 | |||
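The sizing arithmetic in arm_lpae_alloc_pgtable() follows mechanically from two inputs, the input address width and the granule. A quick check of the numbers for two plausible configurations:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void size_it(unsigned ias, unsigned pg_shift)
{
    unsigned bits_per_level = pg_shift - 3;   /* 8-byte PTEs */
    unsigned va_bits  = ias - pg_shift;
    unsigned levels   = DIV_ROUND_UP(va_bits, bits_per_level);
    unsigned pgd_bits = va_bits - bits_per_level * (levels - 1);
    unsigned long pgd_size = 1UL << (pgd_bits + 3);

    printf("ias=%u granule=%uK -> levels=%u pgd_size=%lu bytes\n",
           ias, 1u << (pg_shift - 10), levels, pgd_size);
}

int main(void)
{
    size_it(48, 12);  /* 4 KiB granule: 4 levels, a single 4 KiB pgd */
    size_it(40, 16);  /* 64 KiB granule: 2 levels, a 16 KiB pgd */
    return 0;
}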
573 | static struct io_pgtable * | ||
574 | arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) | ||
575 | { | ||
576 | u64 reg; | ||
577 | struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg); | ||
578 | |||
579 | if (!data) | ||
580 | return NULL; | ||
581 | |||
582 | /* TCR */ | ||
583 | reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | | ||
584 | (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | | ||
585 | (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); | ||
586 | |||
587 | switch (1 << data->pg_shift) { | ||
588 | case SZ_4K: | ||
589 | reg |= ARM_LPAE_TCR_TG0_4K; | ||
590 | break; | ||
591 | case SZ_16K: | ||
592 | reg |= ARM_LPAE_TCR_TG0_16K; | ||
593 | break; | ||
594 | case SZ_64K: | ||
595 | reg |= ARM_LPAE_TCR_TG0_64K; | ||
596 | break; | ||
597 | } | ||
598 | |||
599 | switch (cfg->oas) { | ||
600 | case 32: | ||
601 | reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT); | ||
602 | break; | ||
603 | case 36: | ||
604 | reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT); | ||
605 | break; | ||
606 | case 40: | ||
607 | reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT); | ||
608 | break; | ||
609 | case 42: | ||
610 | reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT); | ||
611 | break; | ||
612 | case 44: | ||
613 | reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT); | ||
614 | break; | ||
615 | case 48: | ||
616 | reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT); | ||
617 | break; | ||
618 | default: | ||
619 | goto out_free_data; | ||
620 | } | ||
621 | |||
622 | reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; | ||
623 | cfg->arm_lpae_s1_cfg.tcr = reg; | ||
624 | |||
625 | /* MAIRs */ | ||
626 | reg = (ARM_LPAE_MAIR_ATTR_NC | ||
627 | << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) | | ||
628 | (ARM_LPAE_MAIR_ATTR_WBRWA | ||
629 | << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) | | ||
630 | (ARM_LPAE_MAIR_ATTR_DEVICE | ||
631 | << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)); | ||
632 | |||
633 | cfg->arm_lpae_s1_cfg.mair[0] = reg; | ||
634 | cfg->arm_lpae_s1_cfg.mair[1] = 0; | ||
635 | |||
636 | /* Looking good; allocate a pgd */ | ||
637 | data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO); | ||
638 | if (!data->pgd) | ||
639 | goto out_free_data; | ||
640 | |||
641 | cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie); | ||
642 | |||
643 | /* TTBRs */ | ||
644 | cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd); | ||
645 | cfg->arm_lpae_s1_cfg.ttbr[1] = 0; | ||
646 | return &data->iop; | ||
647 | |||
648 | out_free_data: | ||
649 | kfree(data); | ||
650 | return NULL; | ||
651 | } | ||
652 | |||
653 | static struct io_pgtable * | ||
654 | arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) | ||
655 | { | ||
656 | u64 reg, sl; | ||
657 | struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg); | ||
658 | |||
659 | if (!data) | ||
660 | return NULL; | ||
661 | |||
662 | /* | ||
663 | * Concatenate PGDs at level 1 if possible in order to reduce | ||
664 | * the depth of the stage-2 walk. | ||
665 | */ | ||
666 | if (data->levels == ARM_LPAE_MAX_LEVELS) { | ||
667 | unsigned long pgd_pages; | ||
668 | |||
669 | pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte)); | ||
670 | if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) { | ||
671 | data->pgd_size = pgd_pages << data->pg_shift; | ||
672 | data->levels--; | ||
673 | } | ||
674 | } | ||
675 | |||
676 | /* VTCR */ | ||
677 | reg = ARM_64_LPAE_S2_TCR_RES1 | | ||
678 | (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | | ||
679 | (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | | ||
680 | (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); | ||
681 | |||
682 | sl = ARM_LPAE_START_LVL(data); | ||
683 | |||
684 | switch (1 << data->pg_shift) { | ||
685 | case SZ_4K: | ||
686 | reg |= ARM_LPAE_TCR_TG0_4K; | ||
687 | sl++; /* SL0 format is different for 4K granule size */ | ||
688 | break; | ||
689 | case SZ_16K: | ||
690 | reg |= ARM_LPAE_TCR_TG0_16K; | ||
691 | break; | ||
692 | case SZ_64K: | ||
693 | reg |= ARM_LPAE_TCR_TG0_64K; | ||
694 | break; | ||
695 | } | ||
696 | |||
697 | switch (cfg->oas) { | ||
698 | case 32: | ||
699 | reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT); | ||
700 | break; | ||
701 | case 36: | ||
702 | reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT); | ||
703 | break; | ||
704 | case 40: | ||
705 | reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT); | ||
706 | break; | ||
707 | case 42: | ||
708 | reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT); | ||
709 | break; | ||
710 | case 44: | ||
711 | reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT); | ||
712 | break; | ||
713 | case 48: | ||
714 | reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT); | ||
715 | break; | ||
716 | default: | ||
717 | goto out_free_data; | ||
718 | } | ||
719 | |||
720 | reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; | ||
721 | reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT; | ||
722 | cfg->arm_lpae_s2_cfg.vtcr = reg; | ||
723 | |||
724 | /* Allocate pgd pages */ | ||
725 | data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO); | ||
726 | if (!data->pgd) | ||
727 | goto out_free_data; | ||
728 | |||
729 | cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie); | ||
730 | |||
731 | /* VTTBR */ | ||
732 | cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd); | ||
733 | return &data->iop; | ||
734 | |||
735 | out_free_data: | ||
736 | kfree(data); | ||
737 | return NULL; | ||
738 | } | ||
739 | |||
740 | static struct io_pgtable * | ||
741 | arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) | ||
742 | { | ||
743 | struct io_pgtable *iop; | ||
744 | |||
745 | if (cfg->ias > 32 || cfg->oas > 40) | ||
746 | return NULL; | ||
747 | |||
748 | cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); | ||
749 | iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie); | ||
750 | if (iop) { | ||
751 | cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE; | ||
752 | cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff; | ||
753 | } | ||
754 | |||
755 | return iop; | ||
756 | } | ||
757 | |||
758 | static struct io_pgtable * | ||
759 | arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) | ||
760 | { | ||
761 | struct io_pgtable *iop; | ||
762 | |||
763 | if (cfg->ias > 40 || cfg->oas > 40) | ||
764 | return NULL; | ||
765 | |||
766 | cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); | ||
767 | iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie); | ||
768 | if (iop) | ||
769 | cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff; | ||
770 | |||
771 | return iop; | ||
772 | } | ||
773 | |||
774 | struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = { | ||
775 | .alloc = arm_64_lpae_alloc_pgtable_s1, | ||
776 | .free = arm_lpae_free_pgtable, | ||
777 | }; | ||
778 | |||
779 | struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = { | ||
780 | .alloc = arm_64_lpae_alloc_pgtable_s2, | ||
781 | .free = arm_lpae_free_pgtable, | ||
782 | }; | ||
783 | |||
784 | struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = { | ||
785 | .alloc = arm_32_lpae_alloc_pgtable_s1, | ||
786 | .free = arm_lpae_free_pgtable, | ||
787 | }; | ||
788 | |||
789 | struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = { | ||
790 | .alloc = arm_32_lpae_alloc_pgtable_s2, | ||
791 | .free = arm_lpae_free_pgtable, | ||
792 | }; | ||
793 | |||
794 | #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST | ||
795 | |||
796 | static struct io_pgtable_cfg *cfg_cookie; | ||
797 | |||
798 | static void dummy_tlb_flush_all(void *cookie) | ||
799 | { | ||
800 | WARN_ON(cookie != cfg_cookie); | ||
801 | } | ||
802 | |||
803 | static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf, | ||
804 | void *cookie) | ||
805 | { | ||
806 | WARN_ON(cookie != cfg_cookie); | ||
807 | WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); | ||
808 | } | ||
809 | |||
810 | static void dummy_tlb_sync(void *cookie) | ||
811 | { | ||
812 | WARN_ON(cookie != cfg_cookie); | ||
813 | } | ||
814 | |||
815 | static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie) | ||
816 | { | ||
817 | WARN_ON(cookie != cfg_cookie); | ||
818 | } | ||
819 | |||
820 | static struct iommu_gather_ops dummy_tlb_ops __initdata = { | ||
821 | .tlb_flush_all = dummy_tlb_flush_all, | ||
822 | .tlb_add_flush = dummy_tlb_add_flush, | ||
823 | .tlb_sync = dummy_tlb_sync, | ||
824 | .flush_pgtable = dummy_flush_pgtable, | ||
825 | }; | ||
826 | |||
827 | static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) | ||
828 | { | ||
829 | struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); | ||
830 | struct io_pgtable_cfg *cfg = &data->iop.cfg; | ||
831 | |||
832 | pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n", | ||
833 | cfg->pgsize_bitmap, cfg->ias); | ||
834 | pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n", | ||
835 | data->levels, data->pgd_size, data->pg_shift, | ||
836 | data->bits_per_level, data->pgd); | ||
837 | } | ||
838 | |||
839 | #define __FAIL(ops, i) ({ \ | ||
840 | WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \ | ||
841 | arm_lpae_dump_ops(ops); \ | ||
842 | selftest_running = false; \ | ||
843 | -EFAULT; \ | ||
844 | }) | ||
845 | |||
846 | static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) | ||
847 | { | ||
848 | static const enum io_pgtable_fmt fmts[] = { | ||
849 | ARM_64_LPAE_S1, | ||
850 | ARM_64_LPAE_S2, | ||
851 | }; | ||
852 | |||
853 | int i, j; | ||
854 | unsigned long iova; | ||
855 | size_t size; | ||
856 | struct io_pgtable_ops *ops; | ||
857 | |||
858 | selftest_running = true; | ||
859 | |||
860 | for (i = 0; i < ARRAY_SIZE(fmts); ++i) { | ||
861 | cfg_cookie = cfg; | ||
862 | ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg); | ||
863 | if (!ops) { | ||
864 | pr_err("selftest: failed to allocate io pgtable ops\n"); | ||
865 | return -ENOMEM; | ||
866 | } | ||
867 | |||
868 | /* | ||
869 | * Initial sanity checks. | ||
870 | * Empty page tables shouldn't provide any translations. | ||
871 | */ | ||
872 | if (ops->iova_to_phys(ops, 42)) | ||
873 | return __FAIL(ops, i); | ||
874 | |||
875 | if (ops->iova_to_phys(ops, SZ_1G + 42)) | ||
876 | return __FAIL(ops, i); | ||
877 | |||
878 | if (ops->iova_to_phys(ops, SZ_2G + 42)) | ||
879 | return __FAIL(ops, i); | ||
880 | |||
881 | /* | ||
882 | * Distinct mappings of different granule sizes. | ||
883 | */ | ||
884 | iova = 0; | ||
885 | j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); | ||
886 | while (j != BITS_PER_LONG) { | ||
887 | size = 1UL << j; | ||
888 | |||
889 | if (ops->map(ops, iova, iova, size, IOMMU_READ | | ||
890 | IOMMU_WRITE | | ||
891 | IOMMU_NOEXEC | | ||
892 | IOMMU_CACHE)) | ||
893 | return __FAIL(ops, i); | ||
894 | |||
895 | /* Overlapping mappings */ | ||
896 | if (!ops->map(ops, iova, iova + size, size, | ||
897 | IOMMU_READ | IOMMU_NOEXEC)) | ||
898 | return __FAIL(ops, i); | ||
899 | |||
900 | if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) | ||
901 | return __FAIL(ops, i); | ||
902 | |||
903 | iova += SZ_1G; | ||
904 | j++; | ||
905 | j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); | ||
906 | } | ||
907 | |||
908 | /* Partial unmap */ | ||
909 | size = 1UL << __ffs(cfg->pgsize_bitmap); | ||
910 | if (ops->unmap(ops, SZ_1G + size, size) != size) | ||
911 | return __FAIL(ops, i); | ||
912 | |||
913 | /* Remap of partial unmap */ | ||
914 | if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ)) | ||
915 | return __FAIL(ops, i); | ||
916 | |||
917 | if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42)) | ||
918 | return __FAIL(ops, i); | ||
919 | |||
920 | /* Full unmap */ | ||
921 | iova = 0; | ||
922 | j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); | ||
923 | while (j != BITS_PER_LONG) { | ||
924 | size = 1UL << j; | ||
925 | |||
926 | if (ops->unmap(ops, iova, size) != size) | ||
927 | return __FAIL(ops, i); | ||
928 | |||
929 | if (ops->iova_to_phys(ops, iova + 42)) | ||
930 | return __FAIL(ops, i); | ||
931 | |||
932 | /* Remap full block */ | ||
933 | if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) | ||
934 | return __FAIL(ops, i); | ||
935 | |||
936 | if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) | ||
937 | return __FAIL(ops, i); | ||
938 | |||
939 | iova += SZ_1G; | ||
940 | j++; | ||
941 | j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); | ||
942 | } | ||
943 | |||
944 | free_io_pgtable_ops(ops); | ||
945 | } | ||
946 | |||
947 | selftest_running = false; | ||
948 | return 0; | ||
949 | } | ||
950 | |||
951 | static int __init arm_lpae_do_selftests(void) | ||
952 | { | ||
953 | static const unsigned long pgsize[] = { | ||
954 | SZ_4K | SZ_2M | SZ_1G, | ||
955 | SZ_16K | SZ_32M, | ||
956 | SZ_64K | SZ_512M, | ||
957 | }; | ||
958 | |||
959 | static const unsigned int ias[] = { | ||
960 | 32, 36, 40, 42, 44, 48, | ||
961 | }; | ||
962 | |||
963 | int i, j, pass = 0, fail = 0; | ||
964 | struct io_pgtable_cfg cfg = { | ||
965 | .tlb = &dummy_tlb_ops, | ||
966 | .oas = 48, | ||
967 | }; | ||
968 | |||
969 | for (i = 0; i < ARRAY_SIZE(pgsize); ++i) { | ||
970 | for (j = 0; j < ARRAY_SIZE(ias); ++j) { | ||
971 | cfg.pgsize_bitmap = pgsize[i]; | ||
972 | cfg.ias = ias[j]; | ||
973 | pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n", | ||
974 | pgsize[i], ias[j]); | ||
975 | if (arm_lpae_run_tests(&cfg)) | ||
976 | fail++; | ||
977 | else | ||
978 | pass++; | ||
979 | } | ||
980 | } | ||
981 | |||
982 | pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail); | ||
983 | return fail ? -EFAULT : 0; | ||
984 | } | ||
985 | subsys_initcall(arm_lpae_do_selftests); | ||
986 | #endif | ||
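
The two mapping loops in arm_lpae_run_tests() treat cfg->pgsize_bitmap as a set: each set bit j names a supported page size of 1UL << j, and find_first_bit()/find_next_bit() enumerate them from smallest to largest. A minimal, self-contained sketch of the same idiom (the bitmap value is illustrative only):

    #include <linux/bitops.h>
    #include <linux/kernel.h>
    #include <linux/sizes.h>

    static void walk_pgsizes(void)
    {
            /* SZ_4K | SZ_2M | SZ_1G sets bits 12, 21 and 30 */
            unsigned long pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G;
            unsigned int j;

            for_each_set_bit(j, &pgsize_bitmap, BITS_PER_LONG)
                    pr_info("supported page size: 0x%lx\n", 1UL << j);
    }

This is also why the selftest advances iova by SZ_1G per iteration: spacing the per-size mappings a gigabyte apart keeps even the largest supported block from overlapping its neighbours.
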
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c new file mode 100644 index 000000000000..6436fe24bc2f --- /dev/null +++ b/drivers/iommu/io-pgtable.c | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * Generic page table allocator for IOMMUs. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
15 | * | ||
16 | * Copyright (C) 2014 ARM Limited | ||
17 | * | ||
18 | * Author: Will Deacon <will.deacon@arm.com> | ||
19 | */ | ||
20 | |||
21 | #include <linux/bug.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/types.h> | ||
24 | |||
25 | #include "io-pgtable.h" | ||
26 | |||
27 | extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns; | ||
28 | extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns; | ||
29 | extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns; | ||
30 | extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns; | ||
31 | |||
32 | static const struct io_pgtable_init_fns * | ||
33 | io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = | ||
34 | { | ||
35 | #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE | ||
36 | [ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns, | ||
37 | [ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns, | ||
38 | [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns, | ||
39 | [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns, | ||
40 | #endif | ||
41 | }; | ||
42 | |||
43 | struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, | ||
44 | struct io_pgtable_cfg *cfg, | ||
45 | void *cookie) | ||
46 | { | ||
47 | struct io_pgtable *iop; | ||
48 | const struct io_pgtable_init_fns *fns; | ||
49 | |||
50 | if (fmt >= IO_PGTABLE_NUM_FMTS) | ||
51 | return NULL; | ||
52 | |||
53 | fns = io_pgtable_init_table[fmt]; | ||
54 | if (!fns) | ||
55 | return NULL; | ||
56 | |||
57 | iop = fns->alloc(cfg, cookie); | ||
58 | if (!iop) | ||
59 | return NULL; | ||
60 | |||
61 | iop->fmt = fmt; | ||
62 | iop->cookie = cookie; | ||
63 | iop->cfg = *cfg; | ||
64 | |||
65 | return &iop->ops; | ||
66 | } | ||
67 | |||
68 | /* | ||
69 | * It is the IOMMU driver's responsibility to ensure that the page table | ||
70 | * is no longer accessible to the walker by this point. | ||
71 | */ | ||
72 | void free_io_pgtable_ops(struct io_pgtable_ops *ops) | ||
73 | { | ||
74 | struct io_pgtable *iop; | ||
75 | |||
76 | if (!ops) | ||
77 | return; | ||
78 | |||
79 | iop = container_of(ops, struct io_pgtable, ops); | ||
80 | iop->cfg.tlb->tlb_flush_all(iop->cookie); | ||
81 | io_pgtable_init_table[iop->fmt]->free(iop); | ||
82 | } | ||
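
Read together with the header below, the allocator is driven in four steps: fill in an io_pgtable_cfg, pick a format, use the returned ops for map/unmap/iova_to_phys, and free the ops when the domain dies. A hedged sketch of that flow from a caller's side — my_domain_init() and my_tlb_ops are hypothetical stand-ins for a real driver's code, and error handling is trimmed:

    #include <linux/errno.h>
    #include <linux/iommu.h>
    #include <linux/sizes.h>

    #include "io-pgtable.h"

    static int my_domain_init(const struct iommu_gather_ops *my_tlb_ops,
                              void *cookie)
    {
            struct io_pgtable_ops *ops;
            struct io_pgtable_cfg cfg = {
                    .pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G,
                    .ias           = 32,    /* input (iova) bits */
                    .oas           = 40,    /* output (paddr) bits */
                    .tlb           = my_tlb_ops,
            };

            ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
            if (!ops)
                    return -ENOMEM;

            /* Identity-map 2MiB at 1GiB, then tear it all down again. */
            ops->map(ops, SZ_1G, SZ_1G, SZ_2M, IOMMU_READ | IOMMU_WRITE);
            ops->unmap(ops, SZ_1G, SZ_2M);

            free_io_pgtable_ops(ops);       /* flushes the TLB via cfg.tlb */
            return 0;
    }

Note that cfg is passed by pointer and may be written back — the allocator can restrict pgsize_bitmap and fills in the arm_lpae_s1_cfg register values — so a driver keeps its copy around for programming the hardware.
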
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h new file mode 100644 index 000000000000..10e32f69c668 --- /dev/null +++ b/drivers/iommu/io-pgtable.h | |||
@@ -0,0 +1,143 @@ | |||
1 | #ifndef __IO_PGTABLE_H | ||
2 | #define __IO_PGTABLE_H | ||
3 | |||
4 | /* | ||
5 | * Public API for use by IOMMU drivers | ||
6 | */ | ||
7 | enum io_pgtable_fmt { | ||
8 | ARM_32_LPAE_S1, | ||
9 | ARM_32_LPAE_S2, | ||
10 | ARM_64_LPAE_S1, | ||
11 | ARM_64_LPAE_S2, | ||
12 | IO_PGTABLE_NUM_FMTS, | ||
13 | }; | ||
14 | |||
15 | /** | ||
16 | * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management. | ||
17 | * | ||
18 | * @tlb_flush_all: Synchronously invalidate the entire TLB context. | ||
19 | * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range. | ||
20 | * @tlb_sync: Ensure any queued TLB invalidation has taken effect. | ||
21 | * @flush_pgtable: Ensure page table updates are visible to the IOMMU. | ||
22 | * | ||
23 | * Note that these can all be called in atomic context and must therefore | ||
24 | * not block. | ||
25 | */ | ||
26 | struct iommu_gather_ops { | ||
27 | void (*tlb_flush_all)(void *cookie); | ||
28 | void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf, | ||
29 | void *cookie); | ||
30 | void (*tlb_sync)(void *cookie); | ||
31 | void (*flush_pgtable)(void *ptr, size_t size, void *cookie); | ||
32 | }; | ||
33 | |||
34 | /** | ||
35 | * struct io_pgtable_cfg - Configuration data for a set of page tables. | ||
36 | * | ||
37 | * @quirks: A bitmap of hardware quirks that require some special | ||
38 | * action by the low-level page table allocator. | ||
39 | * @pgsize_bitmap: A bitmap of page sizes supported by this set of page | ||
40 | * tables. | ||
41 | * @ias: Input address (iova) size, in bits. | ||
42 | * @oas: Output address (paddr) size, in bits. | ||
43 | * @tlb: TLB management callbacks for this set of tables. | ||
44 | */ | ||
45 | struct io_pgtable_cfg { | ||
46 | #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */ | ||
47 | int quirks; | ||
48 | unsigned long pgsize_bitmap; | ||
49 | unsigned int ias; | ||
50 | unsigned int oas; | ||
51 | const struct iommu_gather_ops *tlb; | ||
52 | |||
53 | /* Low-level data specific to the table format */ | ||
54 | union { | ||
55 | struct { | ||
56 | u64 ttbr[2]; | ||
57 | u64 tcr; | ||
58 | u64 mair[2]; | ||
59 | } arm_lpae_s1_cfg; | ||
60 | |||
61 | struct { | ||
62 | u64 vttbr; | ||
63 | u64 vtcr; | ||
64 | } arm_lpae_s2_cfg; | ||
65 | }; | ||
66 | }; | ||
67 | |||
68 | /** | ||
69 | * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers. | ||
70 | * | ||
71 | * @map: Map a physically contiguous memory region. | ||
72 | * @unmap: Unmap a physically contiguous memory region. | ||
73 | * @iova_to_phys: Translate iova to physical address. | ||
74 | * | ||
75 | * These functions map directly onto the iommu_ops member functions with | ||
76 | * the same names. | ||
77 | */ | ||
78 | struct io_pgtable_ops { | ||
79 | int (*map)(struct io_pgtable_ops *ops, unsigned long iova, | ||
80 | phys_addr_t paddr, size_t size, int prot); | ||
81 | int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, | ||
82 | size_t size); | ||
83 | phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, | ||
84 | unsigned long iova); | ||
85 | }; | ||
86 | |||
87 | /** | ||
88 | * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU. | ||
89 | * | ||
90 | * @fmt: The page table format. | ||
91 | * @cfg: The page table configuration. This will be modified to represent | ||
92 | * the configuration actually provided by the allocator (e.g. the | ||
93 | * pgsize_bitmap may be restricted). | ||
94 | * @cookie: An opaque token provided by the IOMMU driver and passed back to | ||
95 | * the callback routines in cfg->tlb. | ||
96 | */ | ||
97 | struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, | ||
98 | struct io_pgtable_cfg *cfg, | ||
99 | void *cookie); | ||
100 | |||
101 | /** | ||
102 | * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller | ||
103 | * *must* ensure that the page table is no longer | ||
104 | * live, but the TLB can be dirty. | ||
105 | * | ||
106 | * @ops: The ops returned from alloc_io_pgtable_ops. | ||
107 | */ | ||
108 | void free_io_pgtable_ops(struct io_pgtable_ops *ops); | ||
109 | |||
110 | |||
111 | /* | ||
112 | * Internal structures for page table allocator implementations. | ||
113 | */ | ||
114 | |||
115 | /** | ||
116 | * struct io_pgtable - Internal structure describing a set of page tables. | ||
117 | * | ||
118 | * @fmt: The page table format. | ||
119 | * @cookie: An opaque token provided by the IOMMU driver and passed back to | ||
120 | * any callback routines. | ||
121 | * @cfg: A copy of the page table configuration. | ||
122 | * @ops: The page table operations in use for this set of page tables. | ||
123 | */ | ||
124 | struct io_pgtable { | ||
125 | enum io_pgtable_fmt fmt; | ||
126 | void *cookie; | ||
127 | struct io_pgtable_cfg cfg; | ||
128 | struct io_pgtable_ops ops; | ||
129 | }; | ||
130 | |||
131 | /** | ||
132 | * struct io_pgtable_init_fns - Alloc/free a set of page tables for a | ||
133 | * particular format. | ||
134 | * | ||
135 | * @alloc: Allocate a set of page tables described by cfg. | ||
136 | * @free: Free the page tables associated with iop. | ||
137 | */ | ||
138 | struct io_pgtable_init_fns { | ||
139 | struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie); | ||
140 | void (*free)(struct io_pgtable *iop); | ||
141 | }; | ||
142 | |||
143 | #endif /* __IO_PGTABLE_H */ | ||
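
struct io_pgtable embeds its io_pgtable_ops by value, so both the core (in free_io_pgtable_ops()) and each format implementation can get from the public ops handle back to the private state with container_of() alone, with no extra pointer stored per table. A sketch of the double hop as the LPAE code uses it in arm_lpae_dump_ops() above — the macro definitions are reconstructed here, since the originals live in the part of io-pgtable-arm.c not quoted in this section:

    #include <linux/kernel.h>       /* container_of() */

    /* Format-private wrapper around the generic io_pgtable. */
    struct arm_lpae_io_pgtable {
            struct io_pgtable       iop;
            /* ... levels, pg_shift, pgd, ... */
    };

    #define io_pgtable_ops_to_pgtable(x) \
            container_of((x), struct io_pgtable, ops)

    #define io_pgtable_ops_to_data(x) \
            container_of(io_pgtable_ops_to_pgtable(x), \
                         struct arm_lpae_io_pgtable, iop)

The same trick is what lets free_io_pgtable_ops() recover fmt and cookie from nothing but the ops pointer before dispatching to the per-format free() routine.
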
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index f7718d73e984..72e683df0731 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <jroedel@suse.de> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 as published | 6 | * under the terms of the GNU General Public License version 2 as published |
@@ -1084,7 +1084,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, | |||
1084 | if (ret) | 1084 | if (ret) |
1085 | iommu_unmap(domain, orig_iova, orig_size - size); | 1085 | iommu_unmap(domain, orig_iova, orig_size - size); |
1086 | else | 1086 | else |
1087 | trace_map(iova, paddr, size); | 1087 | trace_map(orig_iova, paddr, orig_size); |
1088 | 1088 | ||
1089 | return ret; | 1089 | return ret; |
1090 | } | 1090 | } |
@@ -1094,6 +1094,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) | |||
1094 | { | 1094 | { |
1095 | size_t unmapped_page, unmapped = 0; | 1095 | size_t unmapped_page, unmapped = 0; |
1096 | unsigned int min_pagesz; | 1096 | unsigned int min_pagesz; |
1097 | unsigned long orig_iova = iova; | ||
1097 | 1098 | ||
1098 | if (unlikely(domain->ops->unmap == NULL || | 1099 | if (unlikely(domain->ops->unmap == NULL || |
1099 | domain->ops->pgsize_bitmap == 0UL)) | 1100 | domain->ops->pgsize_bitmap == 0UL)) |
@@ -1133,7 +1134,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) | |||
1133 | unmapped += unmapped_page; | 1134 | unmapped += unmapped_page; |
1134 | } | 1135 | } |
1135 | 1136 | ||
1136 | trace_unmap(iova, 0, size); | 1137 | trace_unmap(orig_iova, size, unmapped); |
1137 | return unmapped; | 1138 | return unmapped; |
1138 | } | 1139 | } |
1139 | EXPORT_SYMBOL_GPL(iommu_unmap); | 1140 | EXPORT_SYMBOL_GPL(iommu_unmap); |
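
Both tracepoint fixes follow the same pattern: iova (and, on the map path, size) is advanced as the request is split into page-sized chunks, so the values left over at the end describe the last chunk rather than the request. Recording the originals up front restores the caller's view; a condensed sketch of the unmap shape, loop body elided:

    /* before the splitting loop */
    unsigned long orig_iova = iova;
    size_t unmapped = 0;

    while (unmapped < size) {
            /* ... unmap one chunk, advance iova, bump unmapped ... */
    }

    /* report the whole request, plus how much actually went away */
    trace_unmap(orig_iova, size, unmapped);

The map side is the same idea with trace_map(orig_iova, paddr, orig_size), reusing the orig_iova/orig_size that iommu_map() already kept for its error path.
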
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index f6b17e6af2fb..9dd8208312c2 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c | |||
@@ -18,13 +18,58 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/iova.h> | 20 | #include <linux/iova.h> |
21 | #include <linux/slab.h> | ||
22 | |||
23 | static struct kmem_cache *iommu_iova_cache; | ||
24 | |||
25 | int iommu_iova_cache_init(void) | ||
26 | { | ||
27 | int ret = 0; | ||
28 | |||
29 | iommu_iova_cache = kmem_cache_create("iommu_iova", | ||
30 | sizeof(struct iova), | ||
31 | 0, | ||
32 | SLAB_HWCACHE_ALIGN, | ||
33 | NULL); | ||
34 | if (!iommu_iova_cache) { | ||
35 | pr_err("Couldn't create iova cache\n"); | ||
36 | ret = -ENOMEM; | ||
37 | } | ||
38 | |||
39 | return ret; | ||
40 | } | ||
41 | |||
42 | void iommu_iova_cache_destroy(void) | ||
43 | { | ||
44 | kmem_cache_destroy(iommu_iova_cache); | ||
45 | } | ||
46 | |||
47 | struct iova *alloc_iova_mem(void) | ||
48 | { | ||
49 | return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC); | ||
50 | } | ||
51 | |||
52 | void free_iova_mem(struct iova *iova) | ||
53 | { | ||
54 | kmem_cache_free(iommu_iova_cache, iova); | ||
55 | } | ||
21 | 56 | ||
22 | void | 57 | void |
23 | init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit) | 58 | init_iova_domain(struct iova_domain *iovad, unsigned long granule, |
59 | unsigned long start_pfn, unsigned long pfn_32bit) | ||
24 | { | 60 | { |
61 | /* | ||
62 | * IOVA granularity will normally be equal to the smallest | ||
63 | * supported IOMMU page size; both *must* be capable of | ||
64 | * representing individual CPU pages exactly. | ||
65 | */ | ||
66 | BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule)); | ||
67 | |||
25 | spin_lock_init(&iovad->iova_rbtree_lock); | 68 | spin_lock_init(&iovad->iova_rbtree_lock); |
26 | iovad->rbroot = RB_ROOT; | 69 | iovad->rbroot = RB_ROOT; |
27 | iovad->cached32_node = NULL; | 70 | iovad->cached32_node = NULL; |
71 | iovad->granule = granule; | ||
72 | iovad->start_pfn = start_pfn; | ||
28 | iovad->dma_32bit_pfn = pfn_32bit; | 73 | iovad->dma_32bit_pfn = pfn_32bit; |
29 | } | 74 | } |
30 | 75 | ||
@@ -127,7 +172,7 @@ move_left: | |||
127 | if (!curr) { | 172 | if (!curr) { |
128 | if (size_aligned) | 173 | if (size_aligned) |
129 | pad_size = iova_get_pad_size(size, limit_pfn); | 174 | pad_size = iova_get_pad_size(size, limit_pfn); |
130 | if ((IOVA_START_PFN + size + pad_size) > limit_pfn) { | 175 | if ((iovad->start_pfn + size + pad_size) > limit_pfn) { |
131 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 176 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
132 | return -ENOMEM; | 177 | return -ENOMEM; |
133 | } | 178 | } |
@@ -202,8 +247,8 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova) | |||
202 | * @size: - size of page frames to allocate | 247 | * @size: - size of page frames to allocate |
203 | * @limit_pfn: - max limit address | 248 | * @limit_pfn: - max limit address |
204 | * @size_aligned: - set if size_aligned address range is required | 249 | * @size_aligned: - set if size_aligned address range is required |
205 | * This function allocates an iova in the range limit_pfn to IOVA_START_PFN | 250 | * This function allocates an iova in the range iovad->start_pfn to limit_pfn, |
206 | * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned | 251 | * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned |
207 | * flag is set then the allocated address iova->pfn_lo will be naturally | 252 | * flag is set then the allocated address iova->pfn_lo will be naturally |
208 | * aligned on roundup_power_of_two(size). | 253 | * aligned on roundup_power_of_two(size). |
209 | */ | 254 | */ |
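
Since the rbtree code now reads iovad->granule and iovad->start_pfn instead of compile-time constants, every user spells out its address-space parameters at init time. A hedged example of the new call — the particular values are illustrative, not taken from any in-tree caller:

    #include <linux/dma-mapping.h>
    #include <linux/iova.h>
    #include <linux/log2.h>
    #include <linux/sizes.h>

    static struct iova_domain iovad;

    static void my_iova_setup(void)
    {
            /*
             * 4KiB IOVA granule (must be a power of two no larger than
             * PAGE_SIZE), allocations starting at 1MiB, and the usual
             * 32-bit boundary for the cached-node optimisation.
             */
            init_iova_domain(&iovad, SZ_4K,
                             SZ_1M >> ilog2(SZ_4K),
                             DMA_BIT_MASK(32) >> ilog2(SZ_4K));
    }

The BUG_ON() in init_iova_domain() enforces exactly the constraint in its comment: a granule coarser than the CPU page size could no longer represent individual CPU pages.
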
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 748693192c20..10186cac7716 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
17 | #include <linux/iommu.h> | 17 | #include <linux/iommu.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/platform_data/ipmmu-vmsa.h> | 19 | #include <linux/of.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/sizes.h> | 21 | #include <linux/sizes.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
@@ -24,12 +24,13 @@ | |||
24 | #include <asm/dma-iommu.h> | 24 | #include <asm/dma-iommu.h> |
25 | #include <asm/pgalloc.h> | 25 | #include <asm/pgalloc.h> |
26 | 26 | ||
27 | #include "io-pgtable.h" | ||
28 | |||
27 | struct ipmmu_vmsa_device { | 29 | struct ipmmu_vmsa_device { |
28 | struct device *dev; | 30 | struct device *dev; |
29 | void __iomem *base; | 31 | void __iomem *base; |
30 | struct list_head list; | 32 | struct list_head list; |
31 | 33 | ||
32 | const struct ipmmu_vmsa_platform_data *pdata; | ||
33 | unsigned int num_utlbs; | 34 | unsigned int num_utlbs; |
34 | 35 | ||
35 | struct dma_iommu_mapping *mapping; | 36 | struct dma_iommu_mapping *mapping; |
@@ -39,14 +40,17 @@ struct ipmmu_vmsa_domain { | |||
39 | struct ipmmu_vmsa_device *mmu; | 40 | struct ipmmu_vmsa_device *mmu; |
40 | struct iommu_domain *io_domain; | 41 | struct iommu_domain *io_domain; |
41 | 42 | ||
43 | struct io_pgtable_cfg cfg; | ||
44 | struct io_pgtable_ops *iop; | ||
45 | |||
42 | unsigned int context_id; | 46 | unsigned int context_id; |
43 | spinlock_t lock; /* Protects mappings */ | 47 | spinlock_t lock; /* Protects mappings */ |
44 | pgd_t *pgd; | ||
45 | }; | 48 | }; |
46 | 49 | ||
47 | struct ipmmu_vmsa_archdata { | 50 | struct ipmmu_vmsa_archdata { |
48 | struct ipmmu_vmsa_device *mmu; | 51 | struct ipmmu_vmsa_device *mmu; |
49 | unsigned int utlb; | 52 | unsigned int *utlbs; |
53 | unsigned int num_utlbs; | ||
50 | }; | 54 | }; |
51 | 55 | ||
52 | static DEFINE_SPINLOCK(ipmmu_devices_lock); | 56 | static DEFINE_SPINLOCK(ipmmu_devices_lock); |
@@ -58,6 +62,8 @@ static LIST_HEAD(ipmmu_devices); | |||
58 | * Registers Definition | 62 | * Registers Definition |
59 | */ | 63 | */ |
60 | 64 | ||
65 | #define IM_NS_ALIAS_OFFSET 0x800 | ||
66 | |||
61 | #define IM_CTX_SIZE 0x40 | 67 | #define IM_CTX_SIZE 0x40 |
62 | 68 | ||
63 | #define IMCTR 0x0000 | 69 | #define IMCTR 0x0000 |
@@ -171,52 +177,6 @@ static LIST_HEAD(ipmmu_devices); | |||
171 | #define IMUASID_ASID0_SHIFT 0 | 177 | #define IMUASID_ASID0_SHIFT 0 |
172 | 178 | ||
173 | /* ----------------------------------------------------------------------------- | 179 | /* ----------------------------------------------------------------------------- |
174 | * Page Table Bits | ||
175 | */ | ||
176 | |||
177 | /* | ||
178 | * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory access, | ||
179 | * Long-descriptor format" that the NStable bit being set in a table descriptor | ||
180 | * will result in the NStable and NS bits of all child entries being ignored and | ||
181 | * considered as being set. The IPMMU seems not to comply with this, as it | ||
182 | * generates a secure access page fault if any of the NStable and NS bits isn't | ||
183 | * set when running in non-secure mode. | ||
184 | */ | ||
185 | #ifndef PMD_NSTABLE | ||
186 | #define PMD_NSTABLE (_AT(pmdval_t, 1) << 63) | ||
187 | #endif | ||
188 | |||
189 | #define ARM_VMSA_PTE_XN (((pteval_t)3) << 53) | ||
190 | #define ARM_VMSA_PTE_CONT (((pteval_t)1) << 52) | ||
191 | #define ARM_VMSA_PTE_AF (((pteval_t)1) << 10) | ||
192 | #define ARM_VMSA_PTE_SH_NS (((pteval_t)0) << 8) | ||
193 | #define ARM_VMSA_PTE_SH_OS (((pteval_t)2) << 8) | ||
194 | #define ARM_VMSA_PTE_SH_IS (((pteval_t)3) << 8) | ||
195 | #define ARM_VMSA_PTE_SH_MASK (((pteval_t)3) << 8) | ||
196 | #define ARM_VMSA_PTE_NS (((pteval_t)1) << 5) | ||
197 | #define ARM_VMSA_PTE_PAGE (((pteval_t)3) << 0) | ||
198 | |||
199 | /* Stage-1 PTE */ | ||
200 | #define ARM_VMSA_PTE_nG (((pteval_t)1) << 11) | ||
201 | #define ARM_VMSA_PTE_AP_UNPRIV (((pteval_t)1) << 6) | ||
202 | #define ARM_VMSA_PTE_AP_RDONLY (((pteval_t)2) << 6) | ||
203 | #define ARM_VMSA_PTE_AP_MASK (((pteval_t)3) << 6) | ||
204 | #define ARM_VMSA_PTE_ATTRINDX_MASK (((pteval_t)3) << 2) | ||
205 | #define ARM_VMSA_PTE_ATTRINDX_SHIFT 2 | ||
206 | |||
207 | #define ARM_VMSA_PTE_ATTRS_MASK \ | ||
208 | (ARM_VMSA_PTE_XN | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_nG | \ | ||
209 | ARM_VMSA_PTE_AF | ARM_VMSA_PTE_SH_MASK | ARM_VMSA_PTE_AP_MASK | \ | ||
210 | ARM_VMSA_PTE_NS | ARM_VMSA_PTE_ATTRINDX_MASK) | ||
211 | |||
212 | #define ARM_VMSA_PTE_CONT_ENTRIES 16 | ||
213 | #define ARM_VMSA_PTE_CONT_SIZE (PAGE_SIZE * ARM_VMSA_PTE_CONT_ENTRIES) | ||
214 | |||
215 | #define IPMMU_PTRS_PER_PTE 512 | ||
216 | #define IPMMU_PTRS_PER_PMD 512 | ||
217 | #define IPMMU_PTRS_PER_PGD 4 | ||
218 | |||
219 | /* ----------------------------------------------------------------------------- | ||
220 | * Read/Write Access | 180 | * Read/Write Access |
221 | */ | 181 | */ |
222 | 182 | ||
@@ -305,18 +265,39 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, | |||
305 | ipmmu_write(mmu, IMUCTR(utlb), 0); | 265 | ipmmu_write(mmu, IMUCTR(utlb), 0); |
306 | } | 266 | } |
307 | 267 | ||
308 | static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr, | 268 | static void ipmmu_tlb_flush_all(void *cookie) |
309 | size_t size) | 269 | { |
270 | struct ipmmu_vmsa_domain *domain = cookie; | ||
271 | |||
272 | ipmmu_tlb_invalidate(domain); | ||
273 | } | ||
274 | |||
275 | static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf, | ||
276 | void *cookie) | ||
310 | { | 277 | { |
311 | unsigned long offset = (unsigned long)addr & ~PAGE_MASK; | 278 | /* The hardware doesn't support selective TLB flush. */ |
279 | } | ||
280 | |||
281 | static void ipmmu_flush_pgtable(void *ptr, size_t size, void *cookie) | ||
282 | { | ||
283 | unsigned long offset = (unsigned long)ptr & ~PAGE_MASK; | ||
284 | struct ipmmu_vmsa_domain *domain = cookie; | ||
312 | 285 | ||
313 | /* | 286 | /* |
314 | * TODO: Add support for coherent walk through CCI with DVM and remove | 287 | * TODO: Add support for coherent walk through CCI with DVM and remove |
315 | * cache handling. | 288 | * cache handling. |
316 | */ | 289 | */ |
317 | dma_map_page(mmu->dev, virt_to_page(addr), offset, size, DMA_TO_DEVICE); | 290 | dma_map_page(domain->mmu->dev, virt_to_page(ptr), offset, size, |
291 | DMA_TO_DEVICE); | ||
318 | } | 292 | } |
319 | 293 | ||
294 | static struct iommu_gather_ops ipmmu_gather_ops = { | ||
295 | .tlb_flush_all = ipmmu_tlb_flush_all, | ||
296 | .tlb_add_flush = ipmmu_tlb_add_flush, | ||
297 | .tlb_sync = ipmmu_tlb_flush_all, | ||
298 | .flush_pgtable = ipmmu_flush_pgtable, | ||
299 | }; | ||
300 | |||
320 | /* ----------------------------------------------------------------------------- | 301 | /* ----------------------------------------------------------------------------- |
321 | * Domain/Context Management | 302 | * Domain/Context Management |
322 | */ | 303 | */ |
@@ -324,7 +305,28 @@ static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr, | |||
324 | static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) | 305 | static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) |
325 | { | 306 | { |
326 | phys_addr_t ttbr; | 307 | phys_addr_t ttbr; |
327 | u32 reg; | 308 | |
309 | /* | ||
310 | * Allocate the page table operations. | ||
311 | * | ||
312 | * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory | ||
313 | * access, Long-descriptor format" that the NStable bit being set in a | ||
314 | * table descriptor will result in the NStable and NS bits of all child | ||
315 | * entries being ignored and considered as being set. The IPMMU seems | ||
316 | * not to comply with this, as it generates a secure access page fault | ||
317 | * if any of the NStable and NS bits isn't set when running in | ||
318 | * non-secure mode. | ||
319 | */ | ||
320 | domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS; | ||
321 | domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; | ||
322 | domain->cfg.ias = 32; | ||
323 | domain->cfg.oas = 40; | ||
324 | domain->cfg.tlb = &ipmmu_gather_ops; | ||
325 | |||
326 | domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, | ||
327 | domain); | ||
328 | if (!domain->iop) | ||
329 | return -EINVAL; | ||
328 | 330 | ||
329 | /* | 331 | /* |
330 | * TODO: When adding support for multiple contexts, find an unused | 332 | * TODO: When adding support for multiple contexts, find an unused |
@@ -333,9 +335,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) | |||
333 | domain->context_id = 0; | 335 | domain->context_id = 0; |
334 | 336 | ||
335 | /* TTBR0 */ | 337 | /* TTBR0 */ |
336 | ipmmu_flush_pgtable(domain->mmu, domain->pgd, | 338 | ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0]; |
337 | IPMMU_PTRS_PER_PGD * sizeof(*domain->pgd)); | ||
338 | ttbr = __pa(domain->pgd); | ||
339 | ipmmu_ctx_write(domain, IMTTLBR0, ttbr); | 339 | ipmmu_ctx_write(domain, IMTTLBR0, ttbr); |
340 | ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32); | 340 | ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32); |
341 | 341 | ||
@@ -348,15 +348,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) | |||
348 | IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | | 348 | IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | |
349 | IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1); | 349 | IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1); |
350 | 350 | ||
351 | /* | 351 | /* MAIR0 */ |
352 | * MAIR0 | 352 | ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]); |
353 | * We need three attributes only, non-cacheable, write-back read/write | ||
354 | * allocate and device memory. | ||
355 | */ | ||
356 | reg = (IMMAIR_ATTR_NC << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_NC)) | ||
357 | | (IMMAIR_ATTR_WBRWA << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_WBRWA)) | ||
358 | | (IMMAIR_ATTR_DEVICE << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_DEV)); | ||
359 | ipmmu_ctx_write(domain, IMMAIR0, reg); | ||
360 | 353 | ||
361 | /* IMBUSCR */ | 354 | /* IMBUSCR */ |
362 | ipmmu_ctx_write(domain, IMBUSCR, | 355 | ipmmu_ctx_write(domain, IMBUSCR, |
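
What used to be open-coded NS/NSTABLE bits in every PTE is now a single capability flag: the driver states the quirk, and the LPAE allocator is expected to fold the non-secure bits into each entry it writes. A hedged sketch of the consuming side — the checks sit in the PTE-construction path of io-pgtable-arm.c, which is not quoted in this section, so treat names and placement as assumed:

    /* Inside the LPAE allocator, when building a leaf entry (sketch): */
    if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
            pte |= ARM_LPAE_PTE_NS;         /* leaf: non-secure */

    /* ...and when installing a next-level table descriptor: */
    if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
            pte |= ARM_LPAE_PTE_NSTABLE;    /* table: non-secure walk */

This keeps the workaround for the IPMMU's non-compliant NStable handling out of the driver while still honouring the behaviour described in the comment above.
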
@@ -461,396 +454,6 @@ static irqreturn_t ipmmu_irq(int irq, void *dev) | |||
461 | } | 454 | } |
462 | 455 | ||
463 | /* ----------------------------------------------------------------------------- | 456 | /* ----------------------------------------------------------------------------- |
464 | * Page Table Management | ||
465 | */ | ||
466 | |||
467 | #define pud_pgtable(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK)) | ||
468 | |||
469 | static void ipmmu_free_ptes(pmd_t *pmd) | ||
470 | { | ||
471 | pgtable_t table = pmd_pgtable(*pmd); | ||
472 | __free_page(table); | ||
473 | } | ||
474 | |||
475 | static void ipmmu_free_pmds(pud_t *pud) | ||
476 | { | ||
477 | pmd_t *pmd = pmd_offset(pud, 0); | ||
478 | pgtable_t table; | ||
479 | unsigned int i; | ||
480 | |||
481 | for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) { | ||
482 | if (!pmd_table(*pmd)) | ||
483 | continue; | ||
484 | |||
485 | ipmmu_free_ptes(pmd); | ||
486 | pmd++; | ||
487 | } | ||
488 | |||
489 | table = pud_pgtable(*pud); | ||
490 | __free_page(table); | ||
491 | } | ||
492 | |||
493 | static void ipmmu_free_pgtables(struct ipmmu_vmsa_domain *domain) | ||
494 | { | ||
495 | pgd_t *pgd, *pgd_base = domain->pgd; | ||
496 | unsigned int i; | ||
497 | |||
498 | /* | ||
499 | * Recursively free the page tables for this domain. We don't care about | ||
500 | * speculative TLB filling, because the TLB will be nuked next time this | ||
501 | * context bank is re-allocated and no devices currently map to these | ||
502 | * tables. | ||
503 | */ | ||
504 | pgd = pgd_base; | ||
505 | for (i = 0; i < IPMMU_PTRS_PER_PGD; ++i) { | ||
506 | if (pgd_none(*pgd)) | ||
507 | continue; | ||
508 | ipmmu_free_pmds((pud_t *)pgd); | ||
509 | pgd++; | ||
510 | } | ||
511 | |||
512 | kfree(pgd_base); | ||
513 | } | ||
514 | |||
515 | /* | ||
516 | * We can't use the (pgd|pud|pmd|pte)_populate or the set_(pgd|pud|pmd|pte) | ||
517 | * functions as they would flush the CPU TLB. | ||
518 | */ | ||
519 | |||
520 | static pte_t *ipmmu_alloc_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd, | ||
521 | unsigned long iova) | ||
522 | { | ||
523 | pte_t *pte; | ||
524 | |||
525 | if (!pmd_none(*pmd)) | ||
526 | return pte_offset_kernel(pmd, iova); | ||
527 | |||
528 | pte = (pte_t *)get_zeroed_page(GFP_ATOMIC); | ||
529 | if (!pte) | ||
530 | return NULL; | ||
531 | |||
532 | ipmmu_flush_pgtable(mmu, pte, PAGE_SIZE); | ||
533 | *pmd = __pmd(__pa(pte) | PMD_NSTABLE | PMD_TYPE_TABLE); | ||
534 | ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd)); | ||
535 | |||
536 | return pte + pte_index(iova); | ||
537 | } | ||
538 | |||
539 | static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd, | ||
540 | unsigned long iova) | ||
541 | { | ||
542 | pud_t *pud = (pud_t *)pgd; | ||
543 | pmd_t *pmd; | ||
544 | |||
545 | if (!pud_none(*pud)) | ||
546 | return pmd_offset(pud, iova); | ||
547 | |||
548 | pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); | ||
549 | if (!pmd) | ||
550 | return NULL; | ||
551 | |||
552 | ipmmu_flush_pgtable(mmu, pmd, PAGE_SIZE); | ||
553 | *pud = __pud(__pa(pmd) | PMD_NSTABLE | PMD_TYPE_TABLE); | ||
554 | ipmmu_flush_pgtable(mmu, pud, sizeof(*pud)); | ||
555 | |||
556 | return pmd + pmd_index(iova); | ||
557 | } | ||
558 | |||
559 | static u64 ipmmu_page_prot(unsigned int prot, u64 type) | ||
560 | { | ||
561 | u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF | ||
562 | | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV | ||
563 | | ARM_VMSA_PTE_NS | type; | ||
564 | |||
565 | if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) | ||
566 | pgprot |= ARM_VMSA_PTE_AP_RDONLY; | ||
567 | |||
568 | if (prot & IOMMU_CACHE) | ||
569 | pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT; | ||
570 | |||
571 | if (prot & IOMMU_NOEXEC) | ||
572 | pgprot |= ARM_VMSA_PTE_XN; | ||
573 | else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) | ||
574 | /* If no access create a faulting entry to avoid TLB fills. */ | ||
575 | pgprot &= ~ARM_VMSA_PTE_PAGE; | ||
576 | |||
577 | return pgprot; | ||
578 | } | ||
579 | |||
580 | static int ipmmu_alloc_init_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd, | ||
581 | unsigned long iova, unsigned long pfn, | ||
582 | size_t size, int prot) | ||
583 | { | ||
584 | pteval_t pteval = ipmmu_page_prot(prot, ARM_VMSA_PTE_PAGE); | ||
585 | unsigned int num_ptes = 1; | ||
586 | pte_t *pte, *start; | ||
587 | unsigned int i; | ||
588 | |||
589 | pte = ipmmu_alloc_pte(mmu, pmd, iova); | ||
590 | if (!pte) | ||
591 | return -ENOMEM; | ||
592 | |||
593 | start = pte; | ||
594 | |||
595 | /* | ||
595 | * Install the page table entries. We can be called either for a single | ||
597 | * page or for a block of 16 physically contiguous pages. In the latter | ||
598 | * case set the PTE contiguous hint. | ||
599 | */ | ||
600 | if (size == SZ_64K) { | ||
601 | pteval |= ARM_VMSA_PTE_CONT; | ||
602 | num_ptes = ARM_VMSA_PTE_CONT_ENTRIES; | ||
603 | } | ||
604 | |||
605 | for (i = num_ptes; i; --i) | ||
606 | *pte++ = pfn_pte(pfn++, __pgprot(pteval)); | ||
607 | |||
608 | ipmmu_flush_pgtable(mmu, start, sizeof(*pte) * num_ptes); | ||
609 | |||
610 | return 0; | ||
611 | } | ||
612 | |||
613 | static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd, | ||
614 | unsigned long iova, unsigned long pfn, | ||
615 | int prot) | ||
616 | { | ||
617 | pmdval_t pmdval = ipmmu_page_prot(prot, PMD_TYPE_SECT); | ||
618 | |||
619 | *pmd = pfn_pmd(pfn, __pgprot(pmdval)); | ||
620 | ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd)); | ||
621 | |||
622 | return 0; | ||
623 | } | ||
624 | |||
625 | static int ipmmu_create_mapping(struct ipmmu_vmsa_domain *domain, | ||
626 | unsigned long iova, phys_addr_t paddr, | ||
627 | size_t size, int prot) | ||
628 | { | ||
629 | struct ipmmu_vmsa_device *mmu = domain->mmu; | ||
630 | pgd_t *pgd = domain->pgd; | ||
631 | unsigned long flags; | ||
632 | unsigned long pfn; | ||
633 | pmd_t *pmd; | ||
634 | int ret; | ||
635 | |||
636 | if (!pgd) | ||
637 | return -EINVAL; | ||
638 | |||
639 | if (size & ~PAGE_MASK) | ||
640 | return -EINVAL; | ||
641 | |||
642 | if (paddr & ~((1ULL << 40) - 1)) | ||
643 | return -ERANGE; | ||
644 | |||
645 | pfn = __phys_to_pfn(paddr); | ||
646 | pgd += pgd_index(iova); | ||
647 | |||
648 | /* Update the page tables. */ | ||
649 | spin_lock_irqsave(&domain->lock, flags); | ||
650 | |||
651 | pmd = ipmmu_alloc_pmd(mmu, pgd, iova); | ||
652 | if (!pmd) { | ||
653 | ret = -ENOMEM; | ||
654 | goto done; | ||
655 | } | ||
656 | |||
657 | switch (size) { | ||
658 | case SZ_2M: | ||
659 | ret = ipmmu_alloc_init_pmd(mmu, pmd, iova, pfn, prot); | ||
660 | break; | ||
661 | case SZ_64K: | ||
662 | case SZ_4K: | ||
663 | ret = ipmmu_alloc_init_pte(mmu, pmd, iova, pfn, size, prot); | ||
664 | break; | ||
665 | default: | ||
666 | ret = -EINVAL; | ||
667 | break; | ||
668 | } | ||
669 | |||
670 | done: | ||
671 | spin_unlock_irqrestore(&domain->lock, flags); | ||
672 | |||
673 | if (!ret) | ||
674 | ipmmu_tlb_invalidate(domain); | ||
675 | |||
676 | return ret; | ||
677 | } | ||
678 | |||
679 | static void ipmmu_clear_pud(struct ipmmu_vmsa_device *mmu, pud_t *pud) | ||
680 | { | ||
681 | /* Free the page table. */ | ||
682 | pgtable_t table = pud_pgtable(*pud); | ||
683 | __free_page(table); | ||
684 | |||
685 | /* Clear the PUD. */ | ||
686 | *pud = __pud(0); | ||
687 | ipmmu_flush_pgtable(mmu, pud, sizeof(*pud)); | ||
688 | } | ||
689 | |||
690 | static void ipmmu_clear_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud, | ||
691 | pmd_t *pmd) | ||
692 | { | ||
693 | unsigned int i; | ||
694 | |||
695 | /* Free the page table. */ | ||
696 | if (pmd_table(*pmd)) { | ||
697 | pgtable_t table = pmd_pgtable(*pmd); | ||
698 | __free_page(table); | ||
699 | } | ||
700 | |||
701 | /* Clear the PMD. */ | ||
702 | *pmd = __pmd(0); | ||
703 | ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd)); | ||
704 | |||
705 | /* Check whether the PUD is still needed. */ | ||
706 | pmd = pmd_offset(pud, 0); | ||
707 | for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) { | ||
708 | if (!pmd_none(pmd[i])) | ||
709 | return; | ||
710 | } | ||
711 | |||
712 | /* Clear the parent PUD. */ | ||
713 | ipmmu_clear_pud(mmu, pud); | ||
714 | } | ||
715 | |||
716 | static void ipmmu_clear_pte(struct ipmmu_vmsa_device *mmu, pud_t *pud, | ||
717 | pmd_t *pmd, pte_t *pte, unsigned int num_ptes) | ||
718 | { | ||
719 | unsigned int i; | ||
720 | |||
721 | /* Clear the PTE. */ | ||
722 | for (i = num_ptes; i; --i) | ||
723 | pte[i-1] = __pte(0); | ||
724 | |||
725 | ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * num_ptes); | ||
726 | |||
727 | /* Check whether the PMD is still needed. */ | ||
728 | pte = pte_offset_kernel(pmd, 0); | ||
729 | for (i = 0; i < IPMMU_PTRS_PER_PTE; ++i) { | ||
730 | if (!pte_none(pte[i])) | ||
731 | return; | ||
732 | } | ||
733 | |||
734 | /* Clear the parent PMD. */ | ||
735 | ipmmu_clear_pmd(mmu, pud, pmd); | ||
736 | } | ||
737 | |||
738 | static int ipmmu_split_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd) | ||
739 | { | ||
740 | pte_t *pte, *start; | ||
741 | pteval_t pteval; | ||
742 | unsigned long pfn; | ||
743 | unsigned int i; | ||
744 | |||
745 | pte = (pte_t *)get_zeroed_page(GFP_ATOMIC); | ||
746 | if (!pte) | ||
747 | return -ENOMEM; | ||
748 | |||
749 | /* Copy the PMD attributes. */ | ||
750 | pteval = (pmd_val(*pmd) & ARM_VMSA_PTE_ATTRS_MASK) | ||
751 | | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_PAGE; | ||
752 | |||
753 | pfn = pmd_pfn(*pmd); | ||
754 | start = pte; | ||
755 | |||
756 | for (i = IPMMU_PTRS_PER_PTE; i; --i) | ||
757 | *pte++ = pfn_pte(pfn++, __pgprot(pteval)); | ||
758 | |||
759 | ipmmu_flush_pgtable(mmu, start, PAGE_SIZE); | ||
760 | *pmd = __pmd(__pa(start) | PMD_NSTABLE | PMD_TYPE_TABLE); | ||
761 | ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd)); | ||
762 | |||
763 | return 0; | ||
764 | } | ||
765 | |||
766 | static void ipmmu_split_pte(struct ipmmu_vmsa_device *mmu, pte_t *pte) | ||
767 | { | ||
768 | unsigned int i; | ||
769 | |||
770 | for (i = ARM_VMSA_PTE_CONT_ENTRIES; i; --i) | ||
771 | pte[i-1] = __pte(pte_val(*pte) & ~ARM_VMSA_PTE_CONT); | ||
772 | |||
773 | ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * ARM_VMSA_PTE_CONT_ENTRIES); | ||
774 | } | ||
775 | |||
776 | static int ipmmu_clear_mapping(struct ipmmu_vmsa_domain *domain, | ||
777 | unsigned long iova, size_t size) | ||
778 | { | ||
779 | struct ipmmu_vmsa_device *mmu = domain->mmu; | ||
780 | unsigned long flags; | ||
781 | pgd_t *pgd = domain->pgd; | ||
782 | pud_t *pud; | ||
783 | pmd_t *pmd; | ||
784 | pte_t *pte; | ||
785 | int ret = 0; | ||
786 | |||
787 | if (!pgd) | ||
788 | return -EINVAL; | ||
789 | |||
790 | if (size & ~PAGE_MASK) | ||
791 | return -EINVAL; | ||
792 | |||
793 | pgd += pgd_index(iova); | ||
794 | pud = (pud_t *)pgd; | ||
795 | |||
796 | spin_lock_irqsave(&domain->lock, flags); | ||
797 | |||
798 | /* If there's no PUD or PMD we're done. */ | ||
799 | if (pud_none(*pud)) | ||
800 | goto done; | ||
801 | |||
802 | pmd = pmd_offset(pud, iova); | ||
803 | if (pmd_none(*pmd)) | ||
804 | goto done; | ||
805 | |||
806 | /* | ||
807 | * When freeing a 2MB block just clear the PMD. In the unlikely case the | ||
808 | * block is mapped as individual pages this will free the corresponding | ||
809 | * PTE page table. | ||
810 | */ | ||
811 | if (size == SZ_2M) { | ||
812 | ipmmu_clear_pmd(mmu, pud, pmd); | ||
813 | goto done; | ||
814 | } | ||
815 | |||
816 | /* | ||
817 | * If the PMD has been mapped as a section remap it as pages to allow | ||
818 | * freeing individual pages. | ||
819 | */ | ||
820 | if (pmd_sect(*pmd)) | ||
821 | ipmmu_split_pmd(mmu, pmd); | ||
822 | |||
823 | pte = pte_offset_kernel(pmd, iova); | ||
824 | |||
825 | /* | ||
826 | * When freeing a 64kB block just clear the PTE entries. We don't have | ||
827 | * to care about the contiguous hint of the surrounding entries. | ||
828 | */ | ||
829 | if (size == SZ_64K) { | ||
830 | ipmmu_clear_pte(mmu, pud, pmd, pte, ARM_VMSA_PTE_CONT_ENTRIES); | ||
831 | goto done; | ||
832 | } | ||
833 | |||
834 | /* | ||
835 | * If the PTE has been mapped with the contiguous hint set remap it and | ||
836 | * its surrounding PTEs to allow unmapping a single page. | ||
837 | */ | ||
838 | if (pte_val(*pte) & ARM_VMSA_PTE_CONT) | ||
839 | ipmmu_split_pte(mmu, pte); | ||
840 | |||
841 | /* Clear the PTE. */ | ||
842 | ipmmu_clear_pte(mmu, pud, pmd, pte, 1); | ||
843 | |||
844 | done: | ||
845 | spin_unlock_irqrestore(&domain->lock, flags); | ||
846 | |||
847 | if (ret) | ||
848 | ipmmu_tlb_invalidate(domain); | ||
849 | |||
850 | return 0; | ||
851 | } | ||
852 | |||
853 | /* ----------------------------------------------------------------------------- | ||
854 | * IOMMU Operations | 457 | * IOMMU Operations |
855 | */ | 458 | */ |
856 | 459 | ||
@@ -864,12 +467,6 @@ static int ipmmu_domain_init(struct iommu_domain *io_domain) | |||
864 | 467 | ||
865 | spin_lock_init(&domain->lock); | 468 | spin_lock_init(&domain->lock); |
866 | 469 | ||
867 | domain->pgd = kzalloc(IPMMU_PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); | ||
868 | if (!domain->pgd) { | ||
869 | kfree(domain); | ||
870 | return -ENOMEM; | ||
871 | } | ||
872 | |||
873 | io_domain->priv = domain; | 470 | io_domain->priv = domain; |
874 | domain->io_domain = io_domain; | 471 | domain->io_domain = io_domain; |
875 | 472 | ||
@@ -885,7 +482,7 @@ static void ipmmu_domain_destroy(struct iommu_domain *io_domain) | |||
885 | * been detached. | 482 | * been detached. |
886 | */ | 483 | */ |
887 | ipmmu_domain_destroy_context(domain); | 484 | ipmmu_domain_destroy_context(domain); |
888 | ipmmu_free_pgtables(domain); | 485 | free_io_pgtable_ops(domain->iop); |
889 | kfree(domain); | 486 | kfree(domain); |
890 | } | 487 | } |
891 | 488 | ||
@@ -896,6 +493,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, | |||
896 | struct ipmmu_vmsa_device *mmu = archdata->mmu; | 493 | struct ipmmu_vmsa_device *mmu = archdata->mmu; |
897 | struct ipmmu_vmsa_domain *domain = io_domain->priv; | 494 | struct ipmmu_vmsa_domain *domain = io_domain->priv; |
898 | unsigned long flags; | 495 | unsigned long flags; |
496 | unsigned int i; | ||
899 | int ret = 0; | 497 | int ret = 0; |
900 | 498 | ||
901 | if (!mmu) { | 499 | if (!mmu) { |
@@ -924,7 +522,8 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, | |||
924 | if (ret < 0) | 522 | if (ret < 0) |
925 | return ret; | 523 | return ret; |
926 | 524 | ||
927 | ipmmu_utlb_enable(domain, archdata->utlb); | 525 | for (i = 0; i < archdata->num_utlbs; ++i) |
526 | ipmmu_utlb_enable(domain, archdata->utlbs[i]); | ||
928 | 527 | ||
929 | return 0; | 528 | return 0; |
930 | } | 529 | } |
@@ -934,8 +533,10 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain, | |||
934 | { | 533 | { |
935 | struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; | 534 | struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; |
936 | struct ipmmu_vmsa_domain *domain = io_domain->priv; | 535 | struct ipmmu_vmsa_domain *domain = io_domain->priv; |
536 | unsigned int i; | ||
937 | 537 | ||
938 | ipmmu_utlb_disable(domain, archdata->utlb); | 538 | for (i = 0; i < archdata->num_utlbs; ++i) |
539 | ipmmu_utlb_disable(domain, archdata->utlbs[i]); | ||
939 | 540 | ||
940 | /* | 541 | /* |
941 | * TODO: Optimize by disabling the context when no device is attached. | 542 | * TODO: Optimize by disabling the context when no device is attached. |
@@ -950,76 +551,61 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, | |||
950 | if (!domain) | 551 | if (!domain) |
951 | return -ENODEV; | 552 | return -ENODEV; |
952 | 553 | ||
953 | return ipmmu_create_mapping(domain, iova, paddr, size, prot); | 554 | return domain->iop->map(domain->iop, iova, paddr, size, prot); |
954 | } | 555 | } |
955 | 556 | ||
956 | static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, | 557 | static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, |
957 | size_t size) | 558 | size_t size) |
958 | { | 559 | { |
959 | struct ipmmu_vmsa_domain *domain = io_domain->priv; | 560 | struct ipmmu_vmsa_domain *domain = io_domain->priv; |
960 | int ret; | ||
961 | 561 | ||
962 | ret = ipmmu_clear_mapping(domain, iova, size); | 562 | return domain->iop->unmap(domain->iop, iova, size); |
963 | return ret ? 0 : size; | ||
964 | } | 563 | } |
965 | 564 | ||
966 | static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, | 565 | static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, |
967 | dma_addr_t iova) | 566 | dma_addr_t iova) |
968 | { | 567 | { |
969 | struct ipmmu_vmsa_domain *domain = io_domain->priv; | 568 | struct ipmmu_vmsa_domain *domain = io_domain->priv; |
970 | pgd_t pgd; | ||
971 | pud_t pud; | ||
972 | pmd_t pmd; | ||
973 | pte_t pte; | ||
974 | 569 | ||
975 | /* TODO: Is locking needed ? */ | 570 | /* TODO: Is locking needed ? */ |
976 | 571 | ||
977 | if (!domain->pgd) | 572 | return domain->iop->iova_to_phys(domain->iop, iova); |
978 | return 0; | 573 | } |
979 | |||
980 | pgd = *(domain->pgd + pgd_index(iova)); | ||
981 | if (pgd_none(pgd)) | ||
982 | return 0; | ||
983 | |||
984 | pud = *pud_offset(&pgd, iova); | ||
985 | if (pud_none(pud)) | ||
986 | return 0; | ||
987 | 574 | ||
988 | pmd = *pmd_offset(&pud, iova); | 575 | static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev, |
989 | if (pmd_none(pmd)) | 576 | unsigned int *utlbs, unsigned int num_utlbs) |
990 | return 0; | 577 | { |
578 | unsigned int i; | ||
991 | 579 | ||
992 | if (pmd_sect(pmd)) | 580 | for (i = 0; i < num_utlbs; ++i) { |
993 | return __pfn_to_phys(pmd_pfn(pmd)) | (iova & ~PMD_MASK); | 581 | struct of_phandle_args args; |
582 | int ret; | ||
994 | 583 | ||
995 | pte = *(pmd_page_vaddr(pmd) + pte_index(iova)); | 584 | ret = of_parse_phandle_with_args(dev->of_node, "iommus", |
996 | if (pte_none(pte)) | 585 | "#iommu-cells", i, &args); |
997 | return 0; | 586 | if (ret < 0) |
587 | return ret; | ||
998 | 588 | ||
999 | return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK); | 589 | of_node_put(args.np); |
1000 | } | ||
1001 | 590 | ||
1002 | static int ipmmu_find_utlb(struct ipmmu_vmsa_device *mmu, struct device *dev) | 591 | if (args.np != mmu->dev->of_node || args.args_count != 1) |
1003 | { | 592 | return -EINVAL; |
1004 | const struct ipmmu_vmsa_master *master = mmu->pdata->masters; | ||
1005 | const char *devname = dev_name(dev); | ||
1006 | unsigned int i; | ||
1007 | 593 | ||
1008 | for (i = 0; i < mmu->pdata->num_masters; ++i, ++master) { | 594 | utlbs[i] = args.args[0]; |
1009 | if (strcmp(master->name, devname) == 0) | ||
1010 | return master->utlb; | ||
1011 | } | 595 | } |
1012 | 596 | ||
1013 | return -1; | 597 | return 0; |
1014 | } | 598 | } |
1015 | 599 | ||
1016 | static int ipmmu_add_device(struct device *dev) | 600 | static int ipmmu_add_device(struct device *dev) |
1017 | { | 601 | { |
1018 | struct ipmmu_vmsa_archdata *archdata; | 602 | struct ipmmu_vmsa_archdata *archdata; |
1019 | struct ipmmu_vmsa_device *mmu; | 603 | struct ipmmu_vmsa_device *mmu; |
1020 | struct iommu_group *group; | 604 | struct iommu_group *group = NULL; |
1021 | int utlb = -1; | 605 | unsigned int *utlbs; |
1022 | int ret; | 606 | unsigned int i; |
607 | int num_utlbs; | ||
608 | int ret = -ENODEV; | ||
1023 | 609 | ||
1024 | if (dev->archdata.iommu) { | 610 | if (dev->archdata.iommu) { |
1025 | dev_warn(dev, "IOMMU driver already assigned to device %s\n", | 611 | dev_warn(dev, "IOMMU driver already assigned to device %s\n", |
@@ -1028,11 +614,21 @@ static int ipmmu_add_device(struct device *dev) | |||
1028 | } | 614 | } |
1029 | 615 | ||
1030 | /* Find the master corresponding to the device. */ | 616 | /* Find the master corresponding to the device. */ |
617 | |||
618 | num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus", | ||
619 | "#iommu-cells"); | ||
620 | if (num_utlbs < 0) | ||
621 | return -ENODEV; | ||
622 | |||
623 | utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL); | ||
624 | if (!utlbs) | ||
625 | return -ENOMEM; | ||
626 | |||
1031 | spin_lock(&ipmmu_devices_lock); | 627 | spin_lock(&ipmmu_devices_lock); |
1032 | 628 | ||
1033 | list_for_each_entry(mmu, &ipmmu_devices, list) { | 629 | list_for_each_entry(mmu, &ipmmu_devices, list) { |
1034 | utlb = ipmmu_find_utlb(mmu, dev); | 630 | ret = ipmmu_find_utlbs(mmu, dev, utlbs, num_utlbs); |
1035 | if (utlb >= 0) { | 631 | if (!ret) { |
1036 | /* | 632 | /* |
1037 | * TODO Take a reference to the MMU to protect | 633 | * TODO Take a reference to the MMU to protect |
1038 | * against device removal. | 634 | * against device removal. |
@@ -1043,17 +639,22 @@ static int ipmmu_add_device(struct device *dev) | |||
1043 | 639 | ||
1044 | spin_unlock(&ipmmu_devices_lock); | 640 | spin_unlock(&ipmmu_devices_lock); |
1045 | 641 | ||
1046 | if (utlb < 0) | 642 | if (ret < 0) |
1047 | return -ENODEV; | 643 | return -ENODEV; |
1048 | 644 | ||
1049 | if (utlb >= mmu->num_utlbs) | 645 | for (i = 0; i < num_utlbs; ++i) { |
1050 | return -EINVAL; | 646 | if (utlbs[i] >= mmu->num_utlbs) { |
647 | ret = -EINVAL; | ||
648 | goto error; | ||
649 | } | ||
650 | } | ||
1051 | 651 | ||
1052 | /* Create a device group and add the device to it. */ | 652 | /* Create a device group and add the device to it. */ |
1053 | group = iommu_group_alloc(); | 653 | group = iommu_group_alloc(); |
1054 | if (IS_ERR(group)) { | 654 | if (IS_ERR(group)) { |
1055 | dev_err(dev, "Failed to allocate IOMMU group\n"); | 655 | dev_err(dev, "Failed to allocate IOMMU group\n"); |
1056 | return PTR_ERR(group); | 656 | ret = PTR_ERR(group); |
657 | goto error; | ||
1057 | } | 658 | } |
1058 | 659 | ||
1059 | ret = iommu_group_add_device(group, dev); | 660 | ret = iommu_group_add_device(group, dev); |
@@ -1061,7 +662,8 @@ static int ipmmu_add_device(struct device *dev) | |||
1061 | 662 | ||
1062 | if (ret < 0) { | 663 | if (ret < 0) { |
1063 | dev_err(dev, "Failed to add device to IPMMU group\n"); | 664 | dev_err(dev, "Failed to add device to IPMMU group\n"); |
1064 | return ret; | 665 | group = NULL; |
666 | goto error; | ||
1065 | } | 667 | } |
1066 | 668 | ||
1067 | archdata = kzalloc(sizeof(*archdata), GFP_KERNEL); | 669 | archdata = kzalloc(sizeof(*archdata), GFP_KERNEL); |
@@ -1071,7 +673,8 @@ static int ipmmu_add_device(struct device *dev) | |||
1071 | } | 673 | } |
1072 | 674 | ||
1073 | archdata->mmu = mmu; | 675 | archdata->mmu = mmu; |
1074 | archdata->utlb = utlb; | 676 | archdata->utlbs = utlbs; |
677 | archdata->num_utlbs = num_utlbs; | ||
1075 | dev->archdata.iommu = archdata; | 678 | dev->archdata.iommu = archdata; |
1076 | 679 | ||
1077 | /* | 680 | /* |
@@ -1090,7 +693,8 @@ static int ipmmu_add_device(struct device *dev) | |||
1090 | SZ_1G, SZ_2G); | 693 | SZ_1G, SZ_2G); |
1091 | if (IS_ERR(mapping)) { | 694 | if (IS_ERR(mapping)) { |
1092 | dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n"); | 695 | dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n"); |
1093 | return PTR_ERR(mapping); | 696 | ret = PTR_ERR(mapping); |
697 | goto error; | ||
1094 | } | 698 | } |
1095 | 699 | ||
1096 | mmu->mapping = mapping; | 700 | mmu->mapping = mapping; |
@@ -1106,17 +710,29 @@ static int ipmmu_add_device(struct device *dev) | |||
1106 | return 0; | 710 | return 0; |
1107 | 711 | ||
1108 | error: | 712 | error: |
713 | arm_iommu_release_mapping(mmu->mapping); | ||
714 | |||
1109 | kfree(dev->archdata.iommu); | 715 | kfree(dev->archdata.iommu); |
716 | kfree(utlbs); | ||
717 | |||
1110 | dev->archdata.iommu = NULL; | 718 | dev->archdata.iommu = NULL; |
1111 | iommu_group_remove_device(dev); | 719 | |
720 | if (!IS_ERR_OR_NULL(group)) | ||
721 | iommu_group_remove_device(dev); | ||
722 | |||
1112 | return ret; | 723 | return ret; |
1113 | } | 724 | } |
1114 | 725 | ||
1115 | static void ipmmu_remove_device(struct device *dev) | 726 | static void ipmmu_remove_device(struct device *dev) |
1116 | { | 727 | { |
728 | struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; | ||
729 | |||
1117 | arm_iommu_detach_device(dev); | 730 | arm_iommu_detach_device(dev); |
1118 | iommu_group_remove_device(dev); | 731 | iommu_group_remove_device(dev); |
1119 | kfree(dev->archdata.iommu); | 732 | |
733 | kfree(archdata->utlbs); | ||
734 | kfree(archdata); | ||
735 | |||
1120 | dev->archdata.iommu = NULL; | 736 | dev->archdata.iommu = NULL; |
1121 | } | 737 | } |
1122 | 738 | ||
@@ -1131,7 +747,7 @@ static const struct iommu_ops ipmmu_ops = { | |||
1131 | .iova_to_phys = ipmmu_iova_to_phys, | 747 | .iova_to_phys = ipmmu_iova_to_phys, |
1132 | .add_device = ipmmu_add_device, | 748 | .add_device = ipmmu_add_device, |
1133 | .remove_device = ipmmu_remove_device, | 749 | .remove_device = ipmmu_remove_device, |
1134 | .pgsize_bitmap = SZ_2M | SZ_64K | SZ_4K, | 750 | .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, |
1135 | }; | 751 | }; |
1136 | 752 | ||
1137 | /* ----------------------------------------------------------------------------- | 753 | /* ----------------------------------------------------------------------------- |
@@ -1154,7 +770,7 @@ static int ipmmu_probe(struct platform_device *pdev) | |||
1154 | int irq; | 770 | int irq; |
1155 | int ret; | 771 | int ret; |
1156 | 772 | ||
1157 | if (!pdev->dev.platform_data) { | 773 | if (!IS_ENABLED(CONFIG_OF) && !pdev->dev.platform_data) { |
1158 | dev_err(&pdev->dev, "missing platform data\n"); | 774 | dev_err(&pdev->dev, "missing platform data\n"); |
1159 | return -EINVAL; | 775 | return -EINVAL; |
1160 | } | 776 | } |
@@ -1166,7 +782,6 @@ static int ipmmu_probe(struct platform_device *pdev) | |||
1166 | } | 782 | } |
1167 | 783 | ||
1168 | mmu->dev = &pdev->dev; | 784 | mmu->dev = &pdev->dev; |
1169 | mmu->pdata = pdev->dev.platform_data; | ||
1170 | mmu->num_utlbs = 32; | 785 | mmu->num_utlbs = 32; |
1171 | 786 | ||
1172 | /* Map I/O memory and request IRQ. */ | 787 | /* Map I/O memory and request IRQ. */ |
@@ -1175,6 +790,20 @@ static int ipmmu_probe(struct platform_device *pdev) | |||
1175 | if (IS_ERR(mmu->base)) | 790 | if (IS_ERR(mmu->base)) |
1176 | return PTR_ERR(mmu->base); | 791 | return PTR_ERR(mmu->base); |
1177 | 792 | ||
793 | /* | ||
794 | * The IPMMU has two register banks, for secure and non-secure modes. | ||
795 | * The bank mapped at the beginning of the IPMMU address space | ||
796 | * corresponds to the running mode of the CPU. When running in secure | ||
797 | * mode the non-secure register bank is also available at an offset. | ||
798 | * | ||
799 | * Secure mode operation isn't clearly documented and is thus currently | ||
800 | * not implemented in the driver. Furthermore, preliminary tests of | ||
801 | * non-secure operation with the main register bank were not successful. | ||
802 | * Offset the register base unconditionally to point to the non-secure | ||
803 | * alias space for now. | ||
804 | */ | ||
805 | mmu->base += IM_NS_ALIAS_OFFSET; | ||
806 | |||
1178 | irq = platform_get_irq(pdev, 0); | 807 | irq = platform_get_irq(pdev, 0); |
1179 | if (irq < 0) { | 808 | if (irq < 0) { |
1180 | dev_err(&pdev->dev, "no IRQ found\n"); | 809 | dev_err(&pdev->dev, "no IRQ found\n"); |
@@ -1220,9 +849,14 @@ static int ipmmu_remove(struct platform_device *pdev) | |||
1220 | return 0; | 849 | return 0; |
1221 | } | 850 | } |
1222 | 851 | ||
852 | static const struct of_device_id ipmmu_of_ids[] = { | ||
853 | { .compatible = "renesas,ipmmu-vmsa", }, | ||
854 | }; | ||
855 | |||
1223 | static struct platform_driver ipmmu_driver = { | 856 | static struct platform_driver ipmmu_driver = { |
1224 | .driver = { | 857 | .driver = { |
1225 | .name = "ipmmu-vmsa", | 858 | .name = "ipmmu-vmsa", |
859 | .of_match_table = of_match_ptr(ipmmu_of_ids), | ||
1226 | }, | 860 | }, |
1227 | .probe = ipmmu_probe, | 861 | .probe = ipmmu_probe, |
1228 | .remove = ipmmu_remove, | 862 | .remove = ipmmu_remove, |
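The ipmmu-vmsa changes above replace the platform-data name table with generic OF parsing of multi-master "iommus" properties. A minimal sketch of that lookup pattern, with illustrative names (example_collect_utlbs is not a driver function):

#include <linux/of.h>
#include <linux/slab.h>

/* Collect the one-cell uTLB index from every "iommus" phandle on a
 * master node, validating that each specifier targets @iommu_np. */
static int example_collect_utlbs(struct device_node *np,
				 struct device_node *iommu_np,
				 unsigned int **utlbs_out)
{
	struct of_phandle_args args;
	unsigned int *utlbs;
	int count, i, ret;

	count = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
	if (count < 0)
		return count;

	utlbs = kcalloc(count, sizeof(*utlbs), GFP_KERNEL);
	if (!utlbs)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
						 i, &args);
		if (ret < 0)
			goto err;

		of_node_put(args.np);

		if (args.np != iommu_np || args.args_count != 1) {
			ret = -EINVAL;
			goto err;
		}

		utlbs[i] = args.args[0];
	}

	*utlbs_out = utlbs;
	return count;

err:
	kfree(utlbs);
	return ret;
}

This mirrors the binding documented earlier: "#iommu-cells" is 1, so each specifier carries exactly one cell naming a micro-TLB.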
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h index fde250f86e60..a2b750110bd1 100644 --- a/drivers/iommu/irq_remapping.h +++ b/drivers/iommu/irq_remapping.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2012 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <jroedel@suse.de> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 as published | 6 | * under the terms of the GNU General Public License version 2 as published |
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index f722a0c466cf..c48da057dbb1 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c | |||
@@ -315,6 +315,7 @@ static const struct iommu_ops gart_iommu_ops = { | |||
315 | .attach_dev = gart_iommu_attach_dev, | 315 | .attach_dev = gart_iommu_attach_dev, |
316 | .detach_dev = gart_iommu_detach_dev, | 316 | .detach_dev = gart_iommu_detach_dev, |
317 | .map = gart_iommu_map, | 317 | .map = gart_iommu_map, |
318 | .map_sg = default_iommu_map_sg, | ||
318 | .unmap = gart_iommu_unmap, | 319 | .unmap = gart_iommu_unmap, |
319 | .iova_to_phys = gart_iommu_iova_to_phys, | 320 | .iova_to_phys = gart_iommu_iova_to_phys, |
320 | .pgsize_bitmap = GART_IOMMU_PGSIZES, | 321 | .pgsize_bitmap = GART_IOMMU_PGSIZES, |
@@ -395,7 +396,7 @@ static int tegra_gart_probe(struct platform_device *pdev) | |||
395 | do_gart_setup(gart, NULL); | 396 | do_gart_setup(gart, NULL); |
396 | 397 | ||
397 | gart_handle = gart; | 398 | gart_handle = gart; |
398 | bus_set_iommu(&platform_bus_type, &gart_iommu_ops); | 399 | |
399 | return 0; | 400 | return 0; |
400 | } | 401 | } |
401 | 402 | ||
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 21b156242e42..c1c010498a21 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c | |||
@@ -683,7 +683,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev, | |||
683 | cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); | 683 | cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); |
684 | if (!cmd) { | 684 | if (!cmd) { |
685 | DMERR("could not allocate metadata struct"); | 685 | DMERR("could not allocate metadata struct"); |
686 | return NULL; | 686 | return ERR_PTR(-ENOMEM); |
687 | } | 687 | } |
688 | 688 | ||
689 | atomic_set(&cmd->ref_count, 1); | 689 | atomic_set(&cmd->ref_count, 1); |
@@ -745,7 +745,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev, | |||
745 | return cmd; | 745 | return cmd; |
746 | 746 | ||
747 | cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size); | 747 | cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size); |
748 | if (cmd) { | 748 | if (!IS_ERR(cmd)) { |
749 | mutex_lock(&table_lock); | 749 | mutex_lock(&table_lock); |
750 | cmd2 = lookup(bdev); | 750 | cmd2 = lookup(bdev); |
751 | if (cmd2) { | 751 | if (cmd2) { |
@@ -780,9 +780,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, | |||
780 | { | 780 | { |
781 | struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, | 781 | struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, |
782 | may_format_device, policy_hint_size); | 782 | may_format_device, policy_hint_size); |
783 | if (cmd && !same_params(cmd, data_block_size)) { | 783 | |
784 | if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) { | ||
784 | dm_cache_metadata_close(cmd); | 785 | dm_cache_metadata_close(cmd); |
785 | return NULL; | 786 | return ERR_PTR(-EINVAL); |
786 | } | 787 | } |
787 | 788 | ||
788 | return cmd; | 789 | return cmd; |
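The dm-cache-metadata hunks are one coordinated conversion: every failure path now returns ERR_PTR() instead of a bare NULL, and every caller tests IS_ERR() instead of NULL, so -ENOMEM and -EINVAL stay distinguishable. A minimal sketch of the idiom, using a hypothetical widget type:

#include <linux/err.h>
#include <linux/slab.h>

struct widget { int id; };

/* Encode the failure reason in the returned pointer. */
static struct widget *widget_open(int id)
{
	struct widget *w;

	if (id < 0)
		return ERR_PTR(-EINVAL);

	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w)
		return ERR_PTR(-ENOMEM);

	w->id = id;
	return w;
}

static int widget_user(int id)
{
	struct widget *w = widget_open(id);

	/* IS_ERR() is the only valid test; a NULL check would treat an
	 * ERR_PTR() value as a usable pointer. */
	if (IS_ERR(w))
		return PTR_ERR(w);

	kfree(w);
	return 0;
}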
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 493478989dbd..07705ee181e3 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -3385,6 +3385,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv) | |||
3385 | struct pool_c *pt = ti->private; | 3385 | struct pool_c *pt = ti->private; |
3386 | struct pool *pool = pt->pool; | 3386 | struct pool *pool = pt->pool; |
3387 | 3387 | ||
3388 | if (get_pool_mode(pool) >= PM_READ_ONLY) { | ||
3389 | DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", | ||
3390 | dm_device_name(pool->pool_md)); | ||
3391 | return -EINVAL; | ||
3392 | } | ||
3393 | |||
3388 | if (!strcasecmp(argv[0], "create_thin")) | 3394 | if (!strcasecmp(argv[0], "create_thin")) |
3389 | r = process_create_thin_mesg(argc, argv, pool); | 3395 | r = process_create_thin_mesg(argc, argv, pool); |
3390 | 3396 | ||
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index f94a9fa60488..c672c4dcffac 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c | |||
@@ -615,6 +615,9 @@ static void c_can_stop(struct net_device *dev) | |||
615 | 615 | ||
616 | c_can_irq_control(priv, false); | 616 | c_can_irq_control(priv, false); |
617 | 617 | ||
618 | /* put the controller into init mode on stop to end any ongoing transmission */ | ||
619 | priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT); | ||
620 | |||
618 | /* deactivate pins */ | 621 | /* deactivate pins */ |
619 | pinctrl_pm_select_sleep_state(dev->dev.parent); | 622 | pinctrl_pm_select_sleep_state(dev->dev.parent); |
620 | priv->can.state = CAN_STATE_STOPPED; | 623 | priv->can.state = CAN_STATE_STOPPED; |
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index c32cd61073bc..7af379ca861b 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c | |||
@@ -587,7 +587,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, | |||
587 | usb_sndbulkpipe(dev->udev, | 587 | usb_sndbulkpipe(dev->udev, |
588 | dev->bulk_out->bEndpointAddress), | 588 | dev->bulk_out->bEndpointAddress), |
589 | buf, msg->len, | 589 | buf, msg->len, |
590 | kvaser_usb_simple_msg_callback, priv); | 590 | kvaser_usb_simple_msg_callback, netdev); |
591 | usb_anchor_urb(urb, &priv->tx_submitted); | 591 | usb_anchor_urb(urb, &priv->tx_submitted); |
592 | 592 | ||
593 | err = usb_submit_urb(urb, GFP_ATOMIC); | 593 | err = usb_submit_urb(urb, GFP_ATOMIC); |
@@ -662,11 +662,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, | |||
662 | priv = dev->nets[channel]; | 662 | priv = dev->nets[channel]; |
663 | stats = &priv->netdev->stats; | 663 | stats = &priv->netdev->stats; |
664 | 664 | ||
665 | if (status & M16C_STATE_BUS_RESET) { | ||
666 | kvaser_usb_unlink_tx_urbs(priv); | ||
667 | return; | ||
668 | } | ||
669 | |||
670 | skb = alloc_can_err_skb(priv->netdev, &cf); | 665 | skb = alloc_can_err_skb(priv->netdev, &cf); |
671 | if (!skb) { | 666 | if (!skb) { |
672 | stats->rx_dropped++; | 667 | stats->rx_dropped++; |
@@ -677,7 +672,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, | |||
677 | 672 | ||
678 | netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status); | 673 | netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status); |
679 | 674 | ||
680 | if (status & M16C_STATE_BUS_OFF) { | 675 | if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { |
681 | cf->can_id |= CAN_ERR_BUSOFF; | 676 | cf->can_id |= CAN_ERR_BUSOFF; |
682 | 677 | ||
683 | priv->can.can_stats.bus_off++; | 678 | priv->can.can_stats.bus_off++; |
@@ -703,9 +698,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, | |||
703 | } | 698 | } |
704 | 699 | ||
705 | new_state = CAN_STATE_ERROR_PASSIVE; | 700 | new_state = CAN_STATE_ERROR_PASSIVE; |
706 | } | 701 | } else if (status & M16C_STATE_BUS_ERROR) { |
707 | |||
708 | if (status == M16C_STATE_BUS_ERROR) { | ||
709 | if ((priv->can.state < CAN_STATE_ERROR_WARNING) && | 702 | if ((priv->can.state < CAN_STATE_ERROR_WARNING) && |
710 | ((txerr >= 96) || (rxerr >= 96))) { | 703 | ((txerr >= 96) || (rxerr >= 96))) { |
711 | cf->can_id |= CAN_ERR_CRTL; | 704 | cf->can_id |= CAN_ERR_CRTL; |
@@ -715,7 +708,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, | |||
715 | 708 | ||
716 | priv->can.can_stats.error_warning++; | 709 | priv->can.can_stats.error_warning++; |
717 | new_state = CAN_STATE_ERROR_WARNING; | 710 | new_state = CAN_STATE_ERROR_WARNING; |
718 | } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) { | 711 | } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) && |
712 | ((txerr < 96) && (rxerr < 96))) { | ||
719 | cf->can_id |= CAN_ERR_PROT; | 713 | cf->can_id |= CAN_ERR_PROT; |
720 | cf->data[2] = CAN_ERR_PROT_ACTIVE; | 714 | cf->data[2] = CAN_ERR_PROT_ACTIVE; |
721 | 715 | ||
@@ -1590,7 +1584,7 @@ static int kvaser_usb_probe(struct usb_interface *intf, | |||
1590 | { | 1584 | { |
1591 | struct kvaser_usb *dev; | 1585 | struct kvaser_usb *dev; |
1592 | int err = -ENOMEM; | 1586 | int err = -ENOMEM; |
1593 | int i; | 1587 | int i, retry = 3; |
1594 | 1588 | ||
1595 | dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); | 1589 | dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); |
1596 | if (!dev) | 1590 | if (!dev) |
@@ -1608,7 +1602,15 @@ static int kvaser_usb_probe(struct usb_interface *intf, | |||
1608 | 1602 | ||
1609 | usb_set_intfdata(intf, dev); | 1603 | usb_set_intfdata(intf, dev); |
1610 | 1604 | ||
1611 | err = kvaser_usb_get_software_info(dev); | 1605 | /* On some x86 laptops, plugging a Kvaser device again after |
1606 | * an unplug makes the firmware always ignore the very first | ||
1607 | * command. For such a case, provide some room for retries | ||
1608 | * instead of completely exiting the driver. | ||
1609 | */ | ||
1610 | do { | ||
1611 | err = kvaser_usb_get_software_info(dev); | ||
1612 | } while (--retry && err == -ETIMEDOUT); | ||
1613 | |||
1612 | if (err) { | 1614 | if (err) { |
1613 | dev_err(&intf->dev, | 1615 | dev_err(&intf->dev, |
1614 | "Cannot get software infos, error %d\n", err); | 1616 | "Cannot get software infos, error %d\n", err); |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 75b08c63d39f..29a09271b64a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h | |||
@@ -767,16 +767,17 @@ | |||
767 | #define MTL_Q_RQOMR 0x40 | 767 | #define MTL_Q_RQOMR 0x40 |
768 | #define MTL_Q_RQMPOCR 0x44 | 768 | #define MTL_Q_RQMPOCR 0x44 |
769 | #define MTL_Q_RQDR 0x4c | 769 | #define MTL_Q_RQDR 0x4c |
770 | #define MTL_Q_RQFCR 0x50 | ||
770 | #define MTL_Q_IER 0x70 | 771 | #define MTL_Q_IER 0x70 |
771 | #define MTL_Q_ISR 0x74 | 772 | #define MTL_Q_ISR 0x74 |
772 | 773 | ||
773 | /* MTL queue register entry bit positions and sizes */ | 774 | /* MTL queue register entry bit positions and sizes */ |
775 | #define MTL_Q_RQFCR_RFA_INDEX 1 | ||
776 | #define MTL_Q_RQFCR_RFA_WIDTH 6 | ||
777 | #define MTL_Q_RQFCR_RFD_INDEX 17 | ||
778 | #define MTL_Q_RQFCR_RFD_WIDTH 6 | ||
774 | #define MTL_Q_RQOMR_EHFC_INDEX 7 | 779 | #define MTL_Q_RQOMR_EHFC_INDEX 7 |
775 | #define MTL_Q_RQOMR_EHFC_WIDTH 1 | 780 | #define MTL_Q_RQOMR_EHFC_WIDTH 1 |
776 | #define MTL_Q_RQOMR_RFA_INDEX 8 | ||
777 | #define MTL_Q_RQOMR_RFA_WIDTH 3 | ||
778 | #define MTL_Q_RQOMR_RFD_INDEX 13 | ||
779 | #define MTL_Q_RQOMR_RFD_WIDTH 3 | ||
780 | #define MTL_Q_RQOMR_RQS_INDEX 16 | 781 | #define MTL_Q_RQOMR_RQS_INDEX 16 |
781 | #define MTL_Q_RQOMR_RQS_WIDTH 9 | 782 | #define MTL_Q_RQOMR_RQS_WIDTH 9 |
782 | #define MTL_Q_RQOMR_RSF_INDEX 5 | 783 | #define MTL_Q_RQOMR_RSF_INDEX 5 |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 53f5f66ec2ee..4c66cd1d1e60 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c | |||
@@ -2079,10 +2079,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) | |||
2079 | 2079 | ||
2080 | for (i = 0; i < pdata->rx_q_count; i++) { | 2080 | for (i = 0; i < pdata->rx_q_count; i++) { |
2081 | /* Activate flow control when less than 4k left in fifo */ | 2081 | /* Activate flow control when less than 4k left in fifo */ |
2082 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2); | 2082 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2); |
2083 | 2083 | ||
2084 | /* De-activate flow control when more than 6k left in fifo */ | 2084 | /* De-activate flow control when more than 6k left in fifo */ |
2085 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4); | 2085 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4); |
2086 | } | 2086 | } |
2087 | } | 2087 | } |
2088 | 2088 | ||
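The xgbe move of RFA/RFD into the new MTL_Q_RQFCR register only needs the register name changed at the call sites because field access goes through INDEX/WIDTH pairs rather than hand-written masks, so widening the fields from 3 to 6 bits comes for free. A sketch of the read-modify-write such pairs imply (set_field_bits is illustrative, not the driver's macro):

#include <linux/bits.h>
#include <linux/types.h>

/* Replace the (index, width) field inside a register value. */
static inline u32 set_field_bits(u32 reg, unsigned int index,
				 unsigned int width, u32 val)
{
	u32 mask = GENMASK(index + width - 1, index);

	return (reg & ~mask) | ((val << index) & mask);
}

With RFA at index 1 and width 6, set_field_bits(reg, 1, 6, 2) programs the "activate flow control below 4k" threshold written in the xgbe-dev.c hunk above.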
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 1d1147c93d59..e468ed3f210f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) | |||
3175 | } | 3175 | } |
3176 | #endif | 3176 | #endif |
3177 | if (!bnx2x_fp_lock_napi(fp)) | 3177 | if (!bnx2x_fp_lock_napi(fp)) |
3178 | return work_done; | 3178 | return budget; |
3179 | 3179 | ||
3180 | for_each_cos_in_tx_queue(fp, cos) | 3180 | for_each_cos_in_tx_queue(fp, cos) |
3181 | if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) | 3181 | if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) |
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index b29e027c476e..e356afa44e7d 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c | |||
@@ -1335,7 +1335,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) | |||
1335 | int err; | 1335 | int err; |
1336 | 1336 | ||
1337 | if (!enic_poll_lock_napi(&enic->rq[rq])) | 1337 | if (!enic_poll_lock_napi(&enic->rq[rq])) |
1338 | return work_done; | 1338 | return budget; |
1339 | /* Service RQ | 1339 | /* Service RQ |
1340 | */ | 1340 | */ |
1341 | 1341 | ||
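The bnx2x and enic one-liners (and the netxen change further down) all restore the same NAPI contract: a poll handler that could not actually run must return the full budget so the core keeps it scheduled; returning a partial work_done without calling napi_complete() can leave the queue stalled. The required shape, with hypothetical queue helpers:

#include <linux/netdevice.h>

struct example_queue {
	struct napi_struct napi;
	/* ... */
};

/* Stand-ins for a driver's busy-poll lock and RX processing. */
bool example_trylock(struct example_queue *q);
void example_unlock(struct example_queue *q);
int example_process_rx(struct example_queue *q, int budget);
void example_enable_irq(struct example_queue *q);

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_queue *q = container_of(napi, struct example_queue,
					       napi);
	int work_done;

	/* Could not run: claim the whole budget so NAPI polls again. */
	if (!example_trylock(q))
		return budget;

	work_done = example_process_rx(q, budget);
	if (work_done < budget) {
		napi_complete(napi);
		example_enable_irq(q);
	}

	example_unlock(q);
	return work_done;
}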
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index a62fc38f045e..1c75829eb166 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
@@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4"; | |||
192 | #define IS_TSO_HEADER(txq, addr) \ | 192 | #define IS_TSO_HEADER(txq, addr) \ |
193 | ((addr >= txq->tso_hdrs_dma) && \ | 193 | ((addr >= txq->tso_hdrs_dma) && \ |
194 | (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) | 194 | (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) |
195 | |||
196 | #define DESC_DMA_MAP_SINGLE 0 | ||
197 | #define DESC_DMA_MAP_PAGE 1 | ||
198 | |||
195 | /* | 199 | /* |
196 | * RX/TX descriptors. | 200 | * RX/TX descriptors. |
197 | */ | 201 | */ |
@@ -362,6 +366,7 @@ struct tx_queue { | |||
362 | dma_addr_t tso_hdrs_dma; | 366 | dma_addr_t tso_hdrs_dma; |
363 | 367 | ||
364 | struct tx_desc *tx_desc_area; | 368 | struct tx_desc *tx_desc_area; |
369 | char *tx_desc_mapping; /* array to track the type of the dma mapping */ | ||
365 | dma_addr_t tx_desc_dma; | 370 | dma_addr_t tx_desc_dma; |
366 | int tx_desc_area_size; | 371 | int tx_desc_area_size; |
367 | 372 | ||
@@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, | |||
750 | if (txq->tx_curr_desc == txq->tx_ring_size) | 755 | if (txq->tx_curr_desc == txq->tx_ring_size) |
751 | txq->tx_curr_desc = 0; | 756 | txq->tx_curr_desc = 0; |
752 | desc = &txq->tx_desc_area[tx_index]; | 757 | desc = &txq->tx_desc_area[tx_index]; |
758 | txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; | ||
753 | 759 | ||
754 | desc->l4i_chk = 0; | 760 | desc->l4i_chk = 0; |
755 | desc->byte_cnt = length; | 761 | desc->byte_cnt = length; |
@@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) | |||
879 | skb_frag_t *this_frag; | 885 | skb_frag_t *this_frag; |
880 | int tx_index; | 886 | int tx_index; |
881 | struct tx_desc *desc; | 887 | struct tx_desc *desc; |
882 | void *addr; | ||
883 | 888 | ||
884 | this_frag = &skb_shinfo(skb)->frags[frag]; | 889 | this_frag = &skb_shinfo(skb)->frags[frag]; |
885 | addr = page_address(this_frag->page.p) + this_frag->page_offset; | ||
886 | tx_index = txq->tx_curr_desc++; | 890 | tx_index = txq->tx_curr_desc++; |
887 | if (txq->tx_curr_desc == txq->tx_ring_size) | 891 | if (txq->tx_curr_desc == txq->tx_ring_size) |
888 | txq->tx_curr_desc = 0; | 892 | txq->tx_curr_desc = 0; |
889 | desc = &txq->tx_desc_area[tx_index]; | 893 | desc = &txq->tx_desc_area[tx_index]; |
894 | txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE; | ||
890 | 895 | ||
891 | /* | 896 | /* |
892 | * The last fragment will generate an interrupt | 897 | * The last fragment will generate an interrupt |
@@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) | |||
902 | 907 | ||
903 | desc->l4i_chk = 0; | 908 | desc->l4i_chk = 0; |
904 | desc->byte_cnt = skb_frag_size(this_frag); | 909 | desc->byte_cnt = skb_frag_size(this_frag); |
905 | desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr, | 910 | desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, |
906 | desc->byte_cnt, DMA_TO_DEVICE); | 911 | this_frag, 0, desc->byte_cnt, |
912 | DMA_TO_DEVICE); | ||
907 | } | 913 | } |
908 | } | 914 | } |
909 | 915 | ||
@@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb, | |||
936 | if (txq->tx_curr_desc == txq->tx_ring_size) | 942 | if (txq->tx_curr_desc == txq->tx_ring_size) |
937 | txq->tx_curr_desc = 0; | 943 | txq->tx_curr_desc = 0; |
938 | desc = &txq->tx_desc_area[tx_index]; | 944 | desc = &txq->tx_desc_area[tx_index]; |
945 | txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; | ||
939 | 946 | ||
940 | if (nr_frags) { | 947 | if (nr_frags) { |
941 | txq_submit_frag_skb(txq, skb); | 948 | txq_submit_frag_skb(txq, skb); |
@@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) | |||
1047 | int tx_index; | 1054 | int tx_index; |
1048 | struct tx_desc *desc; | 1055 | struct tx_desc *desc; |
1049 | u32 cmd_sts; | 1056 | u32 cmd_sts; |
1057 | char desc_dma_map; | ||
1050 | 1058 | ||
1051 | tx_index = txq->tx_used_desc; | 1059 | tx_index = txq->tx_used_desc; |
1052 | desc = &txq->tx_desc_area[tx_index]; | 1060 | desc = &txq->tx_desc_area[tx_index]; |
1061 | desc_dma_map = txq->tx_desc_mapping[tx_index]; | ||
1062 | |||
1053 | cmd_sts = desc->cmd_sts; | 1063 | cmd_sts = desc->cmd_sts; |
1054 | 1064 | ||
1055 | if (cmd_sts & BUFFER_OWNED_BY_DMA) { | 1065 | if (cmd_sts & BUFFER_OWNED_BY_DMA) { |
@@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) | |||
1065 | reclaimed++; | 1075 | reclaimed++; |
1066 | txq->tx_desc_count--; | 1076 | txq->tx_desc_count--; |
1067 | 1077 | ||
1068 | if (!IS_TSO_HEADER(txq, desc->buf_ptr)) | 1078 | if (!IS_TSO_HEADER(txq, desc->buf_ptr)) { |
1069 | dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, | 1079 | |
1070 | desc->byte_cnt, DMA_TO_DEVICE); | 1080 | if (desc_dma_map == DESC_DMA_MAP_PAGE) |
1081 | dma_unmap_page(mp->dev->dev.parent, | ||
1082 | desc->buf_ptr, | ||
1083 | desc->byte_cnt, | ||
1084 | DMA_TO_DEVICE); | ||
1085 | else | ||
1086 | dma_unmap_single(mp->dev->dev.parent, | ||
1087 | desc->buf_ptr, | ||
1088 | desc->byte_cnt, | ||
1089 | DMA_TO_DEVICE); | ||
1090 | } | ||
1071 | 1091 | ||
1072 | if (cmd_sts & TX_ENABLE_INTERRUPT) { | 1092 | if (cmd_sts & TX_ENABLE_INTERRUPT) { |
1073 | struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); | 1093 | struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); |
@@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index) | |||
1996 | struct tx_queue *txq = mp->txq + index; | 2016 | struct tx_queue *txq = mp->txq + index; |
1997 | struct tx_desc *tx_desc; | 2017 | struct tx_desc *tx_desc; |
1998 | int size; | 2018 | int size; |
2019 | int ret; | ||
1999 | int i; | 2020 | int i; |
2000 | 2021 | ||
2001 | txq->index = index; | 2022 | txq->index = index; |
@@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index) | |||
2048 | nexti * sizeof(struct tx_desc); | 2069 | nexti * sizeof(struct tx_desc); |
2049 | } | 2070 | } |
2050 | 2071 | ||
2072 | txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char), | ||
2073 | GFP_KERNEL); | ||
2074 | if (!txq->tx_desc_mapping) { | ||
2075 | ret = -ENOMEM; | ||
2076 | goto err_free_desc_area; | ||
2077 | } | ||
2078 | |||
2051 | /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ | 2079 | /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ |
2052 | txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, | 2080 | txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, |
2053 | txq->tx_ring_size * TSO_HEADER_SIZE, | 2081 | txq->tx_ring_size * TSO_HEADER_SIZE, |
2054 | &txq->tso_hdrs_dma, GFP_KERNEL); | 2082 | &txq->tso_hdrs_dma, GFP_KERNEL); |
2055 | if (txq->tso_hdrs == NULL) { | 2083 | if (txq->tso_hdrs == NULL) { |
2056 | dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, | 2084 | ret = -ENOMEM; |
2057 | txq->tx_desc_area, txq->tx_desc_dma); | 2085 | goto err_free_desc_mapping; |
2058 | return -ENOMEM; | ||
2059 | } | 2086 | } |
2060 | skb_queue_head_init(&txq->tx_skb); | 2087 | skb_queue_head_init(&txq->tx_skb); |
2061 | 2088 | ||
2062 | return 0; | 2089 | return 0; |
2090 | |||
2091 | err_free_desc_mapping: | ||
2092 | kfree(txq->tx_desc_mapping); | ||
2093 | err_free_desc_area: | ||
2094 | if (index == 0 && size <= mp->tx_desc_sram_size) | ||
2095 | iounmap(txq->tx_desc_area); | ||
2096 | else | ||
2097 | dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, | ||
2098 | txq->tx_desc_area, txq->tx_desc_dma); | ||
2099 | return ret; | ||
2063 | } | 2100 | } |
2064 | 2101 | ||
2065 | static void txq_deinit(struct tx_queue *txq) | 2102 | static void txq_deinit(struct tx_queue *txq) |
@@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq) | |||
2077 | else | 2114 | else |
2078 | dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, | 2115 | dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, |
2079 | txq->tx_desc_area, txq->tx_desc_dma); | 2116 | txq->tx_desc_area, txq->tx_desc_dma); |
2117 | kfree(txq->tx_desc_mapping); | ||
2118 | |||
2080 | if (txq->tso_hdrs) | 2119 | if (txq->tso_hdrs) |
2081 | dma_free_coherent(mp->dev->dev.parent, | 2120 | dma_free_coherent(mp->dev->dev.parent, |
2082 | txq->tx_ring_size * TSO_HEADER_SIZE, | 2121 | txq->tx_ring_size * TSO_HEADER_SIZE, |
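The mv643xx_eth series exists because fragments are now mapped with skb_frag_dma_map() (a page mapping) while the linear part still uses dma_map_single(), and the DMA API requires the unmap call to match the map call. Recording one byte of mapping type per descriptor, reduced to a sketch with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

#define EXAMPLE_MAP_SINGLE	0
#define EXAMPLE_MAP_PAGE	1

/* Map linear data or a fragment, recording which call was used. */
static dma_addr_t example_map(struct device *dev, struct sk_buff *skb,
			      const skb_frag_t *frag, char *type)
{
	if (frag) {
		*type = EXAMPLE_MAP_PAGE;
		return skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
					DMA_TO_DEVICE);
	}
	*type = EXAMPLE_MAP_SINGLE;
	return dma_map_single(dev, skb->data, skb_headlen(skb),
			      DMA_TO_DEVICE);
}

/* Reclaim must mirror the original call, or DMA API debugging (and some
 * IOMMU configurations) will flag the mismatch. */
static void example_unmap(struct device *dev, dma_addr_t addr, size_t len,
			  char type)
{
	if (type == EXAMPLE_MAP_PAGE)
		dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
	else
		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
}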
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 613037584d08..c531c8ae1be4 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | |||
@@ -2388,7 +2388,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget) | |||
2388 | 2388 | ||
2389 | work_done = netxen_process_rcv_ring(sds_ring, budget); | 2389 | work_done = netxen_process_rcv_ring(sds_ring, budget); |
2390 | 2390 | ||
2391 | if ((work_done < budget) && tx_complete) { | 2391 | if (!tx_complete) |
2392 | work_done = budget; | ||
2393 | |||
2394 | if (work_done < budget) { | ||
2392 | napi_complete(&sds_ring->napi); | 2395 | napi_complete(&sds_ring->napi); |
2393 | if (test_bit(__NX_DEV_UP, &adapter->state)) | 2396 | if (test_bit(__NX_DEV_UP, &adapter->state)) |
2394 | netxen_nic_enable_int(sds_ring); | 2397 | netxen_nic_enable_int(sds_ring); |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 6576243222af..04283fe0e6a7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { | |||
396 | [TSU_ADRL31] = 0x01fc, | 396 | [TSU_ADRL31] = 0x01fc, |
397 | }; | 397 | }; |
398 | 398 | ||
399 | static void sh_eth_rcv_snd_disable(struct net_device *ndev); | ||
400 | static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); | ||
401 | |||
399 | static bool sh_eth_is_gether(struct sh_eth_private *mdp) | 402 | static bool sh_eth_is_gether(struct sh_eth_private *mdp) |
400 | { | 403 | { |
401 | return mdp->reg_offset == sh_eth_offset_gigabit; | 404 | return mdp->reg_offset == sh_eth_offset_gigabit; |
@@ -1120,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
1120 | int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; | 1123 | int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; |
1121 | int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; | 1124 | int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; |
1122 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; | 1125 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; |
1126 | dma_addr_t dma_addr; | ||
1123 | 1127 | ||
1124 | mdp->cur_rx = 0; | 1128 | mdp->cur_rx = 0; |
1125 | mdp->cur_tx = 0; | 1129 | mdp->cur_tx = 0; |
@@ -1133,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
1133 | /* skb */ | 1137 | /* skb */ |
1134 | mdp->rx_skbuff[i] = NULL; | 1138 | mdp->rx_skbuff[i] = NULL; |
1135 | skb = netdev_alloc_skb(ndev, skbuff_size); | 1139 | skb = netdev_alloc_skb(ndev, skbuff_size); |
1136 | mdp->rx_skbuff[i] = skb; | ||
1137 | if (skb == NULL) | 1140 | if (skb == NULL) |
1138 | break; | 1141 | break; |
1139 | sh_eth_set_receive_align(skb); | 1142 | sh_eth_set_receive_align(skb); |
@@ -1142,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
1142 | rxdesc = &mdp->rx_ring[i]; | 1145 | rxdesc = &mdp->rx_ring[i]; |
1143 | /* The size of the buffer is a multiple of 16 bytes. */ | 1146 | /* The size of the buffer is a multiple of 16 bytes. */ |
1144 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); | 1147 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); |
1145 | dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length, | 1148 | dma_addr = dma_map_single(&ndev->dev, skb->data, |
1146 | DMA_FROM_DEVICE); | 1149 | rxdesc->buffer_length, |
1147 | rxdesc->addr = virt_to_phys(skb->data); | 1150 | DMA_FROM_DEVICE); |
1151 | if (dma_mapping_error(&ndev->dev, dma_addr)) { | ||
1152 | kfree_skb(skb); | ||
1153 | break; | ||
1154 | } | ||
1155 | mdp->rx_skbuff[i] = skb; | ||
1156 | rxdesc->addr = dma_addr; | ||
1148 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); | 1157 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); |
1149 | 1158 | ||
1150 | /* Rx descriptor address set */ | 1159 | /* Rx descriptor address set */ |
@@ -1316,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) | |||
1316 | RFLR); | 1325 | RFLR); |
1317 | 1326 | ||
1318 | sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); | 1327 | sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); |
1319 | if (start) | 1328 | if (start) { |
1329 | mdp->irq_enabled = true; | ||
1320 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); | 1330 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); |
1331 | } | ||
1321 | 1332 | ||
1322 | /* PAUSE Prohibition */ | 1333 | /* PAUSE Prohibition */ |
1323 | val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | | 1334 | val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | |
@@ -1356,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) | |||
1356 | return ret; | 1367 | return ret; |
1357 | } | 1368 | } |
1358 | 1369 | ||
1370 | static void sh_eth_dev_exit(struct net_device *ndev) | ||
1371 | { | ||
1372 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
1373 | int i; | ||
1374 | |||
1375 | /* Deactivate all TX descriptors, so DMA should stop at next | ||
1376 | * packet boundary if it's currently running | ||
1377 | */ | ||
1378 | for (i = 0; i < mdp->num_tx_ring; i++) | ||
1379 | mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT); | ||
1380 | |||
1381 | /* Disable TX FIFO egress to MAC */ | ||
1382 | sh_eth_rcv_snd_disable(ndev); | ||
1383 | |||
1384 | /* Stop RX DMA at next packet boundary */ | ||
1385 | sh_eth_write(ndev, 0, EDRRR); | ||
1386 | |||
1387 | /* Aside from TX DMA, we can't tell when the hardware is | ||
1388 | * really stopped, so we need to reset to make sure. | ||
1389 | * Before doing that, wait for long enough to *probably* | ||
1390 | * finish transmitting the last packet and poll stats. | ||
1391 | */ | ||
1392 | msleep(2); /* max frame time at 10 Mbps < 1250 us */ | ||
1393 | sh_eth_get_stats(ndev); | ||
1394 | sh_eth_reset(ndev); | ||
1395 | } | ||
1396 | |||
1359 | /* free Tx skb function */ | 1397 | /* free Tx skb function */ |
1360 | static int sh_eth_txfree(struct net_device *ndev) | 1398 | static int sh_eth_txfree(struct net_device *ndev) |
1361 | { | 1399 | { |
@@ -1400,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1400 | u16 pkt_len = 0; | 1438 | u16 pkt_len = 0; |
1401 | u32 desc_status; | 1439 | u32 desc_status; |
1402 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; | 1440 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; |
1441 | dma_addr_t dma_addr; | ||
1403 | 1442 | ||
1404 | boguscnt = min(boguscnt, *quota); | 1443 | boguscnt = min(boguscnt, *quota); |
1405 | limit = boguscnt; | 1444 | limit = boguscnt; |
@@ -1447,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1447 | mdp->rx_skbuff[entry] = NULL; | 1486 | mdp->rx_skbuff[entry] = NULL; |
1448 | if (mdp->cd->rpadir) | 1487 | if (mdp->cd->rpadir) |
1449 | skb_reserve(skb, NET_IP_ALIGN); | 1488 | skb_reserve(skb, NET_IP_ALIGN); |
1450 | dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, | 1489 | dma_unmap_single(&ndev->dev, rxdesc->addr, |
1451 | ALIGN(mdp->rx_buf_sz, 16), | 1490 | ALIGN(mdp->rx_buf_sz, 16), |
1452 | DMA_FROM_DEVICE); | 1491 | DMA_FROM_DEVICE); |
1453 | skb_put(skb, pkt_len); | 1492 | skb_put(skb, pkt_len); |
1454 | skb->protocol = eth_type_trans(skb, ndev); | 1493 | skb->protocol = eth_type_trans(skb, ndev); |
1455 | netif_receive_skb(skb); | 1494 | netif_receive_skb(skb); |
@@ -1469,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1469 | 1508 | ||
1470 | if (mdp->rx_skbuff[entry] == NULL) { | 1509 | if (mdp->rx_skbuff[entry] == NULL) { |
1471 | skb = netdev_alloc_skb(ndev, skbuff_size); | 1510 | skb = netdev_alloc_skb(ndev, skbuff_size); |
1472 | mdp->rx_skbuff[entry] = skb; | ||
1473 | if (skb == NULL) | 1511 | if (skb == NULL) |
1474 | break; /* Better luck next round. */ | 1512 | break; /* Better luck next round. */ |
1475 | sh_eth_set_receive_align(skb); | 1513 | sh_eth_set_receive_align(skb); |
1476 | dma_map_single(&ndev->dev, skb->data, | 1514 | dma_addr = dma_map_single(&ndev->dev, skb->data, |
1477 | rxdesc->buffer_length, DMA_FROM_DEVICE); | 1515 | rxdesc->buffer_length, |
1516 | DMA_FROM_DEVICE); | ||
1517 | if (dma_mapping_error(&ndev->dev, dma_addr)) { | ||
1518 | kfree_skb(skb); | ||
1519 | break; | ||
1520 | } | ||
1521 | mdp->rx_skbuff[entry] = skb; | ||
1478 | 1522 | ||
1479 | skb_checksum_none_assert(skb); | 1523 | skb_checksum_none_assert(skb); |
1480 | rxdesc->addr = virt_to_phys(skb->data); | 1524 | rxdesc->addr = dma_addr; |
1481 | } | 1525 | } |
1482 | if (entry >= mdp->num_rx_ring - 1) | 1526 | if (entry >= mdp->num_rx_ring - 1) |
1483 | rxdesc->status |= | 1527 | rxdesc->status |= |
@@ -1573,7 +1617,6 @@ ignore_link: | |||
1573 | if (intr_status & EESR_RFRMER) { | 1617 | if (intr_status & EESR_RFRMER) { |
1574 | /* Receive Frame Overflow int */ | 1618 | /* Receive Frame Overflow int */ |
1575 | ndev->stats.rx_frame_errors++; | 1619 | ndev->stats.rx_frame_errors++; |
1576 | netif_err(mdp, rx_err, ndev, "Receive Abort\n"); | ||
1577 | } | 1620 | } |
1578 | } | 1621 | } |
1579 | 1622 | ||
@@ -1592,13 +1635,11 @@ ignore_link: | |||
1592 | if (intr_status & EESR_RDE) { | 1635 | if (intr_status & EESR_RDE) { |
1593 | /* Receive Descriptor Empty int */ | 1636 | /* Receive Descriptor Empty int */ |
1594 | ndev->stats.rx_over_errors++; | 1637 | ndev->stats.rx_over_errors++; |
1595 | netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n"); | ||
1596 | } | 1638 | } |
1597 | 1639 | ||
1598 | if (intr_status & EESR_RFE) { | 1640 | if (intr_status & EESR_RFE) { |
1599 | /* Receive FIFO Overflow int */ | 1641 | /* Receive FIFO Overflow int */ |
1600 | ndev->stats.rx_fifo_errors++; | 1642 | ndev->stats.rx_fifo_errors++; |
1601 | netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n"); | ||
1602 | } | 1643 | } |
1603 | 1644 | ||
1604 | if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { | 1645 | if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { |
@@ -1653,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) | |||
1653 | if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) | 1694 | if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) |
1654 | ret = IRQ_HANDLED; | 1695 | ret = IRQ_HANDLED; |
1655 | else | 1696 | else |
1656 | goto other_irq; | 1697 | goto out; |
1698 | |||
1699 | if (unlikely(!mdp->irq_enabled)) { | ||
1700 | sh_eth_write(ndev, 0, EESIPR); | ||
1701 | goto out; | ||
1702 | } | ||
1657 | 1703 | ||
1658 | if (intr_status & EESR_RX_CHECK) { | 1704 | if (intr_status & EESR_RX_CHECK) { |
1659 | if (napi_schedule_prep(&mdp->napi)) { | 1705 | if (napi_schedule_prep(&mdp->napi)) { |
@@ -1684,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) | |||
1684 | sh_eth_error(ndev, intr_status); | 1730 | sh_eth_error(ndev, intr_status); |
1685 | } | 1731 | } |
1686 | 1732 | ||
1687 | other_irq: | 1733 | out: |
1688 | spin_unlock(&mdp->lock); | 1734 | spin_unlock(&mdp->lock); |
1689 | 1735 | ||
1690 | return ret; | 1736 | return ret; |
@@ -1712,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget) | |||
1712 | napi_complete(napi); | 1758 | napi_complete(napi); |
1713 | 1759 | ||
1714 | /* Reenable Rx interrupts */ | 1760 | /* Reenable Rx interrupts */ |
1715 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); | 1761 | if (mdp->irq_enabled) |
1762 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); | ||
1716 | out: | 1763 | out: |
1717 | return budget - quota; | 1764 | return budget - quota; |
1718 | } | 1765 | } |
@@ -1968,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev, | |||
1968 | return -EINVAL; | 2015 | return -EINVAL; |
1969 | 2016 | ||
1970 | if (netif_running(ndev)) { | 2017 | if (netif_running(ndev)) { |
2018 | netif_device_detach(ndev); | ||
1971 | netif_tx_disable(ndev); | 2019 | netif_tx_disable(ndev); |
1972 | /* Disable interrupts by clearing the interrupt mask. */ | 2020 | |
1973 | sh_eth_write(ndev, 0x0000, EESIPR); | 2021 | /* Serialise with the interrupt handler and NAPI, then |
1974 | /* Stop the chip's Tx and Rx processes. */ | 2022 | * disable interrupts. We have to clear the |
1975 | sh_eth_write(ndev, 0, EDTRR); | 2023 | * irq_enabled flag first to ensure that interrupts |
1976 | sh_eth_write(ndev, 0, EDRRR); | 2024 | * won't be re-enabled. |
2025 | */ | ||
2026 | mdp->irq_enabled = false; | ||
1977 | synchronize_irq(ndev->irq); | 2027 | synchronize_irq(ndev->irq); |
1978 | } | 2028 | napi_synchronize(&mdp->napi); |
2029 | sh_eth_write(ndev, 0x0000, EESIPR); | ||
1979 | 2030 | ||
1980 | /* Free all the skbuffs in the Rx queue. */ | 2031 | sh_eth_dev_exit(ndev); |
1981 | sh_eth_ring_free(ndev); | 2032 | |
1982 | /* Free DMA buffer */ | 2033 | /* Free all the skbuffs in the Rx queue. */ |
1983 | sh_eth_free_dma_buffer(mdp); | 2034 | sh_eth_ring_free(ndev); |
2035 | /* Free DMA buffer */ | ||
2036 | sh_eth_free_dma_buffer(mdp); | ||
2037 | } | ||
1984 | 2038 | ||
1985 | /* Set new parameters */ | 2039 | /* Set new parameters */ |
1986 | mdp->num_rx_ring = ring->rx_pending; | 2040 | mdp->num_rx_ring = ring->rx_pending; |
1987 | mdp->num_tx_ring = ring->tx_pending; | 2041 | mdp->num_tx_ring = ring->tx_pending; |
1988 | 2042 | ||
1989 | ret = sh_eth_ring_init(ndev); | ||
1990 | if (ret < 0) { | ||
1991 | netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__); | ||
1992 | return ret; | ||
1993 | } | ||
1994 | ret = sh_eth_dev_init(ndev, false); | ||
1995 | if (ret < 0) { | ||
1996 | netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__); | ||
1997 | return ret; | ||
1998 | } | ||
1999 | |||
2000 | if (netif_running(ndev)) { | 2043 | if (netif_running(ndev)) { |
2044 | ret = sh_eth_ring_init(ndev); | ||
2045 | if (ret < 0) { | ||
2046 | netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", | ||
2047 | __func__); | ||
2048 | return ret; | ||
2049 | } | ||
2050 | ret = sh_eth_dev_init(ndev, false); | ||
2051 | if (ret < 0) { | ||
2052 | netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", | ||
2053 | __func__); | ||
2054 | return ret; | ||
2055 | } | ||
2056 | |||
2057 | mdp->irq_enabled = true; | ||
2001 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); | 2058 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); |
2002 | /* Setting the Rx mode will start the Rx process. */ | 2059 | /* Setting the Rx mode will start the Rx process. */ |
2003 | sh_eth_write(ndev, EDRRR_R, EDRRR); | 2060 | sh_eth_write(ndev, EDRRR_R, EDRRR); |
2004 | netif_wake_queue(ndev); | 2061 | netif_device_attach(ndev); |
2005 | } | 2062 | } |
2006 | 2063 | ||
2007 | return 0; | 2064 | return 0; |
@@ -2117,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
2117 | } | 2174 | } |
2118 | spin_unlock_irqrestore(&mdp->lock, flags); | 2175 | spin_unlock_irqrestore(&mdp->lock, flags); |
2119 | 2176 | ||
2177 | if (skb_padto(skb, ETH_ZLEN)) | ||
2178 | return NETDEV_TX_OK; | ||
2179 | |||
2120 | entry = mdp->cur_tx % mdp->num_tx_ring; | 2180 | entry = mdp->cur_tx % mdp->num_tx_ring; |
2121 | mdp->tx_skbuff[entry] = skb; | 2181 | mdp->tx_skbuff[entry] = skb; |
2122 | txdesc = &mdp->tx_ring[entry]; | 2182 | txdesc = &mdp->tx_ring[entry]; |
@@ -2126,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
2126 | skb->len + 2); | 2186 | skb->len + 2); |
2127 | txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, | 2187 | txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, |
2128 | DMA_TO_DEVICE); | 2188 | DMA_TO_DEVICE); |
2129 | if (skb->len < ETH_ZLEN) | 2189 | if (dma_mapping_error(&ndev->dev, txdesc->addr)) { |
2130 | txdesc->buffer_length = ETH_ZLEN; | 2190 | kfree_skb(skb); |
2131 | else | 2191 | return NETDEV_TX_OK; |
2132 | txdesc->buffer_length = skb->len; | 2192 | } |
2193 | txdesc->buffer_length = skb->len; | ||
2133 | 2194 | ||
2134 | if (entry >= mdp->num_tx_ring - 1) | 2195 | if (entry >= mdp->num_tx_ring - 1) |
2135 | txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); | 2196 | txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); |
@@ -2181,14 +2242,17 @@ static int sh_eth_close(struct net_device *ndev) | |||
2181 | 2242 | ||
2182 | netif_stop_queue(ndev); | 2243 | netif_stop_queue(ndev); |
2183 | 2244 | ||
2184 | /* Disable interrupts by clearing the interrupt mask. */ | 2245 | /* Serialise with the interrupt handler and NAPI, then disable |
2246 | * interrupts. We have to clear the irq_enabled flag first to | ||
2247 | * ensure that interrupts won't be re-enabled. | ||
2248 | */ | ||
2249 | mdp->irq_enabled = false; | ||
2250 | synchronize_irq(ndev->irq); | ||
2251 | napi_disable(&mdp->napi); | ||
2185 | sh_eth_write(ndev, 0x0000, EESIPR); | 2252 | sh_eth_write(ndev, 0x0000, EESIPR); |
2186 | 2253 | ||
2187 | /* Stop the chip's Tx and Rx processes. */ | 2254 | sh_eth_dev_exit(ndev); |
2188 | sh_eth_write(ndev, 0, EDTRR); | ||
2189 | sh_eth_write(ndev, 0, EDRRR); | ||
2190 | 2255 | ||
2191 | sh_eth_get_stats(ndev); | ||
2192 | /* PHY Disconnect */ | 2256 | /* PHY Disconnect */ |
2193 | if (mdp->phydev) { | 2257 | if (mdp->phydev) { |
2194 | phy_stop(mdp->phydev); | 2258 | phy_stop(mdp->phydev); |
@@ -2198,8 +2262,6 @@ static int sh_eth_close(struct net_device *ndev) | |||
2198 | 2262 | ||
2199 | free_irq(ndev->irq, ndev); | 2263 | free_irq(ndev->irq, ndev); |
2200 | 2264 | ||
2201 | napi_disable(&mdp->napi); | ||
2202 | |||
2203 | /* Free all the skbuffs in the Rx queue. */ | 2265 | /* Free all the skbuffs in the Rx queue. */ |
2204 | sh_eth_ring_free(ndev); | 2266 | sh_eth_ring_free(ndev); |
2205 | 2267 | ||
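Several sh_eth hunks apply one rule: a dma_map_single() result must pass dma_mapping_error() before it is written into a descriptor, and the skb is freed on failure instead of being published half-mapped. This is also why the rx_skbuff[] assignment moved below the mapping in both ring-fill paths. The RX-refill shape as a minimal sketch:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Map an RX buffer; on failure drop the skb rather than hand the
 * hardware an unchecked DMA handle. */
static int example_map_rx(struct net_device *ndev, struct sk_buff *skb,
			  size_t len, dma_addr_t *addr)
{
	*addr = dma_map_single(&ndev->dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&ndev->dev, *addr)) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	return 0;
}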
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 71f5de1171bd..332d3c16d483 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
@@ -513,6 +513,7 @@ struct sh_eth_private { | |||
513 | u32 rx_buf_sz; /* Based on MTU+slack. */ | 513 | u32 rx_buf_sz; /* Based on MTU+slack. */ |
514 | int edmac_endian; | 514 | int edmac_endian; |
515 | struct napi_struct napi; | 515 | struct napi_struct napi; |
516 | bool irq_enabled; | ||
516 | /* MII transceiver section. */ | 517 | /* MII transceiver section. */ |
517 | u32 phy_id; /* PHY ID */ | 518 | u32 phy_id; /* PHY ID */ |
518 | struct mii_bus *mii_bus; /* MDIO bus control */ | 519 | struct mii_bus *mii_bus; /* MDIO bus control */ |
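The new irq_enabled flag gives sh_eth a race-free teardown order: clear the flag so the ISR and poll handler stop re-arming EESIPR, wait out any instances already running, and only then mask the hardware. Compressed into a generic sketch (the close path uses napi_disable() rather than napi_synchronize() because NAPI is going away entirely there):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
	bool irq_enabled;
};

void example_mask_hw_irqs(struct net_device *ndev); /* stands in for EESIPR = 0 */

static void example_quiesce(struct net_device *ndev,
			    struct example_priv *priv)
{
	priv->irq_enabled = false;	/* handlers must not re-enable */
	synchronize_irq(ndev->irq);	/* wait out a running hard IRQ */
	napi_synchronize(&priv->napi);	/* wait out a running poll */
	example_mask_hw_irqs(ndev);	/* now safe to mask the hardware */
}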
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8c6b7c1651e5..cf62ff4c8c56 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -2778,6 +2778,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv) | |||
2778 | * @addr: iobase memory address | 2778 | * @addr: iobase memory address |
2779 | * Description: this is the main probe function used to | 2779 | * Description: this is the main probe function used to |
2780 | * call the alloc_etherdev, allocate the priv structure. | 2780 | * call the alloc_etherdev, allocate the priv structure. |
2781 | * Return: | ||
2782 | * on success the new private structure is returned, otherwise an | ||
2783 | * error pointer. | ||
2781 | */ | 2784 | */ |
2782 | struct stmmac_priv *stmmac_dvr_probe(struct device *device, | 2785 | struct stmmac_priv *stmmac_dvr_probe(struct device *device, |
2783 | struct plat_stmmacenet_data *plat_dat, | 2786 | struct plat_stmmacenet_data *plat_dat, |
@@ -2789,7 +2792,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, | |||
2789 | 2792 | ||
2790 | ndev = alloc_etherdev(sizeof(struct stmmac_priv)); | 2793 | ndev = alloc_etherdev(sizeof(struct stmmac_priv)); |
2791 | if (!ndev) | 2794 | if (!ndev) |
2792 | return NULL; | 2795 | return ERR_PTR(-ENOMEM); |
2793 | 2796 | ||
2794 | SET_NETDEV_DEV(ndev, device); | 2797 | SET_NETDEV_DEV(ndev, device); |
2795 | 2798 | ||
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index e068d48b0f21..a39131f494ec 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -1683,6 +1683,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, | |||
1683 | if (vid == priv->data.default_vlan) | 1683 | if (vid == priv->data.default_vlan) |
1684 | return 0; | 1684 | return 0; |
1685 | 1685 | ||
1686 | if (priv->data.dual_emac) { | ||
1687 | /* In dual EMAC, reserved VLAN id should not be used for | ||
1688 | * creating VLAN interfaces as this can break the dual | ||
1689 | * EMAC port separation | ||
1690 | */ | ||
1691 | int i; | ||
1692 | |||
1693 | for (i = 0; i < priv->data.slaves; i++) { | ||
1694 | if (vid == priv->slaves[i].port_vlan) | ||
1695 | return -EINVAL; | ||
1696 | } | ||
1697 | } | ||
1698 | |||
1686 | dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); | 1699 | dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); |
1687 | return cpsw_add_vlan_ale_entry(priv, vid); | 1700 | return cpsw_add_vlan_ale_entry(priv, vid); |
1688 | } | 1701 | } |
@@ -1696,6 +1709,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, | |||
1696 | if (vid == priv->data.default_vlan) | 1709 | if (vid == priv->data.default_vlan) |
1697 | return 0; | 1710 | return 0; |
1698 | 1711 | ||
1712 | if (priv->data.dual_emac) { | ||
1713 | int i; | ||
1714 | |||
1715 | for (i = 0; i < priv->data.slaves; i++) { | ||
1716 | if (vid == priv->slaves[i].port_vlan) | ||
1717 | return -EINVAL; | ||
1718 | } | ||
1719 | } | ||
1720 | |||
1699 | dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); | 1721 | dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); |
1700 | ret = cpsw_ale_del_vlan(priv->ale, vid, 0); | 1722 | ret = cpsw_ale_del_vlan(priv->ale, vid, 0); |
1701 | if (ret != 0) | 1723 | if (ret != 0) |
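In dual-EMAC mode each cpsw slave owns a reserved port VLAN, and the add and kill paths above reject those ids with an identical loop so a user-created VLAN interface cannot collapse the port separation. The check in isolation, with illustrative names:

#include <linux/errno.h>
#include <linux/types.h>

/* Refuse VLAN ids reserved for dual-EMAC port separation. */
static int example_check_vid(const u16 *port_vlans, int nr_slaves, u16 vid)
{
	int i;

	for (i = 0; i < nr_slaves; i++)
		if (vid == port_vlans[i])
			return -EINVAL;

	return 0;
}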
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index a14d87783245..2e195289ddf4 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c | |||
@@ -377,9 +377,11 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) | |||
377 | }; | 377 | }; |
378 | 378 | ||
379 | dst = ip6_route_output(dev_net(dev), NULL, &fl6); | 379 | dst = ip6_route_output(dev_net(dev), NULL, &fl6); |
380 | if (IS_ERR(dst)) | 380 | if (dst->error) { |
381 | ret = dst->error; | ||
382 | dst_release(dst); | ||
381 | goto err; | 383 | goto err; |
382 | 384 | } | |
383 | skb_dst_drop(skb); | 385 | skb_dst_drop(skb); |
384 | skb_dst_set(skb, dst); | 386 | skb_dst_set(skb, dst); |
385 | err = ip6_local_out(skb); | 387 | err = ip6_local_out(skb); |
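The ipvlan fix encodes an easy-to-miss asymmetry in the routing API: unlike its IPv4 counterparts, ip6_route_output() never returns an error pointer; failures come back as a valid dst with ->error set, and that dst still holds a reference that must be dropped. A minimal sketch (ip6_local_out() takes only the skb in the kernel version this diff targets):

#include <net/dst.h>
#include <net/ip6_route.h>

static int example_v6_output(struct net *net, struct sk_buff *skb,
			     struct flowi6 *fl6)
{
	struct dst_entry *dst = ip6_route_output(net, NULL, fl6);

	/* Check ->error, not IS_ERR(); release the dst on failure. */
	if (dst->error) {
		int err = dst->error;

		dst_release(dst);
		return err;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);
	return ip6_local_out(skb);
}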
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 9a72640237cb..62b0bf4fdf6b 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) | |||
285 | 285 | ||
286 | __ath_cancel_work(sc); | 286 | __ath_cancel_work(sc); |
287 | 287 | ||
288 | disable_irq(sc->irq); | ||
288 | tasklet_disable(&sc->intr_tq); | 289 | tasklet_disable(&sc->intr_tq); |
289 | tasklet_disable(&sc->bcon_tasklet); | 290 | tasklet_disable(&sc->bcon_tasklet); |
290 | spin_lock_bh(&sc->sc_pcu_lock); | 291 | spin_lock_bh(&sc->sc_pcu_lock); |
@@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) | |||
331 | r = -EIO; | 332 | r = -EIO; |
332 | 333 | ||
333 | out: | 334 | out: |
335 | enable_irq(sc->irq); | ||
334 | spin_unlock_bh(&sc->sc_pcu_lock); | 336 | spin_unlock_bh(&sc->sc_pcu_lock); |
335 | tasklet_enable(&sc->bcon_tasklet); | 337 | tasklet_enable(&sc->bcon_tasklet); |
336 | tasklet_enable(&sc->intr_tq); | 338 | tasklet_enable(&sc->intr_tq); |
@@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev) | |||
512 | if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags)) | 514 | if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags)) |
513 | return IRQ_NONE; | 515 | return IRQ_NONE; |
514 | 516 | ||
515 | if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags)) | ||
516 | return IRQ_NONE; | ||
517 | |||
518 | /* shared irq, not for us */ | 517 | /* shared irq, not for us */ |
519 | if (!ath9k_hw_intrpend(ah)) | 518 | if (!ath9k_hw_intrpend(ah)) |
520 | return IRQ_NONE; | 519 | return IRQ_NONE; |
@@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev) | |||
529 | ath9k_debug_sync_cause(sc, sync_cause); | 528 | ath9k_debug_sync_cause(sc, sync_cause); |
530 | status &= ah->imask; /* discard unasked-for bits */ | 529 | status &= ah->imask; /* discard unasked-for bits */ |
531 | 530 | ||
532 | if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags)) | 531 | if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) |
533 | return IRQ_HANDLED; | 532 | return IRQ_HANDLED; |
534 | 533 | ||
535 | /* | 534 | /* |
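With the ATH_OP_HW_RESET early-return gone from the ISR, ath9k now fences the interrupt line itself around a chip reset; disable_irq() sleeps until any handler already executing has finished, which is a stronger guarantee than a flag test inside the handler. The bracketing, reduced to its shape:

#include <linux/interrupt.h>

/* Keep the ISR out for the whole duration of a hardware reset.
 * Must be called from process context, as ath_reset_internal() is. */
static void example_reset_hw(int irq)
{
	disable_irq(irq);	/* waits for a running handler to finish */

	/* ... reprogram the hardware with the line masked ... */

	enable_irq(irq);
}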
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index 1bbe4fc47b97..660ddb1b7d8a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h | |||
@@ -246,6 +246,7 @@ enum iwl_ucode_tlv_flag { | |||
246 | * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, | 246 | * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, |
247 | * regardless of the band or the number of the probes. FW will calculate | 247 | * regardless of the band or the number of the probes. FW will calculate |
248 | * the actual dwell time. | 248 | * the actual dwell time. |
249 | * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too. | ||
249 | */ | 250 | */ |
250 | enum iwl_ucode_tlv_api { | 251 | enum iwl_ucode_tlv_api { |
251 | IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), | 252 | IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), |
@@ -257,6 +258,7 @@ enum iwl_ucode_tlv_api { | |||
257 | IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), | 258 | IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), |
258 | IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), | 259 | IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), |
259 | IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), | 260 | IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), |
261 | IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16), | ||
260 | }; | 262 | }; |
261 | 263 | ||
262 | /** | 264 | /** |
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 201846de94e7..cfc0e65b34a5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | |||
@@ -653,8 +653,11 @@ enum iwl_scan_channel_flags { | |||
653 | }; | 653 | }; |
654 | 654 | ||
655 | /* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S | 655 | /* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S |
656 | * @flags: enum iwl_scan_channel_flgs | 656 | * @flags: enum iwl_scan_channel_flags |
657 | * @non_ebs_ratio: how many regular scan iteration before EBS | 657 | * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is |
658 | * involved. | ||
659 | * 1 - EBS is disabled. | ||
660 | 2 - every second scan will be a full scan (and so on). | ||
658 | */ | 661 | */ |
659 | struct iwl_scan_channel_opt { | 662 | struct iwl_scan_channel_opt { |
660 | __le16 flags; | 663 | __le16 flags; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index e880f9d4717b..20915587c820 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c | |||
@@ -3343,18 +3343,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, | |||
3343 | msk |= mvmsta->tfd_queue_msk; | 3343 | msk |= mvmsta->tfd_queue_msk; |
3344 | } | 3344 | } |
3345 | 3345 | ||
3346 | if (drop) { | 3346 | msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]); |
3347 | if (iwl_mvm_flush_tx_path(mvm, msk, true)) | ||
3348 | IWL_ERR(mvm, "flush request fail\n"); | ||
3349 | mutex_unlock(&mvm->mutex); | ||
3350 | } else { | ||
3351 | mutex_unlock(&mvm->mutex); | ||
3352 | 3347 | ||
3353 | /* this can take a while, and we may need/want other operations | 3348 | if (iwl_mvm_flush_tx_path(mvm, msk, true)) |
3354 | * to succeed while doing this, so do it without the mutex held | 3349 | IWL_ERR(mvm, "flush request fail\n"); |
3355 | */ | 3350 | mutex_unlock(&mvm->mutex); |
3356 | iwl_trans_wait_tx_queue_empty(mvm->trans, msk); | 3351 | |
3357 | } | 3352 | /* this can take a while, and we may need/want other operations |
3353 | * to succeed while doing this, so do it without the mutex held | ||
3354 | */ | ||
3355 | iwl_trans_wait_tx_queue_empty(mvm->trans, msk); | ||
3358 | } | 3356 | } |
3359 | 3357 | ||
3360 | const struct ieee80211_ops iwl_mvm_hw_ops = { | 3358 | const struct ieee80211_ops iwl_mvm_hw_ops = { |
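The rework collapses the drop/no-drop branches so the mutex is always released before the potentially long wait for the TX queues to drain. The general shape of that pattern, modeled with pthreads instead of the mvm mutex and a sleep standing in for iwl_trans_wait_tx_queue_empty():

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int queue_mask = 0xff;

    static void flush_queues(unsigned int msk)
    {
        pthread_mutex_lock(&lock);
        queue_mask &= ~msk;      /* quick bookkeeping under the lock */
        pthread_mutex_unlock(&lock);

        sleep(1);                /* the long wait runs without the lock,
                                  * so other operations can proceed */
        printf("queues 0x%x drained\n", msk);
    }

    int main(void)
    {
        flush_queues(0x0f);
        return 0;
    }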
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index ec9a8e7bae1d..844bf7c4c8de 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c | |||
@@ -72,6 +72,8 @@ | |||
72 | 72 | ||
73 | #define IWL_PLCP_QUIET_THRESH 1 | 73 | #define IWL_PLCP_QUIET_THRESH 1 |
74 | #define IWL_ACTIVE_QUIET_TIME 10 | 74 | #define IWL_ACTIVE_QUIET_TIME 10 |
75 | #define IWL_DENSE_EBS_SCAN_RATIO 5 | ||
76 | #define IWL_SPARSE_EBS_SCAN_RATIO 1 | ||
75 | 77 | ||
76 | struct iwl_mvm_scan_params { | 78 | struct iwl_mvm_scan_params { |
77 | u32 max_out_time; | 79 | u32 max_out_time; |
@@ -1105,6 +1107,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) | |||
1105 | return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN, | 1107 | return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN, |
1106 | notify); | 1108 | notify); |
1107 | 1109 | ||
1110 | if (mvm->scan_status == IWL_MVM_SCAN_NONE) | ||
1111 | return 0; | ||
1112 | |||
1113 | if (iwl_mvm_is_radio_killed(mvm)) | ||
1114 | goto out; | ||
1115 | |||
1108 | if (mvm->scan_status != IWL_MVM_SCAN_SCHED && | 1116 | if (mvm->scan_status != IWL_MVM_SCAN_SCHED && |
1109 | (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || | 1117 | (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || |
1110 | mvm->scan_status != IWL_MVM_SCAN_OS)) { | 1118 | mvm->scan_status != IWL_MVM_SCAN_OS)) { |
@@ -1141,6 +1149,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) | |||
1141 | if (mvm->scan_status == IWL_MVM_SCAN_OS) | 1149 | if (mvm->scan_status == IWL_MVM_SCAN_OS) |
1142 | iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); | 1150 | iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); |
1143 | 1151 | ||
1152 | out: | ||
1144 | mvm->scan_status = IWL_MVM_SCAN_NONE; | 1153 | mvm->scan_status = IWL_MVM_SCAN_NONE; |
1145 | 1154 | ||
1146 | if (notify) { | 1155 | if (notify) { |
@@ -1297,18 +1306,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm, | |||
1297 | cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); | 1306 | cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); |
1298 | cmd->iter_num = cpu_to_le32(1); | 1307 | cmd->iter_num = cpu_to_le32(1); |
1299 | 1308 | ||
1300 | if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT && | ||
1301 | mvm->last_ebs_successful) { | ||
1302 | cmd->channel_opt[0].flags = | ||
1303 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
1304 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
1305 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
1306 | cmd->channel_opt[1].flags = | ||
1307 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
1308 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
1309 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
1310 | } | ||
1311 | |||
1312 | if (iwl_mvm_rrm_scan_needed(mvm)) | 1309 | if (iwl_mvm_rrm_scan_needed(mvm)) |
1313 | cmd->scan_flags |= | 1310 | cmd->scan_flags |= |
1314 | cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED); | 1311 | cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED); |
@@ -1383,6 +1380,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm, | |||
1383 | cmd->schedule[1].iterations = 0; | 1380 | cmd->schedule[1].iterations = 0; |
1384 | cmd->schedule[1].full_scan_mul = 0; | 1381 | cmd->schedule[1].full_scan_mul = 0; |
1385 | 1382 | ||
1383 | if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS && | ||
1384 | mvm->last_ebs_successful) { | ||
1385 | cmd->channel_opt[0].flags = | ||
1386 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
1387 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
1388 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
1389 | cmd->channel_opt[0].non_ebs_ratio = | ||
1390 | cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); | ||
1391 | cmd->channel_opt[1].flags = | ||
1392 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
1393 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
1394 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
1395 | cmd->channel_opt[1].non_ebs_ratio = | ||
1396 | cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); | ||
1397 | } | ||
1398 | |||
1386 | for (i = 1; i <= req->req.n_ssids; i++) | 1399 | for (i = 1; i <= req->req.n_ssids; i++) |
1387 | ssid_bitmap |= BIT(i); | 1400 | ssid_bitmap |= BIT(i); |
1388 | 1401 | ||
@@ -1483,6 +1496,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, | |||
1483 | cmd->schedule[1].iterations = 0xff; | 1496 | cmd->schedule[1].iterations = 0xff; |
1484 | cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER; | 1497 | cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER; |
1485 | 1498 | ||
1499 | if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT && | ||
1500 | mvm->last_ebs_successful) { | ||
1501 | cmd->channel_opt[0].flags = | ||
1502 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
1503 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
1504 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
1505 | cmd->channel_opt[0].non_ebs_ratio = | ||
1506 | cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); | ||
1507 | cmd->channel_opt[1].flags = | ||
1508 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
1509 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
1510 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
1511 | cmd->channel_opt[1].non_ebs_ratio = | ||
1512 | cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); | ||
1513 | } | ||
1514 | |||
1486 | iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels, | 1515 | iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels, |
1487 | ssid_bitmap, cmd); | 1516 | ssid_bitmap, cmd); |
1488 | 1517 | ||
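All of the channel_opt fields above are wrapped in cpu_to_le16() because the firmware command layout is little-endian regardless of host byte order. A portable userspace equivalent of that conversion, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    /* store a 16-bit value little-endian, regardless of host order */
    static void put_le16(uint8_t *dst, uint16_t v)
    {
        dst[0] = v & 0xff;   /* low byte first */
        dst[1] = v >> 8;
    }

    int main(void)
    {
        uint8_t wire[2];

        put_le16(wire, 0x1234);
        printf("0x1234 on the wire: %02x %02x\n", wire[0], wire[1]); /* 34 12 */
        return 0;
    }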
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 4333306ccdee..c59d07567d90 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c | |||
@@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, | |||
90 | 90 | ||
91 | if (ieee80211_is_probe_resp(fc)) | 91 | if (ieee80211_is_probe_resp(fc)) |
92 | tx_flags |= TX_CMD_FLG_TSF; | 92 | tx_flags |= TX_CMD_FLG_TSF; |
93 | else if (ieee80211_is_back_req(fc)) | ||
94 | tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; | ||
95 | 93 | ||
96 | if (ieee80211_has_morefrags(fc)) | 94 | if (ieee80211_has_morefrags(fc)) |
97 | tx_flags |= TX_CMD_FLG_MORE_FRAG; | 95 | tx_flags |= TX_CMD_FLG_MORE_FRAG; |
@@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, | |||
100 | u8 *qc = ieee80211_get_qos_ctl(hdr); | 98 | u8 *qc = ieee80211_get_qos_ctl(hdr); |
101 | tx_cmd->tid_tspec = qc[0] & 0xf; | 99 | tx_cmd->tid_tspec = qc[0] & 0xf; |
102 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL; | 100 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL; |
101 | } else if (ieee80211_is_back_req(fc)) { | ||
102 | struct ieee80211_bar *bar = (void *)skb->data; | ||
103 | u16 control = le16_to_cpu(bar->control); | ||
104 | |||
105 | tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; | ||
106 | tx_cmd->tid_tspec = (control & | ||
107 | IEEE80211_BAR_CTRL_TID_INFO_MASK) >> | ||
108 | IEEE80211_BAR_CTRL_TID_INFO_SHIFT; | ||
109 | WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT); | ||
103 | } else { | 110 | } else { |
104 | tx_cmd->tid_tspec = IWL_TID_NON_QOS; | 111 | tx_cmd->tid_tspec = IWL_TID_NON_QOS; |
105 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) | 112 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) |
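The tx.c hunk recovers the TID of a BlockAckReq with a mask-and-shift on the 16-bit BAR control field; mac80211 places the TID in bits 12..15. A standalone check with a hypothetical control word:

    #include <stdint.h>
    #include <stdio.h>

    /* values mirror mac80211's IEEE80211_BAR_CTRL_TID_INFO_* definitions */
    #define BAR_CTRL_TID_INFO_MASK  0xf000
    #define BAR_CTRL_TID_INFO_SHIFT 12

    int main(void)
    {
        uint16_t control = 0x5004;  /* hypothetical BAR control word */
        uint8_t tid = (control & BAR_CTRL_TID_INFO_MASK) >>
                      BAR_CTRL_TID_INFO_SHIFT;

        printf("tid = %u\n", tid);  /* prints 5 */
        return 0;
    }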
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index dfd021e8268f..f4cd0b9b2438 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c | |||
@@ -177,7 +177,7 @@ struct at91_pinctrl { | |||
177 | struct device *dev; | 177 | struct device *dev; |
178 | struct pinctrl_dev *pctl; | 178 | struct pinctrl_dev *pctl; |
179 | 179 | ||
180 | int nbanks; | 180 | int nactive_banks; |
181 | 181 | ||
182 | uint32_t *mux_mask; | 182 | uint32_t *mux_mask; |
183 | int nmux; | 183 | int nmux; |
@@ -653,12 +653,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name, | |||
653 | int mux; | 653 | int mux; |
654 | 654 | ||
655 | /* check if it's a valid config */ | 655 | /* check if it's a valid config */ |
656 | if (pin->bank >= info->nbanks) { | 656 | if (pin->bank >= gpio_banks) { |
657 | dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n", | 657 | dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n", |
658 | name, index, pin->bank, info->nbanks); | 658 | name, index, pin->bank, gpio_banks); |
659 | return -EINVAL; | 659 | return -EINVAL; |
660 | } | 660 | } |
661 | 661 | ||
662 | if (!gpio_chips[pin->bank]) { | ||
663 | dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n", | ||
664 | name, index, pin->bank); | ||
665 | return -ENXIO; | ||
666 | } | ||
667 | |||
662 | if (pin->pin >= MAX_NB_GPIO_PER_BANK) { | 668 | if (pin->pin >= MAX_NB_GPIO_PER_BANK) { |
663 | dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n", | 669 | dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n", |
664 | name, index, pin->pin, MAX_NB_GPIO_PER_BANK); | 670 | name, index, pin->pin, MAX_NB_GPIO_PER_BANK); |
@@ -981,7 +987,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info, | |||
981 | 987 | ||
982 | for_each_child_of_node(np, child) { | 988 | for_each_child_of_node(np, child) { |
983 | if (of_device_is_compatible(child, gpio_compat)) { | 989 | if (of_device_is_compatible(child, gpio_compat)) { |
984 | info->nbanks++; | 990 | if (of_device_is_available(child)) |
991 | info->nactive_banks++; | ||
985 | } else { | 992 | } else { |
986 | info->nfunctions++; | 993 | info->nfunctions++; |
987 | info->ngroups += of_get_child_count(child); | 994 | info->ngroups += of_get_child_count(child); |
@@ -1003,11 +1010,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info, | |||
1003 | } | 1010 | } |
1004 | 1011 | ||
1005 | size /= sizeof(*list); | 1012 | size /= sizeof(*list); |
1006 | if (!size || size % info->nbanks) { | 1013 | if (!size || size % gpio_banks) { |
1007 | dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks); | 1014 | dev_err(info->dev, "mux mask array size should be a multiple of %d\n", gpio_banks); |
1008 | return -EINVAL; | 1015 | return -EINVAL; |
1009 | } | 1016 | } |
1010 | info->nmux = size / info->nbanks; | 1017 | info->nmux = size / gpio_banks; |
1011 | 1018 | ||
1012 | info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL); | 1019 | info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL); |
1013 | if (!info->mux_mask) { | 1020 | if (!info->mux_mask) { |
@@ -1131,7 +1138,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev, | |||
1131 | of_match_device(at91_pinctrl_of_match, &pdev->dev)->data; | 1138 | of_match_device(at91_pinctrl_of_match, &pdev->dev)->data; |
1132 | at91_pinctrl_child_count(info, np); | 1139 | at91_pinctrl_child_count(info, np); |
1133 | 1140 | ||
1134 | if (info->nbanks < 1) { | 1141 | if (gpio_banks < 1) { |
1135 | dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n"); | 1142 | dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n"); |
1136 | return -EINVAL; | 1143 | return -EINVAL; |
1137 | } | 1144 | } |
@@ -1144,7 +1151,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev, | |||
1144 | 1151 | ||
1145 | dev_dbg(&pdev->dev, "mux-mask\n"); | 1152 | dev_dbg(&pdev->dev, "mux-mask\n"); |
1146 | tmp = info->mux_mask; | 1153 | tmp = info->mux_mask; |
1147 | for (i = 0; i < info->nbanks; i++) { | 1154 | for (i = 0; i < gpio_banks; i++) { |
1148 | for (j = 0; j < info->nmux; j++, tmp++) { | 1155 | for (j = 0; j < info->nmux; j++, tmp++) { |
1149 | dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]); | 1156 | dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]); |
1150 | } | 1157 | } |
@@ -1162,7 +1169,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev, | |||
1162 | if (!info->groups) | 1169 | if (!info->groups) |
1163 | return -ENOMEM; | 1170 | return -ENOMEM; |
1164 | 1171 | ||
1165 | dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks); | 1172 | dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks); |
1166 | dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions); | 1173 | dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions); |
1167 | dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups); | 1174 | dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups); |
1168 | 1175 | ||
@@ -1185,7 +1192,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev) | |||
1185 | { | 1192 | { |
1186 | struct at91_pinctrl *info; | 1193 | struct at91_pinctrl *info; |
1187 | struct pinctrl_pin_desc *pdesc; | 1194 | struct pinctrl_pin_desc *pdesc; |
1188 | int ret, i, j, k; | 1195 | int ret, i, j, k, ngpio_chips_enabled = 0; |
1189 | 1196 | ||
1190 | info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); | 1197 | info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); |
1191 | if (!info) | 1198 | if (!info) |
@@ -1200,23 +1207,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev) | |||
1200 | * to obtain references to the struct gpio_chip * for them, and we | 1207 | * to obtain references to the struct gpio_chip * for them, and we |
1201 | * need this to proceed. | 1208 | * need this to proceed. |
1202 | */ | 1209 | */ |
1203 | for (i = 0; i < info->nbanks; i++) { | 1210 | for (i = 0; i < gpio_banks; i++) |
1204 | if (!gpio_chips[i]) { | 1211 | if (gpio_chips[i]) |
1205 | dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i); | 1212 | ngpio_chips_enabled++; |
1206 | devm_kfree(&pdev->dev, info); | 1213 | |
1207 | return -EPROBE_DEFER; | 1214 | if (ngpio_chips_enabled < info->nactive_banks) { |
1208 | } | 1215 | dev_warn(&pdev->dev, |
1216 | "All GPIO chips are not registered yet (%d/%d)\n", | ||
1217 | ngpio_chips_enabled, info->nactive_banks); | ||
1218 | devm_kfree(&pdev->dev, info); | ||
1219 | return -EPROBE_DEFER; | ||
1209 | } | 1220 | } |
1210 | 1221 | ||
1211 | at91_pinctrl_desc.name = dev_name(&pdev->dev); | 1222 | at91_pinctrl_desc.name = dev_name(&pdev->dev); |
1212 | at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK; | 1223 | at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK; |
1213 | at91_pinctrl_desc.pins = pdesc = | 1224 | at91_pinctrl_desc.pins = pdesc = |
1214 | devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL); | 1225 | devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL); |
1215 | 1226 | ||
1216 | if (!at91_pinctrl_desc.pins) | 1227 | if (!at91_pinctrl_desc.pins) |
1217 | return -ENOMEM; | 1228 | return -ENOMEM; |
1218 | 1229 | ||
1219 | for (i = 0 , k = 0; i < info->nbanks; i++) { | 1230 | for (i = 0, k = 0; i < gpio_banks; i++) { |
1220 | for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) { | 1231 | for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) { |
1221 | pdesc->number = k; | 1232 | pdesc->number = k; |
1222 | pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j); | 1233 | pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j); |
@@ -1234,8 +1245,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev) | |||
1234 | } | 1245 | } |
1235 | 1246 | ||
1236 | /* We will handle a range of GPIO pins */ | 1247 | /* We will handle a range of GPIO pins */ |
1237 | for (i = 0; i < info->nbanks; i++) | 1248 | for (i = 0; i < gpio_banks; i++) |
1238 | pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range); | 1249 | if (gpio_chips[i]) |
1250 | pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range); | ||
1239 | 1251 | ||
1240 | dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n"); | 1252 | dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n"); |
1241 | 1253 | ||
@@ -1613,9 +1625,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) | |||
1613 | static int at91_gpio_of_irq_setup(struct platform_device *pdev, | 1625 | static int at91_gpio_of_irq_setup(struct platform_device *pdev, |
1614 | struct at91_gpio_chip *at91_gpio) | 1626 | struct at91_gpio_chip *at91_gpio) |
1615 | { | 1627 | { |
1628 | struct gpio_chip *gpiochip_prev = NULL; | ||
1616 | struct at91_gpio_chip *prev = NULL; | 1629 | struct at91_gpio_chip *prev = NULL; |
1617 | struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq); | 1630 | struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq); |
1618 | int ret; | 1631 | int ret, i; |
1619 | 1632 | ||
1620 | at91_gpio->pioc_hwirq = irqd_to_hwirq(d); | 1633 | at91_gpio->pioc_hwirq = irqd_to_hwirq(d); |
1621 | 1634 | ||
@@ -1641,24 +1654,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev, | |||
1641 | return ret; | 1654 | return ret; |
1642 | } | 1655 | } |
1643 | 1656 | ||
1644 | /* Setup chained handler */ | ||
1645 | if (at91_gpio->pioc_idx) | ||
1646 | prev = gpio_chips[at91_gpio->pioc_idx - 1]; | ||
1647 | |||
1648 | /* The top level handler handles one bank of GPIOs, except | 1657 | /* The top level handler handles one bank of GPIOs, except |
1649 | * on some SoCs it can handle up to three... | 1658 | * on some SoCs it can handle up to three... |
1650 | * We only set up the handler for the first of the list. | 1659 | * We only set up the handler for the first of the list. |
1651 | */ | 1660 | */ |
1652 | if (prev && prev->next == at91_gpio) | 1661 | gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq); |
1662 | if (!gpiochip_prev) { | ||
1663 | /* Then register the chain on the parent IRQ */ | ||
1664 | gpiochip_set_chained_irqchip(&at91_gpio->chip, | ||
1665 | &gpio_irqchip, | ||
1666 | at91_gpio->pioc_virq, | ||
1667 | gpio_irq_handler); | ||
1653 | return 0; | 1668 | return 0; |
1669 | } | ||
1654 | 1670 | ||
1655 | /* Then register the chain on the parent IRQ */ | 1671 | prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip); |
1656 | gpiochip_set_chained_irqchip(&at91_gpio->chip, | ||
1657 | &gpio_irqchip, | ||
1658 | at91_gpio->pioc_virq, | ||
1659 | gpio_irq_handler); | ||
1660 | 1672 | ||
1661 | return 0; | 1673 | /* at most two banks can already be chained */ |
1674 | for (i = 0; i < 2; i++) { | ||
1675 | if (prev->next) { | ||
1676 | prev = prev->next; | ||
1677 | } else { | ||
1678 | prev->next = at91_gpio; | ||
1679 | return 0; | ||
1680 | } | ||
1681 | } | ||
1682 | |||
1683 | return -EINVAL; | ||
1662 | } | 1684 | } |
1663 | 1685 | ||
1664 | /* This structure is replicated for each GPIO block allocated at probe time */ | 1686 | /* This structure is replicated for each GPIO block allocated at probe time */ |
@@ -1675,24 +1697,6 @@ static struct gpio_chip at91_gpio_template = { | |||
1675 | .ngpio = MAX_NB_GPIO_PER_BANK, | 1697 | .ngpio = MAX_NB_GPIO_PER_BANK, |
1676 | }; | 1698 | }; |
1677 | 1699 | ||
1678 | static void at91_gpio_probe_fixup(void) | ||
1679 | { | ||
1680 | unsigned i; | ||
1681 | struct at91_gpio_chip *at91_gpio, *last = NULL; | ||
1682 | |||
1683 | for (i = 0; i < gpio_banks; i++) { | ||
1684 | at91_gpio = gpio_chips[i]; | ||
1685 | |||
1686 | /* | ||
1687 | * GPIO controller are grouped on some SoC: | ||
1688 | * PIOC, PIOD and PIOE can share the same IRQ line | ||
1689 | */ | ||
1690 | if (last && last->pioc_virq == at91_gpio->pioc_virq) | ||
1691 | last->next = at91_gpio; | ||
1692 | last = at91_gpio; | ||
1693 | } | ||
1694 | } | ||
1695 | |||
1696 | static struct of_device_id at91_gpio_of_match[] = { | 1700 | static struct of_device_id at91_gpio_of_match[] = { |
1697 | { .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, }, | 1701 | { .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, }, |
1698 | { .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops }, | 1702 | { .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops }, |
@@ -1805,8 +1809,6 @@ static int at91_gpio_probe(struct platform_device *pdev) | |||
1805 | gpio_chips[alias_idx] = at91_chip; | 1809 | gpio_chips[alias_idx] = at91_chip; |
1806 | gpio_banks = max(gpio_banks, alias_idx + 1); | 1810 | gpio_banks = max(gpio_banks, alias_idx + 1); |
1807 | 1811 | ||
1808 | at91_gpio_probe_fixup(); | ||
1809 | |||
1810 | ret = at91_gpio_of_irq_setup(pdev, at91_chip); | 1812 | ret = at91_gpio_of_irq_setup(pdev, at91_chip); |
1811 | if (ret) | 1813 | if (ret) |
1812 | goto irq_setup_err; | 1814 | goto irq_setup_err; |
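With banks now allowed to be disabled in the device tree, probe no longer demands that every gpio_chips[] slot be populated; it counts the chips that actually registered and defers only while fewer than the DT-enabled banks are present. A toy model of that test, with plain pointers standing in for struct gpio_chip:

    #include <stdio.h>

    #define MAX_BANKS 5

    int main(void)
    {
        void *gpio_chips[MAX_BANKS] = { "A", "B", NULL, "D", NULL };
        int nactive_banks = 3;   /* banks with status "okay" in the DT */
        int enabled = 0;

        for (int i = 0; i < MAX_BANKS; i++)
            if (gpio_chips[i])
                enabled++;

        if (enabled < nactive_banks)
            puts("-EPROBE_DEFER: not all GPIO chips registered yet");
        else
            printf("proceed: %d chips for %d active banks\n",
                   enabled, nactive_banks);
        return 0;
    }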
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index e225711bb8bc..9c48fb32f660 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -1488,7 +1488,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id) | |||
1488 | } | 1488 | } |
1489 | EXPORT_SYMBOL_GPL(regulator_get_optional); | 1489 | EXPORT_SYMBOL_GPL(regulator_get_optional); |
1490 | 1490 | ||
1491 | /* Locks held by regulator_put() */ | 1491 | /* regulator_list_mutex lock held by regulator_put() */ |
1492 | static void _regulator_put(struct regulator *regulator) | 1492 | static void _regulator_put(struct regulator *regulator) |
1493 | { | 1493 | { |
1494 | struct regulator_dev *rdev; | 1494 | struct regulator_dev *rdev; |
@@ -1503,12 +1503,14 @@ static void _regulator_put(struct regulator *regulator) | |||
1503 | /* remove any sysfs entries */ | 1503 | /* remove any sysfs entries */ |
1504 | if (regulator->dev) | 1504 | if (regulator->dev) |
1505 | sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); | 1505 | sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); |
1506 | mutex_lock(&rdev->mutex); | ||
1506 | kfree(regulator->supply_name); | 1507 | kfree(regulator->supply_name); |
1507 | list_del(®ulator->list); | 1508 | list_del(®ulator->list); |
1508 | kfree(regulator); | 1509 | kfree(regulator); |
1509 | 1510 | ||
1510 | rdev->open_count--; | 1511 | rdev->open_count--; |
1511 | rdev->exclusive = 0; | 1512 | rdev->exclusive = 0; |
1513 | mutex_unlock(&rdev->mutex); | ||
1512 | 1514 | ||
1513 | module_put(rdev->owner); | 1515 | module_put(rdev->owner); |
1514 | } | 1516 | } |
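The _regulator_put() fix widens rdev->mutex to cover the supply-list removal and the open_count/exclusive updates, which are read-modify-write operations that can race between concurrent consumers. A toy demonstration of why the lock matters, with pthreads standing in for the kernel mutex:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static int open_count = 2;

    static void *put_one(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&m);
        open_count--;            /* safe only because the lock is held */
        pthread_mutex_unlock(&m);
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, put_one, NULL);
        pthread_create(&b, NULL, put_one, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("open_count = %d\n", open_count);  /* reliably 0 */
        return 0;
    }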
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 2809ae0d6bcd..ff828117798f 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c | |||
@@ -405,6 +405,40 @@ static struct regulator_ops s2mps14_reg_ops; | |||
405 | .enable_mask = S2MPS14_ENABLE_MASK \ | 405 | .enable_mask = S2MPS14_ENABLE_MASK \ |
406 | } | 406 | } |
407 | 407 | ||
408 | #define regulator_desc_s2mps13_buck7(num, min, step, min_sel) { \ | ||
409 | .name = "BUCK"#num, \ | ||
410 | .id = S2MPS13_BUCK##num, \ | ||
411 | .ops = &s2mps14_reg_ops, \ | ||
412 | .type = REGULATOR_VOLTAGE, \ | ||
413 | .owner = THIS_MODULE, \ | ||
414 | .min_uV = min, \ | ||
415 | .uV_step = step, \ | ||
416 | .linear_min_sel = min_sel, \ | ||
417 | .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \ | ||
418 | .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \ | ||
419 | .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \ | ||
420 | .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \ | ||
421 | .enable_reg = S2MPS13_REG_B1CTRL + (num - 1) * 2, \ | ||
422 | .enable_mask = S2MPS14_ENABLE_MASK \ | ||
423 | } | ||
424 | |||
425 | #define regulator_desc_s2mps13_buck8_10(num, min, step, min_sel) { \ | ||
426 | .name = "BUCK"#num, \ | ||
427 | .id = S2MPS13_BUCK##num, \ | ||
428 | .ops = &s2mps14_reg_ops, \ | ||
429 | .type = REGULATOR_VOLTAGE, \ | ||
430 | .owner = THIS_MODULE, \ | ||
431 | .min_uV = min, \ | ||
432 | .uV_step = step, \ | ||
433 | .linear_min_sel = min_sel, \ | ||
434 | .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \ | ||
435 | .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \ | ||
436 | .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \ | ||
437 | .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \ | ||
438 | .enable_reg = S2MPS13_REG_B1CTRL + (num) * 2 - 1, \ | ||
439 | .enable_mask = S2MPS14_ENABLE_MASK \ | ||
440 | } | ||
441 | |||
408 | static const struct regulator_desc s2mps13_regulators[] = { | 442 | static const struct regulator_desc s2mps13_regulators[] = { |
409 | regulator_desc_s2mps13_ldo(1, MIN_800_MV, STEP_12_5_MV, 0x00), | 443 | regulator_desc_s2mps13_ldo(1, MIN_800_MV, STEP_12_5_MV, 0x00), |
410 | regulator_desc_s2mps13_ldo(2, MIN_1400_MV, STEP_50_MV, 0x0C), | 444 | regulator_desc_s2mps13_ldo(2, MIN_1400_MV, STEP_50_MV, 0x0C), |
@@ -452,10 +486,10 @@ static const struct regulator_desc s2mps13_regulators[] = { | |||
452 | regulator_desc_s2mps13_buck(4, MIN_500_MV, STEP_6_25_MV, 0x10), | 486 | regulator_desc_s2mps13_buck(4, MIN_500_MV, STEP_6_25_MV, 0x10), |
453 | regulator_desc_s2mps13_buck(5, MIN_500_MV, STEP_6_25_MV, 0x10), | 487 | regulator_desc_s2mps13_buck(5, MIN_500_MV, STEP_6_25_MV, 0x10), |
454 | regulator_desc_s2mps13_buck(6, MIN_500_MV, STEP_6_25_MV, 0x10), | 488 | regulator_desc_s2mps13_buck(6, MIN_500_MV, STEP_6_25_MV, 0x10), |
455 | regulator_desc_s2mps13_buck(7, MIN_500_MV, STEP_6_25_MV, 0x10), | 489 | regulator_desc_s2mps13_buck7(7, MIN_500_MV, STEP_6_25_MV, 0x10), |
456 | regulator_desc_s2mps13_buck(8, MIN_1000_MV, STEP_12_5_MV, 0x20), | 490 | regulator_desc_s2mps13_buck8_10(8, MIN_1000_MV, STEP_12_5_MV, 0x20), |
457 | regulator_desc_s2mps13_buck(9, MIN_1000_MV, STEP_12_5_MV, 0x20), | 491 | regulator_desc_s2mps13_buck8_10(9, MIN_1000_MV, STEP_12_5_MV, 0x20), |
458 | regulator_desc_s2mps13_buck(10, MIN_500_MV, STEP_6_25_MV, 0x10), | 492 | regulator_desc_s2mps13_buck8_10(10, MIN_500_MV, STEP_6_25_MV, 0x10), |
459 | }; | 493 | }; |
460 | 494 | ||
461 | static int s2mps14_regulator_enable(struct regulator_dev *rdev) | 495 | static int s2mps14_regulator_enable(struct regulator_dev *rdev) |
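The two new macros exist because BUCK7's enable register sits at a different stride than BUCK8..10: the former keeps S2MPS13_REG_B1CTRL + (num - 1) * 2 while the latter use S2MPS13_REG_B1CTRL + num * 2 - 1. A quick arithmetic check with a placeholder base value, not the real S2MPS13 register map:

    #include <stdio.h>

    int main(void)
    {
        const int B1CTRL = 0x20;  /* placeholder base, for illustration */

        printf("buck7 : enable_reg = 0x%02x\n", B1CTRL + (7 - 1) * 2);
        for (int num = 8; num <= 10; num++)
            printf("buck%-2d: enable_reg = 0x%02x\n",
                   num, B1CTRL + num * 2 - 1);
        return 0;   /* prints 0x2c, then 0x2f 0x31 0x33 */
    }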
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c index b5e7c4670205..89ac1d5083c6 100644 --- a/drivers/rtc/rtc-s5m.c +++ b/drivers/rtc/rtc-s5m.c | |||
@@ -832,6 +832,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume); | |||
832 | static const struct platform_device_id s5m_rtc_id[] = { | 832 | static const struct platform_device_id s5m_rtc_id[] = { |
833 | { "s5m-rtc", S5M8767X }, | 833 | { "s5m-rtc", S5M8767X }, |
834 | { "s2mps14-rtc", S2MPS14X }, | 834 | { "s2mps14-rtc", S2MPS14X }, |
835 | { }, | ||
835 | }; | 836 | }; |
836 | 837 | ||
837 | static struct platform_driver s5m_rtc_driver = { | 838 | static struct platform_driver s5m_rtc_driver = { |
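The one-line rtc-s5m fix adds the empty sentinel entry that platform_device_id walkers rely on to find the end of the table; without it, a lookup can walk past the array. A standalone model of such a walk (the kernel spells the sentinel `{ },`; an explicit zero initializer is used here for strict C):

    #include <stdio.h>
    #include <string.h>

    struct toy_device_id {
        const char *name;
        int driver_data;
    };

    static const struct toy_device_id ids[] = {
        { "s5m-rtc",     1 },
        { "s2mps14-rtc", 2 },
        { NULL,          0 },    /* sentinel: name == NULL stops the walk */
    };

    int main(void)
    {
        for (const struct toy_device_id *id = ids; id->name; id++)
            if (!strcmp(id->name, "s2mps14-rtc"))
                printf("matched, driver_data=%d\n", id->driver_data);
        return 0;
    }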
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index f407e3763432..642c77c76b84 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -1784,6 +1784,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, | |||
1784 | QETH_DBF_TEXT(SETUP, 2, "idxanswr"); | 1784 | QETH_DBF_TEXT(SETUP, 2, "idxanswr"); |
1785 | card = CARD_FROM_CDEV(channel->ccwdev); | 1785 | card = CARD_FROM_CDEV(channel->ccwdev); |
1786 | iob = qeth_get_buffer(channel); | 1786 | iob = qeth_get_buffer(channel); |
1787 | if (!iob) | ||
1788 | return -ENOMEM; | ||
1787 | iob->callback = idx_reply_cb; | 1789 | iob->callback = idx_reply_cb; |
1788 | memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); | 1790 | memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); |
1789 | channel->ccw.count = QETH_BUFSIZE; | 1791 | channel->ccw.count = QETH_BUFSIZE; |
@@ -1834,6 +1836,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, | |||
1834 | QETH_DBF_TEXT(SETUP, 2, "idxactch"); | 1836 | QETH_DBF_TEXT(SETUP, 2, "idxactch"); |
1835 | 1837 | ||
1836 | iob = qeth_get_buffer(channel); | 1838 | iob = qeth_get_buffer(channel); |
1839 | if (!iob) | ||
1840 | return -ENOMEM; | ||
1837 | iob->callback = idx_reply_cb; | 1841 | iob->callback = idx_reply_cb; |
1838 | memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); | 1842 | memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); |
1839 | channel->ccw.count = IDX_ACTIVATE_SIZE; | 1843 | channel->ccw.count = IDX_ACTIVATE_SIZE; |
@@ -2021,10 +2025,36 @@ void qeth_prepare_control_data(struct qeth_card *card, int len, | |||
2021 | } | 2025 | } |
2022 | EXPORT_SYMBOL_GPL(qeth_prepare_control_data); | 2026 | EXPORT_SYMBOL_GPL(qeth_prepare_control_data); |
2023 | 2027 | ||
2028 | /** | ||
2029 | * qeth_send_control_data() - send control command to the card | ||
2030 | * @card: qeth_card structure pointer | ||
2031 | * @len: size of the command buffer | ||
2032 | * @iob: qeth_cmd_buffer pointer | ||
2033 | * @reply_cb: callback function pointer | ||
2034 | * @cb_card: pointer to the qeth_card structure | ||
2035 | * @cb_reply: pointer to the qeth_reply structure | ||
2036 | * @cb_cmd: pointer to the original iob for non-IPA | ||
2037 | * commands, or to the qeth_ipa_cmd structure | ||
2038 | * for the IPA commands. | ||
2039 | * @reply_param: private pointer passed to the callback | ||
2040 | * | ||
2041 | * Returns the value of the `return_code' field of the response | ||
2042 | * block returned by the hardware, or another error indication. | ||
2043 | * A value of zero indicates that the command executed successfully. | ||
2044 | * | ||
2045 | * The callback function is called one or more times, with cb_cmd | ||
2046 | * pointing to the response returned by the hardware. It must return | ||
2047 | * non-zero if more reply blocks are expected, and zero once the | ||
2048 | * last or only reply block has been received. The callback can | ||
2049 | * obtain the value of the reply_param pointer from the 'param' | ||
2050 | * field of struct qeth_reply. | ||
2051 | */ | ||
2052 | |||
2024 | int qeth_send_control_data(struct qeth_card *card, int len, | 2053 | int qeth_send_control_data(struct qeth_card *card, int len, |
2025 | struct qeth_cmd_buffer *iob, | 2054 | struct qeth_cmd_buffer *iob, |
2026 | int (*reply_cb)(struct qeth_card *, struct qeth_reply *, | 2055 | int (*reply_cb)(struct qeth_card *cb_card, |
2027 | unsigned long), | 2056 | struct qeth_reply *cb_reply, |
2057 | unsigned long cb_cmd), | ||
2028 | void *reply_param) | 2058 | void *reply_param) |
2029 | { | 2059 | { |
2030 | int rc; | 2060 | int rc; |
@@ -2914,9 +2944,16 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, | |||
2914 | struct qeth_cmd_buffer *iob; | 2944 | struct qeth_cmd_buffer *iob; |
2915 | struct qeth_ipa_cmd *cmd; | 2945 | struct qeth_ipa_cmd *cmd; |
2916 | 2946 | ||
2917 | iob = qeth_wait_for_buffer(&card->write); | 2947 | iob = qeth_get_buffer(&card->write); |
2918 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 2948 | if (iob) { |
2919 | qeth_fill_ipacmd_header(card, cmd, ipacmd, prot); | 2949 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
2950 | qeth_fill_ipacmd_header(card, cmd, ipacmd, prot); | ||
2951 | } else { | ||
2952 | dev_warn(&card->gdev->dev, | ||
2953 | "The qeth driver ran out of channel command buffers\n"); | ||
2954 | QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers", | ||
2955 | dev_name(&card->gdev->dev)); | ||
2956 | } | ||
2920 | 2957 | ||
2921 | return iob; | 2958 | return iob; |
2922 | } | 2959 | } |
@@ -2932,6 +2969,12 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, | |||
2932 | } | 2969 | } |
2933 | EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); | 2970 | EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); |
2934 | 2971 | ||
2972 | /** | ||
2973 | * qeth_send_ipa_cmd() - send an IPA command | ||
2974 | * | ||
2975 | * See qeth_send_control_data() for an explanation of the arguments. | ||
2976 | */ | ||
2977 | |||
2935 | int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, | 2978 | int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, |
2936 | int (*reply_cb)(struct qeth_card *, struct qeth_reply*, | 2979 | int (*reply_cb)(struct qeth_card *, struct qeth_reply*, |
2937 | unsigned long), | 2980 | unsigned long), |
@@ -2968,6 +3011,8 @@ int qeth_send_startlan(struct qeth_card *card) | |||
2968 | QETH_DBF_TEXT(SETUP, 2, "strtlan"); | 3011 | QETH_DBF_TEXT(SETUP, 2, "strtlan"); |
2969 | 3012 | ||
2970 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); | 3013 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); |
3014 | if (!iob) | ||
3015 | return -ENOMEM; | ||
2971 | rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); | 3016 | rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); |
2972 | return rc; | 3017 | return rc; |
2973 | } | 3018 | } |
@@ -3013,11 +3058,13 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, | |||
3013 | 3058 | ||
3014 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS, | 3059 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS, |
3015 | QETH_PROT_IPV4); | 3060 | QETH_PROT_IPV4); |
3016 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 3061 | if (iob) { |
3017 | cmd->data.setadapterparms.hdr.cmdlength = cmdlen; | 3062 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
3018 | cmd->data.setadapterparms.hdr.command_code = command; | 3063 | cmd->data.setadapterparms.hdr.cmdlength = cmdlen; |
3019 | cmd->data.setadapterparms.hdr.used_total = 1; | 3064 | cmd->data.setadapterparms.hdr.command_code = command; |
3020 | cmd->data.setadapterparms.hdr.seq_no = 1; | 3065 | cmd->data.setadapterparms.hdr.used_total = 1; |
3066 | cmd->data.setadapterparms.hdr.seq_no = 1; | ||
3067 | } | ||
3021 | 3068 | ||
3022 | return iob; | 3069 | return iob; |
3023 | } | 3070 | } |
@@ -3030,6 +3077,8 @@ int qeth_query_setadapterparms(struct qeth_card *card) | |||
3030 | QETH_CARD_TEXT(card, 3, "queryadp"); | 3077 | QETH_CARD_TEXT(card, 3, "queryadp"); |
3031 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, | 3078 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, |
3032 | sizeof(struct qeth_ipacmd_setadpparms)); | 3079 | sizeof(struct qeth_ipacmd_setadpparms)); |
3080 | if (!iob) | ||
3081 | return -ENOMEM; | ||
3033 | rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); | 3082 | rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); |
3034 | return rc; | 3083 | return rc; |
3035 | } | 3084 | } |
@@ -3080,6 +3129,8 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot) | |||
3080 | 3129 | ||
3081 | QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); | 3130 | QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); |
3082 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); | 3131 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); |
3132 | if (!iob) | ||
3133 | return -ENOMEM; | ||
3083 | rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); | 3134 | rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); |
3084 | return rc; | 3135 | return rc; |
3085 | } | 3136 | } |
@@ -3119,6 +3170,8 @@ int qeth_query_switch_attributes(struct qeth_card *card, | |||
3119 | return -ENOMEDIUM; | 3170 | return -ENOMEDIUM; |
3120 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, | 3171 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, |
3121 | sizeof(struct qeth_ipacmd_setadpparms_hdr)); | 3172 | sizeof(struct qeth_ipacmd_setadpparms_hdr)); |
3173 | if (!iob) | ||
3174 | return -ENOMEM; | ||
3122 | return qeth_send_ipa_cmd(card, iob, | 3175 | return qeth_send_ipa_cmd(card, iob, |
3123 | qeth_query_switch_attributes_cb, sw_info); | 3176 | qeth_query_switch_attributes_cb, sw_info); |
3124 | } | 3177 | } |
@@ -3146,6 +3199,8 @@ static int qeth_query_setdiagass(struct qeth_card *card) | |||
3146 | 3199 | ||
3147 | QETH_DBF_TEXT(SETUP, 2, "qdiagass"); | 3200 | QETH_DBF_TEXT(SETUP, 2, "qdiagass"); |
3148 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); | 3201 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); |
3202 | if (!iob) | ||
3203 | return -ENOMEM; | ||
3149 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 3204 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
3150 | cmd->data.diagass.subcmd_len = 16; | 3205 | cmd->data.diagass.subcmd_len = 16; |
3151 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; | 3206 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; |
@@ -3197,6 +3252,8 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) | |||
3197 | 3252 | ||
3198 | QETH_DBF_TEXT(SETUP, 2, "diagtrap"); | 3253 | QETH_DBF_TEXT(SETUP, 2, "diagtrap"); |
3199 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); | 3254 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); |
3255 | if (!iob) | ||
3256 | return -ENOMEM; | ||
3200 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 3257 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
3201 | cmd->data.diagass.subcmd_len = 80; | 3258 | cmd->data.diagass.subcmd_len = 80; |
3202 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP; | 3259 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP; |
@@ -4162,6 +4219,8 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) | |||
4162 | 4219 | ||
4163 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, | 4220 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, |
4164 | sizeof(struct qeth_ipacmd_setadpparms)); | 4221 | sizeof(struct qeth_ipacmd_setadpparms)); |
4222 | if (!iob) | ||
4223 | return; | ||
4165 | cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); | 4224 | cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); |
4166 | cmd->data.setadapterparms.data.mode = mode; | 4225 | cmd->data.setadapterparms.data.mode = mode; |
4167 | qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); | 4226 | qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); |
@@ -4232,6 +4291,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card) | |||
4232 | 4291 | ||
4233 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, | 4292 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, |
4234 | sizeof(struct qeth_ipacmd_setadpparms)); | 4293 | sizeof(struct qeth_ipacmd_setadpparms)); |
4294 | if (!iob) | ||
4295 | return -ENOMEM; | ||
4235 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 4296 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
4236 | cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; | 4297 | cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; |
4237 | cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN; | 4298 | cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN; |
@@ -4345,6 +4406,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, | |||
4345 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, | 4406 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, |
4346 | sizeof(struct qeth_ipacmd_setadpparms_hdr) + | 4407 | sizeof(struct qeth_ipacmd_setadpparms_hdr) + |
4347 | sizeof(struct qeth_set_access_ctrl)); | 4408 | sizeof(struct qeth_set_access_ctrl)); |
4409 | if (!iob) | ||
4410 | return -ENOMEM; | ||
4348 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 4411 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
4349 | access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; | 4412 | access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; |
4350 | access_ctrl_req->subcmd_code = isolation; | 4413 | access_ctrl_req->subcmd_code = isolation; |
@@ -4588,6 +4651,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) | |||
4588 | 4651 | ||
4589 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, | 4652 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, |
4590 | QETH_SNMP_SETADP_CMDLENGTH + req_len); | 4653 | QETH_SNMP_SETADP_CMDLENGTH + req_len); |
4654 | if (!iob) { | ||
4655 | rc = -ENOMEM; | ||
4656 | goto out; | ||
4657 | } | ||
4591 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 4658 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
4592 | memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); | 4659 | memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); |
4593 | rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, | 4660 | rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, |
@@ -4599,7 +4666,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) | |||
4599 | if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) | 4666 | if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) |
4600 | rc = -EFAULT; | 4667 | rc = -EFAULT; |
4601 | } | 4668 | } |
4602 | 4669 | out: | |
4603 | kfree(ureq); | 4670 | kfree(ureq); |
4604 | kfree(qinfo.udata); | 4671 | kfree(qinfo.udata); |
4605 | return rc; | 4672 | return rc; |
@@ -4670,6 +4737,10 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata) | |||
4670 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, | 4737 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, |
4671 | sizeof(struct qeth_ipacmd_setadpparms_hdr) + | 4738 | sizeof(struct qeth_ipacmd_setadpparms_hdr) + |
4672 | sizeof(struct qeth_query_oat)); | 4739 | sizeof(struct qeth_query_oat)); |
4740 | if (!iob) { | ||
4741 | rc = -ENOMEM; | ||
4742 | goto out_free; | ||
4743 | } | ||
4673 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 4744 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
4674 | oat_req = &cmd->data.setadapterparms.data.query_oat; | 4745 | oat_req = &cmd->data.setadapterparms.data.query_oat; |
4675 | oat_req->subcmd_code = oat_data.command; | 4746 | oat_req->subcmd_code = oat_data.command; |
@@ -4735,6 +4806,8 @@ static int qeth_query_card_info(struct qeth_card *card, | |||
4735 | return -EOPNOTSUPP; | 4806 | return -EOPNOTSUPP; |
4736 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, | 4807 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, |
4737 | sizeof(struct qeth_ipacmd_setadpparms_hdr)); | 4808 | sizeof(struct qeth_ipacmd_setadpparms_hdr)); |
4809 | if (!iob) | ||
4810 | return -ENOMEM; | ||
4738 | return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, | 4811 | return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, |
4739 | (void *)carrier_info); | 4812 | (void *)carrier_info); |
4740 | } | 4813 | } |
@@ -5060,11 +5133,23 @@ retriable: | |||
5060 | card->options.adp.supported_funcs = 0; | 5133 | card->options.adp.supported_funcs = 0; |
5061 | card->options.sbp.supported_funcs = 0; | 5134 | card->options.sbp.supported_funcs = 0; |
5062 | card->info.diagass_support = 0; | 5135 | card->info.diagass_support = 0; |
5063 | qeth_query_ipassists(card, QETH_PROT_IPV4); | 5136 | rc = qeth_query_ipassists(card, QETH_PROT_IPV4); |
5064 | if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) | 5137 | if (rc == -ENOMEM) |
5065 | qeth_query_setadapterparms(card); | 5138 | goto out; |
5066 | if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) | 5139 | if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { |
5067 | qeth_query_setdiagass(card); | 5140 | rc = qeth_query_setadapterparms(card); |
5141 | if (rc < 0) { | ||
5142 | QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); | ||
5143 | goto out; | ||
5144 | } | ||
5145 | } | ||
5146 | if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { | ||
5147 | rc = qeth_query_setdiagass(card); | ||
5148 | if (rc < 0) { | ||
5149 | QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); | ||
5150 | goto out; | ||
5151 | } | ||
5152 | } | ||
5068 | return 0; | 5153 | return 0; |
5069 | out: | 5154 | out: |
5070 | dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " | 5155 | dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " |
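Every qeth_core_main.c hunk above is one instance of the same fix: qeth_get_buffer()/qeth_get_ipacmd_buffer() can now return NULL, so each caller must bail out with -ENOMEM (or skip the command) instead of dereferencing the result. The pattern in condensed, self-contained form with stand-in types:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct buf { char data[64]; };

    static struct buf *get_buffer(int exhausted)
    {
        return exhausted ? NULL : calloc(1, sizeof(struct buf));
    }

    static int send_command(int exhausted)
    {
        struct buf *iob = get_buffer(exhausted);

        if (!iob)
            return -ENOMEM;      /* propagate instead of crashing */
        /* ... fill iob->data and submit it ... */
        free(iob);
        return 0;
    }

    int main(void)
    {
        printf("ok=%d\n", send_command(0));
        printf("oom=%d\n", send_command(1));
        return 0;
    }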
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d02cd1a67943..ce87ae72edbd 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -27,10 +27,7 @@ static int qeth_l2_set_offline(struct ccwgroup_device *); | |||
27 | static int qeth_l2_stop(struct net_device *); | 27 | static int qeth_l2_stop(struct net_device *); |
28 | static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); | 28 | static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); |
29 | static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *, | 29 | static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *, |
30 | enum qeth_ipa_cmds, | 30 | enum qeth_ipa_cmds); |
31 | int (*reply_cb) (struct qeth_card *, | ||
32 | struct qeth_reply*, | ||
33 | unsigned long)); | ||
34 | static void qeth_l2_set_multicast_list(struct net_device *); | 31 | static void qeth_l2_set_multicast_list(struct net_device *); |
35 | static int qeth_l2_recover(void *); | 32 | static int qeth_l2_recover(void *); |
36 | static void qeth_bridgeport_query_support(struct qeth_card *card); | 33 | static void qeth_bridgeport_query_support(struct qeth_card *card); |
@@ -130,56 +127,71 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no) | |||
130 | return ndev; | 127 | return ndev; |
131 | } | 128 | } |
132 | 129 | ||
133 | static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, | 130 | static int qeth_setdel_makerc(struct qeth_card *card, int retcode) |
134 | struct qeth_reply *reply, | ||
135 | unsigned long data) | ||
136 | { | 131 | { |
137 | struct qeth_ipa_cmd *cmd; | 132 | int rc; |
138 | __u8 *mac; | ||
139 | 133 | ||
140 | QETH_CARD_TEXT(card, 2, "L2Sgmacb"); | 134 | if (retcode) |
141 | cmd = (struct qeth_ipa_cmd *) data; | 135 | QETH_CARD_TEXT_(card, 2, "err%04x", retcode); |
142 | mac = &cmd->data.setdelmac.mac[0]; | 136 | switch (retcode) { |
143 | /* MAC already registered, needed in couple/uncouple case */ | 137 | case IPA_RC_SUCCESS: |
144 | if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) { | 138 | rc = 0; |
145 | QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n", | 139 | break; |
146 | mac, QETH_CARD_IFNAME(card)); | 140 | case IPA_RC_L2_UNSUPPORTED_CMD: |
147 | cmd->hdr.return_code = 0; | 141 | rc = -ENOSYS; |
142 | break; | ||
143 | case IPA_RC_L2_ADDR_TABLE_FULL: | ||
144 | rc = -ENOSPC; | ||
145 | break; | ||
146 | case IPA_RC_L2_DUP_MAC: | ||
147 | case IPA_RC_L2_DUP_LAYER3_MAC: | ||
148 | rc = -EEXIST; | ||
149 | break; | ||
150 | case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: | ||
151 | case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: | ||
152 | rc = -EPERM; | ||
153 | break; | ||
154 | case IPA_RC_L2_MAC_NOT_FOUND: | ||
155 | rc = -ENOENT; | ||
156 | break; | ||
157 | case -ENOMEM: | ||
158 | rc = -ENOMEM; | ||
159 | break; | ||
160 | default: | ||
161 | rc = -EIO; | ||
162 | break; | ||
148 | } | 163 | } |
149 | if (cmd->hdr.return_code) | 164 | return rc; |
150 | QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n", | ||
151 | mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code); | ||
152 | return 0; | ||
153 | } | 165 | } |
154 | 166 | ||
155 | static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) | 167 | static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) |
156 | { | 168 | { |
157 | QETH_CARD_TEXT(card, 2, "L2Sgmac"); | 169 | int rc; |
158 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC, | ||
159 | qeth_l2_send_setgroupmac_cb); | ||
160 | } | ||
161 | |||
162 | static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card, | ||
163 | struct qeth_reply *reply, | ||
164 | unsigned long data) | ||
165 | { | ||
166 | struct qeth_ipa_cmd *cmd; | ||
167 | __u8 *mac; | ||
168 | 170 | ||
169 | QETH_CARD_TEXT(card, 2, "L2Dgmacb"); | 171 | QETH_CARD_TEXT(card, 2, "L2Sgmac"); |
170 | cmd = (struct qeth_ipa_cmd *) data; | 172 | rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, |
171 | mac = &cmd->data.setdelmac.mac[0]; | 173 | IPA_CMD_SETGMAC)); |
172 | if (cmd->hdr.return_code) | 174 | if (rc == -EEXIST) |
173 | QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n", | 175 | QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n", |
174 | mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code); | 176 | mac, QETH_CARD_IFNAME(card)); |
175 | return 0; | 177 | else if (rc) |
178 | QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n", | ||
179 | mac, QETH_CARD_IFNAME(card), rc); | ||
180 | return rc; | ||
176 | } | 181 | } |
177 | 182 | ||
178 | static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) | 183 | static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) |
179 | { | 184 | { |
185 | int rc; | ||
186 | |||
180 | QETH_CARD_TEXT(card, 2, "L2Dgmac"); | 187 | QETH_CARD_TEXT(card, 2, "L2Dgmac"); |
181 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC, | 188 | rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, |
182 | qeth_l2_send_delgroupmac_cb); | 189 | IPA_CMD_DELGMAC)); |
190 | if (rc) | ||
191 | QETH_DBF_MESSAGE(2, | ||
192 | "Could not delete group MAC %pM on %s: %d\n", | ||
193 | mac, QETH_CARD_IFNAME(card), rc); | ||
194 | return rc; | ||
183 | } | 195 | } |
184 | 196 | ||
185 | static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) | 197 | static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) |
@@ -197,10 +209,11 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) | |||
197 | mc->is_vmac = vmac; | 209 | mc->is_vmac = vmac; |
198 | 210 | ||
199 | if (vmac) { | 211 | if (vmac) { |
200 | rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, | 212 | rc = qeth_setdel_makerc(card, |
201 | NULL); | 213 | qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC)); |
202 | } else { | 214 | } else { |
203 | rc = qeth_l2_send_setgroupmac(card, mac); | 215 | rc = qeth_setdel_makerc(card, |
216 | qeth_l2_send_setgroupmac(card, mac)); | ||
204 | } | 217 | } |
205 | 218 | ||
206 | if (!rc) | 219 | if (!rc) |
@@ -218,7 +231,7 @@ static void qeth_l2_del_all_mc(struct qeth_card *card, int del) | |||
218 | if (del) { | 231 | if (del) { |
219 | if (mc->is_vmac) | 232 | if (mc->is_vmac) |
220 | qeth_l2_send_setdelmac(card, mc->mc_addr, | 233 | qeth_l2_send_setdelmac(card, mc->mc_addr, |
221 | IPA_CMD_DELVMAC, NULL); | 234 | IPA_CMD_DELVMAC); |
222 | else | 235 | else |
223 | qeth_l2_send_delgroupmac(card, mc->mc_addr); | 236 | qeth_l2_send_delgroupmac(card, mc->mc_addr); |
224 | } | 237 | } |
@@ -291,6 +304,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, | |||
291 | 304 | ||
292 | QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd); | 305 | QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd); |
293 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); | 306 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); |
307 | if (!iob) | ||
308 | return -ENOMEM; | ||
294 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 309 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
295 | cmd->data.setdelvlan.vlan_id = i; | 310 | cmd->data.setdelvlan.vlan_id = i; |
296 | return qeth_send_ipa_cmd(card, iob, | 311 | return qeth_send_ipa_cmd(card, iob, |
@@ -313,6 +328,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, | |||
313 | { | 328 | { |
314 | struct qeth_card *card = dev->ml_priv; | 329 | struct qeth_card *card = dev->ml_priv; |
315 | struct qeth_vlan_vid *id; | 330 | struct qeth_vlan_vid *id; |
331 | int rc; | ||
316 | 332 | ||
317 | QETH_CARD_TEXT_(card, 4, "aid:%d", vid); | 333 | QETH_CARD_TEXT_(card, 4, "aid:%d", vid); |
318 | if (!vid) | 334 | if (!vid) |
@@ -328,7 +344,11 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, | |||
328 | id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); | 344 | id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); |
329 | if (id) { | 345 | if (id) { |
330 | id->vid = vid; | 346 | id->vid = vid; |
331 | qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); | 347 | rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); |
348 | if (rc) { | ||
349 | kfree(id); | ||
350 | return rc; | ||
351 | } | ||
332 | spin_lock_bh(&card->vlanlock); | 352 | spin_lock_bh(&card->vlanlock); |
333 | list_add_tail(&id->list, &card->vid_list); | 353 | list_add_tail(&id->list, &card->vid_list); |
334 | spin_unlock_bh(&card->vlanlock); | 354 | spin_unlock_bh(&card->vlanlock); |
@@ -343,6 +363,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, | |||
343 | { | 363 | { |
344 | struct qeth_vlan_vid *id, *tmpid = NULL; | 364 | struct qeth_vlan_vid *id, *tmpid = NULL; |
345 | struct qeth_card *card = dev->ml_priv; | 365 | struct qeth_card *card = dev->ml_priv; |
366 | int rc = 0; | ||
346 | 367 | ||
347 | QETH_CARD_TEXT_(card, 4, "kid:%d", vid); | 368 | QETH_CARD_TEXT_(card, 4, "kid:%d", vid); |
348 | if (card->info.type == QETH_CARD_TYPE_OSM) { | 369 | if (card->info.type == QETH_CARD_TYPE_OSM) { |
@@ -363,11 +384,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, | |||
363 | } | 384 | } |
364 | spin_unlock_bh(&card->vlanlock); | 385 | spin_unlock_bh(&card->vlanlock); |
365 | if (tmpid) { | 386 | if (tmpid) { |
366 | qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); | 387 | rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); |
367 | kfree(tmpid); | 388 | kfree(tmpid); |
368 | } | 389 | } |
369 | qeth_l2_set_multicast_list(card->dev); | 390 | qeth_l2_set_multicast_list(card->dev); |
370 | return 0; | 391 | return rc; |
371 | } | 392 | } |
372 | 393 | ||
373 | static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) | 394 | static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) |
@@ -539,91 +560,62 @@ out: | |||
539 | } | 560 | } |
540 | 561 | ||
541 | static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, | 562 | static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, |
542 | enum qeth_ipa_cmds ipacmd, | 563 | enum qeth_ipa_cmds ipacmd) |
543 | int (*reply_cb) (struct qeth_card *, | ||
544 | struct qeth_reply*, | ||
545 | unsigned long)) | ||
546 | { | 564 | { |
547 | struct qeth_ipa_cmd *cmd; | 565 | struct qeth_ipa_cmd *cmd; |
548 | struct qeth_cmd_buffer *iob; | 566 | struct qeth_cmd_buffer *iob; |
549 | 567 | ||
550 | QETH_CARD_TEXT(card, 2, "L2sdmac"); | 568 | QETH_CARD_TEXT(card, 2, "L2sdmac"); |
551 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); | 569 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); |
570 | if (!iob) | ||
571 | return -ENOMEM; | ||
552 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 572 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
553 | cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; | 573 | cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; |
554 | memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); | 574 | memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); |
555 | return qeth_send_ipa_cmd(card, iob, reply_cb, NULL); | 575 | return qeth_send_ipa_cmd(card, iob, NULL, NULL); |
556 | } | 576 | } |
557 | 577 | ||
558 | static int qeth_l2_send_setmac_cb(struct qeth_card *card, | 578 | static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) |
559 | struct qeth_reply *reply, | ||
560 | unsigned long data) | ||
561 | { | 579 | { |
562 | struct qeth_ipa_cmd *cmd; | 580 | int rc; |
563 | 581 | ||
564 | QETH_CARD_TEXT(card, 2, "L2Smaccb"); | 582 | QETH_CARD_TEXT(card, 2, "L2Setmac"); |
565 | cmd = (struct qeth_ipa_cmd *) data; | 583 | rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, |
566 | if (cmd->hdr.return_code) { | 584 | IPA_CMD_SETVMAC)); |
567 | QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code); | 585 | if (rc == 0) { |
586 | card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; | ||
587 | memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN); | ||
588 | dev_info(&card->gdev->dev, | ||
589 | "MAC address %pM successfully registered on device %s\n", | ||
590 | card->dev->dev_addr, card->dev->name); | ||
591 | } else { | ||
568 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; | 592 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; |
569 | switch (cmd->hdr.return_code) { | 593 | switch (rc) { |
570 | case IPA_RC_L2_DUP_MAC: | 594 | case -EEXIST: |
571 | case IPA_RC_L2_DUP_LAYER3_MAC: | ||
572 | dev_warn(&card->gdev->dev, | 595 | dev_warn(&card->gdev->dev, |
573 | "MAC address %pM already exists\n", | 596 | "MAC address %pM already exists\n", mac); |
574 | cmd->data.setdelmac.mac); | ||
575 | break; | 597 | break; |
576 | case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: | 598 | case -EPERM: |
577 | case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: | ||
578 | dev_warn(&card->gdev->dev, | 599 | dev_warn(&card->gdev->dev, |
579 | "MAC address %pM is not authorized\n", | 600 | "MAC address %pM is not authorized\n", mac); |
580 | cmd->data.setdelmac.mac); | ||
581 | break; | ||
582 | default: | ||
583 | break; | 601 | break; |
584 | } | 602 | } |
585 | } else { | ||
586 | card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; | ||
587 | memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, | ||
588 | OSA_ADDR_LEN); | ||
589 | dev_info(&card->gdev->dev, | ||
590 | "MAC address %pM successfully registered on device %s\n", | ||
591 | card->dev->dev_addr, card->dev->name); | ||
592 | } | ||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) | ||
597 | { | ||
598 | QETH_CARD_TEXT(card, 2, "L2Setmac"); | ||
599 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, | ||
600 | qeth_l2_send_setmac_cb); | ||
601 | } | ||
602 | |||
603 | static int qeth_l2_send_delmac_cb(struct qeth_card *card, | ||
604 | struct qeth_reply *reply, | ||
605 | unsigned long data) | ||
606 | { | ||
607 | struct qeth_ipa_cmd *cmd; | ||
608 | |||
609 | QETH_CARD_TEXT(card, 2, "L2Dmaccb"); | ||
610 | cmd = (struct qeth_ipa_cmd *) data; | ||
611 | if (cmd->hdr.return_code) { | ||
612 | QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); | ||
613 | return 0; | ||
614 | } | 603 | } |
615 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; | 604 | return rc; |
616 | |||
617 | return 0; | ||
618 | } | 605 | } |
619 | 606 | ||
620 | static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) | 607 | static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) |
621 | { | 608 | { |
609 | int rc; | ||
610 | |||
622 | QETH_CARD_TEXT(card, 2, "L2Delmac"); | 611 | QETH_CARD_TEXT(card, 2, "L2Delmac"); |
623 | if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) | 612 | if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) |
624 | return 0; | 613 | return 0; |
625 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC, | 614 | rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, |
626 | qeth_l2_send_delmac_cb); | 615 | IPA_CMD_DELVMAC)); |
616 | if (rc == 0) | ||
617 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; | ||
618 | return rc; | ||
627 | } | 619 | } |
628 | 620 | ||
629 | static int qeth_l2_request_initial_mac(struct qeth_card *card) | 621 | static int qeth_l2_request_initial_mac(struct qeth_card *card) |
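The hunks above replace the setmac/delmac reply callbacks with a synchronous flow: qeth_l2_send_setdelmac() now hands back the raw IPA return code and qeth_setdel_makerc() turns it into an ordinary errno before the caller branches on it. The helper itself is outside this diff; a sketch of the mapping the callers above rely on (-EEXIST for a duplicate MAC, -EPERM for an unauthorized one, -ENOENT for a missing one) might look like this, with the exact case list being an assumption inferred from those callers:

/* Sketch only: qeth_setdel_makerc() is not part of this diff; the
 * cases below are inferred from qeth_l2_send_setmac()/delmac() and
 * qeth_l2_set_mac_address() above. */
static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
{
	if (retcode)
		QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
	switch (retcode) {
	case IPA_RC_SUCCESS:
		return 0;
	case -ENOMEM:
		return -ENOMEM;	/* buffer allocation already failed */
	case IPA_RC_L2_DUP_MAC:
	case IPA_RC_L2_DUP_LAYER3_MAC:
		return -EEXIST;	/* "MAC address ... already exists" */
	case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
	case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
		return -EPERM;	/* "MAC address ... is not authorized" */
	case IPA_RC_L2_MAC_NOT_FOUND:
		return -ENOENT;	/* tolerated by qeth_l2_set_mac_address() */
	default:
		return -EIO;
	}
}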
@@ -651,7 +643,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) | |||
651 | if (rc) { | 643 | if (rc) { |
652 | QETH_DBF_MESSAGE(2, "couldn't get MAC address on " | 644 | QETH_DBF_MESSAGE(2, "couldn't get MAC address on " |
653 | "device %s: x%x\n", CARD_BUS_ID(card), rc); | 645 | "device %s: x%x\n", CARD_BUS_ID(card), rc); |
654 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 646 | QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc); |
655 | return rc; | 647 | return rc; |
656 | } | 648 | } |
657 | QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN); | 649 | QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN); |
@@ -687,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) | |||
687 | return -ERESTARTSYS; | 679 | return -ERESTARTSYS; |
688 | } | 680 | } |
689 | rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); | 681 | rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); |
690 | if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND)) | 682 | if (!rc || (rc == -ENOENT)) |
691 | rc = qeth_l2_send_setmac(card, addr->sa_data); | 683 | rc = qeth_l2_send_setmac(card, addr->sa_data); |
692 | return rc ? -EINVAL : 0; | 684 | return rc ? -EINVAL : 0; |
693 | } | 685 | } |
@@ -996,7 +988,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
996 | recover_flag = card->state; | 988 | recover_flag = card->state; |
997 | rc = qeth_core_hardsetup_card(card); | 989 | rc = qeth_core_hardsetup_card(card); |
998 | if (rc) { | 990 | if (rc) { |
999 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); | 991 | QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); |
1000 | rc = -ENODEV; | 992 | rc = -ENODEV; |
1001 | goto out_remove; | 993 | goto out_remove; |
1002 | } | 994 | } |
@@ -1730,6 +1722,8 @@ static void qeth_bridgeport_query_support(struct qeth_card *card) | |||
1730 | 1722 | ||
1731 | QETH_CARD_TEXT(card, 2, "brqsuppo"); | 1723 | QETH_CARD_TEXT(card, 2, "brqsuppo"); |
1732 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); | 1724 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); |
1725 | if (!iob) | ||
1726 | return; | ||
1733 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1727 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
1734 | cmd->data.sbp.hdr.cmdlength = | 1728 | cmd->data.sbp.hdr.cmdlength = |
1735 | sizeof(struct qeth_ipacmd_sbp_hdr) + | 1729 | sizeof(struct qeth_ipacmd_sbp_hdr) + |
@@ -1805,6 +1799,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, | |||
1805 | if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) | 1799 | if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) |
1806 | return -EOPNOTSUPP; | 1800 | return -EOPNOTSUPP; |
1807 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); | 1801 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); |
1802 | if (!iob) | ||
1803 | return -ENOMEM; | ||
1808 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1804 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
1809 | cmd->data.sbp.hdr.cmdlength = | 1805 | cmd->data.sbp.hdr.cmdlength = |
1810 | sizeof(struct qeth_ipacmd_sbp_hdr); | 1806 | sizeof(struct qeth_ipacmd_sbp_hdr); |
@@ -1817,9 +1813,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, | |||
1817 | if (rc) | 1813 | if (rc) |
1818 | return rc; | 1814 | return rc; |
1819 | rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); | 1815 | rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); |
1820 | if (rc) | 1816 | return rc; |
1821 | return rc; | ||
1822 | return 0; | ||
1823 | } | 1817 | } |
1824 | EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports); | 1818 | EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports); |
1825 | 1819 | ||
@@ -1873,6 +1867,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) | |||
1873 | if (!(card->options.sbp.supported_funcs & setcmd)) | 1867 | if (!(card->options.sbp.supported_funcs & setcmd)) |
1874 | return -EOPNOTSUPP; | 1868 | return -EOPNOTSUPP; |
1875 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); | 1869 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); |
1870 | if (!iob) | ||
1871 | return -ENOMEM; | ||
1876 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1872 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
1877 | cmd->data.sbp.hdr.cmdlength = cmdlength; | 1873 | cmd->data.sbp.hdr.cmdlength = cmdlength; |
1878 | cmd->data.sbp.hdr.command_code = setcmd; | 1874 | cmd->data.sbp.hdr.command_code = setcmd; |
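The remaining hunks in this file, and most of the qeth_l3_main.c hunks that follow, apply a single pattern: qeth_get_ipacmd_buffer() can fail, so every caller now checks the returned buffer before writing the IPA command into it. The shape, as a fragment rather than any one function from the diff:

	/* Pattern: guard the command-buffer allocation before use. */
	iob = qeth_get_ipacmd_buffer(card, ipacmd, prot);
	if (!iob)
		return -ENOMEM;	/* plain "return;" in void helpers such as
				 * qeth_bridgeport_query_support() */
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);

qeth_l3_get_setassparms_cmd() below is the one variation: being a builder that returns the buffer, it skips the header fill on failure and returns NULL, pushing the -ENOMEM decision out to its callers.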
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 625227ad16ee..e2a0ee845399 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -549,6 +549,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card, | |||
549 | QETH_CARD_TEXT(card, 4, "setdelmc"); | 549 | QETH_CARD_TEXT(card, 4, "setdelmc"); |
550 | 550 | ||
551 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); | 551 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); |
552 | if (!iob) | ||
553 | return -ENOMEM; | ||
552 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 554 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
553 | memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN); | 555 | memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN); |
554 | if (addr->proto == QETH_PROT_IPV6) | 556 | if (addr->proto == QETH_PROT_IPV6) |
@@ -588,6 +590,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card, | |||
588 | QETH_CARD_TEXT_(card, 4, "flags%02X", flags); | 590 | QETH_CARD_TEXT_(card, 4, "flags%02X", flags); |
589 | 591 | ||
590 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); | 592 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); |
593 | if (!iob) | ||
594 | return -ENOMEM; | ||
591 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 595 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
592 | if (addr->proto == QETH_PROT_IPV6) { | 596 | if (addr->proto == QETH_PROT_IPV6) { |
593 | memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, | 597 | memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, |
@@ -616,6 +620,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card, | |||
616 | 620 | ||
617 | QETH_CARD_TEXT(card, 4, "setroutg"); | 621 | QETH_CARD_TEXT(card, 4, "setroutg"); |
618 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); | 622 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); |
623 | if (!iob) | ||
624 | return -ENOMEM; | ||
619 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 625 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
620 | cmd->data.setrtg.type = (type); | 626 | cmd->data.setrtg.type = (type); |
621 | rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); | 627 | rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); |
@@ -1049,12 +1055,14 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd( | |||
1049 | QETH_CARD_TEXT(card, 4, "getasscm"); | 1055 | QETH_CARD_TEXT(card, 4, "getasscm"); |
1050 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); | 1056 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); |
1051 | 1057 | ||
1052 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1058 | if (iob) { |
1053 | cmd->data.setassparms.hdr.assist_no = ipa_func; | 1059 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
1054 | cmd->data.setassparms.hdr.length = 8 + len; | 1060 | cmd->data.setassparms.hdr.assist_no = ipa_func; |
1055 | cmd->data.setassparms.hdr.command_code = cmd_code; | 1061 | cmd->data.setassparms.hdr.length = 8 + len; |
1056 | cmd->data.setassparms.hdr.return_code = 0; | 1062 | cmd->data.setassparms.hdr.command_code = cmd_code; |
1057 | cmd->data.setassparms.hdr.seq_no = 0; | 1063 | cmd->data.setassparms.hdr.return_code = 0; |
1064 | cmd->data.setassparms.hdr.seq_no = 0; | ||
1065 | } | ||
1058 | 1066 | ||
1059 | return iob; | 1067 | return iob; |
1060 | } | 1068 | } |
@@ -1090,6 +1098,8 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card, | |||
1090 | QETH_CARD_TEXT(card, 4, "simassp6"); | 1098 | QETH_CARD_TEXT(card, 4, "simassp6"); |
1091 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, | 1099 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, |
1092 | 0, QETH_PROT_IPV6); | 1100 | 0, QETH_PROT_IPV6); |
1101 | if (!iob) | ||
1102 | return -ENOMEM; | ||
1093 | rc = qeth_l3_send_setassparms(card, iob, 0, 0, | 1103 | rc = qeth_l3_send_setassparms(card, iob, 0, 0, |
1094 | qeth_l3_default_setassparms_cb, NULL); | 1104 | qeth_l3_default_setassparms_cb, NULL); |
1095 | return rc; | 1105 | return rc; |
@@ -1108,6 +1118,8 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card, | |||
1108 | length = sizeof(__u32); | 1118 | length = sizeof(__u32); |
1109 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, | 1119 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, |
1110 | length, QETH_PROT_IPV4); | 1120 | length, QETH_PROT_IPV4); |
1121 | if (!iob) | ||
1122 | return -ENOMEM; | ||
1111 | rc = qeth_l3_send_setassparms(card, iob, length, data, | 1123 | rc = qeth_l3_send_setassparms(card, iob, length, data, |
1112 | qeth_l3_default_setassparms_cb, NULL); | 1124 | qeth_l3_default_setassparms_cb, NULL); |
1113 | return rc; | 1125 | return rc; |
@@ -1494,6 +1506,8 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card) | |||
1494 | 1506 | ||
1495 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, | 1507 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, |
1496 | QETH_PROT_IPV6); | 1508 | QETH_PROT_IPV6); |
1509 | if (!iob) | ||
1510 | return -ENOMEM; | ||
1497 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1511 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
1498 | *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = | 1512 | *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = |
1499 | card->info.unique_id; | 1513 | card->info.unique_id; |
@@ -1537,6 +1551,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card) | |||
1537 | 1551 | ||
1538 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, | 1552 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, |
1539 | QETH_PROT_IPV6); | 1553 | QETH_PROT_IPV6); |
1554 | if (!iob) | ||
1555 | return -ENOMEM; | ||
1540 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1556 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
1541 | *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = | 1557 | *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = |
1542 | card->info.unique_id; | 1558 | card->info.unique_id; |
@@ -1611,6 +1627,8 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) | |||
1611 | QETH_DBF_TEXT(SETUP, 2, "diagtrac"); | 1627 | QETH_DBF_TEXT(SETUP, 2, "diagtrac"); |
1612 | 1628 | ||
1613 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); | 1629 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); |
1630 | if (!iob) | ||
1631 | return -ENOMEM; | ||
1614 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1632 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
1615 | cmd->data.diagass.subcmd_len = 16; | 1633 | cmd->data.diagass.subcmd_len = 16; |
1616 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE; | 1634 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE; |
@@ -2442,6 +2460,8 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card, | |||
2442 | IPA_CMD_ASS_ARP_QUERY_INFO, | 2460 | IPA_CMD_ASS_ARP_QUERY_INFO, |
2443 | sizeof(struct qeth_arp_query_data) - sizeof(char), | 2461 | sizeof(struct qeth_arp_query_data) - sizeof(char), |
2444 | prot); | 2462 | prot); |
2463 | if (!iob) | ||
2464 | return -ENOMEM; | ||
2445 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 2465 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
2446 | cmd->data.setassparms.data.query_arp.request_bits = 0x000F; | 2466 | cmd->data.setassparms.data.query_arp.request_bits = 0x000F; |
2447 | cmd->data.setassparms.data.query_arp.reply_bits = 0; | 2467 | cmd->data.setassparms.data.query_arp.reply_bits = 0; |
@@ -2535,6 +2555,8 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card, | |||
2535 | IPA_CMD_ASS_ARP_ADD_ENTRY, | 2555 | IPA_CMD_ASS_ARP_ADD_ENTRY, |
2536 | sizeof(struct qeth_arp_cache_entry), | 2556 | sizeof(struct qeth_arp_cache_entry), |
2537 | QETH_PROT_IPV4); | 2557 | QETH_PROT_IPV4); |
2558 | if (!iob) | ||
2559 | return -ENOMEM; | ||
2538 | rc = qeth_l3_send_setassparms(card, iob, | 2560 | rc = qeth_l3_send_setassparms(card, iob, |
2539 | sizeof(struct qeth_arp_cache_entry), | 2561 | sizeof(struct qeth_arp_cache_entry), |
2540 | (unsigned long) entry, | 2562 | (unsigned long) entry, |
@@ -2574,6 +2596,8 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card, | |||
2574 | IPA_CMD_ASS_ARP_REMOVE_ENTRY, | 2596 | IPA_CMD_ASS_ARP_REMOVE_ENTRY, |
2575 | 12, | 2597 | 12, |
2576 | QETH_PROT_IPV4); | 2598 | QETH_PROT_IPV4); |
2599 | if (!iob) | ||
2600 | return -ENOMEM; | ||
2577 | rc = qeth_l3_send_setassparms(card, iob, | 2601 | rc = qeth_l3_send_setassparms(card, iob, |
2578 | 12, (unsigned long)buf, | 2602 | 12, (unsigned long)buf, |
2579 | qeth_l3_default_setassparms_cb, NULL); | 2603 | qeth_l3_default_setassparms_cb, NULL); |
@@ -3262,6 +3286,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { | |||
3262 | 3286 | ||
3263 | static int qeth_l3_setup_netdev(struct qeth_card *card) | 3287 | static int qeth_l3_setup_netdev(struct qeth_card *card) |
3264 | { | 3288 | { |
3289 | int rc; | ||
3290 | |||
3265 | if (card->info.type == QETH_CARD_TYPE_OSD || | 3291 | if (card->info.type == QETH_CARD_TYPE_OSD || |
3266 | card->info.type == QETH_CARD_TYPE_OSX) { | 3292 | card->info.type == QETH_CARD_TYPE_OSX) { |
3267 | if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || | 3293 | if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || |
@@ -3293,7 +3319,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) | |||
3293 | return -ENODEV; | 3319 | return -ENODEV; |
3294 | card->dev->flags |= IFF_NOARP; | 3320 | card->dev->flags |= IFF_NOARP; |
3295 | card->dev->netdev_ops = &qeth_l3_netdev_ops; | 3321 | card->dev->netdev_ops = &qeth_l3_netdev_ops; |
3296 | qeth_l3_iqd_read_initial_mac(card); | 3322 | rc = qeth_l3_iqd_read_initial_mac(card); |
3323 | if (rc) | ||
3324 | return rc; | ||
3297 | if (card->options.hsuid[0]) | 3325 | if (card->options.hsuid[0]) |
3298 | memcpy(card->dev->perm_addr, card->options.hsuid, 9); | 3326 | memcpy(card->dev->perm_addr, card->options.hsuid, 9); |
3299 | } else | 3327 | } else |
@@ -3360,7 +3388,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3360 | recover_flag = card->state; | 3388 | recover_flag = card->state; |
3361 | rc = qeth_core_hardsetup_card(card); | 3389 | rc = qeth_core_hardsetup_card(card); |
3362 | if (rc) { | 3390 | if (rc) { |
3363 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); | 3391 | QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); |
3364 | rc = -ENODEV; | 3392 | rc = -ENODEV; |
3365 | goto out_remove; | 3393 | goto out_remove; |
3366 | } | 3394 | } |
@@ -3401,7 +3429,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3401 | contin: | 3429 | contin: |
3402 | rc = qeth_l3_setadapter_parms(card); | 3430 | rc = qeth_l3_setadapter_parms(card); |
3403 | if (rc) | 3431 | if (rc) |
3404 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); | 3432 | QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); |
3405 | if (!card->options.sniffer) { | 3433 | if (!card->options.sniffer) { |
3406 | rc = qeth_l3_start_ipassists(card); | 3434 | rc = qeth_l3_start_ipassists(card); |
3407 | if (rc) { | 3435 | if (rc) { |
@@ -3410,10 +3438,10 @@ contin: | |||
3410 | } | 3438 | } |
3411 | rc = qeth_l3_setrouting_v4(card); | 3439 | rc = qeth_l3_setrouting_v4(card); |
3412 | if (rc) | 3440 | if (rc) |
3413 | QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); | 3441 | QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc); |
3414 | rc = qeth_l3_setrouting_v6(card); | 3442 | rc = qeth_l3_setrouting_v6(card); |
3415 | if (rc) | 3443 | if (rc) |
3416 | QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); | 3444 | QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc); |
3417 | } | 3445 | } |
3418 | netif_tx_disable(card->dev); | 3446 | netif_tx_disable(card->dev); |
3419 | 3447 | ||
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index e02885451425..9b3829931f40 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -986,9 +986,9 @@ int scsi_device_get(struct scsi_device *sdev) | |||
986 | return -ENXIO; | 986 | return -ENXIO; |
987 | if (!get_device(&sdev->sdev_gendev)) | 987 | if (!get_device(&sdev->sdev_gendev)) |
988 | return -ENXIO; | 988 | return -ENXIO; |
989 | /* We can fail this if we're doing SCSI operations | 989 | /* We can fail try_module_get if we're doing SCSI operations |
990 | * from module exit (like cache flush) */ | 990 | * from module exit (like cache flush) */ |
991 | try_module_get(sdev->host->hostt->module); | 991 | __module_get(sdev->host->hostt->module); |
992 | 992 | ||
993 | return 0; | 993 | return 0; |
994 | } | 994 | } |
@@ -1004,14 +1004,7 @@ EXPORT_SYMBOL(scsi_device_get); | |||
1004 | */ | 1004 | */ |
1005 | void scsi_device_put(struct scsi_device *sdev) | 1005 | void scsi_device_put(struct scsi_device *sdev) |
1006 | { | 1006 | { |
1007 | #ifdef CONFIG_MODULE_UNLOAD | 1007 | module_put(sdev->host->hostt->module); |
1008 | struct module *module = sdev->host->hostt->module; | ||
1009 | |||
1010 | /* The module refcount will be zero if scsi_device_get() | ||
1011 | * was called from a module removal routine */ | ||
1012 | if (module && module_refcount(module) != 0) | ||
1013 | module_put(module); | ||
1014 | #endif | ||
1015 | put_device(&sdev->sdev_gendev); | 1008 | put_device(&sdev->sdev_gendev); |
1016 | } | 1009 | } |
1017 | EXPORT_SYMBOL(scsi_device_put); | 1010 | EXPORT_SYMBOL(scsi_device_put); |
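The pairing is the point of these two hunks: try_module_get() may legitimately fail while the host driver is unloading, so the old scsi_device_put() had to guess from module_refcount() whether a reference had ever been taken, a racy heuristic that also needed the CONFIG_MODULE_UNLOAD guard. __module_get() takes the reference unconditionally, so module_put() can drop it unconditionally. A userspace toy of the invariant (not kernel code):

#include <assert.h>

static int refs;	/* stands in for the module refcount */

static void toy_get(void) { refs++; }	/* like __module_get(): never fails */
static void toy_put(void) { refs--; }

int main(void)
{
	/* Every get takes a reference, even mid-unload, so every put
	 * may drop one without inspecting the count first. */
	toy_get();	/* scsi_device_get() */
	toy_put();	/* scsi_device_put() */
	assert(refs == 0);
	return 0;
}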
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index 7281316a5ecb..a67d37c7e3c0 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c | |||
@@ -271,7 +271,6 @@ int dw_spi_mid_init(struct dw_spi *dws) | |||
271 | iounmap(clk_reg); | 271 | iounmap(clk_reg); |
272 | 272 | ||
273 | dws->num_cs = 16; | 273 | dws->num_cs = 16; |
274 | dws->fifo_len = 40; /* FIFO has 40 words buffer */ | ||
275 | 274 | ||
276 | #ifdef CONFIG_SPI_DW_MID_DMA | 275 | #ifdef CONFIG_SPI_DW_MID_DMA |
277 | dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); | 276 | dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); |
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index d0d5542efc06..8edcd1b84562 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c | |||
@@ -621,13 +621,13 @@ static void spi_hw_init(struct dw_spi *dws) | |||
621 | if (!dws->fifo_len) { | 621 | if (!dws->fifo_len) { |
622 | u32 fifo; | 622 | u32 fifo; |
623 | 623 | ||
624 | for (fifo = 2; fifo <= 257; fifo++) { | 624 | for (fifo = 2; fifo <= 256; fifo++) { |
625 | dw_writew(dws, DW_SPI_TXFLTR, fifo); | 625 | dw_writew(dws, DW_SPI_TXFLTR, fifo); |
626 | if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) | 626 | if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) |
627 | break; | 627 | break; |
628 | } | 628 | } |
629 | 629 | ||
630 | dws->fifo_len = (fifo == 257) ? 0 : fifo; | 630 | dws->fifo_len = (fifo == 2) ? 0 : fifo - 1; |
631 | dw_writew(dws, DW_SPI_TXFLTR, 0); | 631 | dw_writew(dws, DW_SPI_TXFLTR, 0); |
632 | } | 632 | } |
633 | } | 633 | } |
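The off-by-one is easiest to see with numbers. Assuming TXFLTR simply ignores writes above the largest supported threshold (a model; the diff does not show the hardware behaviour), a 40-entry FIFO accepts 2..40 and rejects 41, so the old "fifo_len = fifo" reported 41 while the new "fifo - 1" reports 40, which is also why the hard-coded "dws->fifo_len = 40" in spi-dw-mid.c above can go. A runnable toy:

#include <stdio.h>

#define DEPTH 40		/* modelled FIFO depth */
static unsigned txfltr;

/* Assumption: out-of-range writes to the threshold register are ignored. */
static void wr(unsigned v) { if (v <= DEPTH) txfltr = v; }
static unsigned rd(void) { return txfltr; }

int main(void)
{
	unsigned fifo;

	for (fifo = 2; fifo <= 256; fifo++) {
		wr(fifo);
		if (fifo != rd())
			break;
	}
	/* fixed formula prints 40; the old "fifo" would print 41 */
	printf("fifo_len = %u\n", (fifo == 2) ? 0 : fifo - 1);
	return 0;
}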
@@ -673,7 +673,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) | |||
673 | if (dws->dma_ops && dws->dma_ops->dma_init) { | 673 | if (dws->dma_ops && dws->dma_ops->dma_init) { |
674 | ret = dws->dma_ops->dma_init(dws); | 674 | ret = dws->dma_ops->dma_init(dws); |
675 | if (ret) { | 675 | if (ret) { |
676 | dev_warn(&master->dev, "DMA init failed\n"); | 676 | dev_warn(dev, "DMA init failed\n"); |
677 | dws->dma_inited = 0; | 677 | dws->dma_inited = 0; |
678 | } | 678 | } |
679 | } | 679 | } |
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 05c623cfb078..23822e7df6c1 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
@@ -546,8 +546,8 @@ static void giveback(struct driver_data *drv_data) | |||
546 | cs_deassert(drv_data); | 546 | cs_deassert(drv_data); |
547 | } | 547 | } |
548 | 548 | ||
549 | spi_finalize_current_message(drv_data->master); | ||
550 | drv_data->cur_chip = NULL; | 549 | drv_data->cur_chip = NULL; |
550 | spi_finalize_current_message(drv_data->master); | ||
551 | } | 551 | } |
552 | 552 | ||
553 | static void reset_sccr1(struct driver_data *drv_data) | 553 | static void reset_sccr1(struct driver_data *drv_data) |
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 96a5fc0878d8..3ab7a21445fc 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c | |||
@@ -82,7 +82,7 @@ struct sh_msiof_spi_priv { | |||
82 | #define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */ | 82 | #define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */ |
83 | #define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ | 83 | #define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ |
84 | #define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ | 84 | #define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ |
85 | #define MDR1_FLD_MASK 0x000000c0 /* Frame Sync Signal Interval (0-3) */ | 85 | #define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */ |
86 | #define MDR1_FLD_SHIFT 2 | 86 | #define MDR1_FLD_SHIFT 2 |
87 | #define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */ | 87 | #define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */ |
88 | /* TMDR1 */ | 88 | /* TMDR1 */ |
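The old mask contradicted the shift beside it: a field documented as 0-3 and extracted as (reg & MDR1_FLD_MASK) >> MDR1_FLD_SHIFT must sit in bits 3:2, i.e. mask 0x0c, not bits 7:6 (0xc0). A quick check:

#include <assert.h>

#define MDR1_FLD_MASK	0x0000000c	/* corrected: bits 3:2 */
#define MDR1_FLD_SHIFT	2

int main(void)
{
	unsigned mdr1 = 3u << MDR1_FLD_SHIFT;	/* program interval = 3 */

	/* round-trips with the corrected mask ... */
	assert(((mdr1 & MDR1_FLD_MASK) >> MDR1_FLD_SHIFT) == 3);
	/* ... while the old 0xc0 mask extracts 0 from the same value */
	assert(((mdr1 & 0x000000c0) >> MDR1_FLD_SHIFT) == 0);
	return 0;
}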
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c index 930f6010203e..65d610abe06e 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_io.c +++ b/drivers/staging/lustre/lustre/llite/vvp_io.c | |||
@@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) | |||
632 | return 0; | 632 | return 0; |
633 | } | 633 | } |
634 | 634 | ||
635 | if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) { | 635 | if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { |
636 | CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); | 636 | CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); |
637 | return -EFAULT; | 637 | return -EFAULT; |
638 | } | 638 | } |
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c index 093535c6217b..120b70d72d79 100644 --- a/drivers/staging/nvec/nvec.c +++ b/drivers/staging/nvec/nvec.c | |||
@@ -85,23 +85,20 @@ static struct nvec_chip *nvec_power_handle; | |||
85 | static const struct mfd_cell nvec_devices[] = { | 85 | static const struct mfd_cell nvec_devices[] = { |
86 | { | 86 | { |
87 | .name = "nvec-kbd", | 87 | .name = "nvec-kbd", |
88 | .id = 1, | ||
89 | }, | 88 | }, |
90 | { | 89 | { |
91 | .name = "nvec-mouse", | 90 | .name = "nvec-mouse", |
92 | .id = 1, | ||
93 | }, | 91 | }, |
94 | { | 92 | { |
95 | .name = "nvec-power", | 93 | .name = "nvec-power", |
96 | .id = 1, | 94 | .id = 0, |
97 | }, | 95 | }, |
98 | { | 96 | { |
99 | .name = "nvec-power", | 97 | .name = "nvec-power", |
100 | .id = 2, | 98 | .id = 1, |
101 | }, | 99 | }, |
102 | { | 100 | { |
103 | .name = "nvec-paz00", | 101 | .name = "nvec-paz00", |
104 | .id = 1, | ||
105 | }, | 102 | }, |
106 | }; | 103 | }; |
107 | 104 | ||
@@ -891,7 +888,7 @@ static int tegra_nvec_probe(struct platform_device *pdev) | |||
891 | nvec_msg_free(nvec, msg); | 888 | nvec_msg_free(nvec, msg); |
892 | } | 889 | } |
893 | 890 | ||
894 | ret = mfd_add_devices(nvec->dev, -1, nvec_devices, | 891 | ret = mfd_add_devices(nvec->dev, 0, nvec_devices, |
895 | ARRAY_SIZE(nvec_devices), NULL, 0, NULL); | 892 | ARRAY_SIZE(nvec_devices), NULL, 0, NULL); |
896 | if (ret) | 893 | if (ret) |
897 | dev_err(nvec->dev, "error adding subdevices\n"); | 894 | dev_err(nvec->dev, "error adding subdevices\n"); |
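Assuming mfd composes each platform device id as id_base + cell->id (how mfd_add_device() behaved at the time; the diff itself does not show it), the visible device names are unchanged: the old base of -1 with per-cell ids 1/1/1/2/1 produces the same ids as the new base of 0 with the ids dropped to their defaults, except 0/1 to keep the two nvec-power instances distinct. Worked out:

#include <stdio.h>

int main(void)
{
	const char *name[] = { "nvec-kbd", "nvec-mouse", "nvec-power",
			       "nvec-power", "nvec-paz00" };
	int old_id[] = { 1, 1, 1, 2, 1 };	/* with base -1 */
	int new_id[] = { 0, 0, 0, 1, 0 };	/* with base  0 */

	for (int i = 0; i < 5; i++)		/* ids come out identical */
		printf("%s: old %d, new %d\n",
		       name[i], -1 + old_id[i], 0 + new_id[i]);
	return 0;
}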
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h index de0c9c9d7091..a6315abe7b7c 100644 --- a/drivers/usb/core/otg_whitelist.h +++ b/drivers/usb/core/otg_whitelist.h | |||
@@ -55,6 +55,11 @@ static int is_targeted(struct usb_device *dev) | |||
55 | le16_to_cpu(dev->descriptor.idProduct) == 0xbadd)) | 55 | le16_to_cpu(dev->descriptor.idProduct) == 0xbadd)) |
56 | return 0; | 56 | return 0; |
57 | 57 | ||
58 | /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */ | ||
59 | if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a && | ||
60 | le16_to_cpu(dev->descriptor.idProduct) == 0x0200)) | ||
61 | return 1; | ||
62 | |||
58 | /* NOTE: can't use usb_match_id() since interface caches | 63 | /* NOTE: can't use usb_match_id() since interface caches |
59 | * aren't set up yet. this is cut/paste from that code. | 64 | * aren't set up yet. this is cut/paste from that code. |
60 | */ | 65 | */ |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 0ffb4ed0a945..41e510ae8c83 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -179,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
179 | { USB_DEVICE(0x0b05, 0x17e0), .driver_info = | 179 | { USB_DEVICE(0x0b05, 0x17e0), .driver_info = |
180 | USB_QUIRK_IGNORE_REMOTE_WAKEUP }, | 180 | USB_QUIRK_IGNORE_REMOTE_WAKEUP }, |
181 | 181 | ||
182 | /* Protocol and OTG Electrical Test Device */ | ||
183 | { USB_DEVICE(0x1a0a, 0x0200), .driver_info = | ||
184 | USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, | ||
185 | |||
182 | { } /* terminating entry must be last */ | 186 | { } /* terminating entry must be last */ |
183 | }; | 187 | }; |
184 | 188 | ||
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index ad43c5bc1ef1..02e3e2d4ea56 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c | |||
@@ -476,13 +476,13 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev) | |||
476 | u32 gintsts; | 476 | u32 gintsts; |
477 | irqreturn_t retval = IRQ_NONE; | 477 | irqreturn_t retval = IRQ_NONE; |
478 | 478 | ||
479 | spin_lock(&hsotg->lock); | ||
480 | |||
479 | if (!dwc2_is_controller_alive(hsotg)) { | 481 | if (!dwc2_is_controller_alive(hsotg)) { |
480 | dev_warn(hsotg->dev, "Controller is dead\n"); | 482 | dev_warn(hsotg->dev, "Controller is dead\n"); |
481 | goto out; | 483 | goto out; |
482 | } | 484 | } |
483 | 485 | ||
484 | spin_lock(&hsotg->lock); | ||
485 | |||
486 | gintsts = dwc2_read_common_intr(hsotg); | 486 | gintsts = dwc2_read_common_intr(hsotg); |
487 | if (gintsts & ~GINTSTS_PRTINT) | 487 | if (gintsts & ~GINTSTS_PRTINT) |
488 | retval = IRQ_HANDLED; | 488 | retval = IRQ_HANDLED; |
@@ -515,8 +515,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev) | |||
515 | } | 515 | } |
516 | } | 516 | } |
517 | 517 | ||
518 | spin_unlock(&hsotg->lock); | ||
519 | out: | 518 | out: |
519 | spin_unlock(&hsotg->lock); | ||
520 | return retval; | 520 | return retval; |
521 | } | 521 | } |
522 | EXPORT_SYMBOL_GPL(dwc2_handle_common_intr); | 522 | EXPORT_SYMBOL_GPL(dwc2_handle_common_intr); |
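Moving the lock above the liveness test makes dwc2_is_controller_alive() and the register reads one critical section, and moving the unlock below the out: label keeps the early-error goto balanced now that it jumps from inside that section. The resulting single-exit shape, sketched outside any particular function:

	spin_lock(&hsotg->lock);
	if (!dwc2_is_controller_alive(hsotg))
		goto out;		/* still under the lock */
	/* ... interrupt dispatch ... */
out:
	spin_unlock(&hsotg->lock);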
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c index ccfdfb24b240..2f9735b35338 100644 --- a/drivers/usb/phy/phy.c +++ b/drivers/usb/phy/phy.c | |||
@@ -34,7 +34,7 @@ static struct usb_phy *__usb_find_phy(struct list_head *list, | |||
34 | return phy; | 34 | return phy; |
35 | } | 35 | } |
36 | 36 | ||
37 | return ERR_PTR(-EPROBE_DEFER); | 37 | return ERR_PTR(-ENODEV); |
38 | } | 38 | } |
39 | 39 | ||
40 | static struct usb_phy *__usb_find_phy_dev(struct device *dev, | 40 | static struct usb_phy *__usb_find_phy_dev(struct device *dev, |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 11c7a9676441..d684b4b8108f 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -507,7 +507,7 @@ UNUSUAL_DEV( 0x04e6, 0x000c, 0x0100, 0x0100, | |||
507 | UNUSUAL_DEV( 0x04e6, 0x000f, 0x0000, 0x9999, | 507 | UNUSUAL_DEV( 0x04e6, 0x000f, 0x0000, 0x9999, |
508 | "SCM Microsystems", | 508 | "SCM Microsystems", |
509 | "eUSB SCSI Adapter (Bus Powered)", | 509 | "eUSB SCSI Adapter (Bus Powered)", |
510 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, | 510 | USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init, |
511 | US_FL_SCM_MULT_TARG ), | 511 | US_FL_SCM_MULT_TARG ), |
512 | 512 | ||
513 | UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200, | 513 | UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200, |
@@ -1995,6 +1995,13 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100, | |||
1995 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 1995 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
1996 | US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), | 1996 | US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), |
1997 | 1997 | ||
1998 | /* Reported by Dmitry Nezhevenko <dion@dion.org.ua> */ | ||
1999 | UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114, | ||
2000 | "JMicron", | ||
2001 | "USB to ATA/ATAPI Bridge", | ||
2002 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
2003 | US_FL_BROKEN_FUA ), | ||
2004 | |||
1998 | /* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI) | 2005 | /* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI) |
1999 | * and Mac USB Dock USB-SCSI */ | 2006 | * and Mac USB Dock USB-SCSI */ |
2000 | UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133, | 2007 | UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133, |
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 6df4357d9ee3..dbc00e56c7f5 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h | |||
@@ -140,3 +140,10 @@ UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999, | |||
140 | "External HDD", | 140 | "External HDD", |
141 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 141 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
142 | US_FL_IGNORE_UAS), | 142 | US_FL_IGNORE_UAS), |
143 | |||
144 | /* Reported-by: Richard Henderson <rth@redhat.com> */ | ||
145 | UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999, | ||
146 | "SimpleTech", | ||
147 | "External HDD", | ||
148 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
149 | US_FL_NO_REPORT_OPCODES), | ||
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 2f0fbc374e87..e427cb7ee12c 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
@@ -3065,6 +3065,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, | |||
3065 | path->search_commit_root = 1; | 3065 | path->search_commit_root = 1; |
3066 | path->skip_locking = 1; | 3066 | path->skip_locking = 1; |
3067 | 3067 | ||
3068 | ppath->search_commit_root = 1; | ||
3069 | ppath->skip_locking = 1; | ||
3068 | /* | 3070 | /* |
3069 | * trigger the readahead for extent tree csum tree and wait for | 3071 | * trigger the readahead for extent tree csum tree and wait for |
3070 | * completion. During readahead, the scrub is officially paused | 3072 | * completion. During readahead, the scrub is officially paused |
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index c8b148bbdc8b..3e193cb36996 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c | |||
@@ -667,7 +667,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change) | |||
667 | 667 | ||
668 | static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, | 668 | static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, |
669 | s64 change, struct gfs2_quota_data *qd, | 669 | s64 change, struct gfs2_quota_data *qd, |
670 | struct fs_disk_quota *fdq) | 670 | struct qc_dqblk *fdq) |
671 | { | 671 | { |
672 | struct inode *inode = &ip->i_inode; | 672 | struct inode *inode = &ip->i_inode; |
673 | struct gfs2_sbd *sdp = GFS2_SB(inode); | 673 | struct gfs2_sbd *sdp = GFS2_SB(inode); |
@@ -697,16 +697,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, | |||
697 | be64_add_cpu(&q.qu_value, change); | 697 | be64_add_cpu(&q.qu_value, change); |
698 | qd->qd_qb.qb_value = q.qu_value; | 698 | qd->qd_qb.qb_value = q.qu_value; |
699 | if (fdq) { | 699 | if (fdq) { |
700 | if (fdq->d_fieldmask & FS_DQ_BSOFT) { | 700 | if (fdq->d_fieldmask & QC_SPC_SOFT) { |
701 | q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift); | 701 | q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift); |
702 | qd->qd_qb.qb_warn = q.qu_warn; | 702 | qd->qd_qb.qb_warn = q.qu_warn; |
703 | } | 703 | } |
704 | if (fdq->d_fieldmask & FS_DQ_BHARD) { | 704 | if (fdq->d_fieldmask & QC_SPC_HARD) { |
705 | q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift); | 705 | q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift); |
706 | qd->qd_qb.qb_limit = q.qu_limit; | 706 | qd->qd_qb.qb_limit = q.qu_limit; |
707 | } | 707 | } |
708 | if (fdq->d_fieldmask & FS_DQ_BCOUNT) { | 708 | if (fdq->d_fieldmask & QC_SPACE) { |
709 | q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift); | 709 | q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift); |
710 | qd->qd_qb.qb_value = q.qu_value; | 710 | qd->qd_qb.qb_value = q.qu_value; |
711 | } | 711 | } |
712 | } | 712 | } |
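Two unit systems meet in this function. The qc_dqblk fields carry plain bytes, while GFS2's on-disk quota values count filesystem blocks, so the conversions switch from sd_fsb2bb_shift (filesystem blocks to the 512-byte basic blocks of fs_disk_quota) to sd_sb.sb_bsize_shift (filesystem blocks to bytes). With an assumed 4 KiB block size, so sb_bsize_shift = 12:

#include <stdio.h>

int main(void)
{
	const unsigned shift = 12;		/* assumed 4 KiB fs blocks */
	unsigned long long qb_limit = 25;	/* on-disk value, in fs blocks */

	unsigned long long bytes = qb_limit << shift;	/* -> qc_dqblk */
	printf("25 blocks -> %llu bytes -> %llu blocks\n",
	       bytes, bytes >> shift);	/* 102400 bytes, back to 25 */
	return 0;
}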
@@ -1497,7 +1497,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb, | |||
1497 | } | 1497 | } |
1498 | 1498 | ||
1499 | static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid, | 1499 | static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid, |
1500 | struct fs_disk_quota *fdq) | 1500 | struct qc_dqblk *fdq) |
1501 | { | 1501 | { |
1502 | struct gfs2_sbd *sdp = sb->s_fs_info; | 1502 | struct gfs2_sbd *sdp = sb->s_fs_info; |
1503 | struct gfs2_quota_lvb *qlvb; | 1503 | struct gfs2_quota_lvb *qlvb; |
@@ -1505,7 +1505,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid, | |||
1505 | struct gfs2_holder q_gh; | 1505 | struct gfs2_holder q_gh; |
1506 | int error; | 1506 | int error; |
1507 | 1507 | ||
1508 | memset(fdq, 0, sizeof(struct fs_disk_quota)); | 1508 | memset(fdq, 0, sizeof(*fdq)); |
1509 | 1509 | ||
1510 | if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) | 1510 | if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) |
1511 | return -ESRCH; /* Crazy XFS error code */ | 1511 | return -ESRCH; /* Crazy XFS error code */ |
@@ -1522,12 +1522,9 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid, | |||
1522 | goto out; | 1522 | goto out; |
1523 | 1523 | ||
1524 | qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; | 1524 | qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; |
1525 | fdq->d_version = FS_DQUOT_VERSION; | 1525 | fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift; |
1526 | fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA; | 1526 | fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift; |
1527 | fdq->d_id = from_kqid_munged(current_user_ns(), qid); | 1527 | fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift; |
1528 | fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift; | ||
1529 | fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift; | ||
1530 | fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift; | ||
1531 | 1528 | ||
1532 | gfs2_glock_dq_uninit(&q_gh); | 1529 | gfs2_glock_dq_uninit(&q_gh); |
1533 | out: | 1530 | out: |
@@ -1536,10 +1533,10 @@ out: | |||
1536 | } | 1533 | } |
1537 | 1534 | ||
1538 | /* GFS2 only supports a subset of the XFS fields */ | 1535 | /* GFS2 only supports a subset of the XFS fields */ |
1539 | #define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT) | 1536 | #define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE) |
1540 | 1537 | ||
1541 | static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid, | 1538 | static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid, |
1542 | struct fs_disk_quota *fdq) | 1539 | struct qc_dqblk *fdq) |
1543 | { | 1540 | { |
1544 | struct gfs2_sbd *sdp = sb->s_fs_info; | 1541 | struct gfs2_sbd *sdp = sb->s_fs_info; |
1545 | struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); | 1542 | struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); |
@@ -1583,17 +1580,17 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid, | |||
1583 | goto out_i; | 1580 | goto out_i; |
1584 | 1581 | ||
1585 | /* If nothing has changed, this is a no-op */ | 1582 | /* If nothing has changed, this is a no-op */ |
1586 | if ((fdq->d_fieldmask & FS_DQ_BSOFT) && | 1583 | if ((fdq->d_fieldmask & QC_SPC_SOFT) && |
1587 | ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn))) | 1584 | ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn))) |
1588 | fdq->d_fieldmask ^= FS_DQ_BSOFT; | 1585 | fdq->d_fieldmask ^= QC_SPC_SOFT; |
1589 | 1586 | ||
1590 | if ((fdq->d_fieldmask & FS_DQ_BHARD) && | 1587 | if ((fdq->d_fieldmask & QC_SPC_HARD) && |
1591 | ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit))) | 1588 | ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit))) |
1592 | fdq->d_fieldmask ^= FS_DQ_BHARD; | 1589 | fdq->d_fieldmask ^= QC_SPC_HARD; |
1593 | 1590 | ||
1594 | if ((fdq->d_fieldmask & FS_DQ_BCOUNT) && | 1591 | if ((fdq->d_fieldmask & QC_SPACE) && |
1595 | ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value))) | 1592 | ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value))) |
1596 | fdq->d_fieldmask ^= FS_DQ_BCOUNT; | 1593 | fdq->d_fieldmask ^= QC_SPACE; |
1597 | 1594 | ||
1598 | if (fdq->d_fieldmask == 0) | 1595 | if (fdq->d_fieldmask == 0) |
1599 | goto out_i; | 1596 | goto out_i; |
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 10bf07280f4a..294692ff83b1 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c | |||
@@ -212,6 +212,12 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq, | |||
212 | */ | 212 | */ |
213 | ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos) | 213 | ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos) |
214 | { | 214 | { |
215 | struct inode *inode = iocb->ki_filp->f_mapping->host; | ||
216 | |||
217 | /* we only support swap file calling nfs_direct_IO */ | ||
218 | if (!IS_SWAPFILE(inode)) | ||
219 | return 0; | ||
220 | |||
215 | #ifndef CONFIG_NFS_SWAP | 221 | #ifndef CONFIG_NFS_SWAP |
216 | dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n", | 222 | dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n", |
217 | iocb->ki_filp, (long long) pos, iter->nr_segs); | 223 | iocb->ki_filp, (long long) pos, iter->nr_segs); |
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 4bffe637ea32..2211f6ba8736 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -352,8 +352,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st | |||
352 | 352 | ||
353 | nfs_attr_check_mountpoint(sb, fattr); | 353 | nfs_attr_check_mountpoint(sb, fattr); |
354 | 354 | ||
355 | if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) && | 355 | if (nfs_attr_use_mounted_on_fileid(fattr)) |
356 | !nfs_attr_use_mounted_on_fileid(fattr)) | 356 | fattr->fileid = fattr->mounted_on_fileid; |
357 | else if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) | ||
357 | goto out_no_inode; | 358 | goto out_no_inode; |
358 | if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0) | 359 | if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0) |
359 | goto out_no_inode; | 360 | goto out_no_inode; |
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index efaa31c70fbe..b6f34bfa6fe8 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h | |||
@@ -31,8 +31,6 @@ static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr) | |||
31 | (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) && | 31 | (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) && |
32 | ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0))) | 32 | ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0))) |
33 | return 0; | 33 | return 0; |
34 | |||
35 | fattr->fileid = fattr->mounted_on_fileid; | ||
36 | return 1; | 34 | return 1; |
37 | } | 35 | } |
38 | 36 | ||
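The helper loses its side effect: nfs_attr_use_mounted_on_fileid() now only answers the question, and nfs_fhget() performs the fattr->fileid assignment itself, which is what lets the caller reorder its checks (prefer the mounted-on fileid when usable, otherwise insist on a real fileid). A simplified toy of the resulting shape (the real predicate also weighs NFS_ATTR_FATTR_MOUNTPOINT and NFS_ATTR_FATTR_V4_REFERRAL):

#include <stdio.h>

struct fattr { unsigned valid; int fileid, mounted_on_fileid; };
#define FATTR_FILEID  0x1u
#define FATTR_MOUNTED 0x2u

/* after the change: a pure predicate, no hidden write to f->fileid */
static int use_mounted_on_fileid(const struct fattr *f)
{
	return (f->valid & FATTR_MOUNTED) != 0;
}

int main(void)
{
	struct fattr f = { .valid = FATTR_MOUNTED, .mounted_on_fileid = 42 };

	if (use_mounted_on_fileid(&f))
		f.fileid = f.mounted_on_fileid;	/* explicit at the call site */
	else if (!(f.valid & FATTR_FILEID))
		return 1;			/* "goto out_no_inode" analogue */

	printf("fileid = %d\n", f.fileid);
	return 0;
}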
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 953daa44a282..706ad10b8186 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c | |||
@@ -639,7 +639,7 @@ int nfs41_walk_client_list(struct nfs_client *new, | |||
639 | prev = pos; | 639 | prev = pos; |
640 | 640 | ||
641 | status = nfs_wait_client_init_complete(pos); | 641 | status = nfs_wait_client_init_complete(pos); |
642 | if (status == 0) { | 642 | if (pos->cl_cons_state == NFS_CS_SESSION_INITING) { |
643 | nfs4_schedule_lease_recovery(pos); | 643 | nfs4_schedule_lease_recovery(pos); |
644 | status = nfs4_wait_clnt_recover(pos); | 644 | status = nfs4_wait_clnt_recover(pos); |
645 | } | 645 | } |
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 8f0acef3d184..69df5b239844 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
@@ -2396,30 +2396,25 @@ static inline qsize_t stoqb(qsize_t space) | |||
2396 | } | 2396 | } |
2397 | 2397 | ||
2398 | /* Generic routine for getting common part of quota structure */ | 2398 | /* Generic routine for getting common part of quota structure */ |
2399 | static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di) | 2399 | static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di) |
2400 | { | 2400 | { |
2401 | struct mem_dqblk *dm = &dquot->dq_dqb; | 2401 | struct mem_dqblk *dm = &dquot->dq_dqb; |
2402 | 2402 | ||
2403 | memset(di, 0, sizeof(*di)); | 2403 | memset(di, 0, sizeof(*di)); |
2404 | di->d_version = FS_DQUOT_VERSION; | ||
2405 | di->d_flags = dquot->dq_id.type == USRQUOTA ? | ||
2406 | FS_USER_QUOTA : FS_GROUP_QUOTA; | ||
2407 | di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id); | ||
2408 | |||
2409 | spin_lock(&dq_data_lock); | 2404 | spin_lock(&dq_data_lock); |
2410 | di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit); | 2405 | di->d_spc_hardlimit = dm->dqb_bhardlimit; |
2411 | di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit); | 2406 | di->d_spc_softlimit = dm->dqb_bsoftlimit; |
2412 | di->d_ino_hardlimit = dm->dqb_ihardlimit; | 2407 | di->d_ino_hardlimit = dm->dqb_ihardlimit; |
2413 | di->d_ino_softlimit = dm->dqb_isoftlimit; | 2408 | di->d_ino_softlimit = dm->dqb_isoftlimit; |
2414 | di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace; | 2409 | di->d_space = dm->dqb_curspace + dm->dqb_rsvspace; |
2415 | di->d_icount = dm->dqb_curinodes; | 2410 | di->d_ino_count = dm->dqb_curinodes; |
2416 | di->d_btimer = dm->dqb_btime; | 2411 | di->d_spc_timer = dm->dqb_btime; |
2417 | di->d_itimer = dm->dqb_itime; | 2412 | di->d_ino_timer = dm->dqb_itime; |
2418 | spin_unlock(&dq_data_lock); | 2413 | spin_unlock(&dq_data_lock); |
2419 | } | 2414 | } |
2420 | 2415 | ||
2421 | int dquot_get_dqblk(struct super_block *sb, struct kqid qid, | 2416 | int dquot_get_dqblk(struct super_block *sb, struct kqid qid, |
2422 | struct fs_disk_quota *di) | 2417 | struct qc_dqblk *di) |
2423 | { | 2418 | { |
2424 | struct dquot *dquot; | 2419 | struct dquot *dquot; |
2425 | 2420 | ||
@@ -2433,70 +2428,70 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid, | |||
2433 | } | 2428 | } |
2434 | EXPORT_SYMBOL(dquot_get_dqblk); | 2429 | EXPORT_SYMBOL(dquot_get_dqblk); |
2435 | 2430 | ||
2436 | #define VFS_FS_DQ_MASK \ | 2431 | #define VFS_QC_MASK \ |
2437 | (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \ | 2432 | (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \ |
2438 | FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \ | 2433 | QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \ |
2439 | FS_DQ_BTIMER | FS_DQ_ITIMER) | 2434 | QC_SPC_TIMER | QC_INO_TIMER) |
2440 | 2435 | ||
2441 | /* Generic routine for setting common part of quota structure */ | 2436 | /* Generic routine for setting common part of quota structure */ |
2442 | static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di) | 2437 | static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di) |
2443 | { | 2438 | { |
2444 | struct mem_dqblk *dm = &dquot->dq_dqb; | 2439 | struct mem_dqblk *dm = &dquot->dq_dqb; |
2445 | int check_blim = 0, check_ilim = 0; | 2440 | int check_blim = 0, check_ilim = 0; |
2446 | struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; | 2441 | struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; |
2447 | 2442 | ||
2448 | if (di->d_fieldmask & ~VFS_FS_DQ_MASK) | 2443 | if (di->d_fieldmask & ~VFS_QC_MASK) |
2449 | return -EINVAL; | 2444 | return -EINVAL; |
2450 | 2445 | ||
2451 | if (((di->d_fieldmask & FS_DQ_BSOFT) && | 2446 | if (((di->d_fieldmask & QC_SPC_SOFT) && |
2452 | (di->d_blk_softlimit > dqi->dqi_maxblimit)) || | 2447 | stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) || |
2453 | ((di->d_fieldmask & FS_DQ_BHARD) && | 2448 | ((di->d_fieldmask & QC_SPC_HARD) && |
2454 | (di->d_blk_hardlimit > dqi->dqi_maxblimit)) || | 2449 | stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) || |
2455 | ((di->d_fieldmask & FS_DQ_ISOFT) && | 2450 | ((di->d_fieldmask & QC_INO_SOFT) && |
2456 | (di->d_ino_softlimit > dqi->dqi_maxilimit)) || | 2451 | (di->d_ino_softlimit > dqi->dqi_maxilimit)) || |
2457 | ((di->d_fieldmask & FS_DQ_IHARD) && | 2452 | ((di->d_fieldmask & QC_INO_HARD) && |
2458 | (di->d_ino_hardlimit > dqi->dqi_maxilimit))) | 2453 | (di->d_ino_hardlimit > dqi->dqi_maxilimit))) |
2459 | return -ERANGE; | 2454 | return -ERANGE; |
2460 | 2455 | ||
2461 | spin_lock(&dq_data_lock); | 2456 | spin_lock(&dq_data_lock); |
2462 | if (di->d_fieldmask & FS_DQ_BCOUNT) { | 2457 | if (di->d_fieldmask & QC_SPACE) { |
2463 | dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace; | 2458 | dm->dqb_curspace = di->d_space - dm->dqb_rsvspace; |
2464 | check_blim = 1; | 2459 | check_blim = 1; |
2465 | set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); | 2460 | set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); |
2466 | } | 2461 | } |
2467 | 2462 | ||
2468 | if (di->d_fieldmask & FS_DQ_BSOFT) | 2463 | if (di->d_fieldmask & QC_SPC_SOFT) |
2469 | dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit); | 2464 | dm->dqb_bsoftlimit = di->d_spc_softlimit; |
2470 | if (di->d_fieldmask & FS_DQ_BHARD) | 2465 | if (di->d_fieldmask & QC_SPC_HARD) |
2471 | dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit); | 2466 | dm->dqb_bhardlimit = di->d_spc_hardlimit; |
2472 | if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) { | 2467 | if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) { |
2473 | check_blim = 1; | 2468 | check_blim = 1; |
2474 | set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); | 2469 | set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); |
2475 | } | 2470 | } |
2476 | 2471 | ||
2477 | if (di->d_fieldmask & FS_DQ_ICOUNT) { | 2472 | if (di->d_fieldmask & QC_INO_COUNT) { |
2478 | dm->dqb_curinodes = di->d_icount; | 2473 | dm->dqb_curinodes = di->d_ino_count; |
2479 | check_ilim = 1; | 2474 | check_ilim = 1; |
2480 | set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); | 2475 | set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); |
2481 | } | 2476 | } |
2482 | 2477 | ||
2483 | if (di->d_fieldmask & FS_DQ_ISOFT) | 2478 | if (di->d_fieldmask & QC_INO_SOFT) |
2484 | dm->dqb_isoftlimit = di->d_ino_softlimit; | 2479 | dm->dqb_isoftlimit = di->d_ino_softlimit; |
2485 | if (di->d_fieldmask & FS_DQ_IHARD) | 2480 | if (di->d_fieldmask & QC_INO_HARD) |
2486 | dm->dqb_ihardlimit = di->d_ino_hardlimit; | 2481 | dm->dqb_ihardlimit = di->d_ino_hardlimit; |
2487 | if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) { | 2482 | if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) { |
2488 | check_ilim = 1; | 2483 | check_ilim = 1; |
2489 | set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); | 2484 | set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); |
2490 | } | 2485 | } |
2491 | 2486 | ||
2492 | if (di->d_fieldmask & FS_DQ_BTIMER) { | 2487 | if (di->d_fieldmask & QC_SPC_TIMER) { |
2493 | dm->dqb_btime = di->d_btimer; | 2488 | dm->dqb_btime = di->d_spc_timer; |
2494 | check_blim = 1; | 2489 | check_blim = 1; |
2495 | set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); | 2490 | set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); |
2496 | } | 2491 | } |
2497 | 2492 | ||
2498 | if (di->d_fieldmask & FS_DQ_ITIMER) { | 2493 | if (di->d_fieldmask & QC_INO_TIMER) { |
2499 | dm->dqb_itime = di->d_itimer; | 2494 | dm->dqb_itime = di->d_ino_timer; |
2500 | check_ilim = 1; | 2495 | check_ilim = 1; |
2501 | set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); | 2496 | set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); |
2502 | } | 2497 | } |
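Because qc_dqblk now expresses space limits in bytes while dqi_maxblimit stays in quota blocks, the range checks convert with stoqb(), and its round-up matters: a byte limit that is not block-aligned must count as the next whole block, or it could slip past the maximum. With the 1 KiB quota block implied by QIF_DQBLKSIZE_BITS = 10:

#include <stdio.h>

#define QIF_DQBLKSIZE_BITS 10
#define QIF_DQBLKSIZE (1ULL << QIF_DQBLKSIZE_BITS)

static unsigned long long stoqb(unsigned long long space)
{
	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}

int main(void)
{
	printf("%llu\n", stoqb(1024));	/* 1: exactly one block */
	printf("%llu\n", stoqb(1536));	/* 2: rounds up, not down to 1 */
	printf("%llu\n", stoqb(2048));	/* 2 */
	return 0;
}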
@@ -2506,7 +2501,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di) | |||
2506 | dm->dqb_curspace < dm->dqb_bsoftlimit) { | 2501 | dm->dqb_curspace < dm->dqb_bsoftlimit) { |
2507 | dm->dqb_btime = 0; | 2502 | dm->dqb_btime = 0; |
2508 | clear_bit(DQ_BLKS_B, &dquot->dq_flags); | 2503 | clear_bit(DQ_BLKS_B, &dquot->dq_flags); |
2509 | } else if (!(di->d_fieldmask & FS_DQ_BTIMER)) | 2504 | } else if (!(di->d_fieldmask & QC_SPC_TIMER)) |
2510 | /* Set grace only if user hasn't provided his own... */ | 2505 | /* Set grace only if user hasn't provided his own... */ |
2511 | dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; | 2506 | dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; |
2512 | } | 2507 | } |
@@ -2515,7 +2510,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di) | |||
2515 | dm->dqb_curinodes < dm->dqb_isoftlimit) { | 2510 | dm->dqb_curinodes < dm->dqb_isoftlimit) { |
2516 | dm->dqb_itime = 0; | 2511 | dm->dqb_itime = 0; |
2517 | clear_bit(DQ_INODES_B, &dquot->dq_flags); | 2512 | clear_bit(DQ_INODES_B, &dquot->dq_flags); |
2518 | } else if (!(di->d_fieldmask & FS_DQ_ITIMER)) | 2513 | } else if (!(di->d_fieldmask & QC_INO_TIMER)) |
2519 | /* Set grace only if user hasn't provided his own... */ | 2514 | /* Set grace only if user hasn't provided his own... */ |
2520 | dm->dqb_itime = get_seconds() + dqi->dqi_igrace; | 2515 | dm->dqb_itime = get_seconds() + dqi->dqi_igrace; |
2521 | } | 2516 | } |
@@ -2531,7 +2526,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di) | |||
2531 | } | 2526 | } |
2532 | 2527 | ||
2533 | int dquot_set_dqblk(struct super_block *sb, struct kqid qid, | 2528 | int dquot_set_dqblk(struct super_block *sb, struct kqid qid, |
2534 | struct fs_disk_quota *di) | 2529 | struct qc_dqblk *di) |
2535 | { | 2530 | { |
2536 | struct dquot *dquot; | 2531 | struct dquot *dquot; |
2537 | int rc; | 2532 | int rc; |
diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 2aa4151f99d2..6f3856328eea 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c | |||
@@ -118,17 +118,27 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr) | |||
118 | return sb->s_qcop->set_info(sb, type, &info); | 118 | return sb->s_qcop->set_info(sb, type, &info); |
119 | } | 119 | } |
120 | 120 | ||
121 | static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src) | 121 | static inline qsize_t qbtos(qsize_t blocks) |
122 | { | ||
123 | return blocks << QIF_DQBLKSIZE_BITS; | ||
124 | } | ||
125 | |||
126 | static inline qsize_t stoqb(qsize_t space) | ||
127 | { | ||
128 | return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS; | ||
129 | } | ||
130 | |||
131 | static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src) | ||
122 | { | 132 | { |
123 | memset(dst, 0, sizeof(*dst)); | 133 | memset(dst, 0, sizeof(*dst)); |
124 | dst->dqb_bhardlimit = src->d_blk_hardlimit; | 134 | dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit); |
125 | dst->dqb_bsoftlimit = src->d_blk_softlimit; | 135 | dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit); |
126 | dst->dqb_curspace = src->d_bcount; | 136 | dst->dqb_curspace = src->d_space; |
127 | dst->dqb_ihardlimit = src->d_ino_hardlimit; | 137 | dst->dqb_ihardlimit = src->d_ino_hardlimit; |
128 | dst->dqb_isoftlimit = src->d_ino_softlimit; | 138 | dst->dqb_isoftlimit = src->d_ino_softlimit; |
129 | dst->dqb_curinodes = src->d_icount; | 139 | dst->dqb_curinodes = src->d_ino_count; |
130 | dst->dqb_btime = src->d_btimer; | 140 | dst->dqb_btime = src->d_spc_timer; |
131 | dst->dqb_itime = src->d_itimer; | 141 | dst->dqb_itime = src->d_ino_timer; |
132 | dst->dqb_valid = QIF_ALL; | 142 | dst->dqb_valid = QIF_ALL; |
133 | } | 143 | } |
134 | 144 | ||
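copy_to_if_dqblk() and copy_from_if_dqblk() now translate between the byte-based qc_dqblk and the 1 KiB-block if_dqblk through the qbtos()/stoqb() helpers added above. The two directions are not symmetric (qbtos() is exact, stoqb() rounds up), so a non-aligned byte limit grows when it round-trips through the userspace structure:

#include <stdio.h>

#define QIF_DQBLKSIZE_BITS 10
#define QIF_DQBLKSIZE (1ULL << QIF_DQBLKSIZE_BITS)

static unsigned long long qbtos(unsigned long long blocks)
{
	return blocks << QIF_DQBLKSIZE_BITS;
}

static unsigned long long stoqb(unsigned long long space)
{
	return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}

int main(void)
{
	unsigned long long limit = 1536;	/* bytes, not block-aligned */

	/* kernel -> userspace -> kernel: 1536 -> 2 blocks -> 2048 bytes */
	printf("%llu -> %llu -> %llu\n",
	       limit, stoqb(limit), qbtos(stoqb(limit)));
	return 0;
}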
@@ -136,7 +146,7 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id, | |||
136 | void __user *addr) | 146 | void __user *addr) |
137 | { | 147 | { |
138 | struct kqid qid; | 148 | struct kqid qid; |
139 | struct fs_disk_quota fdq; | 149 | struct qc_dqblk fdq; |
140 | struct if_dqblk idq; | 150 | struct if_dqblk idq; |
141 | int ret; | 151 | int ret; |
142 | 152 | ||
@@ -154,36 +164,36 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id, | |||
154 | return 0; | 164 | return 0; |
155 | } | 165 | } |
156 | 166 | ||
157 | static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src) | 167 | static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src) |
158 | { | 168 | { |
159 | dst->d_blk_hardlimit = src->dqb_bhardlimit; | 169 | dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit); |
160 | dst->d_blk_softlimit = src->dqb_bsoftlimit; | 170 | dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit); |
161 | dst->d_bcount = src->dqb_curspace; | 171 | dst->d_space = src->dqb_curspace; |
162 | dst->d_ino_hardlimit = src->dqb_ihardlimit; | 172 | dst->d_ino_hardlimit = src->dqb_ihardlimit; |
163 | dst->d_ino_softlimit = src->dqb_isoftlimit; | 173 | dst->d_ino_softlimit = src->dqb_isoftlimit; |
164 | dst->d_icount = src->dqb_curinodes; | 174 | dst->d_ino_count = src->dqb_curinodes; |
165 | dst->d_btimer = src->dqb_btime; | 175 | dst->d_spc_timer = src->dqb_btime; |
166 | dst->d_itimer = src->dqb_itime; | 176 | dst->d_ino_timer = src->dqb_itime; |
167 | 177 | ||
168 | dst->d_fieldmask = 0; | 178 | dst->d_fieldmask = 0; |
169 | if (src->dqb_valid & QIF_BLIMITS) | 179 | if (src->dqb_valid & QIF_BLIMITS) |
170 | dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD; | 180 | dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD; |
171 | if (src->dqb_valid & QIF_SPACE) | 181 | if (src->dqb_valid & QIF_SPACE) |
172 | dst->d_fieldmask |= FS_DQ_BCOUNT; | 182 | dst->d_fieldmask |= QC_SPACE; |
173 | if (src->dqb_valid & QIF_ILIMITS) | 183 | if (src->dqb_valid & QIF_ILIMITS) |
174 | dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD; | 184 | dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD; |
175 | if (src->dqb_valid & QIF_INODES) | 185 | if (src->dqb_valid & QIF_INODES) |
176 | dst->d_fieldmask |= FS_DQ_ICOUNT; | 186 | dst->d_fieldmask |= QC_INO_COUNT; |
177 | if (src->dqb_valid & QIF_BTIME) | 187 | if (src->dqb_valid & QIF_BTIME) |
178 | dst->d_fieldmask |= FS_DQ_BTIMER; | 188 | dst->d_fieldmask |= QC_SPC_TIMER; |
179 | if (src->dqb_valid & QIF_ITIME) | 189 | if (src->dqb_valid & QIF_ITIME) |
180 | dst->d_fieldmask |= FS_DQ_ITIMER; | 190 | dst->d_fieldmask |= QC_INO_TIMER; |
181 | } | 191 | } |
182 | 192 | ||
183 | static int quota_setquota(struct super_block *sb, int type, qid_t id, | 193 | static int quota_setquota(struct super_block *sb, int type, qid_t id, |
184 | void __user *addr) | 194 | void __user *addr) |
185 | { | 195 | { |
186 | struct fs_disk_quota fdq; | 196 | struct qc_dqblk fdq; |
187 | struct if_dqblk idq; | 197 | struct if_dqblk idq; |
188 | struct kqid qid; | 198 | struct kqid qid; |
189 | 199 | ||
@@ -247,10 +257,78 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr) | |||
247 | return ret; | 257 | return ret; |
248 | } | 258 | } |
249 | 259 | ||
260 | /* | ||
261 | * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them | ||
262 | * out of there as xfsprogs rely on definitions being in that header file. So | ||
263 | * just define same functions here for quota purposes. | ||
264 | */ | ||
265 | #define XFS_BB_SHIFT 9 | ||
266 | |||
267 | static inline u64 quota_bbtob(u64 blocks) | ||
268 | { | ||
269 | return blocks << XFS_BB_SHIFT; | ||
270 | } | ||
271 | |||
272 | static inline u64 quota_btobb(u64 bytes) | ||
273 | { | ||
274 | return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT; | ||
275 | } | ||
276 | |||
277 | static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src) | ||
278 | { | ||
279 | dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit); | ||
280 | dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit); | ||
281 | dst->d_ino_hardlimit = src->d_ino_hardlimit; | ||
282 | dst->d_ino_softlimit = src->d_ino_softlimit; | ||
283 | dst->d_space = quota_bbtob(src->d_bcount); | ||
284 | dst->d_ino_count = src->d_icount; | ||
285 | dst->d_ino_timer = src->d_itimer; | ||
286 | dst->d_spc_timer = src->d_btimer; | ||
287 | dst->d_ino_warns = src->d_iwarns; | ||
288 | dst->d_spc_warns = src->d_bwarns; | ||
289 | dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit); | ||
290 | dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit); | ||
291 | dst->d_rt_space = quota_bbtob(src->d_rtbcount); | ||
292 | dst->d_rt_spc_timer = src->d_rtbtimer; | ||
293 | dst->d_rt_spc_warns = src->d_rtbwarns; | ||
294 | dst->d_fieldmask = 0; | ||
295 | if (src->d_fieldmask & FS_DQ_ISOFT) | ||
296 | dst->d_fieldmask |= QC_INO_SOFT; | ||
297 | if (src->d_fieldmask & FS_DQ_IHARD) | ||
298 | dst->d_fieldmask |= QC_INO_HARD; | ||
299 | if (src->d_fieldmask & FS_DQ_BSOFT) | ||
300 | dst->d_fieldmask |= QC_SPC_SOFT; | ||
301 | if (src->d_fieldmask & FS_DQ_BHARD) | ||
302 | dst->d_fieldmask |= QC_SPC_HARD; | ||
303 | if (src->d_fieldmask & FS_DQ_RTBSOFT) | ||
304 | dst->d_fieldmask |= QC_RT_SPC_SOFT; | ||
305 | if (src->d_fieldmask & FS_DQ_RTBHARD) | ||
306 | dst->d_fieldmask |= QC_RT_SPC_HARD; | ||
307 | if (src->d_fieldmask & FS_DQ_BTIMER) | ||
308 | dst->d_fieldmask |= QC_SPC_TIMER; | ||
309 | if (src->d_fieldmask & FS_DQ_ITIMER) | ||
310 | dst->d_fieldmask |= QC_INO_TIMER; | ||
311 | if (src->d_fieldmask & FS_DQ_RTBTIMER) | ||
312 | dst->d_fieldmask |= QC_RT_SPC_TIMER; | ||
313 | if (src->d_fieldmask & FS_DQ_BWARNS) | ||
314 | dst->d_fieldmask |= QC_SPC_WARNS; | ||
315 | if (src->d_fieldmask & FS_DQ_IWARNS) | ||
316 | dst->d_fieldmask |= QC_INO_WARNS; | ||
317 | if (src->d_fieldmask & FS_DQ_RTBWARNS) | ||
318 | dst->d_fieldmask |= QC_RT_SPC_WARNS; | ||
319 | if (src->d_fieldmask & FS_DQ_BCOUNT) | ||
320 | dst->d_fieldmask |= QC_SPACE; | ||
321 | if (src->d_fieldmask & FS_DQ_ICOUNT) | ||
322 | dst->d_fieldmask |= QC_INO_COUNT; | ||
323 | if (src->d_fieldmask & FS_DQ_RTBCOUNT) | ||
324 | dst->d_fieldmask |= QC_RT_SPACE; | ||
325 | } | ||
326 | |||
250 | static int quota_setxquota(struct super_block *sb, int type, qid_t id, | 327 | static int quota_setxquota(struct super_block *sb, int type, qid_t id, |
251 | void __user *addr) | 328 | void __user *addr) |
252 | { | 329 | { |
253 | struct fs_disk_quota fdq; | 330 | struct fs_disk_quota fdq; |
331 | struct qc_dqblk qdq; | ||
254 | struct kqid qid; | 332 | struct kqid qid; |
255 | 333 | ||
256 | if (copy_from_user(&fdq, addr, sizeof(fdq))) | 334 | if (copy_from_user(&fdq, addr, sizeof(fdq))) |
@@ -260,13 +338,44 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id, | |||
260 | qid = make_kqid(current_user_ns(), type, id); | 338 | qid = make_kqid(current_user_ns(), type, id); |
261 | if (!qid_valid(qid)) | 339 | if (!qid_valid(qid)) |
262 | return -EINVAL; | 340 | return -EINVAL; |
263 | return sb->s_qcop->set_dqblk(sb, qid, &fdq); | 341 | copy_from_xfs_dqblk(&qdq, &fdq); |
342 | return sb->s_qcop->set_dqblk(sb, qid, &qdq); | ||
343 | } | ||
344 | |||
345 | static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src, | ||
346 | int type, qid_t id) | ||
347 | { | ||
348 | memset(dst, 0, sizeof(*dst)); | ||
349 | dst->d_version = FS_DQUOT_VERSION; | ||
350 | dst->d_id = id; | ||
351 | if (type == USRQUOTA) | ||
352 | dst->d_flags = FS_USER_QUOTA; | ||
353 | else if (type == PRJQUOTA) | ||
354 | dst->d_flags = FS_PROJ_QUOTA; | ||
355 | else | ||
356 | dst->d_flags = FS_GROUP_QUOTA; | ||
357 | dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit); | ||
358 | dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit); | ||
359 | dst->d_ino_hardlimit = src->d_ino_hardlimit; | ||
360 | dst->d_ino_softlimit = src->d_ino_softlimit; | ||
361 | dst->d_bcount = quota_btobb(src->d_space); | ||
362 | dst->d_icount = src->d_ino_count; | ||
363 | dst->d_itimer = src->d_ino_timer; | ||
364 | dst->d_btimer = src->d_spc_timer; | ||
365 | dst->d_iwarns = src->d_ino_warns; | ||
366 | dst->d_bwarns = src->d_spc_warns; | ||
367 | dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit); | ||
368 | dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit); | ||
369 | dst->d_rtbcount = quota_btobb(src->d_rt_space); | ||
370 | dst->d_rtbtimer = src->d_rt_spc_timer; | ||
371 | dst->d_rtbwarns = src->d_rt_spc_warns; | ||
264 | } | 372 | } |
265 | 373 | ||
266 | static int quota_getxquota(struct super_block *sb, int type, qid_t id, | 374 | static int quota_getxquota(struct super_block *sb, int type, qid_t id, |
267 | void __user *addr) | 375 | void __user *addr) |
268 | { | 376 | { |
269 | struct fs_disk_quota fdq; | 377 | struct fs_disk_quota fdq; |
378 | struct qc_dqblk qdq; | ||
270 | struct kqid qid; | 379 | struct kqid qid; |
271 | int ret; | 380 | int ret; |
272 | 381 | ||
@@ -275,8 +384,11 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id, | |||
275 | qid = make_kqid(current_user_ns(), type, id); | 384 | qid = make_kqid(current_user_ns(), type, id); |
276 | if (!qid_valid(qid)) | 385 | if (!qid_valid(qid)) |
277 | return -EINVAL; | 386 | return -EINVAL; |
278 | ret = sb->s_qcop->get_dqblk(sb, qid, &fdq); | 387 | ret = sb->s_qcop->get_dqblk(sb, qid, &qdq); |
279 | if (!ret && copy_to_user(addr, &fdq, sizeof(fdq))) | 388 | if (ret) |
389 | return ret; | ||
390 | copy_to_xfs_dqblk(&fdq, &qdq, type, id); | ||
391 | if (copy_to_user(addr, &fdq, sizeof(fdq))) | ||
280 | return -EFAULT; | 392 | return -EFAULT; |
281 | return ret; | 393 | return ret; |
282 | } | 394 | } |
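Note on the conversion helpers above: qbtos()/stoqb() translate between 1 KiB quota blocks (QIF_DQBLKSIZE_BITS == 10) and bytes, while quota_bbtob()/quota_btobb() translate between 512-byte basic blocks (XFS_BB_SHIFT == 9) and bytes. Both byte-to-block directions round up, so a partially used block still counts against a limit. A standalone sketch of the same arithmetic (the shift constants are copied from the hunks above; the rest is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define QIF_DQBLKSIZE_BITS 10   /* 1 KiB quota blocks, as in uapi quota.h */
    #define XFS_BB_SHIFT       9    /* 512-byte basic blocks, as in the patch */

    static uint64_t qbtos(uint64_t blocks)  /* quota blocks -> bytes */
    {
            return blocks << QIF_DQBLKSIZE_BITS;
    }

    static uint64_t stoqb(uint64_t space)   /* bytes -> quota blocks, rounds up */
    {
            return (space + (1ULL << QIF_DQBLKSIZE_BITS) - 1) >> QIF_DQBLKSIZE_BITS;
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)qbtos(3));      /* 3072 bytes */
            printf("%llu\n", (unsigned long long)stoqb(3073));   /* rounds up to 4 */
            return 0;
    }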
diff --git a/fs/udf/file.c b/fs/udf/file.c index bb15771b92ae..08f3555fbeac 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c | |||
@@ -224,7 +224,7 @@ out: | |||
224 | static int udf_release_file(struct inode *inode, struct file *filp) | 224 | static int udf_release_file(struct inode *inode, struct file *filp) |
225 | { | 225 | { |
226 | if (filp->f_mode & FMODE_WRITE && | 226 | if (filp->f_mode & FMODE_WRITE && |
227 | atomic_read(&inode->i_writecount) > 1) { | 227 | atomic_read(&inode->i_writecount) == 1) { |
228 | /* | 228 | /* |
229 | * Grab i_mutex to avoid races with writes changing i_size | 229 | * Grab i_mutex to avoid races with writes changing i_size |
230 | * while we are running. | 230 | * while we are running. |
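The one-character udf fix above corrects an inverted test: ->release() runs while the struct file being closed still contributes to i_writecount, so a count of exactly 1 identifies the last writer. A restatement of the fixed condition with the reasoning spelled out (comments added for illustration, not from the patch):

    /*
     * Inside ->release() the closing file still holds its own
     * i_writecount reference, so "== 1" means the last writer is
     * going away -- the only close where this cleanup is both
     * safe and final. The old "> 1" ran it on every close except
     * the last one.
     */
    if (filp->f_mode & FMODE_WRITE &&
        atomic_read(&inode->i_writecount) == 1) {
            /* last-writer cleanup, done under i_mutex */
    }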
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index 3a07a937e232..41f6c0b9d51c 100644 --- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h | |||
@@ -166,9 +166,9 @@ extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint); | |||
166 | /* quota ops */ | 166 | /* quota ops */ |
167 | extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint); | 167 | extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint); |
168 | extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t, | 168 | extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t, |
169 | uint, struct fs_disk_quota *); | 169 | uint, struct qc_dqblk *); |
170 | extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint, | 170 | extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint, |
171 | struct fs_disk_quota *); | 171 | struct qc_dqblk *); |
172 | extern int xfs_qm_scall_getqstat(struct xfs_mount *, | 172 | extern int xfs_qm_scall_getqstat(struct xfs_mount *, |
173 | struct fs_quota_stat *); | 173 | struct fs_quota_stat *); |
174 | extern int xfs_qm_scall_getqstatv(struct xfs_mount *, | 174 | extern int xfs_qm_scall_getqstatv(struct xfs_mount *, |
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c index 74fca68e43b6..cb6168ec92c9 100644 --- a/fs/xfs/xfs_qm_syscalls.c +++ b/fs/xfs/xfs_qm_syscalls.c | |||
@@ -39,7 +39,6 @@ STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint); | |||
39 | STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, | 39 | STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, |
40 | uint); | 40 | uint); |
41 | STATIC uint xfs_qm_export_flags(uint); | 41 | STATIC uint xfs_qm_export_flags(uint); |
42 | STATIC uint xfs_qm_export_qtype_flags(uint); | ||
43 | 42 | ||
44 | /* | 43 | /* |
45 | * Turn off quota accounting and/or enforcement for all udquots and/or | 44 | * Turn off quota accounting and/or enforcement for all udquots and/or |
@@ -573,8 +572,8 @@ xfs_qm_scall_getqstatv( | |||
573 | return 0; | 572 | return 0; |
574 | } | 573 | } |
575 | 574 | ||
576 | #define XFS_DQ_MASK \ | 575 | #define XFS_QC_MASK \ |
577 | (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK) | 576 | (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK) |
578 | 577 | ||
579 | /* | 578 | /* |
580 | * Adjust quota limits, and start/stop timers accordingly. | 579 | * Adjust quota limits, and start/stop timers accordingly. |
@@ -584,7 +583,7 @@ xfs_qm_scall_setqlim( | |||
584 | struct xfs_mount *mp, | 583 | struct xfs_mount *mp, |
585 | xfs_dqid_t id, | 584 | xfs_dqid_t id, |
586 | uint type, | 585 | uint type, |
587 | fs_disk_quota_t *newlim) | 586 | struct qc_dqblk *newlim) |
588 | { | 587 | { |
589 | struct xfs_quotainfo *q = mp->m_quotainfo; | 588 | struct xfs_quotainfo *q = mp->m_quotainfo; |
590 | struct xfs_disk_dquot *ddq; | 589 | struct xfs_disk_dquot *ddq; |
@@ -593,9 +592,9 @@ xfs_qm_scall_setqlim( | |||
593 | int error; | 592 | int error; |
594 | xfs_qcnt_t hard, soft; | 593 | xfs_qcnt_t hard, soft; |
595 | 594 | ||
596 | if (newlim->d_fieldmask & ~XFS_DQ_MASK) | 595 | if (newlim->d_fieldmask & ~XFS_QC_MASK) |
597 | return -EINVAL; | 596 | return -EINVAL; |
598 | if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) | 597 | if ((newlim->d_fieldmask & XFS_QC_MASK) == 0) |
599 | return 0; | 598 | return 0; |
600 | 599 | ||
601 | /* | 600 | /* |
@@ -633,11 +632,11 @@ xfs_qm_scall_setqlim( | |||
633 | /* | 632 | /* |
634 | * Make sure that hardlimits are >= soft limits before changing. | 633 | * Make sure that hardlimits are >= soft limits before changing. |
635 | */ | 634 | */ |
636 | hard = (newlim->d_fieldmask & FS_DQ_BHARD) ? | 635 | hard = (newlim->d_fieldmask & QC_SPC_HARD) ? |
637 | (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) : | 636 | (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) : |
638 | be64_to_cpu(ddq->d_blk_hardlimit); | 637 | be64_to_cpu(ddq->d_blk_hardlimit); |
639 | soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ? | 638 | soft = (newlim->d_fieldmask & QC_SPC_SOFT) ? |
640 | (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) : | 639 | (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) : |
641 | be64_to_cpu(ddq->d_blk_softlimit); | 640 | be64_to_cpu(ddq->d_blk_softlimit); |
642 | if (hard == 0 || hard >= soft) { | 641 | if (hard == 0 || hard >= soft) { |
643 | ddq->d_blk_hardlimit = cpu_to_be64(hard); | 642 | ddq->d_blk_hardlimit = cpu_to_be64(hard); |
@@ -650,11 +649,11 @@ xfs_qm_scall_setqlim( | |||
650 | } else { | 649 | } else { |
651 | xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft); | 650 | xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft); |
652 | } | 651 | } |
653 | hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ? | 652 | hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ? |
654 | (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) : | 653 | (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) : |
655 | be64_to_cpu(ddq->d_rtb_hardlimit); | 654 | be64_to_cpu(ddq->d_rtb_hardlimit); |
656 | soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ? | 655 | soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ? |
657 | (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) : | 656 | (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) : |
658 | be64_to_cpu(ddq->d_rtb_softlimit); | 657 | be64_to_cpu(ddq->d_rtb_softlimit); |
659 | if (hard == 0 || hard >= soft) { | 658 | if (hard == 0 || hard >= soft) { |
660 | ddq->d_rtb_hardlimit = cpu_to_be64(hard); | 659 | ddq->d_rtb_hardlimit = cpu_to_be64(hard); |
@@ -667,10 +666,10 @@ xfs_qm_scall_setqlim( | |||
667 | xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft); | 666 | xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft); |
668 | } | 667 | } |
669 | 668 | ||
670 | hard = (newlim->d_fieldmask & FS_DQ_IHARD) ? | 669 | hard = (newlim->d_fieldmask & QC_INO_HARD) ? |
671 | (xfs_qcnt_t) newlim->d_ino_hardlimit : | 670 | (xfs_qcnt_t) newlim->d_ino_hardlimit : |
672 | be64_to_cpu(ddq->d_ino_hardlimit); | 671 | be64_to_cpu(ddq->d_ino_hardlimit); |
673 | soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ? | 672 | soft = (newlim->d_fieldmask & QC_INO_SOFT) ? |
674 | (xfs_qcnt_t) newlim->d_ino_softlimit : | 673 | (xfs_qcnt_t) newlim->d_ino_softlimit : |
675 | be64_to_cpu(ddq->d_ino_softlimit); | 674 | be64_to_cpu(ddq->d_ino_softlimit); |
676 | if (hard == 0 || hard >= soft) { | 675 | if (hard == 0 || hard >= soft) { |
@@ -687,12 +686,12 @@ xfs_qm_scall_setqlim( | |||
687 | /* | 686 | /* |
688 | * Update warnings counter(s) if requested | 687 | * Update warnings counter(s) if requested |
689 | */ | 688 | */ |
690 | if (newlim->d_fieldmask & FS_DQ_BWARNS) | 689 | if (newlim->d_fieldmask & QC_SPC_WARNS) |
691 | ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns); | 690 | ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns); |
692 | if (newlim->d_fieldmask & FS_DQ_IWARNS) | 691 | if (newlim->d_fieldmask & QC_INO_WARNS) |
693 | ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns); | 692 | ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns); |
694 | if (newlim->d_fieldmask & FS_DQ_RTBWARNS) | 693 | if (newlim->d_fieldmask & QC_RT_SPC_WARNS) |
695 | ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns); | 694 | ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns); |
696 | 695 | ||
697 | if (id == 0) { | 696 | if (id == 0) { |
698 | /* | 697 | /* |
@@ -702,24 +701,24 @@ xfs_qm_scall_setqlim( | |||
702 | * soft and hard limit values (already done, above), and | 701 | * soft and hard limit values (already done, above), and |
703 | * for warnings. | 702 | * for warnings. |
704 | */ | 703 | */ |
705 | if (newlim->d_fieldmask & FS_DQ_BTIMER) { | 704 | if (newlim->d_fieldmask & QC_SPC_TIMER) { |
706 | q->qi_btimelimit = newlim->d_btimer; | 705 | q->qi_btimelimit = newlim->d_spc_timer; |
707 | ddq->d_btimer = cpu_to_be32(newlim->d_btimer); | 706 | ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer); |
708 | } | 707 | } |
709 | if (newlim->d_fieldmask & FS_DQ_ITIMER) { | 708 | if (newlim->d_fieldmask & QC_INO_TIMER) { |
710 | q->qi_itimelimit = newlim->d_itimer; | 709 | q->qi_itimelimit = newlim->d_ino_timer; |
711 | ddq->d_itimer = cpu_to_be32(newlim->d_itimer); | 710 | ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer); |
712 | } | 711 | } |
713 | if (newlim->d_fieldmask & FS_DQ_RTBTIMER) { | 712 | if (newlim->d_fieldmask & QC_RT_SPC_TIMER) { |
714 | q->qi_rtbtimelimit = newlim->d_rtbtimer; | 713 | q->qi_rtbtimelimit = newlim->d_rt_spc_timer; |
715 | ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer); | 714 | ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer); |
716 | } | 715 | } |
717 | if (newlim->d_fieldmask & FS_DQ_BWARNS) | 716 | if (newlim->d_fieldmask & QC_SPC_WARNS) |
718 | q->qi_bwarnlimit = newlim->d_bwarns; | 717 | q->qi_bwarnlimit = newlim->d_spc_warns; |
719 | if (newlim->d_fieldmask & FS_DQ_IWARNS) | 718 | if (newlim->d_fieldmask & QC_INO_WARNS) |
720 | q->qi_iwarnlimit = newlim->d_iwarns; | 719 | q->qi_iwarnlimit = newlim->d_ino_warns; |
721 | if (newlim->d_fieldmask & FS_DQ_RTBWARNS) | 720 | if (newlim->d_fieldmask & QC_RT_SPC_WARNS) |
722 | q->qi_rtbwarnlimit = newlim->d_rtbwarns; | 721 | q->qi_rtbwarnlimit = newlim->d_rt_spc_warns; |
723 | } else { | 722 | } else { |
724 | /* | 723 | /* |
725 | * If the user is now over quota, start the timelimit. | 724 | * If the user is now over quota, start the timelimit. |
@@ -824,7 +823,7 @@ xfs_qm_scall_getquota( | |||
824 | struct xfs_mount *mp, | 823 | struct xfs_mount *mp, |
825 | xfs_dqid_t id, | 824 | xfs_dqid_t id, |
826 | uint type, | 825 | uint type, |
827 | struct fs_disk_quota *dst) | 826 | struct qc_dqblk *dst) |
828 | { | 827 | { |
829 | struct xfs_dquot *dqp; | 828 | struct xfs_dquot *dqp; |
830 | int error; | 829 | int error; |
@@ -848,28 +847,25 @@ xfs_qm_scall_getquota( | |||
848 | } | 847 | } |
849 | 848 | ||
850 | memset(dst, 0, sizeof(*dst)); | 849 | memset(dst, 0, sizeof(*dst)); |
851 | dst->d_version = FS_DQUOT_VERSION; | 850 | dst->d_spc_hardlimit = |
852 | dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags); | 851 | XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); |
853 | dst->d_id = be32_to_cpu(dqp->q_core.d_id); | 852 | dst->d_spc_softlimit = |
854 | dst->d_blk_hardlimit = | 853 | XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); |
855 | XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); | ||
856 | dst->d_blk_softlimit = | ||
857 | XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); | ||
858 | dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); | 854 | dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); |
859 | dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); | 855 | dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); |
860 | dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount); | 856 | dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount); |
861 | dst->d_icount = dqp->q_res_icount; | 857 | dst->d_ino_count = dqp->q_res_icount; |
862 | dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer); | 858 | dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer); |
863 | dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer); | 859 | dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer); |
864 | dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns); | 860 | dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns); |
865 | dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns); | 861 | dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns); |
866 | dst->d_rtb_hardlimit = | 862 | dst->d_rt_spc_hardlimit = |
867 | XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); | 863 | XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); |
868 | dst->d_rtb_softlimit = | 864 | dst->d_rt_spc_softlimit = |
869 | XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); | 865 | XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); |
870 | dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount); | 866 | dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount); |
871 | dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer); | 867 | dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer); |
872 | dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns); | 868 | dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns); |
873 | 869 | ||
874 | /* | 870 | /* |
875 | * Internally, we don't reset all the timers when quota enforcement | 871 | * Internally, we don't reset all the timers when quota enforcement |
@@ -882,23 +878,23 @@ xfs_qm_scall_getquota( | |||
882 | dqp->q_core.d_flags == XFS_DQ_GROUP) || | 878 | dqp->q_core.d_flags == XFS_DQ_GROUP) || |
883 | (!XFS_IS_PQUOTA_ENFORCED(mp) && | 879 | (!XFS_IS_PQUOTA_ENFORCED(mp) && |
884 | dqp->q_core.d_flags == XFS_DQ_PROJ)) { | 880 | dqp->q_core.d_flags == XFS_DQ_PROJ)) { |
885 | dst->d_btimer = 0; | 881 | dst->d_spc_timer = 0; |
886 | dst->d_itimer = 0; | 882 | dst->d_ino_timer = 0; |
887 | dst->d_rtbtimer = 0; | 883 | dst->d_rt_spc_timer = 0; |
888 | } | 884 | } |
889 | 885 | ||
890 | #ifdef DEBUG | 886 | #ifdef DEBUG |
891 | if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) || | 887 | if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) || |
892 | (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) || | 888 | (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) || |
893 | (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) && | 889 | (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) && |
894 | dst->d_id != 0) { | 890 | id != 0) { |
895 | if ((dst->d_bcount > dst->d_blk_softlimit) && | 891 | if ((dst->d_space > dst->d_spc_softlimit) && |
896 | (dst->d_blk_softlimit > 0)) { | 892 | (dst->d_spc_softlimit > 0)) { |
897 | ASSERT(dst->d_btimer != 0); | 893 | ASSERT(dst->d_spc_timer != 0); |
898 | } | 894 | } |
899 | if ((dst->d_icount > dst->d_ino_softlimit) && | 895 | if ((dst->d_ino_count > dst->d_ino_softlimit) && |
900 | (dst->d_ino_softlimit > 0)) { | 896 | (dst->d_ino_softlimit > 0)) { |
901 | ASSERT(dst->d_itimer != 0); | 897 | ASSERT(dst->d_ino_timer != 0); |
902 | } | 898 | } |
903 | } | 899 | } |
904 | #endif | 900 | #endif |
@@ -908,26 +904,6 @@ out_put: | |||
908 | } | 904 | } |
909 | 905 | ||
910 | STATIC uint | 906 | STATIC uint |
911 | xfs_qm_export_qtype_flags( | ||
912 | uint flags) | ||
913 | { | ||
914 | /* | ||
915 | * Can't be more than one, or none. | ||
916 | */ | ||
917 | ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) != | ||
918 | (FS_PROJ_QUOTA | FS_USER_QUOTA)); | ||
919 | ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) != | ||
920 | (FS_PROJ_QUOTA | FS_GROUP_QUOTA)); | ||
921 | ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) != | ||
922 | (FS_USER_QUOTA | FS_GROUP_QUOTA)); | ||
923 | ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0); | ||
924 | |||
925 | return (flags & XFS_DQ_USER) ? | ||
926 | FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ? | ||
927 | FS_PROJ_QUOTA : FS_GROUP_QUOTA; | ||
928 | } | ||
929 | |||
930 | STATIC uint | ||
931 | xfs_qm_export_flags( | 907 | xfs_qm_export_flags( |
932 | uint flags) | 908 | uint flags) |
933 | { | 909 | { |
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c index 7542bbeca6a1..801a84c1cdc3 100644 --- a/fs/xfs/xfs_quotaops.c +++ b/fs/xfs/xfs_quotaops.c | |||
@@ -131,7 +131,7 @@ STATIC int | |||
131 | xfs_fs_get_dqblk( | 131 | xfs_fs_get_dqblk( |
132 | struct super_block *sb, | 132 | struct super_block *sb, |
133 | struct kqid qid, | 133 | struct kqid qid, |
134 | struct fs_disk_quota *fdq) | 134 | struct qc_dqblk *qdq) |
135 | { | 135 | { |
136 | struct xfs_mount *mp = XFS_M(sb); | 136 | struct xfs_mount *mp = XFS_M(sb); |
137 | 137 | ||
@@ -141,14 +141,14 @@ xfs_fs_get_dqblk( | |||
141 | return -ESRCH; | 141 | return -ESRCH; |
142 | 142 | ||
143 | return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid), | 143 | return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid), |
144 | xfs_quota_type(qid.type), fdq); | 144 | xfs_quota_type(qid.type), qdq); |
145 | } | 145 | } |
146 | 146 | ||
147 | STATIC int | 147 | STATIC int |
148 | xfs_fs_set_dqblk( | 148 | xfs_fs_set_dqblk( |
149 | struct super_block *sb, | 149 | struct super_block *sb, |
150 | struct kqid qid, | 150 | struct kqid qid, |
151 | struct fs_disk_quota *fdq) | 151 | struct qc_dqblk *qdq) |
152 | { | 152 | { |
153 | struct xfs_mount *mp = XFS_M(sb); | 153 | struct xfs_mount *mp = XFS_M(sb); |
154 | 154 | ||
@@ -160,7 +160,7 @@ xfs_fs_set_dqblk( | |||
160 | return -ESRCH; | 160 | return -ESRCH; |
161 | 161 | ||
162 | return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), | 162 | return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), |
163 | xfs_quota_type(qid.type), fdq); | 163 | xfs_quota_type(qid.type), qdq); |
164 | } | 164 | } |
165 | 165 | ||
166 | const struct quotactl_ops xfs_quotactl_operations = { | 166 | const struct quotactl_ops xfs_quotactl_operations = { |
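The xfs quota changes are largely a unit switch: struct fs_disk_quota carried 512-byte basic blocks (hence XFS_BB_TO_FSB/XFS_FSB_TO_BB in the old hunks), while struct qc_dqblk carries bytes (hence XFS_B_TO_FSB/XFS_FSB_TO_B in the new ones). A simplified, runnable model of the two byte/filesystem-block conversions, assuming a 4 KiB block size (the real macros derive it from the mount):

    #include <stdio.h>
    #include <stdint.h>

    #define FSB_SIZE 4096u   /* assumed filesystem block size */

    static uint64_t b_to_fsb(uint64_t bytes)  /* like XFS_B_TO_FSB: rounds up */
    {
            return (bytes + FSB_SIZE - 1) / FSB_SIZE;
    }

    static uint64_t fsb_to_b(uint64_t fsb)    /* like XFS_FSB_TO_B */
    {
            return fsb * FSB_SIZE;
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)b_to_fsb(4097)); /* 2 blocks */
            printf("%llu\n", (unsigned long long)fsb_to_b(2));    /* 8192 bytes */
            return 0;
    }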
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index e3a1721c8354..7c7695940ddd 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -228,7 +228,9 @@ struct i2c_client { | |||
228 | struct device dev; /* the device structure */ | 228 | struct device dev; /* the device structure */ |
229 | int irq; /* irq issued by device */ | 229 | int irq; /* irq issued by device */ |
230 | struct list_head detected; | 230 | struct list_head detected; |
231 | #if IS_ENABLED(CONFIG_I2C_SLAVE) | ||
231 | i2c_slave_cb_t slave_cb; /* callback for slave mode */ | 232 | i2c_slave_cb_t slave_cb; /* callback for slave mode */ |
233 | #endif | ||
232 | }; | 234 | }; |
233 | #define to_i2c_client(d) container_of(d, struct i2c_client, dev) | 235 | #define to_i2c_client(d) container_of(d, struct i2c_client, dev) |
234 | 236 | ||
@@ -253,6 +255,7 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) | |||
253 | 255 | ||
254 | /* I2C slave support */ | 256 | /* I2C slave support */ |
255 | 257 | ||
258 | #if IS_ENABLED(CONFIG_I2C_SLAVE) | ||
256 | enum i2c_slave_event { | 259 | enum i2c_slave_event { |
257 | I2C_SLAVE_REQ_READ_START, | 260 | I2C_SLAVE_REQ_READ_START, |
258 | I2C_SLAVE_REQ_READ_END, | 261 | I2C_SLAVE_REQ_READ_END, |
@@ -269,6 +272,7 @@ static inline int i2c_slave_event(struct i2c_client *client, | |||
269 | { | 272 | { |
270 | return client->slave_cb(client, event, val); | 273 | return client->slave_cb(client, event, val); |
271 | } | 274 | } |
275 | #endif | ||
272 | 276 | ||
273 | /** | 277 | /** |
274 | * struct i2c_board_info - template for device creation | 278 | * struct i2c_board_info - template for device creation |
@@ -404,8 +408,10 @@ struct i2c_algorithm { | |||
404 | /* To determine what the adapter supports */ | 408 | /* To determine what the adapter supports */ |
405 | u32 (*functionality) (struct i2c_adapter *); | 409 | u32 (*functionality) (struct i2c_adapter *); |
406 | 410 | ||
411 | #if IS_ENABLED(CONFIG_I2C_SLAVE) | ||
407 | int (*reg_slave)(struct i2c_client *client); | 412 | int (*reg_slave)(struct i2c_client *client); |
408 | int (*unreg_slave)(struct i2c_client *client); | 413 | int (*unreg_slave)(struct i2c_client *client); |
414 | #endif | ||
409 | }; | 415 | }; |
410 | 416 | ||
411 | /** | 417 | /** |
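With the IS_ENABLED(CONFIG_I2C_SLAVE) guards in place, adapters that support slave mode implement reg_slave()/unreg_slave() and forward wire events through i2c_slave_event(), which lands in the client's slave_cb. A minimal sketch of such a callback; I2C_SLAVE_REQ_READ_START is taken from the hunk above, while the i2c_slave_register() helper mentioned in the comment is assumed from the same interface:

    static int my_slave_cb(struct i2c_client *client,
                           enum i2c_slave_event event, u8 *val)
    {
            if (event == I2C_SLAVE_REQ_READ_START)
                    *val = 0xa5;    /* byte the remote master will read */
            return 0;
    }

    /* in probe(): i2c_slave_register(client, my_slave_cb); */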
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h new file mode 100644 index 000000000000..1c30014ed176 --- /dev/null +++ b/include/linux/iopoll.h | |||
@@ -0,0 +1,144 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #ifndef _LINUX_IOPOLL_H | ||
16 | #define _LINUX_IOPOLL_H | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/hrtimer.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/io.h> | ||
24 | |||
25 | /** | ||
26 | * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs | ||
27 | * @op: accessor function (takes @addr as its only argument) | ||
28 | * @addr: Address to poll | ||
29 | * @val: Variable to read the value into | ||
30 | * @cond: Break condition (usually involving @val) | ||
31 | * @sleep_us: Maximum time to sleep between reads in us (0 | ||
32 | * tight-loops). Should be less than ~20ms since usleep_range | ||
33 | * is used (see Documentation/timers/timers-howto.txt). | ||
34 | * @timeout_us: Timeout in us, 0 means never timeout | ||
35 | * | ||
36 | * Returns 0 on success and -ETIMEDOUT upon a timeout. In either | ||
37 | * case, the last read value at @addr is stored in @val. Must not | ||
38 | * be called from atomic context if sleep_us or timeout_us are used. | ||
39 | * | ||
40 | * When available, you'll probably want to use one of the specialized | ||
41 | * macros defined below rather than this macro directly. | ||
42 | */ | ||
43 | #define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \ | ||
44 | ({ \ | ||
45 | ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ | ||
46 | might_sleep_if(sleep_us); \ | ||
47 | for (;;) { \ | ||
48 | (val) = op(addr); \ | ||
49 | if (cond) \ | ||
50 | break; \ | ||
51 | if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ | ||
52 | (val) = op(addr); \ | ||
53 | break; \ | ||
54 | } \ | ||
55 | if (sleep_us) \ | ||
56 | usleep_range((sleep_us >> 2) + 1, sleep_us); \ | ||
57 | } \ | ||
58 | (cond) ? 0 : -ETIMEDOUT; \ | ||
59 | }) | ||
60 | |||
61 | /** | ||
62 | * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs | ||
63 | * @op: accessor function (takes @addr as its only argument) | ||
64 | * @addr: Address to poll | ||
65 | * @val: Variable to read the value into | ||
66 | * @cond: Break condition (usually involving @val) | ||
67 | * @delay_us: Time to udelay between reads in us (0 tight-loops). Should | ||
68 | * be less than ~10us since udelay is used (see | ||
69 | * Documentation/timers/timers-howto.txt). | ||
70 | * @timeout_us: Timeout in us, 0 means never timeout | ||
71 | * | ||
72 | * Returns 0 on success and -ETIMEDOUT upon a timeout. In either | ||
73 | * case, the last read value at @addr is stored in @val. | ||
74 | * | ||
75 | * When available, you'll probably want to use one of the specialized | ||
76 | * macros defined below rather than this macro directly. | ||
77 | */ | ||
78 | #define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \ | ||
79 | ({ \ | ||
80 | ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ | ||
81 | for (;;) { \ | ||
82 | (val) = op(addr); \ | ||
83 | if (cond) \ | ||
84 | break; \ | ||
85 | if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ | ||
86 | (val) = op(addr); \ | ||
87 | break; \ | ||
88 | } \ | ||
89 | if (delay_us) \ | ||
90 | udelay(delay_us); \ | ||
91 | } \ | ||
92 | (cond) ? 0 : -ETIMEDOUT; \ | ||
93 | }) | ||
94 | |||
95 | |||
96 | #define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \ | ||
97 | readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us) | ||
98 | |||
99 | #define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ | ||
100 | readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us) | ||
101 | |||
102 | #define readw_poll_timeout(addr, val, cond, delay_us, timeout_us) \ | ||
103 | readx_poll_timeout(readw, addr, val, cond, delay_us, timeout_us) | ||
104 | |||
105 | #define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ | ||
106 | readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us) | ||
107 | |||
108 | #define readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \ | ||
109 | readx_poll_timeout(readl, addr, val, cond, delay_us, timeout_us) | ||
110 | |||
111 | #define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ | ||
112 | readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us) | ||
113 | |||
114 | #define readq_poll_timeout(addr, val, cond, delay_us, timeout_us) \ | ||
115 | readx_poll_timeout(readq, addr, val, cond, delay_us, timeout_us) | ||
116 | |||
117 | #define readq_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ | ||
118 | readx_poll_timeout_atomic(readq, addr, val, cond, delay_us, timeout_us) | ||
119 | |||
120 | #define readb_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ | ||
121 | readx_poll_timeout(readb_relaxed, addr, val, cond, delay_us, timeout_us) | ||
122 | |||
123 | #define readb_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ | ||
124 | readx_poll_timeout_atomic(readb_relaxed, addr, val, cond, delay_us, timeout_us) | ||
125 | |||
126 | #define readw_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ | ||
127 | readx_poll_timeout(readw_relaxed, addr, val, cond, delay_us, timeout_us) | ||
128 | |||
129 | #define readw_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ | ||
130 | readx_poll_timeout_atomic(readw_relaxed, addr, val, cond, delay_us, timeout_us) | ||
131 | |||
132 | #define readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ | ||
133 | readx_poll_timeout(readl_relaxed, addr, val, cond, delay_us, timeout_us) | ||
134 | |||
135 | #define readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ | ||
136 | readx_poll_timeout_atomic(readl_relaxed, addr, val, cond, delay_us, timeout_us) | ||
137 | |||
138 | #define readq_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ | ||
139 | readx_poll_timeout(readq_relaxed, addr, val, cond, delay_us, timeout_us) | ||
140 | |||
141 | #define readq_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ | ||
142 | readx_poll_timeout_atomic(readq_relaxed, addr, val, cond, delay_us, timeout_us) | ||
143 | |||
144 | #endif /* _LINUX_IOPOLL_H */ | ||
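Typical use of the new iopoll helpers is a bounded wait for a status bit in process context (the register name and bit below are illustrative, not from this patch):

    u32 status;
    int ret;

    /* wait up to 500us, sleeping roughly 10us between reads */
    ret = readl_poll_timeout(base + MY_STATUS, status,
                             status & MY_READY_BIT, 10, 500);
    if (ret)
            return ret;  /* -ETIMEDOUT; status holds the last value read */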
diff --git a/include/linux/iova.h b/include/linux/iova.h index 19e81d5ccb6d..3920a19d8194 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h | |||
@@ -16,9 +16,6 @@ | |||
16 | #include <linux/rbtree.h> | 16 | #include <linux/rbtree.h> |
17 | #include <linux/dma-mapping.h> | 17 | #include <linux/dma-mapping.h> |
18 | 18 | ||
19 | /* IO virtual address start page frame number */ | ||
20 | #define IOVA_START_PFN (1) | ||
21 | |||
22 | /* iova structure */ | 19 | /* iova structure */ |
23 | struct iova { | 20 | struct iova { |
24 | struct rb_node node; | 21 | struct rb_node node; |
@@ -31,6 +28,8 @@ struct iova_domain { | |||
31 | spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ | 28 | spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ |
32 | struct rb_root rbroot; /* iova domain rbtree root */ | 29 | struct rb_root rbroot; /* iova domain rbtree root */ |
33 | struct rb_node *cached32_node; /* Save last alloced node */ | 30 | struct rb_node *cached32_node; /* Save last alloced node */ |
31 | unsigned long granule; /* pfn granularity for this domain */ | ||
32 | unsigned long start_pfn; /* Lower limit for this domain */ | ||
34 | unsigned long dma_32bit_pfn; | 33 | unsigned long dma_32bit_pfn; |
35 | }; | 34 | }; |
36 | 35 | ||
@@ -39,6 +38,39 @@ static inline unsigned long iova_size(struct iova *iova) | |||
39 | return iova->pfn_hi - iova->pfn_lo + 1; | 38 | return iova->pfn_hi - iova->pfn_lo + 1; |
40 | } | 39 | } |
41 | 40 | ||
41 | static inline unsigned long iova_shift(struct iova_domain *iovad) | ||
42 | { | ||
43 | return __ffs(iovad->granule); | ||
44 | } | ||
45 | |||
46 | static inline unsigned long iova_mask(struct iova_domain *iovad) | ||
47 | { | ||
48 | return iovad->granule - 1; | ||
49 | } | ||
50 | |||
51 | static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova) | ||
52 | { | ||
53 | return iova & iova_mask(iovad); | ||
54 | } | ||
55 | |||
56 | static inline size_t iova_align(struct iova_domain *iovad, size_t size) | ||
57 | { | ||
58 | return ALIGN(size, iovad->granule); | ||
59 | } | ||
60 | |||
61 | static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova) | ||
62 | { | ||
63 | return (dma_addr_t)iova->pfn_lo << iova_shift(iovad); | ||
64 | } | ||
65 | |||
66 | static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) | ||
67 | { | ||
68 | return iova >> iova_shift(iovad); | ||
69 | } | ||
70 | |||
71 | int iommu_iova_cache_init(void); | ||
72 | void iommu_iova_cache_destroy(void); | ||
73 | |||
42 | struct iova *alloc_iova_mem(void); | 74 | struct iova *alloc_iova_mem(void); |
43 | void free_iova_mem(struct iova *iova); | 75 | void free_iova_mem(struct iova *iova); |
44 | void free_iova(struct iova_domain *iovad, unsigned long pfn); | 76 | void free_iova(struct iova_domain *iovad, unsigned long pfn); |
@@ -49,7 +81,8 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, | |||
49 | struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, | 81 | struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, |
50 | unsigned long pfn_hi); | 82 | unsigned long pfn_hi); |
51 | void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); | 83 | void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); |
52 | void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit); | 84 | void init_iova_domain(struct iova_domain *iovad, unsigned long granule, |
85 | unsigned long start_pfn, unsigned long pfn_32bit); | ||
53 | struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); | 86 | struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); |
54 | void put_iova_domain(struct iova_domain *iovad); | 87 | void put_iova_domain(struct iova_domain *iovad); |
55 | struct iova *split_and_remove_iova(struct iova_domain *iovad, | 88 | struct iova *split_and_remove_iova(struct iova_domain *iovad, |
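Since iovad->granule is a power of two, the new iova helpers reduce to mask arithmetic. A runnable demonstration with a 4 KiB granule (__builtin_ctzll stands in for the kernel's __ffs, and ALIGN is open-coded):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t granule = 4096;                    /* iovad->granule */
            uint64_t mask = granule - 1;                /* iova_mask() */
            unsigned shift = __builtin_ctzll(granule);  /* iova_shift() == 12 */
            uint64_t iova = 0x10015a3;

            printf("offset = 0x%llx\n", (unsigned long long)(iova & mask));   /* 0x5a3 */
            printf("pfn    = 0x%llx\n", (unsigned long long)(iova >> shift)); /* 0x1001 */
            printf("align(5000) = %llu\n",
                   (unsigned long long)((5000 + mask) & ~mask));              /* 8192 */
            return 0;
    }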
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 5449d2f4a1ef..64ce58bee6f5 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -176,7 +176,7 @@ extern int _cond_resched(void); | |||
176 | */ | 176 | */ |
177 | # define might_sleep() \ | 177 | # define might_sleep() \ |
178 | do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) | 178 | do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) |
179 | # define sched_annotate_sleep() __set_current_state(TASK_RUNNING) | 179 | # define sched_annotate_sleep() (current->task_state_change = 0) |
180 | #else | 180 | #else |
181 | static inline void ___might_sleep(const char *file, int line, | 181 | static inline void ___might_sleep(const char *file, int line, |
182 | int preempt_offset) { } | 182 | int preempt_offset) { } |
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h index ce5dda8958fe..b1fd675fa36f 100644 --- a/include/linux/mfd/samsung/s2mps13.h +++ b/include/linux/mfd/samsung/s2mps13.h | |||
@@ -59,6 +59,7 @@ enum s2mps13_reg { | |||
59 | S2MPS13_REG_B6CTRL, | 59 | S2MPS13_REG_B6CTRL, |
60 | S2MPS13_REG_B6OUT, | 60 | S2MPS13_REG_B6OUT, |
61 | S2MPS13_REG_B7CTRL, | 61 | S2MPS13_REG_B7CTRL, |
62 | S2MPS13_REG_B7SW, | ||
62 | S2MPS13_REG_B7OUT, | 63 | S2MPS13_REG_B7OUT, |
63 | S2MPS13_REG_B8CTRL, | 64 | S2MPS13_REG_B8CTRL, |
64 | S2MPS13_REG_B8OUT, | 65 | S2MPS13_REG_B8OUT, |
@@ -102,6 +103,7 @@ enum s2mps13_reg { | |||
102 | S2MPS13_REG_L26CTRL, | 103 | S2MPS13_REG_L26CTRL, |
103 | S2MPS13_REG_L27CTRL, | 104 | S2MPS13_REG_L27CTRL, |
104 | S2MPS13_REG_L28CTRL, | 105 | S2MPS13_REG_L28CTRL, |
106 | S2MPS13_REG_L29CTRL, | ||
105 | S2MPS13_REG_L30CTRL, | 107 | S2MPS13_REG_L30CTRL, |
106 | S2MPS13_REG_L31CTRL, | 108 | S2MPS13_REG_L31CTRL, |
107 | S2MPS13_REG_L32CTRL, | 109 | S2MPS13_REG_L32CTRL, |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 80fc92a49649..dd5ea3016fc4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1070,6 +1070,7 @@ static inline int page_mapped(struct page *page) | |||
1070 | #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ | 1070 | #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ |
1071 | #define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */ | 1071 | #define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */ |
1072 | #define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */ | 1072 | #define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */ |
1073 | #define VM_FAULT_SIGSEGV 0x0040 | ||
1073 | 1074 | ||
1074 | #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ | 1075 | #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ |
1075 | #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ | 1076 | #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ |
@@ -1078,8 +1079,9 @@ static inline int page_mapped(struct page *page) | |||
1078 | 1079 | ||
1079 | #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ | 1080 | #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ |
1080 | 1081 | ||
1081 | #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \ | 1082 | #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \ |
1082 | VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE) | 1083 | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \ |
1084 | VM_FAULT_FALLBACK) | ||
1083 | 1085 | ||
1084 | /* Encode hstate index for a hwpoisoned large page */ | 1086 | /* Encode hstate index for a hwpoisoned large page */ |
1085 | #define VM_FAULT_SET_HINDEX(x) ((x) << 12) | 1087 | #define VM_FAULT_SET_HINDEX(x) ((x) << 12) |
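VM_FAULT_SIGSEGV gives handle_mm_fault() a way to request a straight SIGSEGV instead of overloading VM_FAULT_SIGBUS; arch fault handlers then dispatch roughly as below (a simplified composite -- the exact labels and signatures vary per architecture):

    fault = handle_mm_fault(mm, vma, address, flags);
    if (unlikely(fault & VM_FAULT_ERROR)) {
            if (fault & VM_FAULT_OOM)
                    goto out_of_memory;
            else if (fault & VM_FAULT_SIGSEGV)
                    goto bad_area;          /* deliver SIGSEGV */
            else if (fault & VM_FAULT_SIGBUS)
                    goto do_sigbus;
    }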
diff --git a/include/linux/oom.h b/include/linux/oom.h index 853698c721f7..76200984d1e2 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h | |||
@@ -85,11 +85,6 @@ static inline void oom_killer_enable(void) | |||
85 | oom_killer_disabled = false; | 85 | oom_killer_disabled = false; |
86 | } | 86 | } |
87 | 87 | ||
88 | static inline bool oom_gfp_allowed(gfp_t gfp_mask) | ||
89 | { | ||
90 | return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY); | ||
91 | } | ||
92 | |||
93 | extern struct task_struct *find_lock_task_mm(struct task_struct *p); | 88 | extern struct task_struct *find_lock_task_mm(struct task_struct *p); |
94 | 89 | ||
95 | static inline bool task_will_free_mem(struct task_struct *task) | 90 | static inline bool task_will_free_mem(struct task_struct *task) |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 4f7a61ca4b39..664de5a4ec46 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -450,11 +450,6 @@ struct perf_event { | |||
450 | #endif /* CONFIG_PERF_EVENTS */ | 450 | #endif /* CONFIG_PERF_EVENTS */ |
451 | }; | 451 | }; |
452 | 452 | ||
453 | enum perf_event_context_type { | ||
454 | task_context, | ||
455 | cpu_context, | ||
456 | }; | ||
457 | |||
458 | /** | 453 | /** |
459 | * struct perf_event_context - event context structure | 454 | * struct perf_event_context - event context structure |
460 | * | 455 | * |
@@ -462,7 +457,6 @@ enum perf_event_context_type { | |||
462 | */ | 457 | */ |
463 | struct perf_event_context { | 458 | struct perf_event_context { |
464 | struct pmu *pmu; | 459 | struct pmu *pmu; |
465 | enum perf_event_context_type type; | ||
466 | /* | 460 | /* |
467 | * Protect the states of the events in the list, | 461 | * Protect the states of the events in the list, |
468 | * nr_active, and the list: | 462 | * nr_active, and the list: |
diff --git a/include/linux/platform_data/ipmmu-vmsa.h b/include/linux/platform_data/ipmmu-vmsa.h deleted file mode 100644 index 5275b3ac6d37..000000000000 --- a/include/linux/platform_data/ipmmu-vmsa.h +++ /dev/null | |||
@@ -1,24 +0,0 @@ | |||
1 | /* | ||
2 | * IPMMU VMSA Platform Data | ||
3 | * | ||
4 | * Copyright (C) 2014 Renesas Electronics Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; version 2 of the License. | ||
9 | */ | ||
10 | |||
11 | #ifndef __IPMMU_VMSA_H__ | ||
12 | #define __IPMMU_VMSA_H__ | ||
13 | |||
14 | struct ipmmu_vmsa_master { | ||
15 | const char *name; | ||
16 | unsigned int utlb; | ||
17 | }; | ||
18 | |||
19 | struct ipmmu_vmsa_platform_data { | ||
20 | const struct ipmmu_vmsa_master *masters; | ||
21 | unsigned int num_masters; | ||
22 | }; | ||
23 | |||
24 | #endif /* __IPMMU_VMSA_H__ */ | ||
diff --git a/include/linux/printk.h b/include/linux/printk.h index c8f170324e64..4d5bf5726578 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h | |||
@@ -10,9 +10,6 @@ | |||
10 | extern const char linux_banner[]; | 10 | extern const char linux_banner[]; |
11 | extern const char linux_proc_banner[]; | 11 | extern const char linux_proc_banner[]; |
12 | 12 | ||
13 | extern char *log_buf_addr_get(void); | ||
14 | extern u32 log_buf_len_get(void); | ||
15 | |||
16 | static inline int printk_get_level(const char *buffer) | 13 | static inline int printk_get_level(const char *buffer) |
17 | { | 14 | { |
18 | if (buffer[0] == KERN_SOH_ASCII && buffer[1]) { | 15 | if (buffer[0] == KERN_SOH_ASCII && buffer[1]) { |
@@ -163,6 +160,8 @@ extern int kptr_restrict; | |||
163 | 160 | ||
164 | extern void wake_up_klogd(void); | 161 | extern void wake_up_klogd(void); |
165 | 162 | ||
163 | char *log_buf_addr_get(void); | ||
164 | u32 log_buf_len_get(void); | ||
166 | void log_buf_kexec_setup(void); | 165 | void log_buf_kexec_setup(void); |
167 | void __init setup_log_buf(int early); | 166 | void __init setup_log_buf(int early); |
168 | void dump_stack_set_arch_desc(const char *fmt, ...); | 167 | void dump_stack_set_arch_desc(const char *fmt, ...); |
@@ -198,6 +197,16 @@ static inline void wake_up_klogd(void) | |||
198 | { | 197 | { |
199 | } | 198 | } |
200 | 199 | ||
200 | static inline char *log_buf_addr_get(void) | ||
201 | { | ||
202 | return NULL; | ||
203 | } | ||
204 | |||
205 | static inline u32 log_buf_len_get(void) | ||
206 | { | ||
207 | return 0; | ||
208 | } | ||
209 | |||
201 | static inline void log_buf_kexec_setup(void) | 210 | static inline void log_buf_kexec_setup(void) |
202 | { | 211 | { |
203 | } | 212 | } |
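Supplying NULL/0 stubs for log_buf_addr_get()/log_buf_len_get() when CONFIG_PRINTK is off lets callers drop their own #ifdefs. A hypothetical caller (e.g. a crash-dump path) that stays correct either way:

    static void save_kernel_log(void)
    {
            char *buf = log_buf_addr_get();
            u32 len = log_buf_len_get();

            if (!buf || !len)
                    return;  /* printk disabled or buffer not set up */
            /* ... copy buf[0..len) somewhere persistent ... */
    }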
diff --git a/include/linux/quota.h b/include/linux/quota.h index 50978b781a19..097d7eb2441e 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h | |||
@@ -321,6 +321,49 @@ struct dquot_operations { | |||
321 | 321 | ||
322 | struct path; | 322 | struct path; |
323 | 323 | ||
324 | /* Structure for communicating via ->get_dqblk() & ->set_dqblk() */ | ||
325 | struct qc_dqblk { | ||
326 | int d_fieldmask; /* mask of fields to change in ->set_dqblk() */ | ||
327 | u64 d_spc_hardlimit; /* absolute limit on used space */ | ||
328 | u64 d_spc_softlimit; /* preferred limit on used space */ | ||
329 | u64 d_ino_hardlimit; /* maximum # allocated inodes */ | ||
330 | u64 d_ino_softlimit; /* preferred inode limit */ | ||
331 | u64 d_space; /* Space owned by the user */ | ||
332 | u64 d_ino_count; /* # inodes owned by the user */ | ||
333 | s64 d_ino_timer; /* zero if within inode limits */ | ||
334 | /* if not, we refuse service */ | ||
335 | s64 d_spc_timer; /* similar to above; for space */ | ||
336 | int d_ino_warns; /* # warnings issued wrt num inodes */ | ||
337 | int d_spc_warns; /* # warnings issued wrt used space */ | ||
338 | u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */ | ||
339 | u64 d_rt_spc_softlimit; /* preferred limit on RT space */ | ||
340 | u64 d_rt_space; /* realtime space owned */ | ||
341 | s64 d_rt_spc_timer; /* similar to above; for RT space */ | ||
342 | int d_rt_spc_warns; /* # warnings issued wrt RT space */ | ||
343 | }; | ||
344 | |||
345 | /* Field specifiers for ->set_dqblk() in struct qc_dqblk */ | ||
346 | #define QC_INO_SOFT (1<<0) | ||
347 | #define QC_INO_HARD (1<<1) | ||
348 | #define QC_SPC_SOFT (1<<2) | ||
349 | #define QC_SPC_HARD (1<<3) | ||
350 | #define QC_RT_SPC_SOFT (1<<4) | ||
351 | #define QC_RT_SPC_HARD (1<<5) | ||
352 | #define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \ | ||
353 | QC_RT_SPC_SOFT | QC_RT_SPC_HARD) | ||
354 | #define QC_SPC_TIMER (1<<6) | ||
355 | #define QC_INO_TIMER (1<<7) | ||
356 | #define QC_RT_SPC_TIMER (1<<8) | ||
357 | #define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER) | ||
358 | #define QC_SPC_WARNS (1<<9) | ||
359 | #define QC_INO_WARNS (1<<10) | ||
360 | #define QC_RT_SPC_WARNS (1<<11) | ||
361 | #define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS) | ||
362 | #define QC_SPACE (1<<12) | ||
363 | #define QC_INO_COUNT (1<<13) | ||
364 | #define QC_RT_SPACE (1<<14) | ||
365 | #define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE) | ||
366 | |||
324 | /* Operations handling requests from userspace */ | 367 | /* Operations handling requests from userspace */ |
325 | struct quotactl_ops { | 368 | struct quotactl_ops { |
326 | int (*quota_on)(struct super_block *, int, int, struct path *); | 369 | int (*quota_on)(struct super_block *, int, int, struct path *); |
@@ -329,8 +372,8 @@ struct quotactl_ops { | |||
329 | int (*quota_sync)(struct super_block *, int); | 372 | int (*quota_sync)(struct super_block *, int); |
330 | int (*get_info)(struct super_block *, int, struct if_dqinfo *); | 373 | int (*get_info)(struct super_block *, int, struct if_dqinfo *); |
331 | int (*set_info)(struct super_block *, int, struct if_dqinfo *); | 374 | int (*set_info)(struct super_block *, int, struct if_dqinfo *); |
332 | int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *); | 375 | int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); |
333 | int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *); | 376 | int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); |
334 | int (*get_xstate)(struct super_block *, struct fs_quota_stat *); | 377 | int (*get_xstate)(struct super_block *, struct fs_quota_stat *); |
335 | int (*set_xstate)(struct super_block *, unsigned int, int); | 378 | int (*set_xstate)(struct super_block *, unsigned int, int); |
336 | int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); | 379 | int (*get_xstatev)(struct super_block *, struct fs_quota_statv *); |
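Callers of the new ->set_dqblk() fill struct qc_dqblk in bytes and set only the QC_* bits they want applied; fields whose bits are clear are left alone by the filesystem. An illustrative fragment (qid setup and error handling elided):

    struct qc_dqblk qdq = { };

    qdq.d_spc_softlimit = 8ULL << 30;    /* limits are in bytes now */
    qdq.d_spc_hardlimit = 10ULL << 30;
    qdq.d_fieldmask = QC_SPC_SOFT | QC_SPC_HARD;

    ret = sb->s_qcop->set_dqblk(sb, qid, &qdq);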
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index f23538a6e411..29e3455f7d41 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h | |||
@@ -98,9 +98,9 @@ int dquot_quota_sync(struct super_block *sb, int type); | |||
98 | int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); | 98 | int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); |
99 | int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); | 99 | int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); |
100 | int dquot_get_dqblk(struct super_block *sb, struct kqid id, | 100 | int dquot_get_dqblk(struct super_block *sb, struct kqid id, |
101 | struct fs_disk_quota *di); | 101 | struct qc_dqblk *di); |
102 | int dquot_set_dqblk(struct super_block *sb, struct kqid id, | 102 | int dquot_set_dqblk(struct super_block *sb, struct kqid id, |
103 | struct fs_disk_quota *di); | 103 | struct qc_dqblk *di); |
104 | 104 | ||
105 | int __dquot_transfer(struct inode *inode, struct dquot **transfer_to); | 105 | int __dquot_transfer(struct inode *inode, struct dquot **transfer_to); |
106 | int dquot_transfer(struct inode *inode, struct iattr *iattr); | 106 | int dquot_transfer(struct inode *inode, struct iattr *iattr); |
diff --git a/include/net/ip.h b/include/net/ip.h index 0bb620702929..f7cbd703d15d 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
@@ -39,11 +39,12 @@ struct inet_skb_parm { | |||
39 | struct ip_options opt; /* Compiled IP options */ | 39 | struct ip_options opt; /* Compiled IP options */ |
40 | unsigned char flags; | 40 | unsigned char flags; |
41 | 41 | ||
42 | #define IPSKB_FORWARDED 1 | 42 | #define IPSKB_FORWARDED BIT(0) |
43 | #define IPSKB_XFRM_TUNNEL_SIZE 2 | 43 | #define IPSKB_XFRM_TUNNEL_SIZE BIT(1) |
44 | #define IPSKB_XFRM_TRANSFORMED 4 | 44 | #define IPSKB_XFRM_TRANSFORMED BIT(2) |
45 | #define IPSKB_FRAG_COMPLETE 8 | 45 | #define IPSKB_FRAG_COMPLETE BIT(3) |
46 | #define IPSKB_REROUTED 16 | 46 | #define IPSKB_REROUTED BIT(4) |
47 | #define IPSKB_DOREDIRECT BIT(5) | ||
47 | 48 | ||
48 | u16 frag_max_size; | 49 | u16 frag_max_size; |
49 | }; | 50 | }; |
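Rewriting the IPSKB_* values as BIT(n) makes the new BIT(5) flag read naturally; like the others it is tested through the skb control block, e.g. (illustrative fragment, using the IPCB accessor from the same header):

    /* BIT(5) == 0x20 */
    if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
            do_redirect = true;  /* forwarding path may emit an ICMP redirect */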
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h index a8f5c32d174b..2c7befb10f13 100644 --- a/include/trace/events/iommu.h +++ b/include/trace/events/iommu.h | |||
@@ -83,7 +83,7 @@ DEFINE_EVENT(iommu_device_event, detach_device_from_domain, | |||
83 | TP_ARGS(dev) | 83 | TP_ARGS(dev) |
84 | ); | 84 | ); |
85 | 85 | ||
86 | DECLARE_EVENT_CLASS(iommu_map_unmap, | 86 | TRACE_EVENT(map, |
87 | 87 | ||
88 | TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), | 88 | TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), |
89 | 89 | ||
@@ -92,7 +92,7 @@ DECLARE_EVENT_CLASS(iommu_map_unmap, | |||
92 | TP_STRUCT__entry( | 92 | TP_STRUCT__entry( |
93 | __field(u64, iova) | 93 | __field(u64, iova) |
94 | __field(u64, paddr) | 94 | __field(u64, paddr) |
95 | __field(int, size) | 95 | __field(size_t, size) |
96 | ), | 96 | ), |
97 | 97 | ||
98 | TP_fast_assign( | 98 | TP_fast_assign( |
@@ -101,26 +101,31 @@ DECLARE_EVENT_CLASS(iommu_map_unmap, | |||
101 | __entry->size = size; | 101 | __entry->size = size; |
102 | ), | 102 | ), |
103 | 103 | ||
104 | TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=0x%x", | 104 | TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu", |
105 | __entry->iova, __entry->paddr, __entry->size | 105 | __entry->iova, __entry->paddr, __entry->size |
106 | ) | 106 | ) |
107 | ); | 107 | ); |
108 | 108 | ||
109 | DEFINE_EVENT(iommu_map_unmap, map, | 109 | TRACE_EVENT(unmap, |
110 | 110 | ||
111 | TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), | 111 | TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size), |
112 | |||
113 | TP_ARGS(iova, paddr, size) | ||
114 | ); | ||
115 | 112 | ||
116 | DEFINE_EVENT_PRINT(iommu_map_unmap, unmap, | 113 | TP_ARGS(iova, size, unmapped_size), |
117 | 114 | ||
118 | TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), | 115 | TP_STRUCT__entry( |
116 | __field(u64, iova) | ||
117 | __field(size_t, size) | ||
118 | __field(size_t, unmapped_size) | ||
119 | ), | ||
119 | 120 | ||
120 | TP_ARGS(iova, paddr, size), | 121 | TP_fast_assign( |
122 | __entry->iova = iova; | ||
123 | __entry->size = size; | ||
124 | __entry->unmapped_size = unmapped_size; | ||
125 | ), | ||
121 | 126 | ||
122 | TP_printk("IOMMU: iova=0x%016llx size=0x%x", | 127 | TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu", |
123 | __entry->iova, __entry->size | 128 | __entry->iova, __entry->size, __entry->unmapped_size |
124 | ) | 129 | ) |
125 | ); | 130 | ); |
126 | 131 | ||
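Splitting the shared event class lets the unmap tracepoint report both the requested and the actually unmapped size. The call sites take roughly this shape (an assumed sketch mirroring drivers/iommu/iommu.c, not quoted from this patch):

    trace_map(iova, paddr, size);
    /* ... */
    unmapped = domain->ops->unmap(domain, iova, size);
    trace_unmap(iova, size, unmapped);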
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 088ac0b1b106..536edc2be307 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -150,7 +150,7 @@ static int map_lookup_elem(union bpf_attr *attr) | |||
150 | int ufd = attr->map_fd; | 150 | int ufd = attr->map_fd; |
151 | struct fd f = fdget(ufd); | 151 | struct fd f = fdget(ufd); |
152 | struct bpf_map *map; | 152 | struct bpf_map *map; |
153 | void *key, *value; | 153 | void *key, *value, *ptr; |
154 | int err; | 154 | int err; |
155 | 155 | ||
156 | if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM)) | 156 | if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM)) |
@@ -169,20 +169,29 @@ static int map_lookup_elem(union bpf_attr *attr) | |||
169 | if (copy_from_user(key, ukey, map->key_size) != 0) | 169 | if (copy_from_user(key, ukey, map->key_size) != 0) |
170 | goto free_key; | 170 | goto free_key; |
171 | 171 | ||
172 | err = -ENOENT; | 172 | err = -ENOMEM; |
173 | rcu_read_lock(); | 173 | value = kmalloc(map->value_size, GFP_USER); |
174 | value = map->ops->map_lookup_elem(map, key); | ||
175 | if (!value) | 174 | if (!value) |
176 | goto err_unlock; | 175 | goto free_key; |
176 | |||
177 | rcu_read_lock(); | ||
178 | ptr = map->ops->map_lookup_elem(map, key); | ||
179 | if (ptr) | ||
180 | memcpy(value, ptr, map->value_size); | ||
181 | rcu_read_unlock(); | ||
182 | |||
183 | err = -ENOENT; | ||
184 | if (!ptr) | ||
185 | goto free_value; | ||
177 | 186 | ||
178 | err = -EFAULT; | 187 | err = -EFAULT; |
179 | if (copy_to_user(uvalue, value, map->value_size) != 0) | 188 | if (copy_to_user(uvalue, value, map->value_size) != 0) |
180 | goto err_unlock; | 189 | goto free_value; |
181 | 190 | ||
182 | err = 0; | 191 | err = 0; |
183 | 192 | ||
184 | err_unlock: | 193 | free_value: |
185 | rcu_read_unlock(); | 194 | kfree(value); |
186 | free_key: | 195 | free_key: |
187 | kfree(key); | 196 | kfree(key); |
188 | err_put: | 197 | err_put: |
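The map_lookup_elem() rework exists because copy_to_user() can fault and sleep, which is illegal inside an RCU read-side section, while the looked-up element may be freed as soon as that section ends. The fix snapshots the value into a private buffer under the lock and copies to userspace afterwards. A kernel-style sketch of the pattern; the map type and lookup helper are hypothetical, only the locking/copy discipline mirrors the hunk:

static int copy_elem_to_user(struct my_map *map, void *key,
                             void __user *uvalue)
{
        void *buf, *ptr;

        buf = kmalloc(map->value_size, GFP_USER);
        if (!buf)
                return -ENOMEM;

        rcu_read_lock();
        ptr = my_map_lookup(map, key);  /* RCU-protected element */
        if (ptr)
                memcpy(buf, ptr, map->value_size);
        rcu_read_unlock();              /* ptr may be freed after this */

        if (!ptr) {
                kfree(buf);
                return -ENOENT;
        }
        /* copy_to_user() may sleep: only legal outside the RCU section */
        if (copy_to_user(uvalue, buf, map->value_size)) {
                kfree(buf);
                return -EFAULT;
        }
        kfree(buf);
        return 0;
}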
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index bb263d0caab3..04cfe8ace520 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -1909,7 +1909,7 @@ static void cgroup_kill_sb(struct super_block *sb) | |||
1909 | * | 1909 | * |
1910 | * And don't kill the default root. | 1910 | * And don't kill the default root. |
1911 | */ | 1911 | */ |
1912 | if (css_has_online_children(&root->cgrp.self) || | 1912 | if (!list_empty(&root->cgrp.self.children) || |
1913 | root == &cgrp_dfl_root) | 1913 | root == &cgrp_dfl_root) |
1914 | cgroup_put(&root->cgrp); | 1914 | cgroup_put(&root->cgrp); |
1915 | else | 1915 | else |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 882f835a0d85..19efcf13375a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -6776,7 +6776,6 @@ skip_type: | |||
6776 | __perf_event_init_context(&cpuctx->ctx); | 6776 | __perf_event_init_context(&cpuctx->ctx); |
6777 | lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); | 6777 | lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); |
6778 | lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); | 6778 | lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); |
6779 | cpuctx->ctx.type = cpu_context; | ||
6780 | cpuctx->ctx.pmu = pmu; | 6779 | cpuctx->ctx.pmu = pmu; |
6781 | 6780 | ||
6782 | __perf_cpu_hrtimer_init(cpuctx, cpu); | 6781 | __perf_cpu_hrtimer_init(cpuctx, cpu); |
@@ -7420,7 +7419,19 @@ SYSCALL_DEFINE5(perf_event_open, | |||
7420 | * task or CPU context: | 7419 | * task or CPU context: |
7421 | */ | 7420 | */ |
7422 | if (move_group) { | 7421 | if (move_group) { |
7423 | if (group_leader->ctx->type != ctx->type) | 7422 | /* |
7423 | * Make sure we're both on the same task, or both | ||
7424 | * per-cpu events. | ||
7425 | */ | ||
7426 | if (group_leader->ctx->task != ctx->task) | ||
7427 | goto err_context; | ||
7428 | |||
7429 | /* | ||
7430 | * Make sure we're both events for the same CPU; | ||
7431 | * grouping events for different CPUs is broken; since | ||
7432 | * you can never concurrently schedule them anyhow. | ||
7433 | */ | ||
7434 | if (group_leader->cpu != event->cpu) | ||
7424 | goto err_context; | 7435 | goto err_context; |
7425 | } else { | 7436 | } else { |
7426 | if (group_leader->ctx != ctx) | 7437 | if (group_leader->ctx != ctx) |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index c0accc00566e..e628cb11b560 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -7292,13 +7292,12 @@ void __might_sleep(const char *file, int line, int preempt_offset) | |||
7292 | * since we will exit with TASK_RUNNING make sure we enter with it, | 7292 | * since we will exit with TASK_RUNNING make sure we enter with it, |
7293 | * otherwise we will destroy state. | 7293 | * otherwise we will destroy state. |
7294 | */ | 7294 | */ |
7295 | if (WARN_ONCE(current->state != TASK_RUNNING, | 7295 | WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, |
7296 | "do not call blocking ops when !TASK_RUNNING; " | 7296 | "do not call blocking ops when !TASK_RUNNING; " |
7297 | "state=%lx set at [<%p>] %pS\n", | 7297 | "state=%lx set at [<%p>] %pS\n", |
7298 | current->state, | 7298 | current->state, |
7299 | (void *)current->task_state_change, | 7299 | (void *)current->task_state_change, |
7300 | (void *)current->task_state_change)) | 7300 | (void *)current->task_state_change); |
7301 | __set_current_state(TASK_RUNNING); | ||
7302 | 7301 | ||
7303 | ___might_sleep(file, line, preempt_offset); | 7302 | ___might_sleep(file, line, preempt_offset); |
7304 | } | 7303 | } |
diff --git a/mm/gup.c b/mm/gup.c --- a/mm/gup.c +++ b/mm/gup.c | |||
@@ -296,7 +296,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, | |||

296 | return -ENOMEM; | 296 | return -ENOMEM; |
297 | if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) | 297 | if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) |
298 | return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT; | 298 | return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT; |
299 | if (ret & VM_FAULT_SIGBUS) | 299 | if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) |
300 | return -EFAULT; | 300 | return -EFAULT; |
301 | BUG(); | 301 | BUG(); |
302 | } | 302 | } |
@@ -571,7 +571,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, | |||
571 | return -ENOMEM; | 571 | return -ENOMEM; |
572 | if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) | 572 | if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) |
573 | return -EHWPOISON; | 573 | return -EHWPOISON; |
574 | if (ret & VM_FAULT_SIGBUS) | 574 | if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) |
575 | return -EFAULT; | 575 | return -EFAULT; |
576 | BUG(); | 576 | BUG(); |
577 | } | 577 | } |
diff --git a/mm/ksm.c b/mm/ksm.c --- a/mm/ksm.c +++ b/mm/ksm.c | |||
@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) | |||
376 | else | 376 | else |
377 | ret = VM_FAULT_WRITE; | 377 | ret = VM_FAULT_WRITE; |
378 | put_page(page); | 378 | put_page(page); |
379 | } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM))); | 379 | } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM))); |
380 | /* | 380 | /* |
381 | * We must loop because handle_mm_fault() may back out if there's | 381 | * We must loop because handle_mm_fault() may back out if there's |
382 | * any difficulty e.g. if pte accessed bit gets updated concurrently. | 382 | * any difficulty e.g. if pte accessed bit gets updated concurrently. |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 851924fa5170..683b4782019b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -1477,9 +1477,9 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) | |||
1477 | 1477 | ||
1478 | pr_info("Task in "); | 1478 | pr_info("Task in "); |
1479 | pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); | 1479 | pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id)); |
1480 | pr_info(" killed as a result of limit of "); | 1480 | pr_cont(" killed as a result of limit of "); |
1481 | pr_cont_cgroup_path(memcg->css.cgroup); | 1481 | pr_cont_cgroup_path(memcg->css.cgroup); |
1482 | pr_info("\n"); | 1482 | pr_cont("\n"); |
1483 | 1483 | ||
1484 | rcu_read_unlock(); | 1484 | rcu_read_unlock(); |
1485 | 1485 | ||
diff --git a/mm/memory.c b/mm/memory.c index 54f3a9b00956..2c3536cc6c63 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2632,7 +2632,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2632 | 2632 | ||
2633 | /* Check if we need to add a guard page to the stack */ | 2633 | /* Check if we need to add a guard page to the stack */ |
2634 | if (check_stack_guard_page(vma, address) < 0) | 2634 | if (check_stack_guard_page(vma, address) < 0) |
2635 | return VM_FAULT_SIGBUS; | 2635 | return VM_FAULT_SIGSEGV; |
2636 | 2636 | ||
2637 | /* Use the zero-page for reads */ | 2637 | /* Use the zero-page for reads */ |
2638 | if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) { | 2638 | if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) { |
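The mm/gup.c, mm/ksm.c and mm/memory.c hunks are two sides of one change: do_anonymous_page() now reports a stack-guard-page hit as VM_FAULT_SIGSEGV rather than SIGBUS, and every consumer that demultiplexes fault codes (faultin_page, fixup_user_fault, break_ksm) must learn the new bit or fall through to BUG(). A hedged sketch of the dispatch pattern those consumers share; fault_to_errno() is a hypothetical name:

static int fault_to_errno(int ret, unsigned int flags)
{
        if (ret & VM_FAULT_OOM)
                return -ENOMEM;
        if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
                return (flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
        if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
                return -EFAULT; /* both are fatal access errors here */
        BUG();                  /* a fault bit this consumer never learned */
}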
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7633c503a116..8e20f9c2fa5a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2332,12 +2332,21 @@ static inline struct page * | |||
2332 | __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, | 2332 | __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, |
2333 | struct zonelist *zonelist, enum zone_type high_zoneidx, | 2333 | struct zonelist *zonelist, enum zone_type high_zoneidx, |
2334 | nodemask_t *nodemask, struct zone *preferred_zone, | 2334 | nodemask_t *nodemask, struct zone *preferred_zone, |
2335 | int classzone_idx, int migratetype) | 2335 | int classzone_idx, int migratetype, unsigned long *did_some_progress) |
2336 | { | 2336 | { |
2337 | struct page *page; | 2337 | struct page *page; |
2338 | 2338 | ||
2339 | /* Acquire the per-zone oom lock for each zone */ | 2339 | *did_some_progress = 0; |
2340 | |||
2341 | if (oom_killer_disabled) | ||
2342 | return NULL; | ||
2343 | |||
2344 | /* | ||
2345 | * Acquire the per-zone oom lock for each zone. If that | ||
2346 | * fails, somebody else is making progress for us. | ||
2347 | */ | ||
2340 | if (!oom_zonelist_trylock(zonelist, gfp_mask)) { | 2348 | if (!oom_zonelist_trylock(zonelist, gfp_mask)) { |
2349 | *did_some_progress = 1; | ||
2341 | schedule_timeout_uninterruptible(1); | 2350 | schedule_timeout_uninterruptible(1); |
2342 | return NULL; | 2351 | return NULL; |
2343 | } | 2352 | } |
@@ -2363,12 +2372,18 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, | |||
2363 | goto out; | 2372 | goto out; |
2364 | 2373 | ||
2365 | if (!(gfp_mask & __GFP_NOFAIL)) { | 2374 | if (!(gfp_mask & __GFP_NOFAIL)) { |
2375 | /* Coredumps can quickly deplete all memory reserves */ | ||
2376 | if (current->flags & PF_DUMPCORE) | ||
2377 | goto out; | ||
2366 | /* The OOM killer will not help higher order allocs */ | 2378 | /* The OOM killer will not help higher order allocs */ |
2367 | if (order > PAGE_ALLOC_COSTLY_ORDER) | 2379 | if (order > PAGE_ALLOC_COSTLY_ORDER) |
2368 | goto out; | 2380 | goto out; |
2369 | /* The OOM killer does not needlessly kill tasks for lowmem */ | 2381 | /* The OOM killer does not needlessly kill tasks for lowmem */ |
2370 | if (high_zoneidx < ZONE_NORMAL) | 2382 | if (high_zoneidx < ZONE_NORMAL) |
2371 | goto out; | 2383 | goto out; |
2384 | /* The OOM killer does not compensate for light reclaim */ | ||
2385 | if (!(gfp_mask & __GFP_FS)) | ||
2386 | goto out; | ||
2372 | /* | 2387 | /* |
2373 | * GFP_THISNODE contains __GFP_NORETRY and we never hit this. | 2388 | * GFP_THISNODE contains __GFP_NORETRY and we never hit this. |
2374 | * Sanity check for bare calls of __GFP_THISNODE, not real OOM. | 2389 | * Sanity check for bare calls of __GFP_THISNODE, not real OOM. |
@@ -2381,7 +2396,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, | |||
2381 | } | 2396 | } |
2382 | /* Exhausted what can be done so it's blamo time */ | 2397 | /* Exhausted what can be done so it's blamo time */ |
2383 | out_of_memory(zonelist, gfp_mask, order, nodemask, false); | 2398 | out_of_memory(zonelist, gfp_mask, order, nodemask, false); |
2384 | 2399 | *did_some_progress = 1; | |
2385 | out: | 2400 | out: |
2386 | oom_zonelist_unlock(zonelist, gfp_mask); | 2401 | oom_zonelist_unlock(zonelist, gfp_mask); |
2387 | return page; | 2402 | return page; |
@@ -2658,7 +2673,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, | |||
2658 | (gfp_mask & GFP_THISNODE) == GFP_THISNODE) | 2673 | (gfp_mask & GFP_THISNODE) == GFP_THISNODE) |
2659 | goto nopage; | 2674 | goto nopage; |
2660 | 2675 | ||
2661 | restart: | 2676 | retry: |
2662 | if (!(gfp_mask & __GFP_NO_KSWAPD)) | 2677 | if (!(gfp_mask & __GFP_NO_KSWAPD)) |
2663 | wake_all_kswapds(order, zonelist, high_zoneidx, | 2678 | wake_all_kswapds(order, zonelist, high_zoneidx, |
2664 | preferred_zone, nodemask); | 2679 | preferred_zone, nodemask); |
@@ -2681,7 +2696,6 @@ restart: | |||
2681 | classzone_idx = zonelist_zone_idx(preferred_zoneref); | 2696 | classzone_idx = zonelist_zone_idx(preferred_zoneref); |
2682 | } | 2697 | } |
2683 | 2698 | ||
2684 | rebalance: | ||
2685 | /* This is the last chance, in general, before the goto nopage. */ | 2699 | /* This is the last chance, in general, before the goto nopage. */ |
2686 | page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, | 2700 | page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, |
2687 | high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS, | 2701 | high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS, |
@@ -2788,54 +2802,28 @@ rebalance: | |||
2788 | if (page) | 2802 | if (page) |
2789 | goto got_pg; | 2803 | goto got_pg; |
2790 | 2804 | ||
2791 | /* | ||
2792 | * If we failed to make any progress reclaiming, then we are | ||
2793 | * running out of options and have to consider going OOM | ||
2794 | */ | ||
2795 | if (!did_some_progress) { | ||
2796 | if (oom_gfp_allowed(gfp_mask)) { | ||
2797 | if (oom_killer_disabled) | ||
2798 | goto nopage; | ||
2799 | /* Coredumps can quickly deplete all memory reserves */ | ||
2800 | if ((current->flags & PF_DUMPCORE) && | ||
2801 | !(gfp_mask & __GFP_NOFAIL)) | ||
2802 | goto nopage; | ||
2803 | page = __alloc_pages_may_oom(gfp_mask, order, | ||
2804 | zonelist, high_zoneidx, | ||
2805 | nodemask, preferred_zone, | ||
2806 | classzone_idx, migratetype); | ||
2807 | if (page) | ||
2808 | goto got_pg; | ||
2809 | |||
2810 | if (!(gfp_mask & __GFP_NOFAIL)) { | ||
2811 | /* | ||
2812 | * The oom killer is not called for high-order | ||
2813 | * allocations that may fail, so if no progress | ||
2814 | * is being made, there are no other options and | ||
2815 | * retrying is unlikely to help. | ||
2816 | */ | ||
2817 | if (order > PAGE_ALLOC_COSTLY_ORDER) | ||
2818 | goto nopage; | ||
2819 | /* | ||
2820 | * The oom killer is not called for lowmem | ||
2821 | * allocations to prevent needlessly killing | ||
2822 | * innocent tasks. | ||
2823 | */ | ||
2824 | if (high_zoneidx < ZONE_NORMAL) | ||
2825 | goto nopage; | ||
2826 | } | ||
2827 | |||
2828 | goto restart; | ||
2829 | } | ||
2830 | } | ||
2831 | |||
2832 | /* Check if we should retry the allocation */ | 2805 | /* Check if we should retry the allocation */ |
2833 | pages_reclaimed += did_some_progress; | 2806 | pages_reclaimed += did_some_progress; |
2834 | if (should_alloc_retry(gfp_mask, order, did_some_progress, | 2807 | if (should_alloc_retry(gfp_mask, order, did_some_progress, |
2835 | pages_reclaimed)) { | 2808 | pages_reclaimed)) { |
2809 | /* | ||
2810 | * If we fail to make progress by freeing individual | ||
2811 | * pages, but the allocation wants us to keep going, | ||
2812 | * start OOM killing tasks. | ||
2813 | */ | ||
2814 | if (!did_some_progress) { | ||
2815 | page = __alloc_pages_may_oom(gfp_mask, order, zonelist, | ||
2816 | high_zoneidx, nodemask, | ||
2817 | preferred_zone, classzone_idx, | ||
2818 | migratetype,&did_some_progress); | ||
2819 | if (page) | ||
2820 | goto got_pg; | ||
2821 | if (!did_some_progress) | ||
2822 | goto nopage; | ||
2823 | } | ||
2836 | /* Wait for some write requests to complete then retry */ | 2824 | /* Wait for some write requests to complete then retry */ |
2837 | wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); | 2825 | wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); |
2838 | goto rebalance; | 2826 | goto retry; |
2839 | } else { | 2827 | } else { |
2840 | /* | 2828 | /* |
2841 | * High-order allocations do not necessarily loop after | 2829 | * High-order allocations do not necessarily loop after |
diff --git a/mm/vmscan.c b/mm/vmscan.c index ab2505c3ef54..dcd90c891d8e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -2656,7 +2656,7 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, | |||
2656 | * should make reasonable progress. | 2656 | * should make reasonable progress. |
2657 | */ | 2657 | */ |
2658 | for_each_zone_zonelist_nodemask(zone, z, zonelist, | 2658 | for_each_zone_zonelist_nodemask(zone, z, zonelist, |
2659 | gfp_mask, nodemask) { | 2659 | gfp_zone(gfp_mask), nodemask) { |
2660 | if (zone_idx(zone) > ZONE_NORMAL) | 2660 | if (zone_idx(zone) > ZONE_NORMAL) |
2661 | continue; | 2661 | continue; |
2662 | 2662 | ||
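The vmscan one-liner fixes an argument-type confusion: the third parameter of for_each_zone_zonelist_nodemask() is a highest-zone index (enum zone_type), not a gfp mask, and the two only compiled interchangeably because both are integers. gfp_zone() derives the index from the mask. A sketch of the corrected call:

enum zone_type high_zoneidx = gfp_zone(gfp_mask); /* mask -> zone index */

for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, nodemask) {
        if (zone_idx(zone) > ZONE_NORMAL)
                continue;
        /* consider this zone for throttling */
}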
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 515569ffde8a..589aafd01fc5 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -46,6 +46,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds) | |||
46 | snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x", | 46 | snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x", |
47 | ds->index, ds->pd->sw_addr); | 47 | ds->index, ds->pd->sw_addr); |
48 | ds->slave_mii_bus->parent = ds->master_dev; | 48 | ds->slave_mii_bus->parent = ds->master_dev; |
49 | ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask; | ||
49 | } | 50 | } |
50 | 51 | ||
51 | 52 | ||
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 3a83ce5efa80..787b3c294ce6 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -129,7 +129,8 @@ int ip_forward(struct sk_buff *skb) | |||
129 | * We now generate an ICMP HOST REDIRECT giving the route | 129 | * We now generate an ICMP HOST REDIRECT giving the route |
130 | * we calculated. | 130 | * we calculated. |
131 | */ | 131 | */ |
132 | if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb)) | 132 | if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr && |
133 | !skb_sec_path(skb)) | ||
133 | ip_rt_send_redirect(skb); | 134 | ip_rt_send_redirect(skb); |
134 | 135 | ||
135 | skb->priority = rt_tos2priority(iph->tos); | 136 | skb->priority = rt_tos2priority(iph->tos); |
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index c0d82f78d364..2a3720fb5a5f 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
@@ -966,8 +966,11 @@ bool ping_rcv(struct sk_buff *skb) | |||
966 | 966 | ||
967 | sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); | 967 | sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); |
968 | if (sk != NULL) { | 968 | if (sk != NULL) { |
969 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); | ||
970 | |||
969 | pr_debug("rcv on socket %p\n", sk); | 971 | pr_debug("rcv on socket %p\n", sk); |
970 | ping_queue_rcv_skb(sk, skb_get(skb)); | 972 | if (skb2) |
973 | ping_queue_rcv_skb(sk, skb2); | ||
971 | sock_put(sk); | 974 | sock_put(sk); |
972 | return true; | 975 | return true; |
973 | } | 976 | } |
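The ping_rcv() fix replaces skb_get(), which only takes a reference on the very buffer the ICMP path continues to process, with skb_clone(), which hands the socket its own sk_buff header over shared read-only data. A sketch of the ownership difference, using the names from the hunk:

/*
 * skb_get(skb)   : same sk_buff, refcount + 1 -- queue and ICMP path
 *                  keep touching one shared header.
 * skb_clone(skb) : fresh sk_buff header over shared read-only data --
 *                  the receive queue owns its header outright.
 */
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

if (skb2)                               /* clone can fail under pressure */
        ping_queue_rcv_skb(sk, skb2);   /* queue now owns skb2 */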
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 6a2155b02602..d58dd0ec3e53 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1554,11 +1554,10 @@ static int __mkroute_input(struct sk_buff *skb, | |||
1554 | 1554 | ||
1555 | do_cache = res->fi && !itag; | 1555 | do_cache = res->fi && !itag; |
1556 | if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) && | 1556 | if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) && |
1557 | skb->protocol == htons(ETH_P_IP) && | ||
1557 | (IN_DEV_SHARED_MEDIA(out_dev) || | 1558 | (IN_DEV_SHARED_MEDIA(out_dev) || |
1558 | inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) { | 1559 | inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) |
1559 | flags |= RTCF_DOREDIRECT; | 1560 | IPCB(skb)->flags |= IPSKB_DOREDIRECT; |
1560 | do_cache = false; | ||
1561 | } | ||
1562 | 1561 | ||
1563 | if (skb->protocol != htons(ETH_P_IP)) { | 1562 | if (skb->protocol != htons(ETH_P_IP)) { |
1564 | /* Not IP (i.e. ARP). Do not create route, if it is | 1563 | /* Not IP (i.e. ARP). Do not create route, if it is |
@@ -2303,6 +2302,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, | |||
2303 | r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; | 2302 | r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; |
2304 | if (rt->rt_flags & RTCF_NOTIFY) | 2303 | if (rt->rt_flags & RTCF_NOTIFY) |
2305 | r->rtm_flags |= RTM_F_NOTIFY; | 2304 | r->rtm_flags |= RTM_F_NOTIFY; |
2305 | if (IPCB(skb)->flags & IPSKB_DOREDIRECT) | ||
2306 | r->rtm_flags |= RTCF_DOREDIRECT; | ||
2306 | 2307 | ||
2307 | if (nla_put_be32(skb, RTA_DST, dst)) | 2308 | if (nla_put_be32(skb, RTA_DST, dst)) |
2308 | goto nla_put_failure; | 2309 | goto nla_put_failure; |
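Together with the ip_forward.c hunk earlier, this moves the redirect decision out of rt->rt_flags, which lives in a cached dst shared by many flows (the old code even had to disable caching with do_cache = false), into IPCB(skb), the per-packet IPv4 control block carried in skb->cb[]. The rt_fill_info() hunk keeps the flag visible to userspace route dumps. A sketch of the flag's per-packet life cycle:

IPCB(skb)->flags |= IPSKB_DOREDIRECT;   /* set at route resolution */

/* later, forwarding the very same skb: */
if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
        ip_rt_send_redirect(skb);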
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index 7927db0a9279..4a000f1dd757 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c | |||
@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin | |||
99 | s_slot = cb->args[0]; | 99 | s_slot = cb->args[0]; |
100 | num = s_num = cb->args[1]; | 100 | num = s_num = cb->args[1]; |
101 | 101 | ||
102 | for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) { | 102 | for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) { |
103 | struct sock *sk; | 103 | struct sock *sk; |
104 | struct hlist_nulls_node *node; | 104 | struct hlist_nulls_node *node; |
105 | struct udp_hslot *hslot = &table->hash[slot]; | 105 | struct udp_hslot *hslot = &table->hash[slot]; |
106 | 106 | ||
107 | num = 0; | ||
108 | |||
107 | if (hlist_nulls_empty(&hslot->head)) | 109 | if (hlist_nulls_empty(&hslot->head)) |
108 | continue; | 110 | continue; |
109 | 111 | ||
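The udp_diag loop is a resumable netlink dump: on re-entry it must skip the s_num sockets of slot s_slot already reported. The old for-increment left num seeded with s_num in the first slot, so the skip test never fired there and the resume bookkeeping drifted by s_num, skipping or duplicating sockets within the chain. A self-contained demo of the counter discipline:

#include <stdio.h>

#define NBUCKETS 2

static const char *bucket[NBUCKETS][3] = {
        { "a", "b", "c" },
        { "d", "e", "f" },
};

/* Resume a dump at (s_slot, s_num): starting slot, entries of that
 * slot already reported on the previous pass. */
static void dump_from(int s_slot, int s_num)
{
        for (int slot = s_slot; slot < NBUCKETS; s_num = 0, slot++) {
                int num = 0;    /* the fix: every slot counts from 0 */

                for (int i = 0; i < 3; i++, num++) {
                        if (num < s_num)
                                continue;       /* reported last time */
                        printf("%s ", bucket[slot][i]);
                }
        }
        printf("\n");
}

int main(void)
{
        dump_from(0, 0);        /* a b c d e f */
        dump_from(0, 2);        /* c d e f -- seeding num = s_num instead
                                 * would re-report a and b */
        return 0;
}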
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index b2d1838897c9..f1c6d5e98322 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -659,6 +659,29 @@ static int fib6_commit_metrics(struct dst_entry *dst, | |||
659 | return 0; | 659 | return 0; |
660 | } | 660 | } |
661 | 661 | ||
662 | static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn, | ||
663 | struct net *net) | ||
664 | { | ||
665 | if (atomic_read(&rt->rt6i_ref) != 1) { | ||
666 | /* This route is used as dummy address holder in some split | ||
667 | * nodes. It is not leaked, but it still holds other resources, | ||
668 | * which must be released in time. So, scan ascendant nodes | ||
669 | * and replace dummy references to this route with references | ||
670 | * to still alive ones. | ||
671 | */ | ||
672 | while (fn) { | ||
673 | if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) { | ||
674 | fn->leaf = fib6_find_prefix(net, fn); | ||
675 | atomic_inc(&fn->leaf->rt6i_ref); | ||
676 | rt6_release(rt); | ||
677 | } | ||
678 | fn = fn->parent; | ||
679 | } | ||
680 | /* No more references are possible at this point. */ | ||
681 | BUG_ON(atomic_read(&rt->rt6i_ref) != 1); | ||
682 | } | ||
683 | } | ||
684 | |||
662 | /* | 685 | /* |
663 | * Insert routing information in a node. | 686 | * Insert routing information in a node. |
664 | */ | 687 | */ |
@@ -807,11 +830,12 @@ add: | |||
807 | rt->dst.rt6_next = iter->dst.rt6_next; | 830 | rt->dst.rt6_next = iter->dst.rt6_next; |
808 | atomic_inc(&rt->rt6i_ref); | 831 | atomic_inc(&rt->rt6i_ref); |
809 | inet6_rt_notify(RTM_NEWROUTE, rt, info); | 832 | inet6_rt_notify(RTM_NEWROUTE, rt, info); |
810 | rt6_release(iter); | ||
811 | if (!(fn->fn_flags & RTN_RTINFO)) { | 833 | if (!(fn->fn_flags & RTN_RTINFO)) { |
812 | info->nl_net->ipv6.rt6_stats->fib_route_nodes++; | 834 | info->nl_net->ipv6.rt6_stats->fib_route_nodes++; |
813 | fn->fn_flags |= RTN_RTINFO; | 835 | fn->fn_flags |= RTN_RTINFO; |
814 | } | 836 | } |
837 | fib6_purge_rt(iter, fn, info->nl_net); | ||
838 | rt6_release(iter); | ||
815 | } | 839 | } |
816 | 840 | ||
817 | return 0; | 841 | return 0; |
@@ -1322,24 +1346,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, | |||
1322 | fn = fib6_repair_tree(net, fn); | 1346 | fn = fib6_repair_tree(net, fn); |
1323 | } | 1347 | } |
1324 | 1348 | ||
1325 | if (atomic_read(&rt->rt6i_ref) != 1) { | 1349 | fib6_purge_rt(rt, fn, net); |
1326 | /* This route is used as dummy address holder in some split | ||
1327 | * nodes. It is not leaked, but it still holds other resources, | ||
1328 | * which must be released in time. So, scan ascendant nodes | ||
1329 | * and replace dummy references to this route with references | ||
1330 | * to still alive ones. | ||
1331 | */ | ||
1332 | while (fn) { | ||
1333 | if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) { | ||
1334 | fn->leaf = fib6_find_prefix(net, fn); | ||
1335 | atomic_inc(&fn->leaf->rt6i_ref); | ||
1336 | rt6_release(rt); | ||
1337 | } | ||
1338 | fn = fn->parent; | ||
1339 | } | ||
1340 | /* No more references are possible at this point. */ | ||
1341 | BUG_ON(atomic_read(&rt->rt6i_ref) != 1); | ||
1342 | } | ||
1343 | 1350 | ||
1344 | inet6_rt_notify(RTM_DELROUTE, rt, info); | 1351 | inet6_rt_notify(RTM_DELROUTE, rt, info); |
1345 | rt6_release(rt); | 1352 | rt6_release(rt); |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 166e33bed222..495965358d22 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1242,12 +1242,16 @@ restart: | |||
1242 | rt = net->ipv6.ip6_null_entry; | 1242 | rt = net->ipv6.ip6_null_entry; |
1243 | else if (rt->dst.error) { | 1243 | else if (rt->dst.error) { |
1244 | rt = net->ipv6.ip6_null_entry; | 1244 | rt = net->ipv6.ip6_null_entry; |
1245 | } else if (rt == net->ipv6.ip6_null_entry) { | 1245 | goto out; |
1246 | } | ||
1247 | |||
1248 | if (rt == net->ipv6.ip6_null_entry) { | ||
1246 | fn = fib6_backtrack(fn, &fl6->saddr); | 1249 | fn = fib6_backtrack(fn, &fl6->saddr); |
1247 | if (fn) | 1250 | if (fn) |
1248 | goto restart; | 1251 | goto restart; |
1249 | } | 1252 | } |
1250 | 1253 | ||
1254 | out: | ||
1251 | dst_hold(&rt->dst); | 1255 | dst_hold(&rt->dst); |
1252 | 1256 | ||
1253 | read_unlock_bh(&table->tb6_lock); | 1257 | read_unlock_bh(&table->tb6_lock); |
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 5f983644373a..48bf5a06847b 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -130,12 +130,18 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
130 | { | 130 | { |
131 | struct flowi6 *fl6 = &fl->u.ip6; | 131 | struct flowi6 *fl6 = &fl->u.ip6; |
132 | int onlyproto = 0; | 132 | int onlyproto = 0; |
133 | u16 offset = skb_network_header_len(skb); | ||
134 | const struct ipv6hdr *hdr = ipv6_hdr(skb); | 133 | const struct ipv6hdr *hdr = ipv6_hdr(skb); |
134 | u16 offset = sizeof(*hdr); | ||
135 | struct ipv6_opt_hdr *exthdr; | 135 | struct ipv6_opt_hdr *exthdr; |
136 | const unsigned char *nh = skb_network_header(skb); | 136 | const unsigned char *nh = skb_network_header(skb); |
137 | u8 nexthdr = nh[IP6CB(skb)->nhoff]; | 137 | u16 nhoff = IP6CB(skb)->nhoff; |
138 | int oif = 0; | 138 | int oif = 0; |
139 | u8 nexthdr; | ||
140 | |||
141 | if (!nhoff) | ||
142 | nhoff = offsetof(struct ipv6hdr, nexthdr); | ||
143 | |||
144 | nexthdr = nh[nhoff]; | ||
139 | 145 | ||
140 | if (skb_dst(skb)) | 146 | if (skb_dst(skb)) |
141 | oif = skb_dst(skb)->dev->ifindex; | 147 | oif = skb_dst(skb)->dev->ifindex; |
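For locally generated packets IP6CB(skb)->nhoff has not been filled in yet and reads as 0, so _decode_session6() now falls back to the position of the nexthdr byte inside the fixed IPv6 header, and starts the extension-header walk at sizeof(*hdr) instead of trusting skb_network_header_len(). A self-contained check of that fallback offset, using a layout-compatible stand-in for struct ipv6hdr (bitfields collapsed into one 32-bit word):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct ipv6hdr_like {
        uint32_t vtc_flow;      /* version, traffic class, flow label */
        uint16_t payload_len;
        uint8_t  nexthdr;
        uint8_t  hop_limit;
        uint8_t  saddr[16];
        uint8_t  daddr[16];
};

int main(void)
{
        printf("nexthdr offset = %zu\n",
               offsetof(struct ipv6hdr_like, nexthdr)); /* 6 */
        printf("fixed header   = %zu bytes\n",
               sizeof(struct ipv6hdr_like));            /* 40 */
        return 0;
}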
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c index 612a5ddaf93b..799bafc2af39 100644 --- a/net/llc/sysctl_net_llc.c +++ b/net/llc/sysctl_net_llc.c | |||
@@ -18,28 +18,28 @@ static struct ctl_table llc2_timeout_table[] = { | |||
18 | { | 18 | { |
19 | .procname = "ack", | 19 | .procname = "ack", |
20 | .data = &sysctl_llc2_ack_timeout, | 20 | .data = &sysctl_llc2_ack_timeout, |
21 | .maxlen = sizeof(long), | 21 | .maxlen = sizeof(sysctl_llc2_ack_timeout), |
22 | .mode = 0644, | 22 | .mode = 0644, |
23 | .proc_handler = proc_dointvec_jiffies, | 23 | .proc_handler = proc_dointvec_jiffies, |
24 | }, | 24 | }, |
25 | { | 25 | { |
26 | .procname = "busy", | 26 | .procname = "busy", |
27 | .data = &sysctl_llc2_busy_timeout, | 27 | .data = &sysctl_llc2_busy_timeout, |
28 | .maxlen = sizeof(long), | 28 | .maxlen = sizeof(sysctl_llc2_busy_timeout), |
29 | .mode = 0644, | 29 | .mode = 0644, |
30 | .proc_handler = proc_dointvec_jiffies, | 30 | .proc_handler = proc_dointvec_jiffies, |
31 | }, | 31 | }, |
32 | { | 32 | { |
33 | .procname = "p", | 33 | .procname = "p", |
34 | .data = &sysctl_llc2_p_timeout, | 34 | .data = &sysctl_llc2_p_timeout, |
35 | .maxlen = sizeof(long), | 35 | .maxlen = sizeof(sysctl_llc2_p_timeout), |
36 | .mode = 0644, | 36 | .mode = 0644, |
37 | .proc_handler = proc_dointvec_jiffies, | 37 | .proc_handler = proc_dointvec_jiffies, |
38 | }, | 38 | }, |
39 | { | 39 | { |
40 | .procname = "rej", | 40 | .procname = "rej", |
41 | .data = &sysctl_llc2_rej_timeout, | 41 | .data = &sysctl_llc2_rej_timeout, |
42 | .maxlen = sizeof(long), | 42 | .maxlen = sizeof(sysctl_llc2_rej_timeout), |
43 | .mode = 0644, | 43 | .mode = 0644, |
44 | .proc_handler = proc_dointvec_jiffies, | 44 | .proc_handler = proc_dointvec_jiffies, |
45 | }, | 45 | }, |
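The llc2 timeout variables are ints, but the table declared maxlen = sizeof(long); on 64-bit builds proc_dointvec_jiffies() then treated each entry as two ints, leaking the adjacent variable on read and clobbering it on write. Sizing from the variable itself makes the table self-maintaining. A sketch with a hypothetical sysctl:

static int sysctl_my_timeout = 100;     /* hypothetical int-valued sysctl */

static struct ctl_table my_table[] = {
        {
                .procname       = "my_timeout",
                .data           = &sysctl_my_timeout,
                .maxlen         = sizeof(sysctl_my_timeout), /* never sizeof(long) */
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};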
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 4c5192e0d66c..4a95fe3cffbc 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c | |||
@@ -86,20 +86,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) | |||
86 | } | 86 | } |
87 | } | 87 | } |
88 | 88 | ||
89 | /* tear down aggregation sessions and remove STAs */ | ||
90 | mutex_lock(&local->sta_mtx); | ||
91 | list_for_each_entry(sta, &local->sta_list, list) { | ||
92 | if (sta->uploaded) { | ||
93 | enum ieee80211_sta_state state; | ||
94 | |||
95 | state = sta->sta_state; | ||
96 | for (; state > IEEE80211_STA_NOTEXIST; state--) | ||
97 | WARN_ON(drv_sta_state(local, sta->sdata, sta, | ||
98 | state, state - 1)); | ||
99 | } | ||
100 | } | ||
101 | mutex_unlock(&local->sta_mtx); | ||
102 | |||
103 | /* remove all interfaces that were created in the driver */ | 89 | /* remove all interfaces that were created in the driver */ |
104 | list_for_each_entry(sdata, &local->interfaces, list) { | 90 | list_for_each_entry(sdata, &local->interfaces, list) { |
105 | if (!ieee80211_sdata_running(sdata)) | 91 | if (!ieee80211_sdata_running(sdata)) |
@@ -111,6 +97,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) | |||
111 | case NL80211_IFTYPE_STATION: | 97 | case NL80211_IFTYPE_STATION: |
112 | ieee80211_mgd_quiesce(sdata); | 98 | ieee80211_mgd_quiesce(sdata); |
113 | break; | 99 | break; |
100 | case NL80211_IFTYPE_WDS: | ||
101 | /* tear down aggregation sessions and remove STAs */ | ||
102 | mutex_lock(&local->sta_mtx); | ||
103 | sta = sdata->u.wds.sta; | ||
104 | if (sta && sta->uploaded) { | ||
105 | enum ieee80211_sta_state state; | ||
106 | |||
107 | state = sta->sta_state; | ||
108 | for (; state > IEEE80211_STA_NOTEXIST; state--) | ||
109 | WARN_ON(drv_sta_state(local, sta->sdata, | ||
110 | sta, state, | ||
111 | state - 1)); | ||
112 | } | ||
113 | mutex_unlock(&local->sta_mtx); | ||
114 | break; | ||
114 | default: | 115 | default: |
115 | break; | 116 | break; |
116 | } | 117 | } |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 683b10f46505..d69ca513848e 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -272,7 +272,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, | |||
272 | else if (rate && rate->flags & IEEE80211_RATE_ERP_G) | 272 | else if (rate && rate->flags & IEEE80211_RATE_ERP_G) |
273 | channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; | 273 | channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; |
274 | else if (rate) | 274 | else if (rate) |
275 | channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; | 275 | channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ; |
276 | else | 276 | else |
277 | channel_flags |= IEEE80211_CHAN_2GHZ; | 277 | channel_flags |= IEEE80211_CHAN_2GHZ; |
278 | put_unaligned_le16(channel_flags, pos); | 278 | put_unaligned_le16(channel_flags, pos); |
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 84c8219c3e1c..f59adf8a4cd7 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c | |||
@@ -180,6 +180,11 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, | |||
180 | } | 180 | } |
181 | 181 | ||
182 | bpf_size = bpf_len * sizeof(*bpf_ops); | 182 | bpf_size = bpf_len * sizeof(*bpf_ops); |
183 | if (bpf_size != nla_len(tb[TCA_BPF_OPS])) { | ||
184 | ret = -EINVAL; | ||
185 | goto errout; | ||
186 | } | ||
187 | |||
183 | bpf_ops = kzalloc(bpf_size, GFP_KERNEL); | 188 | bpf_ops = kzalloc(bpf_size, GFP_KERNEL); |
184 | if (bpf_ops == NULL) { | 189 | if (bpf_ops == NULL) { |
185 | ret = -ENOMEM; | 190 | ret = -ENOMEM; |
@@ -215,15 +220,21 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp, | |||
215 | struct cls_bpf_head *head) | 220 | struct cls_bpf_head *head) |
216 | { | 221 | { |
217 | unsigned int i = 0x80000000; | 222 | unsigned int i = 0x80000000; |
223 | u32 handle; | ||
218 | 224 | ||
219 | do { | 225 | do { |
220 | if (++head->hgen == 0x7FFFFFFF) | 226 | if (++head->hgen == 0x7FFFFFFF) |
221 | head->hgen = 1; | 227 | head->hgen = 1; |
222 | } while (--i > 0 && cls_bpf_get(tp, head->hgen)); | 228 | } while (--i > 0 && cls_bpf_get(tp, head->hgen)); |
223 | if (i == 0) | 229 | |
230 | if (unlikely(i == 0)) { | ||
224 | pr_err("Insufficient number of handles\n"); | 231 | pr_err("Insufficient number of handles\n"); |
232 | handle = 0; | ||
233 | } else { | ||
234 | handle = head->hgen; | ||
235 | } | ||
225 | 236 | ||
226 | return i; | 237 | return handle; |
227 | } | 238 | } |
228 | 239 | ||
229 | static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, | 240 | static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, |
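Two independent fixes in cls_bpf: the element count supplied by userspace must agree with the actual netlink attribute payload before any copy sized from that count, and cls_bpf_grab_new_handle() was returning the loop counter i instead of the generated handle. A sketch of the validation pattern, using the names from the hunk:

bpf_size = bpf_len * sizeof(*bpf_ops);
if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
        return -EINVAL;                 /* count and payload disagree */

bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
if (!bpf_ops)
        return -ENOMEM;
memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size); /* now in bounds */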
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index f791edd64d6c..26d06dbcc1c8 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -1182,7 +1182,6 @@ void sctp_assoc_update(struct sctp_association *asoc, | |||
1182 | asoc->peer.peer_hmacs = new->peer.peer_hmacs; | 1182 | asoc->peer.peer_hmacs = new->peer.peer_hmacs; |
1183 | new->peer.peer_hmacs = NULL; | 1183 | new->peer.peer_hmacs = NULL; |
1184 | 1184 | ||
1185 | sctp_auth_key_put(asoc->asoc_shared_key); | ||
1186 | sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC); | 1185 | sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC); |
1187 | } | 1186 | } |
1188 | 1187 | ||
diff --git a/net/socket.c b/net/socket.c index a2c33a4dc7ba..418795caa897 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -869,9 +869,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos, | |||
869 | static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb, | 869 | static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb, |
870 | struct sock_iocb *siocb) | 870 | struct sock_iocb *siocb) |
871 | { | 871 | { |
872 | if (!is_sync_kiocb(iocb)) | ||
873 | BUG(); | ||
874 | |||
875 | siocb->kiocb = iocb; | 872 | siocb->kiocb = iocb; |
876 | iocb->private = siocb; | 873 | iocb->private = siocb; |
877 | return siocb; | 874 | return siocb; |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 7ca4b5133123..8887c6e5fca8 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -2854,6 +2854,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) | |||
2854 | if (!rdev->ops->get_key) | 2854 | if (!rdev->ops->get_key) |
2855 | return -EOPNOTSUPP; | 2855 | return -EOPNOTSUPP; |
2856 | 2856 | ||
2857 | if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) | ||
2858 | return -ENOENT; | ||
2859 | |||
2857 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | 2860 | msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); |
2858 | if (!msg) | 2861 | if (!msg) |
2859 | return -ENOMEM; | 2862 | return -ENOMEM; |
@@ -2873,10 +2876,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) | |||
2873 | nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr)) | 2876 | nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr)) |
2874 | goto nla_put_failure; | 2877 | goto nla_put_failure; |
2875 | 2878 | ||
2876 | if (pairwise && mac_addr && | ||
2877 | !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) | ||
2878 | return -ENOENT; | ||
2879 | |||
2880 | err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie, | 2879 | err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie, |
2881 | get_key_callback); | 2880 | get_key_callback); |
2882 | 2881 | ||
@@ -3047,7 +3046,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) | |||
3047 | wdev_lock(dev->ieee80211_ptr); | 3046 | wdev_lock(dev->ieee80211_ptr); |
3048 | err = nl80211_key_allowed(dev->ieee80211_ptr); | 3047 | err = nl80211_key_allowed(dev->ieee80211_ptr); |
3049 | 3048 | ||
3050 | if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr && | 3049 | if (key.type == NL80211_KEYTYPE_GROUP && mac_addr && |
3051 | !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) | 3050 | !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) |
3052 | err = -ENOENT; | 3051 | err = -ENOENT; |
3053 | 3052 | ||
diff --git a/net/wireless/util.c b/net/wireless/util.c index d0ac795445b7..5488c3662f7d 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -308,6 +308,12 @@ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc) | |||
308 | goto out; | 308 | goto out; |
309 | } | 309 | } |
310 | 310 | ||
311 | if (ieee80211_is_mgmt(fc)) { | ||
312 | if (ieee80211_has_order(fc)) | ||
313 | hdrlen += IEEE80211_HT_CTL_LEN; | ||
314 | goto out; | ||
315 | } | ||
316 | |||
311 | if (ieee80211_is_ctl(fc)) { | 317 | if (ieee80211_is_ctl(fc)) { |
312 | /* | 318 | /* |
313 | * ACK and CTS are 10 bytes, all others 16. To see how | 319 | * ACK and CTS are 10 bytes, all others 16. To see how |
diff --git a/samples/bpf/test_maps.c b/samples/bpf/test_maps.c index e286b42307f3..6299ee95cd11 100644 --- a/samples/bpf/test_maps.c +++ b/samples/bpf/test_maps.c | |||
@@ -69,9 +69,9 @@ static void test_hashmap_sanity(int i, void *data) | |||
69 | 69 | ||
70 | /* iterate over two elements */ | 70 | /* iterate over two elements */ |
71 | assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 && | 71 | assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 && |
72 | next_key == 2); | 72 | (next_key == 1 || next_key == 2)); |
73 | assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 && | 73 | assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 && |
74 | next_key == 1); | 74 | (next_key == 1 || next_key == 2)); |
75 | assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 && | 75 | assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 && |
76 | errno == ENOENT); | 76 | errno == ENOENT); |
77 | 77 | ||
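The test_maps fix acknowledges that a hash map promises nothing about the order bpf_get_next_key() walks its elements, so the test must assert membership rather than a fixed sequence. A self-contained analogue of the order-insensitive check:

#include <assert.h>

static int contains(const int *set, int n, int v)
{
        for (int i = 0; i < n; i++)
                if (set[i] == v)
                        return 1;
        return 0;
}

int main(void)
{
        int expected[] = { 1, 2 };
        int walked[]   = { 2, 1 };      /* any iteration order is legal */

        for (int i = 0; i < 2; i++)
                assert(contains(expected, 2, walked[i]));
        return 0;
}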
diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c index ec667f158f19..5d905d90d504 100644 --- a/sound/core/seq/seq_dummy.c +++ b/sound/core/seq/seq_dummy.c | |||
@@ -82,36 +82,6 @@ struct snd_seq_dummy_port { | |||
82 | static int my_client = -1; | 82 | static int my_client = -1; |
83 | 83 | ||
84 | /* | 84 | /* |
85 | * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events | ||
86 | * to subscribers. | ||
87 | * Note: this callback is called only after all subscribers are removed. | ||
88 | */ | ||
89 | static int | ||
90 | dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info) | ||
91 | { | ||
92 | struct snd_seq_dummy_port *p; | ||
93 | int i; | ||
94 | struct snd_seq_event ev; | ||
95 | |||
96 | p = private_data; | ||
97 | memset(&ev, 0, sizeof(ev)); | ||
98 | if (p->duplex) | ||
99 | ev.source.port = p->connect; | ||
100 | else | ||
101 | ev.source.port = p->port; | ||
102 | ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS; | ||
103 | ev.type = SNDRV_SEQ_EVENT_CONTROLLER; | ||
104 | for (i = 0; i < 16; i++) { | ||
105 | ev.data.control.channel = i; | ||
106 | ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF; | ||
107 | snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0); | ||
108 | ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS; | ||
109 | snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0); | ||
110 | } | ||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * event input callback - just redirect events to subscribers | 85 | * event input callback - just redirect events to subscribers |
116 | */ | 86 | */ |
117 | static int | 87 | static int |
@@ -175,7 +145,6 @@ create_port(int idx, int type) | |||
175 | | SNDRV_SEQ_PORT_TYPE_PORT; | 145 | | SNDRV_SEQ_PORT_TYPE_PORT; |
176 | memset(&pcb, 0, sizeof(pcb)); | 146 | memset(&pcb, 0, sizeof(pcb)); |
177 | pcb.owner = THIS_MODULE; | 147 | pcb.owner = THIS_MODULE; |
178 | pcb.unuse = dummy_unuse; | ||
179 | pcb.event_input = dummy_input; | 148 | pcb.event_input = dummy_input; |
180 | pcb.private_free = dummy_free; | 149 | pcb.private_free = dummy_free; |
181 | pcb.private_data = rec; | 150 | pcb.private_data = rec; |
diff --git a/sound/soc/adi/axi-i2s.c b/sound/soc/adi/axi-i2s.c index 7752860f7230..4c23381727a1 100644 --- a/sound/soc/adi/axi-i2s.c +++ b/sound/soc/adi/axi-i2s.c | |||
@@ -240,6 +240,8 @@ static int axi_i2s_probe(struct platform_device *pdev) | |||
240 | if (ret) | 240 | if (ret) |
241 | goto err_clk_disable; | 241 | goto err_clk_disable; |
242 | 242 | ||
243 | return 0; | ||
244 | |||
243 | err_clk_disable: | 245 | err_clk_disable: |
244 | clk_disable_unprepare(i2s->clk); | 246 | clk_disable_unprepare(i2s->clk); |
245 | return ret; | 247 | return ret; |
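The axi-i2s hunk restores a classic missing statement: without return 0; the successful probe path fell straight through the err_clk_disable label and disabled the clock it had just handed to the rest of the driver. A minimal sketch of the bug class, with hypothetical helpers:

static int my_probe(void)
{
        int ret;

        ret = enable_clock();
        if (ret)
                return ret;

        ret = register_component();
        if (ret)
                goto err_clk_disable;

        return 0;       /* the missing line: success must exit here */

err_clk_disable:
        disable_clock();
        return ret;
}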
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c index e5f2fb884bf3..30c673cdc12e 100644 --- a/sound/soc/codecs/pcm512x.c +++ b/sound/soc/codecs/pcm512x.c | |||
@@ -188,8 +188,8 @@ static const DECLARE_TLV_DB_SCALE(boost_tlv, 0, 80, 0); | |||
188 | static const char * const pcm512x_dsp_program_texts[] = { | 188 | static const char * const pcm512x_dsp_program_texts[] = { |
189 | "FIR interpolation with de-emphasis", | 189 | "FIR interpolation with de-emphasis", |
190 | "Low latency IIR with de-emphasis", | 190 | "Low latency IIR with de-emphasis", |
191 | "Fixed process flow", | ||
192 | "High attenuation with de-emphasis", | 191 | "High attenuation with de-emphasis", |
192 | "Fixed process flow", | ||
193 | "Ringing-less low latency FIR", | 193 | "Ringing-less low latency FIR", |
194 | }; | 194 | }; |
195 | 195 | ||
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c index 2cd4fe463102..1d1c7f8a9af2 100644 --- a/sound/soc/codecs/rt286.c +++ b/sound/soc/codecs/rt286.c | |||
@@ -861,10 +861,8 @@ static int rt286_hw_params(struct snd_pcm_substream *substream, | |||
861 | RT286_I2S_CTRL1, 0x0018, d_len_code << 3); | 861 | RT286_I2S_CTRL1, 0x0018, d_len_code << 3); |
862 | dev_dbg(codec->dev, "format val = 0x%x\n", val); | 862 | dev_dbg(codec->dev, "format val = 0x%x\n", val); |
863 | 863 | ||
864 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 864 | snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val); |
865 | snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val); | 865 | snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val); |
866 | else | ||
867 | snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val); | ||
868 | 866 | ||
869 | return 0; | 867 | return 0; |
870 | } | 868 | } |
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c index c0fbe1881439..918ada9738b0 100644 --- a/sound/soc/codecs/rt5677.c +++ b/sound/soc/codecs/rt5677.c | |||
@@ -2083,10 +2083,14 @@ static int rt5677_set_pll1_event(struct snd_soc_dapm_widget *w, | |||
2083 | struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); | 2083 | struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); |
2084 | 2084 | ||
2085 | switch (event) { | 2085 | switch (event) { |
2086 | case SND_SOC_DAPM_POST_PMU: | 2086 | case SND_SOC_DAPM_PRE_PMU: |
2087 | regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x2); | 2087 | regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x2); |
2088 | break; | ||
2089 | |||
2090 | case SND_SOC_DAPM_POST_PMU: | ||
2088 | regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x0); | 2091 | regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x0); |
2089 | break; | 2092 | break; |
2093 | |||
2090 | default: | 2094 | default: |
2091 | return 0; | 2095 | return 0; |
2092 | } | 2096 | } |
@@ -2101,10 +2105,14 @@ static int rt5677_set_pll2_event(struct snd_soc_dapm_widget *w, | |||
2101 | struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); | 2105 | struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); |
2102 | 2106 | ||
2103 | switch (event) { | 2107 | switch (event) { |
2104 | case SND_SOC_DAPM_POST_PMU: | 2108 | case SND_SOC_DAPM_PRE_PMU: |
2105 | regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x2); | 2109 | regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x2); |
2110 | break; | ||
2111 | |||
2112 | case SND_SOC_DAPM_POST_PMU: | ||
2106 | regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x0); | 2113 | regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x0); |
2107 | break; | 2114 | break; |
2115 | |||
2108 | default: | 2116 | default: |
2109 | return 0; | 2117 | return 0; |
2110 | } | 2118 | } |
@@ -2212,9 +2220,11 @@ static int rt5677_vref_event(struct snd_soc_dapm_widget *w, | |||
2212 | 2220 | ||
2213 | static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = { | 2221 | static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = { |
2214 | SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT, | 2222 | SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT, |
2215 | 0, rt5677_set_pll1_event, SND_SOC_DAPM_POST_PMU), | 2223 | 0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU | |
2224 | SND_SOC_DAPM_POST_PMU), | ||
2216 | SND_SOC_DAPM_SUPPLY("PLL2", RT5677_PWR_ANLG2, RT5677_PWR_PLL2_BIT, | 2225 | SND_SOC_DAPM_SUPPLY("PLL2", RT5677_PWR_ANLG2, RT5677_PWR_PLL2_BIT, |
2217 | 0, rt5677_set_pll2_event, SND_SOC_DAPM_POST_PMU), | 2226 | 0, rt5677_set_pll2_event, SND_SOC_DAPM_PRE_PMU | |
2227 | SND_SOC_DAPM_POST_PMU), | ||
2218 | 2228 | ||
2219 | /* Input Side */ | 2229 | /* Input Side */ |
2220 | /* micbias */ | 2230 | /* micbias */ |
diff --git a/sound/soc/codecs/ts3a227e.c b/sound/soc/codecs/ts3a227e.c index 1d1205702d23..9f2dced046de 100644 --- a/sound/soc/codecs/ts3a227e.c +++ b/sound/soc/codecs/ts3a227e.c | |||
@@ -254,6 +254,7 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c, | |||
254 | struct ts3a227e *ts3a227e; | 254 | struct ts3a227e *ts3a227e; |
255 | struct device *dev = &i2c->dev; | 255 | struct device *dev = &i2c->dev; |
256 | int ret; | 256 | int ret; |
257 | unsigned int acc_reg; | ||
257 | 258 | ||
258 | ts3a227e = devm_kzalloc(&i2c->dev, sizeof(*ts3a227e), GFP_KERNEL); | 259 | ts3a227e = devm_kzalloc(&i2c->dev, sizeof(*ts3a227e), GFP_KERNEL); |
259 | if (ts3a227e == NULL) | 260 | if (ts3a227e == NULL) |
@@ -283,6 +284,11 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c, | |||
283 | INTB_DISABLE | ADC_COMPLETE_INT_DISABLE, | 284 | INTB_DISABLE | ADC_COMPLETE_INT_DISABLE, |
284 | ADC_COMPLETE_INT_DISABLE); | 285 | ADC_COMPLETE_INT_DISABLE); |
285 | 286 | ||
287 | /* Read jack status because chip might not trigger interrupt at boot. */ | ||
288 | regmap_read(ts3a227e->regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg); | ||
289 | ts3a227e_new_jack_state(ts3a227e, acc_reg); | ||
290 | ts3a227e_jack_report(ts3a227e); | ||
291 | |||
286 | return 0; | 292 | return 0; |
287 | } | 293 | } |
288 | 294 | ||
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c index 4d2d2b1380d5..75b87c5c0f04 100644 --- a/sound/soc/codecs/wm8904.c +++ b/sound/soc/codecs/wm8904.c | |||
@@ -1076,10 +1076,13 @@ static const struct snd_soc_dapm_route adc_intercon[] = { | |||
1076 | { "Right Capture PGA", NULL, "Right Capture Mux" }, | 1076 | { "Right Capture PGA", NULL, "Right Capture Mux" }, |
1077 | { "Right Capture PGA", NULL, "Right Capture Inverting Mux" }, | 1077 | { "Right Capture PGA", NULL, "Right Capture Inverting Mux" }, |
1078 | 1078 | ||
1079 | { "AIFOUTL", "Left", "ADCL" }, | 1079 | { "AIFOUTL Mux", "Left", "ADCL" }, |
1080 | { "AIFOUTL", "Right", "ADCR" }, | 1080 | { "AIFOUTL Mux", "Right", "ADCR" }, |
1081 | { "AIFOUTR", "Left", "ADCL" }, | 1081 | { "AIFOUTR Mux", "Left", "ADCL" }, |
1082 | { "AIFOUTR", "Right", "ADCR" }, | 1082 | { "AIFOUTR Mux", "Right", "ADCR" }, |
1083 | |||
1084 | { "AIFOUTL", NULL, "AIFOUTL Mux" }, | ||
1085 | { "AIFOUTR", NULL, "AIFOUTR Mux" }, | ||
1083 | 1086 | ||
1084 | { "ADCL", NULL, "CLK_DSP" }, | 1087 | { "ADCL", NULL, "CLK_DSP" }, |
1085 | { "ADCL", NULL, "Left Capture PGA" }, | 1088 | { "ADCL", NULL, "Left Capture PGA" }, |
@@ -1089,12 +1092,16 @@ static const struct snd_soc_dapm_route adc_intercon[] = { | |||
1089 | }; | 1092 | }; |
1090 | 1093 | ||
1091 | static const struct snd_soc_dapm_route dac_intercon[] = { | 1094 | static const struct snd_soc_dapm_route dac_intercon[] = { |
1092 | { "DACL", "Right", "AIFINR" }, | 1095 | { "DACL Mux", "Left", "AIFINL" }, |
1093 | { "DACL", "Left", "AIFINL" }, | 1096 | { "DACL Mux", "Right", "AIFINR" }, |
1097 | |||
1098 | { "DACR Mux", "Left", "AIFINL" }, | ||
1099 | { "DACR Mux", "Right", "AIFINR" }, | ||
1100 | |||
1101 | { "DACL", NULL, "DACL Mux" }, | ||
1094 | { "DACL", NULL, "CLK_DSP" }, | 1102 | { "DACL", NULL, "CLK_DSP" }, |
1095 | 1103 | ||
1096 | { "DACR", "Right", "AIFINR" }, | 1104 | { "DACR", NULL, "DACR Mux" }, |
1097 | { "DACR", "Left", "AIFINL" }, | ||
1098 | { "DACR", NULL, "CLK_DSP" }, | 1105 | { "DACR", NULL, "CLK_DSP" }, |
1099 | 1106 | ||
1100 | { "Charge pump", NULL, "SYSCLK" }, | 1107 | { "Charge pump", NULL, "SYSCLK" }, |
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index 031a1ae71d94..a96eb497a379 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c | |||
@@ -556,7 +556,7 @@ static struct { | |||
556 | { 22050, 2 }, | 556 | { 22050, 2 }, |
557 | { 24000, 2 }, | 557 | { 24000, 2 }, |
558 | { 16000, 3 }, | 558 | { 16000, 3 }, |
559 | { 11250, 4 }, | 559 | { 11025, 4 }, |
560 | { 12000, 4 }, | 560 | { 12000, 4 }, |
561 | { 8000, 5 }, | 561 | { 8000, 5 }, |
562 | }; | 562 | }; |
diff --git a/sound/soc/fsl/fsl_esai.h b/sound/soc/fsl/fsl_esai.h index 91a550f4a10d..5e793bbb6b02 100644 --- a/sound/soc/fsl/fsl_esai.h +++ b/sound/soc/fsl/fsl_esai.h | |||
@@ -302,7 +302,7 @@ | |||
302 | #define ESAI_xCCR_xFP_MASK (((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT) | 302 | #define ESAI_xCCR_xFP_MASK (((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT) |
303 | #define ESAI_xCCR_xFP(v) ((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK) | 303 | #define ESAI_xCCR_xFP(v) ((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK) |
304 | #define ESAI_xCCR_xDC_SHIFT 9 | 304 | #define ESAI_xCCR_xDC_SHIFT 9 |
305 | #define ESAI_xCCR_xDC_WIDTH 4 | 305 | #define ESAI_xCCR_xDC_WIDTH 5 |
306 | #define ESAI_xCCR_xDC_MASK (((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT) | 306 | #define ESAI_xCCR_xDC_MASK (((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT) |
307 | #define ESAI_xCCR_xDC(v) ((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK) | 307 | #define ESAI_xCCR_xDC(v) ((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK) |
308 | #define ESAI_xCCR_xPSR_SHIFT 8 | 308 | #define ESAI_xCCR_xPSR_SHIFT 8 |
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index a65f17d57ffb..059496ed9ad7 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c | |||
@@ -1362,9 +1362,9 @@ static int fsl_ssi_probe(struct platform_device *pdev) | |||
1362 | } | 1362 | } |
1363 | 1363 | ||
1364 | ssi_private->irq = platform_get_irq(pdev, 0); | 1364 | ssi_private->irq = platform_get_irq(pdev, 0); |
1365 | if (!ssi_private->irq) { | 1365 | if (ssi_private->irq < 0) { |
1366 | dev_err(&pdev->dev, "no irq for node %s\n", np->full_name); | 1366 | dev_err(&pdev->dev, "no irq for node %s\n", np->full_name); |
1367 | return -ENXIO; | 1367 | return ssi_private->irq; |
1368 | } | 1368 | } |
1369 | 1369 | ||
1370 | /* Are the RX and the TX clocks locked? */ | 1370 | /* Are the RX and the TX clocks locked? */ |
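platform_get_irq() signals failure with a negative errno and does not use 0 as an error value, so the old if (!ssi_private->irq) both missed real failures and invented -ENXIO. The corrected idiom, with the error code propagated as-is:

int irq = platform_get_irq(pdev, 0);

if (irq < 0) {
        dev_err(&pdev->dev, "failed to get IRQ: %d\n", irq);
        return irq;     /* the real errno, not a made-up -ENXIO */
}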
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c index 4caacb05a623..cd146d4fa805 100644 --- a/sound/soc/fsl/imx-wm8962.c +++ b/sound/soc/fsl/imx-wm8962.c | |||
@@ -257,6 +257,7 @@ static int imx_wm8962_probe(struct platform_device *pdev) | |||
257 | if (ret) | 257 | if (ret) |
258 | goto clk_fail; | 258 | goto clk_fail; |
259 | data->card.num_links = 1; | 259 | data->card.num_links = 1; |
260 | data->card.owner = THIS_MODULE; | ||
260 | data->card.dai_link = &data->dai; | 261 | data->card.dai_link = &data->dai; |
261 | data->card.dapm_widgets = imx_wm8962_dapm_widgets; | 262 | data->card.dapm_widgets = imx_wm8962_dapm_widgets; |
262 | data->card.num_dapm_widgets = ARRAY_SIZE(imx_wm8962_dapm_widgets); | 263 | data->card.num_dapm_widgets = ARRAY_SIZE(imx_wm8962_dapm_widgets); |
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index fb9240fdc9b7..7fe3009b1c43 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c | |||
@@ -452,9 +452,8 @@ static int asoc_simple_card_parse_of(struct device_node *node, | |||
452 | } | 452 | } |
453 | 453 | ||
454 | /* Decrease the reference count of the device nodes */ | 454 | /* Decrease the reference count of the device nodes */ |
455 | static int asoc_simple_card_unref(struct platform_device *pdev) | 455 | static int asoc_simple_card_unref(struct snd_soc_card *card) |
456 | { | 456 | { |
457 | struct snd_soc_card *card = platform_get_drvdata(pdev); | ||
458 | struct snd_soc_dai_link *dai_link; | 457 | struct snd_soc_dai_link *dai_link; |
459 | int num_links; | 458 | int num_links; |
460 | 459 | ||
@@ -556,7 +555,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev) | |||
556 | return ret; | 555 | return ret; |
557 | 556 | ||
558 | err: | 557 | err: |
559 | asoc_simple_card_unref(pdev); | 558 | asoc_simple_card_unref(&priv->snd_card); |
560 | return ret; | 559 | return ret; |
561 | } | 560 | } |
562 | 561 | ||
@@ -572,7 +571,7 @@ static int asoc_simple_card_remove(struct platform_device *pdev) | |||
572 | snd_soc_jack_free_gpios(&simple_card_mic_jack, 1, | 571 | snd_soc_jack_free_gpios(&simple_card_mic_jack, 1, |
573 | &simple_card_mic_jack_gpio); | 572 | &simple_card_mic_jack_gpio); |
574 | 573 | ||
575 | return asoc_simple_card_unref(pdev); | 574 | return asoc_simple_card_unref(card); |
576 | } | 575 | } |
577 | 576 | ||
578 | static const struct of_device_id asoc_simple_of_match[] = { | 577 | static const struct of_device_id asoc_simple_of_match[] = { |
diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/sst-firmware.c index ef2e8b5766a1..b3f9489794a6 100644 --- a/sound/soc/intel/sst-firmware.c +++ b/sound/soc/intel/sst-firmware.c | |||
@@ -706,6 +706,7 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba | |||
706 | struct list_head *block_list) | 706 | struct list_head *block_list) |
707 | { | 707 | { |
708 | struct sst_mem_block *block, *tmp; | 708 | struct sst_mem_block *block, *tmp; |
709 | struct sst_block_allocator ba_tmp = *ba; | ||
709 | u32 end = ba->offset + ba->size, block_end; | 710 | u32 end = ba->offset + ba->size, block_end; |
710 | int err; | 711 | int err; |
711 | 712 | ||
@@ -730,9 +731,9 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba | |||
730 | if (ba->offset >= block->offset && ba->offset < block_end) { | 731 | if (ba->offset >= block->offset && ba->offset < block_end) { |
731 | 732 | ||
732 | /* align ba to block boundary */ | 733 | /* align ba to block boundary */ |
733 | ba->size -= block_end - ba->offset; | 734 | ba_tmp.size -= block_end - ba->offset; |
734 | ba->offset = block_end; | 735 | ba_tmp.offset = block_end; |
735 | err = block_alloc_contiguous(dsp, ba, block_list); | 736 | err = block_alloc_contiguous(dsp, &ba_tmp, block_list); |
736 | if (err < 0) | 737 | if (err < 0) |
737 | return -ENOMEM; | 738 | return -ENOMEM; |
738 | 739 | ||
@@ -767,10 +768,10 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba | |||
767 | list_move(&block->list, &dsp->used_block_list); | 768 | list_move(&block->list, &dsp->used_block_list); |
768 | list_add(&block->module_list, block_list); | 769 | list_add(&block->module_list, block_list); |
769 | /* align ba to block boundary */ | 770 | /* align ba to block boundary */ |
770 | ba->size -= block_end - ba->offset; | 771 | ba_tmp.size -= block_end - ba->offset; |
771 | ba->offset = block_end; | 772 | ba_tmp.offset = block_end; |
772 | 773 | ||
773 | err = block_alloc_contiguous(dsp, ba, block_list); | 774 | err = block_alloc_contiguous(dsp, &ba_tmp, block_list); |
774 | if (err < 0) | 775 | if (err < 0) |
775 | return -ENOMEM; | 776 | return -ENOMEM; |
776 | 777 | ||
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c index 3f8c48231364..5bf14040c24a 100644 --- a/sound/soc/intel/sst-haswell-ipc.c +++ b/sound/soc/intel/sst-haswell-ipc.c | |||
@@ -1228,6 +1228,11 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream) | |||
1228 | struct sst_dsp *sst = hsw->dsp; | 1228 | struct sst_dsp *sst = hsw->dsp; |
1229 | unsigned long flags; | 1229 | unsigned long flags; |
1230 | 1230 | ||
1231 | if (!stream) { | ||
1232 | dev_warn(hsw->dev, "warning: stream is NULL, no stream to free, ignoring it.\n"); | ||
1233 | return 0; | ||
1234 | } | ||
1235 | |||
1231 | /* dont free DSP streams that are not commited */ | 1236 | /* dont free DSP streams that are not commited */ |
1232 | if (!stream->commited) | 1237 | if (!stream->commited) |
1233 | goto out; | 1238 | goto out; |
@@ -1415,6 +1420,16 @@ int sst_hsw_stream_commit(struct sst_hsw *hsw, struct sst_hsw_stream *stream) | |||
1415 | u32 header; | 1420 | u32 header; |
1416 | int ret; | 1421 | int ret; |
1417 | 1422 | ||
1423 | if (!stream) { | ||
1424 | dev_warn(hsw->dev, "warning: stream is NULL, no stream to commit, ignoring it.\n"); | ||
1425 | return 0; | ||
1426 | } | ||
1427 | |||
1428 | if (stream->commited) { | ||
1429 | dev_warn(hsw->dev, "warning: stream is already committed, ignoring it.\n"); | ||
1430 | return 0; | ||
1431 | } | ||
1432 | |||
1418 | trace_ipc_request("stream alloc", stream->host_id); | 1433 | trace_ipc_request("stream alloc", stream->host_id); |
1419 | 1434 | ||
1420 | header = IPC_GLB_TYPE(IPC_GLB_ALLOCATE_STREAM); | 1435 | header = IPC_GLB_TYPE(IPC_GLB_ALLOCATE_STREAM); |
@@ -1519,6 +1534,11 @@ int sst_hsw_stream_pause(struct sst_hsw *hsw, struct sst_hsw_stream *stream, | |||
1519 | { | 1534 | { |
1520 | int ret; | 1535 | int ret; |
1521 | 1536 | ||
1537 | if (!stream) { | ||
1538 | dev_warn(hsw->dev, "warning: stream is NULL, no stream to pause, ignoring it.\n"); | ||
1539 | return 0; | ||
1540 | } | ||
1541 | |||
1522 | trace_ipc_request("stream pause", stream->reply.stream_hw_id); | 1542 | trace_ipc_request("stream pause", stream->reply.stream_hw_id); |
1523 | 1543 | ||
1524 | ret = sst_hsw_stream_operations(hsw, IPC_STR_PAUSE, | 1544 | ret = sst_hsw_stream_operations(hsw, IPC_STR_PAUSE, |
@@ -1535,6 +1555,11 @@ int sst_hsw_stream_resume(struct sst_hsw *hsw, struct sst_hsw_stream *stream, | |||
1535 | { | 1555 | { |
1536 | int ret; | 1556 | int ret; |
1537 | 1557 | ||
1558 | if (!stream) { | ||
1559 | dev_warn(hsw->dev, "warning: stream is NULL, no stream to resume, ignoring it.\n"); | ||
1560 | return 0; | ||
1561 | } | ||
1562 | |||
1538 | trace_ipc_request("stream resume", stream->reply.stream_hw_id); | 1563 | trace_ipc_request("stream resume", stream->reply.stream_hw_id); |
1539 | 1564 | ||
1540 | ret = sst_hsw_stream_operations(hsw, IPC_STR_RESUME, | 1565 | ret = sst_hsw_stream_operations(hsw, IPC_STR_RESUME, |
@@ -1550,6 +1575,11 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream) | |||
1550 | { | 1575 | { |
1551 | int ret, tries = 10; | 1576 | int ret, tries = 10; |
1552 | 1577 | ||
1578 | if (!stream) { | ||
1579 | dev_warn(hsw->dev, "warning: stream is NULL, no stream to reset, ignoring it.\n"); | ||
1580 | return 0; | ||
1581 | } | ||
1582 | |||
1553 | /* dont reset streams that are not commited */ | 1583 | /* dont reset streams that are not commited */ |
1554 | if (!stream->commited) | 1584 | if (!stream->commited) |
1555 | return 0; | 1585 | return 0; |
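All five sst-haswell-ipc.c guards follow one pattern: a public stream operation called with a NULL stream warns and reports success rather than dereferencing the pointer, which tolerates callers that race with or repeat stream teardown. A minimal sketch of the guard, with hypothetical types:

    #include <stdio.h>

    struct stream { int hw_id; };

    static int stream_pause(struct stream *s)
    {
            /* warn and succeed on NULL instead of crashing */
            if (!s) {
                    fprintf(stderr, "stream is NULL, nothing to pause\n");
                    return 0;
            }
            printf("pausing stream %d\n", s->hw_id);
            return 0;
    }

    int main(void)
    {
            struct stream s = { .hw_id = 3 };

            stream_pause(&s);
            stream_pause(NULL);     /* tolerated: warns, returns 0 */
            return 0;
    }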
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c index 8b79cafab1e2..c7eb9dd67f60 100644 --- a/sound/soc/omap/omap-mcbsp.c +++ b/sound/soc/omap/omap-mcbsp.c | |||
@@ -434,7 +434,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai, | |||
434 | case SND_SOC_DAIFMT_CBM_CFS: | 434 | case SND_SOC_DAIFMT_CBM_CFS: |
435 | /* McBSP slave. FS clock as output */ | 435 | /* McBSP slave. FS clock as output */ |
436 | regs->srgr2 |= FSGM; | 436 | regs->srgr2 |= FSGM; |
437 | regs->pcr0 |= FSXM; | 437 | regs->pcr0 |= FSXM | FSRM; |
438 | break; | 438 | break; |
439 | case SND_SOC_DAIFMT_CBM_CFM: | 439 | case SND_SOC_DAIFMT_CBM_CFM: |
440 | /* McBSP slave */ | 440 | /* McBSP slave */ |
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c index 13d8507333b8..dcc26eda0539 100644 --- a/sound/soc/rockchip/rockchip_i2s.c +++ b/sound/soc/rockchip/rockchip_i2s.c | |||
@@ -335,6 +335,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = { | |||
335 | SNDRV_PCM_FMTBIT_S24_LE), | 335 | SNDRV_PCM_FMTBIT_S24_LE), |
336 | }, | 336 | }, |
337 | .ops = &rockchip_i2s_dai_ops, | 337 | .ops = &rockchip_i2s_dai_ops, |
338 | .symmetric_rates = 1, | ||
338 | }; | 339 | }; |
339 | 340 | ||
340 | static const struct snd_soc_component_driver rockchip_i2s_component = { | 341 | static const struct snd_soc_component_driver rockchip_i2s_component = { |
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c index 590a82f01d0b..025c38fbe3c0 100644 --- a/sound/soc/soc-compress.c +++ b/sound/soc/soc-compress.c | |||
@@ -659,7 +659,8 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num) | |||
659 | rtd->dai_link->stream_name); | 659 | rtd->dai_link->stream_name); |
660 | 660 | ||
661 | ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num, | 661 | ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num, |
662 | 1, 0, &be_pcm); | 662 | rtd->dai_link->dpcm_playback, |
663 | rtd->dai_link->dpcm_capture, &be_pcm); | ||
663 | if (ret < 0) { | 664 | if (ret < 0) { |
664 | dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n", | 665 | dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n", |
665 | rtd->dai_link->name); | 666 | rtd->dai_link->name); |
@@ -668,8 +669,10 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num) | |||
668 | 669 | ||
669 | rtd->pcm = be_pcm; | 670 | rtd->pcm = be_pcm; |
670 | rtd->fe_compr = 1; | 671 | rtd->fe_compr = 1; |
671 | be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd; | 672 | if (rtd->dai_link->dpcm_playback) |
672 | be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd; | 673 | be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd; |
674 | else if (rtd->dai_link->dpcm_capture) | ||
675 | be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd; | ||
673 | memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops)); | 676 | memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops)); |
674 | } else | 677 | } else |
675 | memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops)); | 678 | memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops)); |
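The soc-compress.c fix stops hard-coding one playback and zero capture substreams for the internal BE PCM: the counts now come from the dai_link's dpcm_playback and dpcm_capture flags, and the same flags guard which substream gets its private_data set, so a capture-only link never touches a playback substream that was never created. A standalone sketch of that allocate-and-guard-with-the-same-flags shape (the names are stand-ins for the ASoC structures):

    #include <stdio.h>
    #include <stdlib.h>

    struct substream { void *private_data; };
    struct pcm { struct substream *playback, *capture; };

    /* allocate only the directions the link declares */
    static struct pcm *pcm_new(int playback, int capture)
    {
            struct pcm *p = calloc(1, sizeof(*p));

            if (!p)
                    return NULL;
            if (playback)
                    p->playback = calloc(1, sizeof(*p->playback));
            if (capture)
                    p->capture = calloc(1, sizeof(*p->capture));
            return p;
    }

    int main(void)
    {
            int dpcm_playback = 0, dpcm_capture = 1;        /* capture-only link */
            struct pcm *pcm = pcm_new(dpcm_playback, dpcm_capture);
            int rtd = 42;

            if (!pcm)
                    return 1;
            /* guard accesses with the same flags used for allocation */
            if (dpcm_playback)
                    pcm->playback->private_data = &rtd;
            else if (dpcm_capture)
                    pcm->capture->private_data = &rtd;      /* no NULL dereference */

            free(pcm->playback);
            free(pcm->capture);
            free(pcm);
            return 0;
    }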
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c index 790ceba6ad3f..28431d1bbcf5 100644 --- a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c +++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c | |||
@@ -5,7 +5,10 @@ | |||
5 | * ANY CHANGES MADE HERE WILL BE LOST! | 5 | * ANY CHANGES MADE HERE WILL BE LOST! |
6 | * | 6 | * |
7 | */ | 7 | */ |
8 | 8 | #include <stdbool.h> | |
9 | #ifndef HAS_BOOL | ||
10 | # define HAS_BOOL 1 | ||
11 | #endif | ||
9 | #line 1 "Context.xs" | 12 | #line 1 "Context.xs" |
10 | /* | 13 | /* |
11 | * Context.xs. XS interfaces for perf script. | 14 | * Context.xs. XS interfaces for perf script. |
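The Context.c change pulls the C99 bool into scope before any Perl header runs and pre-defines HAS_BOOL; as I read it, that keeps Perl's own headers from supplying a conflicting bool definition when this generated file is rebuilt against newer toolchains. The shape of the guard, reduced to a trivially compilable example:

    #include <stdbool.h>
    #ifndef HAS_BOOL
    # define HAS_BOOL 1     /* tell later headers that bool already exists */
    #endif

    #include <stdio.h>

    int main(void)
    {
            bool ok = true;         /* stdbool's bool */

            printf("%d\n", ok);
            return 0;
    }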
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 79999ceaf2be..01bc4e23a2cf 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c | |||
@@ -177,14 +177,17 @@ static int lock__parse(struct ins_operands *ops) | |||
177 | goto out_free_ops; | 177 | goto out_free_ops; |
178 | 178 | ||
179 | ops->locked.ins = ins__find(name); | 179 | ops->locked.ins = ins__find(name); |
180 | free(name); | ||
181 | |||
180 | if (ops->locked.ins == NULL) | 182 | if (ops->locked.ins == NULL) |
181 | goto out_free_ops; | 183 | goto out_free_ops; |
182 | 184 | ||
183 | if (!ops->locked.ins->ops) | 185 | if (!ops->locked.ins->ops) |
184 | return 0; | 186 | return 0; |
185 | 187 | ||
186 | if (ops->locked.ins->ops->parse) | 188 | if (ops->locked.ins->ops->parse && |
187 | ops->locked.ins->ops->parse(ops->locked.ops); | 189 | ops->locked.ins->ops->parse(ops->locked.ops) < 0) |
190 | goto out_free_ops; | ||
188 | 191 | ||
189 | return 0; | 192 | return 0; |
190 | 193 | ||
@@ -208,6 +211,13 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size, | |||
208 | 211 | ||
209 | static void lock__delete(struct ins_operands *ops) | 212 | static void lock__delete(struct ins_operands *ops) |
210 | { | 213 | { |
214 | struct ins *ins = ops->locked.ins; | ||
215 | |||
216 | if (ins && ins->ops->free) | ||
217 | ins->ops->free(ops->locked.ops); | ||
218 | else | ||
219 | ins__delete(ops->locked.ops); | ||
220 | |||
211 | zfree(&ops->locked.ops); | 221 | zfree(&ops->locked.ops); |
212 | zfree(&ops->target.raw); | 222 | zfree(&ops->target.raw); |
213 | zfree(&ops->target.name); | 223 | zfree(&ops->target.name); |
@@ -531,8 +541,8 @@ static void disasm_line__init_ins(struct disasm_line *dl) | |||
531 | if (!dl->ins->ops) | 541 | if (!dl->ins->ops) |
532 | return; | 542 | return; |
533 | 543 | ||
534 | if (dl->ins->ops->parse) | 544 | if (dl->ins->ops->parse && dl->ins->ops->parse(&dl->ops) < 0) |
535 | dl->ins->ops->parse(&dl->ops); | 545 | dl->ins = NULL; |
536 | } | 546 | } |
537 | 547 | ||
538 | static int disasm_line__parse(char *line, char **namep, char **rawp) | 548 | static int disasm_line__parse(char *line, char **namep, char **rawp) |
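Three distinct problems are closed in annotate.c: the name buffer from lock__parse() is now freed on every path instead of leaking, a failing parse of the nested instruction propagates an error (and disasm_line__init_ins() drops dl->ins when that happens), and lock__delete() routes teardown through the nested instruction's own free hook when one exists. A standalone sketch of that destructor dispatch, with simplified hypothetical types:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ops { char *raw; };
    struct ins_ops { void (*free)(struct ops *); };
    struct ins { const struct ins_ops *ops; };

    static void generic_delete(struct ops *o)
    {
            free(o->raw);
            o->raw = NULL;
    }

    static void jump_free(struct ops *o)    /* type-specific teardown */
    {
            puts("jump-specific free");
            generic_delete(o);
    }

    static const struct ins_ops jump_ops = { .free = jump_free };

    /* dispatch as in the patched lock__delete(): prefer the nested
     * instruction's own destructor, fall back to the generic one */
    static void locked_delete(const struct ins *ins, struct ops *o)
    {
            if (ins && ins->ops->free)
                    ins->ops->free(o);
            else
                    generic_delete(o);
    }

    int main(void)
    {
            struct ins jump = { .ops = &jump_ops };
            struct ops o = { .raw = strdup("target") };

            locked_delete(&jump, &o);
            locked_delete(NULL, &o);        /* generic path; freeing NULL is safe */
            return 0;
    }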
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index cbab1fb77b1d..2e507b5025a3 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c | |||
@@ -1445,7 +1445,7 @@ int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused, | |||
1445 | case ENOENT: | 1445 | case ENOENT: |
1446 | scnprintf(buf, size, "%s", | 1446 | scnprintf(buf, size, "%s", |
1447 | "Error:\tUnable to find debugfs\n" | 1447 | "Error:\tUnable to find debugfs\n" |
1448 | "Hint:\tWas your kernel was compiled with debugfs support?\n" | 1448 | "Hint:\tWas your kernel compiled with debugfs support?\n" |
1449 | "Hint:\tIs the debugfs filesystem mounted?\n" | 1449 | "Hint:\tIs the debugfs filesystem mounted?\n" |
1450 | "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'"); | 1450 | "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'"); |
1451 | break; | 1451 | break; |
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 6951a9d42339..0e42438b1e59 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h | |||
@@ -116,6 +116,22 @@ struct thread; | |||
116 | #define map__for_each_symbol(map, pos, n) \ | 116 | #define map__for_each_symbol(map, pos, n) \ |
117 | dso__for_each_symbol(map->dso, pos, n, map->type) | 117 | dso__for_each_symbol(map->dso, pos, n, map->type) |
118 | 118 | ||
119 | /* map__for_each_symbol_by_name - iterate over the symbols in the given map | ||
120 | * that have the given name | ||
121 | * | ||
122 | * @map: the 'struct map *' in which symbols are iterated | ||
123 | * @sym_name: the symbol name | ||
124 | * @pos: the 'struct symbol *' to use as a loop cursor | ||
125 | * @filter: the filter to use when loading the DSO | ||
126 | */ | ||
127 | #define __map__for_each_symbol_by_name(map, sym_name, pos, filter) \ | ||
128 | for (pos = map__find_symbol_by_name(map, sym_name, filter); \ | ||
129 | pos && strcmp(pos->name, sym_name) == 0; \ | ||
130 | pos = symbol__next_by_name(pos)) | ||
131 | |||
132 | #define map__for_each_symbol_by_name(map, sym_name, pos) \ | ||
133 | __map__for_each_symbol_by_name(map, sym_name, (pos), NULL) | ||
134 | |||
119 | typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); | 135 | typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym); |
120 | 136 | ||
121 | void map__init(struct map *map, enum map_type type, | 137 | void map__init(struct map *map, enum map_type type, |
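The new map__for_each_symbol_by_name() iterator leans on two contracts established by the symbol.c hunk below: map__find_symbol_by_name() returns the first entry carrying the name, and symbol__next_by_name() walks the name-sorted tree so the loop stops as soon as the name changes. A standalone model of that contract, with a sorted array standing in for the rb-tree (all names hypothetical):

    #include <stdio.h>
    #include <string.h>

    struct symbol { const char *name; };

    /* sorted by name, duplicates adjacent -- like the name-sorted rb-tree */
    static struct symbol syms[] = {
            { "bar" }, { "foo" }, { "foo" }, { "foo" }, { "zap" },
    };
    #define NSYMS (sizeof(syms) / sizeof(syms[0]))

    static struct symbol *find_by_name(const char *name)
    {
            for (size_t i = 0; i < NSYMS; i++)
                    if (!strcmp(syms[i].name, name))
                            return &syms[i];        /* first entry with the name */
            return NULL;
    }

    static struct symbol *next_by_name(struct symbol *sym)
    {
            return (sym + 1 < syms + NSYMS) ? sym + 1 : NULL;
    }

    /* same shape as __map__for_each_symbol_by_name() */
    #define for_each_symbol_by_name(name, pos)              \
            for (pos = find_by_name(name);                  \
                 pos && strcmp(pos->name, name) == 0;       \
                 pos = next_by_name(pos))

    int main(void)
    {
            struct symbol *pos;
            int n = 0;

            for_each_symbol_by_name("foo", pos)
                    n++;
            printf("matched %d symbols named foo\n", n);    /* prints 3 */
            return 0;
    }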
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 94a717bf007d..919937eb0be2 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c | |||
@@ -446,7 +446,7 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs, | |||
446 | } | 446 | } |
447 | 447 | ||
448 | for (i = 0; i < ntevs; i++) { | 448 | for (i = 0; i < ntevs; i++) { |
449 | if (tevs[i].point.address) { | 449 | if (tevs[i].point.address && !tevs[i].point.retprobe) { |
450 | tmp = strdup(reloc_sym->name); | 450 | tmp = strdup(reloc_sym->name); |
451 | if (!tmp) | 451 | if (!tmp) |
452 | return -ENOMEM; | 452 | return -ENOMEM; |
@@ -2193,18 +2193,17 @@ static int __add_probe_trace_events(struct perf_probe_event *pev, | |||
2193 | return ret; | 2193 | return ret; |
2194 | } | 2194 | } |
2195 | 2195 | ||
2196 | static char *looking_function_name; | 2196 | static int find_probe_functions(struct map *map, char *name) |
2197 | static int num_matched_functions; | ||
2198 | |||
2199 | static int probe_function_filter(struct map *map __maybe_unused, | ||
2200 | struct symbol *sym) | ||
2201 | { | 2197 | { |
2202 | if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) && | 2198 | int found = 0; |
2203 | strcmp(looking_function_name, sym->name) == 0) { | 2199 | struct symbol *sym; |
2204 | num_matched_functions++; | 2200 | |
2205 | return 0; | 2201 | map__for_each_symbol_by_name(map, name, sym) { |
2202 | if (sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) | ||
2203 | found++; | ||
2206 | } | 2204 | } |
2207 | return 1; | 2205 | |
2206 | return found; | ||
2208 | } | 2207 | } |
2209 | 2208 | ||
2210 | #define strdup_or_goto(str, label) \ | 2209 | #define strdup_or_goto(str, label) \ |
@@ -2222,10 +2221,10 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, | |||
2222 | struct kmap *kmap = NULL; | 2221 | struct kmap *kmap = NULL; |
2223 | struct ref_reloc_sym *reloc_sym = NULL; | 2222 | struct ref_reloc_sym *reloc_sym = NULL; |
2224 | struct symbol *sym; | 2223 | struct symbol *sym; |
2225 | struct rb_node *nd; | ||
2226 | struct probe_trace_event *tev; | 2224 | struct probe_trace_event *tev; |
2227 | struct perf_probe_point *pp = &pev->point; | 2225 | struct perf_probe_point *pp = &pev->point; |
2228 | struct probe_trace_point *tp; | 2226 | struct probe_trace_point *tp; |
2227 | int num_matched_functions; | ||
2229 | int ret, i; | 2228 | int ret, i; |
2230 | 2229 | ||
2231 | /* Init maps of given executable or kernel */ | 2230 | /* Init maps of given executable or kernel */ |
@@ -2242,10 +2241,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, | |||
2242 | * Load matched symbols: Since the different local symbols may have | 2241 | * Load matched symbols: Since the different local symbols may have |
2243 | * same name but different addresses, this lists all the symbols. | 2242 | * same name but different addresses, this lists all the symbols. |
2244 | */ | 2243 | */ |
2245 | num_matched_functions = 0; | 2244 | num_matched_functions = find_probe_functions(map, pp->function); |
2246 | looking_function_name = pp->function; | 2245 | if (num_matched_functions == 0) { |
2247 | ret = map__load(map, probe_function_filter); | ||
2248 | if (ret || num_matched_functions == 0) { | ||
2249 | pr_err("Failed to find symbol %s in %s\n", pp->function, | 2246 | pr_err("Failed to find symbol %s in %s\n", pp->function, |
2250 | target ? : "kernel"); | 2247 | target ? : "kernel"); |
2251 | ret = -ENOENT; | 2248 | ret = -ENOENT; |
@@ -2257,7 +2254,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, | |||
2257 | goto out; | 2254 | goto out; |
2258 | } | 2255 | } |
2259 | 2256 | ||
2260 | if (!pev->uprobes) { | 2257 | if (!pev->uprobes && !pp->retprobe) { |
2261 | kmap = map__kmap(map); | 2258 | kmap = map__kmap(map); |
2262 | reloc_sym = kmap->ref_reloc_sym; | 2259 | reloc_sym = kmap->ref_reloc_sym; |
2263 | if (!reloc_sym) { | 2260 | if (!reloc_sym) { |
@@ -2275,7 +2272,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, | |||
2275 | } | 2272 | } |
2276 | 2273 | ||
2277 | ret = 0; | 2274 | ret = 0; |
2278 | map__for_each_symbol(map, sym, nd) { | 2275 | |
2276 | map__for_each_symbol_by_name(map, pp->function, sym) { | ||
2279 | tev = (*tevs) + ret; | 2277 | tev = (*tevs) + ret; |
2280 | tp = &tev->point; | 2278 | tp = &tev->point; |
2281 | if (ret == num_matched_functions) { | 2279 | if (ret == num_matched_functions) { |
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index c24c5b83156c..a194702a0a2f 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -396,6 +396,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols, | |||
396 | const char *name) | 396 | const char *name) |
397 | { | 397 | { |
398 | struct rb_node *n; | 398 | struct rb_node *n; |
399 | struct symbol_name_rb_node *s; | ||
399 | 400 | ||
400 | if (symbols == NULL) | 401 | if (symbols == NULL) |
401 | return NULL; | 402 | return NULL; |
@@ -403,7 +404,6 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols, | |||
403 | n = symbols->rb_node; | 404 | n = symbols->rb_node; |
404 | 405 | ||
405 | while (n) { | 406 | while (n) { |
406 | struct symbol_name_rb_node *s; | ||
407 | int cmp; | 407 | int cmp; |
408 | 408 | ||
409 | s = rb_entry(n, struct symbol_name_rb_node, rb_node); | 409 | s = rb_entry(n, struct symbol_name_rb_node, rb_node); |
@@ -414,10 +414,24 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols, | |||
414 | else if (cmp > 0) | 414 | else if (cmp > 0) |
415 | n = n->rb_right; | 415 | n = n->rb_right; |
416 | else | 416 | else |
417 | return &s->sym; | 417 | break; |
418 | } | 418 | } |
419 | 419 | ||
420 | return NULL; | 420 | if (n == NULL) |
421 | return NULL; | ||
422 | |||
423 | /* return first symbol that has same name (if any) */ | ||
424 | for (n = rb_prev(n); n; n = rb_prev(n)) { | ||
425 | struct symbol_name_rb_node *tmp; | ||
426 | |||
427 | tmp = rb_entry(n, struct symbol_name_rb_node, rb_node); | ||
428 | if (strcmp(tmp->sym.name, s->sym.name)) | ||
429 | break; | ||
430 | |||
431 | s = tmp; | ||
432 | } | ||
433 | |||
434 | return &s->sym; | ||
421 | } | 435 | } |
422 | 436 | ||
423 | struct symbol *dso__find_symbol(struct dso *dso, | 437 | struct symbol *dso__find_symbol(struct dso *dso, |
@@ -436,6 +450,17 @@ struct symbol *dso__next_symbol(struct symbol *sym) | |||
436 | return symbols__next(sym); | 450 | return symbols__next(sym); |
437 | } | 451 | } |
438 | 452 | ||
453 | struct symbol *symbol__next_by_name(struct symbol *sym) | ||
454 | { | ||
455 | struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym); | ||
456 | struct rb_node *n = rb_next(&s->rb_node); | ||
457 | |||
458 | return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL; | ||
459 | } | ||
460 | |||
461 | /* | ||
462 | * Returns the first symbol that matches @name. | ||
463 | */ | ||
439 | struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, | 464 | struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, |
440 | const char *name) | 465 | const char *name) |
441 | { | 466 | { |
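The symbols__find_by_name() rework is what makes the iterator above sound: a binary search over a tree with duplicate keys can land on any of the equal entries, so after a hit the code steps back through rb_prev() until the name changes and returns the first match. The same rewind on a sorted array, for illustration only:

    #include <stdio.h>
    #include <string.h>

    /* sorted, with duplicate keys -- the rb-tree case in miniature */
    static const char *names[] = { "a", "foo", "foo", "foo", "x" };

    static int find_first(const char *key)
    {
            int lo = 0, hi = 4, hit = -1;

            while (lo <= hi) {
                    int mid = lo + (hi - lo) / 2;
                    int cmp = strcmp(key, names[mid]);

                    if (cmp < 0)
                            hi = mid - 1;
                    else if (cmp > 0)
                            lo = mid + 1;
                    else {
                            hit = mid;      /* a match, not necessarily the first */
                            break;
                    }
            }
            /* the rb_prev() rewind: step back while the key still matches */
            while (hit > 0 && !strcmp(names[hit - 1], key))
                    hit--;
            return hit;     /* -1 if key absent */
    }

    int main(void)
    {
            printf("first \"foo\" at index %d\n", find_first("foo"));       /* 1 */
            return 0;
    }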
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 9d602e9c6f59..1650dcb3a67b 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
@@ -231,6 +231,7 @@ struct symbol *dso__find_symbol(struct dso *dso, enum map_type type, | |||
231 | u64 addr); | 231 | u64 addr); |
232 | struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, | 232 | struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, |
233 | const char *name); | 233 | const char *name); |
234 | struct symbol *symbol__next_by_name(struct symbol *sym); | ||
234 | 235 | ||
235 | struct symbol *dso__first_symbol(struct dso *dso, enum map_type type); | 236 | struct symbol *dso__first_symbol(struct dso *dso, enum map_type type); |
236 | struct symbol *dso__next_symbol(struct symbol *sym); | 237 | struct symbol *dso__next_symbol(struct symbol *sym); |